From 7bc86f23ec394438b5f391315b1f4e116eac2063 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Mon, 23 Sep 2019 10:46:25 +0100
Subject: [PATCH 01/94] Wait longer for leader failure in logs test (#46958)
`testLogsWarningPeriodicallyIfClusterNotFormed` simulates a leader failure and
waits for long enough that a failing leader check is scheduled. However, it does
not wait for the failing check to actually fail, which requires another two
actions and therefore might take up to 200ms more. Unlucky timing would result
in this test failing, for instance:
./gradlew ':server:test' \
--tests "org.elasticsearch.cluster.coordination.CoordinatorTests.testLogsWarningPeriodicallyIfClusterNotFormed" \
-Dtests.jvm.argline="-Dhppc.bitmixer=DETERMINISTIC" \
-Dtests.seed=F18CDD0EBEB5653:E9BC1A8B062E697A
This commit adds the extra delay needed for the leader failure to complete as
expected.
Fixes #46920
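For context, a back-of-the-envelope sketch of the timing arithmetic (the constants
below are assumptions based on the defaults the diff names; the test itself uses
`defaultMillis(...)` and `DEFAULT_DELAY_VARIABILITY`):

[source,java]
----
// Worst-case wait before every node has become CANDIDATE: any in-flight check
// must time out, the next check must be scheduled and sent, and the
// disconnection response must come back (the two extra actions).
long leaderCheckTimeoutMillis = 10_000; // assumed default of LEADER_CHECK_TIMEOUT_SETTING
long leaderCheckIntervalMillis = 1_000; // assumed default of LEADER_CHECK_INTERVAL_SETTING
long delayVariabilityMillis = 100;      // assumed DEFAULT_DELAY_VARIABILITY

long waitMillis = leaderCheckTimeoutMillis
    + leaderCheckIntervalMillis
    + 2 * delayVariabilityMillis;       // the extra "up to 200ms" mentioned above
----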
---
.../cluster/coordination/CoordinatorTests.java | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java
index 9a0238960b5..f968f6f4742 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java
@@ -1216,9 +1216,15 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
clusterNode.disconnect();
}
- cluster.runFor(defaultMillis(LEADER_CHECK_INTERVAL_SETTING) + defaultMillis(LEADER_CHECK_TIMEOUT_SETTING),
+ cluster.runFor(defaultMillis(LEADER_CHECK_TIMEOUT_SETTING) // to wait for any in-flight check to time out
+ + defaultMillis(LEADER_CHECK_INTERVAL_SETTING) // to wait for the next check to be sent
+ + 2 * DEFAULT_DELAY_VARIABILITY, // to send the failing check and receive the disconnection response
"waiting for leader failure");
+ for (final ClusterNode clusterNode : cluster.clusterNodes) {
+ assertThat(clusterNode.getId() + " is CANDIDATE", clusterNode.coordinator.getMode(), is(CANDIDATE));
+ }
+
for (int i = scaledRandomIntBetween(1, 10); i >= 0; i--) {
final MockLogAppender mockLogAppender = new MockLogAppender();
try {
From ef0b75765b0820baa0afa4cd5a4e0117a49afce9 Mon Sep 17 00:00:00 2001
From: Mark Vieira
Date: Tue, 28 May 2019 17:52:35 -0700
Subject: [PATCH 02/94] Add explicit build flag for experimenting with test
execution cacheability (#42649)
* Add build flag for ignoring random test seed as task input
* Fix checkstyle violations
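The mechanism is easiest to see from the call site. A hedged usage sketch (the
wiring below is an assumption for illustration; the plugin does the equivalent in
Groovy in `BuildPlugin.groovy`):

[source,java]
----
import org.gradle.api.tasks.testing.Test;

// The provider renders its map as plain -D JVM arguments at execution time, so
// property values never enter the build cache key; only the property *names*
// do, via the @Input-annotated getPropertyNames() in the new class below.
class Example {
    static void wireNonInputProperties(Test test) {
        SystemPropertyCommandLineArgumentProvider nonInputProperties =
            new SystemPropertyCommandLineArgumentProvider();
        // An absolute path like this would break cache relocatability if it
        // were tracked as a regular input (value is a made-up example):
        nonInputProperties.systemProperty("gradle.user.home", "/home/ci/.gradle");
        test.getJvmArgumentProviders().add(nonInputProperties);
    }
}
----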
---
.../elasticsearch/gradle/BuildPlugin.groovy | 21 ++------
...emPropertyCommandLineArgumentProvider.java | 30 +++++++++++
.../testfixtures/TestFixturesPlugin.java | 53 +++++++++++--------
3 files changed, 63 insertions(+), 41 deletions(-)
create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/SystemPropertyCommandLineArgumentProvider.java
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 9afbd436400..595bd173730 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -843,7 +843,7 @@ class BuildPlugin implements Plugin {
}
test.jvmArgumentProviders.add(nonInputProperties)
- test.extensions.getByType(ExtraPropertiesExtension).set('nonInputProperties', nonInputProperties)
+ test.extensions.add('nonInputProperties', nonInputProperties)
test.executable = "${ext.get('runtimeJavaHome')}/bin/java"
test.workingDir = project.file("${project.buildDir}/testrun/${test.name}")
@@ -865,7 +865,8 @@ class BuildPlugin implements Plugin {
}
// we use './temp' since this is per JVM and tests are forbidden from writing to CWD
- test.systemProperties 'java.io.tmpdir': './temp',
+ test.systemProperties 'gradle.dist.lib': new File(project.class.location.toURI()).parent,
+ 'java.io.tmpdir': './temp',
'java.awt.headless': 'true',
'tests.gradle': 'true',
'tests.artifact': project.name,
@@ -881,7 +882,6 @@ class BuildPlugin implements Plugin {
}
// don't track these as inputs since they contain absolute paths and break cache relocatability
- nonInputProperties.systemProperty('gradle.dist.lib', new File(project.class.location.toURI()).parent)
nonInputProperties.systemProperty('gradle.worker.jar', "${project.gradle.getGradleUserHomeDir()}/caches/${project.gradle.gradleVersion}/workerMain/gradle-worker.jar")
nonInputProperties.systemProperty('gradle.user.home', project.gradle.getGradleUserHomeDir())
@@ -1007,19 +1007,4 @@ class BuildPlugin implements Plugin {
})
}
}
-
- private static class SystemPropertyCommandLineArgumentProvider implements CommandLineArgumentProvider {
- private final Map systemProperties = [:]
-
- void systemProperty(String key, Object value) {
- systemProperties.put(key, value)
- }
-
- @Override
- Iterable asArguments() {
- return systemProperties.collect { key, value ->
- "-D${key}=${value.toString()}".toString()
- }
- }
- }
}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/SystemPropertyCommandLineArgumentProvider.java b/buildSrc/src/main/java/org/elasticsearch/gradle/SystemPropertyCommandLineArgumentProvider.java
new file mode 100644
index 00000000000..7e808724035
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/SystemPropertyCommandLineArgumentProvider.java
@@ -0,0 +1,30 @@
+package org.elasticsearch.gradle;
+
+import org.gradle.api.tasks.Input;
+import org.gradle.process.CommandLineArgumentProvider;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+public class SystemPropertyCommandLineArgumentProvider implements CommandLineArgumentProvider {
+ private final Map<String, Object> systemProperties = new LinkedHashMap<>();
+
+ public void systemProperty(String key, Object value) {
+ systemProperties.put(key, value);
+ }
+
+ @Override
+ public Iterable<String> asArguments() {
+ return systemProperties.entrySet()
+ .stream()
+ .map(entry -> "-D" + entry.getKey() + "=" + entry.getValue())
+ .collect(Collectors.toList());
+ }
+
+ // Track system property keys as an input so our build cache key will change if we add properties but values are still ignored
+ @Input
+ public Iterable<String> getPropertyNames() {
+ return systemProperties.keySet();
+ }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java
index 81b431772c2..556e938875e 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java
@@ -22,7 +22,9 @@ import com.avast.gradle.dockercompose.ComposeExtension;
import com.avast.gradle.dockercompose.DockerComposePlugin;
import com.avast.gradle.dockercompose.tasks.ComposeUp;
import org.elasticsearch.gradle.OS;
+import org.elasticsearch.gradle.SystemPropertyCommandLineArgumentProvider;
import org.elasticsearch.gradle.precommit.TestingConventionsTasks;
+import org.gradle.api.Action;
import org.gradle.api.DefaultTask;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
@@ -142,7 +144,8 @@ public class TestFixturesPlugin implements Plugin {
configureServiceInfoForTask(
task,
fixtureProject,
- task::systemProperty
+ (name, host) ->
+ task.getExtensions().getByType(SystemPropertyCommandLineArgumentProvider.class).systemProperty(name, host)
);
task.dependsOn(fixtureProject.getTasks().getByName("postProcessFixture"));
})
@@ -165,28 +168,32 @@ public class TestFixturesPlugin implements Plugin {
private void configureServiceInfoForTask(Task task, Project fixtureProject, BiConsumer consumer) {
// Configure ports for the tests as system properties.
// We only know these at execution time so we need to do it in doFirst
- task.doFirst(theTask ->
- fixtureProject.getExtensions().getByType(ComposeExtension.class).getServicesInfos()
- .forEach((service, infos) -> {
- infos.getTcpPorts()
- .forEach((container, host) -> {
- String name = "test.fixtures." + service + ".tcp." + container;
- theTask.getLogger().info("port mapping property: {}={}", name, host);
- consumer.accept(
- name,
- host
- );
- });
- infos.getUdpPorts()
- .forEach((container, host) -> {
- String name = "test.fixtures." + service + ".udp." + container;
- theTask.getLogger().info("port mapping property: {}={}", name, host);
- consumer.accept(
- name,
- host
- );
- });
- })
+ task.doFirst(new Action<Task>() {
+ @Override
+ public void execute(Task theTask) {
+ fixtureProject.getExtensions().getByType(ComposeExtension.class).getServicesInfos()
+ .forEach((service, infos) -> {
+ infos.getTcpPorts()
+ .forEach((container, host) -> {
+ String name = "test.fixtures." + service + ".tcp." + container;
+ theTask.getLogger().info("port mapping property: {}={}", name, host);
+ consumer.accept(
+ name,
+ host
+ );
+ });
+ infos.getUdpPorts()
+ .forEach((container, host) -> {
+ String name = "test.fixtures." + service + ".udp." + container;
+ theTask.getLogger().info("port mapping property: {}={}", name, host);
+ consumer.accept(
+ name,
+ host
+ );
+ });
+ });
+ }
+ }
);
}
From 5fd7505efc4d5e69804b49cf945e1eace9ae5e44 Mon Sep 17 00:00:00 2001
From: Alpar Torok
Date: Mon, 23 Sep 2019 12:48:47 +0300
Subject: [PATCH 03/94] Testfixtures allow a single service only (#46780)
This PR adds some restrictions around testfixtures to make sure the same service (as defined in docker-compose.yml) is not shared between multiple projects.
Sharing would break running with --parallel.
Projects can still share fixtures as long as each has its own service within.
This is still useful to share some of the setup and configuration code of the fixture.
Projects now also have to specify a service name when calling useFixture to refer to a specific service.
If this is not the case, all services will be claimed and the fixture can't be shared.
For this reason, fixtures have to explicitly specify if they use themselves (fixture and tests in the same project).
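A minimal sketch of the claim bookkeeping this describes (mirroring
`isServiceRequired` in the diff below; the map contents are a made-up example):

[source,java]
----
import java.util.HashMap;
import java.util.Map;

// useFixture(path) claims every service of a fixture under the path itself;
// useFixture(path, service) claims only the "path::service" key. A service is
// required if either kind of claim exists; two projects claiming the same key
// is a build failure.
class ServiceClaims {
    final Map<String, String> serviceToProjectUseMap = new HashMap<>();

    boolean isServiceRequired(String serviceName, String fixtureProject) {
        return serviceToProjectUseMap.containsKey(fixtureProject)                       // whole fixture
            || serviceToProjectUseMap.containsKey(fixtureProject + "::" + serviceName); // one service
    }
}
----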
---
.../testfixtures/TestFixtureExtension.java | 59 +++++++++++++++++++
.../testfixtures/TestFixturesPlugin.java | 19 ++++--
distribution/docker/build.gradle | 2 +
plugins/repository-hdfs/build.gradle | 2 +-
plugins/repository-s3/build.gradle | 3 +
x-pack/qa/kerberos-tests/build.gradle | 3 +-
x-pack/qa/oidc-op-tests/build.gradle | 2 +-
x-pack/qa/openldap-tests/build.gradle | 2 +-
.../third-party/active-directory/build.gradle | 2 +-
9 files changed, 83 insertions(+), 11 deletions(-)
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixtureExtension.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixtureExtension.java
index b4ddcf0bed1..1521b797133 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixtureExtension.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixtureExtension.java
@@ -18,20 +18,65 @@
*/
package org.elasticsearch.gradle.testfixtures;
+import org.gradle.api.GradleException;
import org.gradle.api.NamedDomainObjectContainer;
import org.gradle.api.Project;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
public class TestFixtureExtension {
private final Project project;
final NamedDomainObjectContainer fixtures;
+ final Map<String, String> serviceToProjectUseMap = new HashMap<>();
public TestFixtureExtension(Project project) {
this.project = project;
this.fixtures = project.container(Project.class);
}
+ public void useFixture() {
+ useFixture(this.project.getPath());
+ }
+
public void useFixture(String path) {
+ addFixtureProject(path);
+ serviceToProjectUseMap.put(path, this.project.getPath());
+ }
+
+ public void useFixture(String path, String serviceName) {
+ addFixtureProject(path);
+ String key = getServiceNameKey(path, serviceName);
+ serviceToProjectUseMap.put(key, this.project.getPath());
+
+ Optional<String> otherProject = this.findOtherProjectUsingService(key);
+ if (otherProject.isPresent()) {
+ throw new GradleException(
+ "Projects " + otherProject.get() + " and " + this.project.getPath() + " both claim the "+ serviceName +
+ " service defined in the docker-compose.yml of " + path + "This is not supported because it breaks " +
+ "running in parallel. Configure dedicated services for each project and use those instead."
+ );
+ }
+ }
+
+ private String getServiceNameKey(String fixtureProjectPath, String serviceName) {
+ return fixtureProjectPath + "::" + serviceName;
+ }
+
+ private Optional<String> findOtherProjectUsingService(String serviceName) {
+ return this.project.getRootProject().getAllprojects().stream()
+ .filter(p -> p.equals(this.project) == false)
+ .filter(p -> p.getExtensions().findByType(TestFixtureExtension.class) != null)
+ .map(project -> project.getExtensions().getByType(TestFixtureExtension.class))
+ .flatMap(ext -> ext.serviceToProjectUseMap.entrySet().stream())
+ .filter(entry -> entry.getKey().equals(serviceName))
+ .map(Map.Entry::getValue)
+ .findAny();
+ }
+
+ private void addFixtureProject(String path) {
Project fixtureProject = this.project.findProject(path);
if (fixtureProject == null) {
throw new IllegalArgumentException("Could not find test fixture " + fixtureProject);
@@ -42,6 +87,20 @@ public class TestFixtureExtension {
);
}
fixtures.add(fixtureProject);
+ // Check for exclusive access
+ Optional<String> otherProject = this.findOtherProjectUsingService(path);
+ if (otherProject.isPresent()) {
+ throw new GradleException("Projects " + otherProject.get() + " and " + this.project.getPath() + " both " +
+ "claim all services from " + path + ". This is not supported because it breaks running in parallel. " +
+ "Configure specific services in docker-compose.yml for each and add the service name to `useFixture`"
+ );
+ }
}
+ boolean isServiceRequired(String serviceName, String fixtureProject) {
+ if (serviceToProjectUseMap.containsKey(fixtureProject)) {
+ return true;
+ }
+ return serviceToProjectUseMap.containsKey(getServiceNameKey(fixtureProject, serviceName));
+ }
}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java
index 556e938875e..93c91cbee51 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java
@@ -20,6 +20,7 @@ package org.elasticsearch.gradle.testfixtures;
import com.avast.gradle.dockercompose.ComposeExtension;
import com.avast.gradle.dockercompose.DockerComposePlugin;
+import com.avast.gradle.dockercompose.ServiceInfo;
import com.avast.gradle.dockercompose.tasks.ComposeUp;
import org.elasticsearch.gradle.OS;
import org.elasticsearch.gradle.SystemPropertyCommandLineArgumentProvider;
@@ -58,9 +59,6 @@ public class TestFixturesPlugin implements Plugin {
ext.set("testFixturesDir", testfixturesDir);
if (project.file(DOCKER_COMPOSE_YML).exists()) {
- // the project that defined a test fixture can also use it
- extension.fixtures.add(project);
-
Task buildFixture = project.getTasks().create("buildFixture");
Task pullFixture = project.getTasks().create("pullFixture");
Task preProcessFixture = project.getTasks().create("preProcessFixture");
@@ -106,6 +104,7 @@ public class TestFixturesPlugin implements Plugin {
configureServiceInfoForTask(
postProcessFixture,
project,
+ false,
(name, port) -> postProcessFixture.getExtensions()
.getByType(ExtraPropertiesExtension.class).set(name, port)
);
@@ -144,6 +143,7 @@ public class TestFixturesPlugin implements Plugin {
configureServiceInfoForTask(
task,
fixtureProject,
+ true,
(name, host) ->
task.getExtensions().getByType(SystemPropertyCommandLineArgumentProvider.class).systemProperty(name, host)
);
@@ -165,14 +165,23 @@ public class TestFixturesPlugin implements Plugin {
);
}
- private void configureServiceInfoForTask(Task task, Project fixtureProject, BiConsumer consumer) {
+ private void configureServiceInfoForTask(
+ Task task, Project fixtureProject, boolean enableFilter, BiConsumer<String, Integer> consumer
+ ) {
// Configure ports for the tests as system properties.
// We only know these at execution time so we need to do it in doFirst
+ TestFixtureExtension extension = task.getProject().getExtensions().getByType(TestFixtureExtension.class);
task.doFirst(new Action<Task>() {
@Override
public void execute(Task theTask) {
fixtureProject.getExtensions().getByType(ComposeExtension.class).getServicesInfos()
- .forEach((service, infos) -> {
+ .entrySet().stream()
+ .filter(entry -> enableFilter == false ||
+ extension.isServiceRequired(entry.getKey(), fixtureProject.getPath())
+ )
+ .forEach(entry -> {
+ String service = entry.getKey();
+ ServiceInfo infos = entry.getValue();
infos.getTcpPorts()
.forEach((container, host) -> {
String name = "test.fixtures." + service + ".tcp." + container;
diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle
index 7bf973e7edc..e4f0a04d4e9 100644
--- a/distribution/docker/build.gradle
+++ b/distribution/docker/build.gradle
@@ -6,6 +6,8 @@ import org.elasticsearch.gradle.testfixtures.TestFixturesPlugin
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.test.fixtures'
+testFixtures.useFixture()
+
configurations {
dockerPlugins
dockerSource
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index 43b58ea7f39..7d849856aa8 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -37,7 +37,7 @@ versions << [
'hadoop2': '2.8.1'
]
-testFixtures.useFixture ":test:fixtures:krb5kdc-fixture"
+testFixtures.useFixture ":test:fixtures:krb5kdc-fixture", "hdfs"
configurations {
hdfsFixture
diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle
index 99eb86a4e00..ab4597cf7f4 100644
--- a/plugins/repository-s3/build.gradle
+++ b/plugins/repository-s3/build.gradle
@@ -146,6 +146,9 @@ task thirdPartyTest(type: Test) {
if (useFixture) {
apply plugin: 'elasticsearch.test.fixtures'
+
+ testFixtures.useFixture()
+
task writeDockerFile {
File minioDockerfile = new File("${project.buildDir}/minio-docker/Dockerfile")
outputs.file(minioDockerfile)
diff --git a/x-pack/qa/kerberos-tests/build.gradle b/x-pack/qa/kerberos-tests/build.gradle
index 81e5d746cc7..3b6530a69d8 100644
--- a/x-pack/qa/kerberos-tests/build.gradle
+++ b/x-pack/qa/kerberos-tests/build.gradle
@@ -1,13 +1,12 @@
import java.nio.file.Path
import java.nio.file.Paths
-import java.nio.file.Files
apply plugin: 'elasticsearch.testclusters'
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'
apply plugin: 'elasticsearch.test.fixtures'
-testFixtures.useFixture ":test:fixtures:krb5kdc-fixture"
+testFixtures.useFixture ":test:fixtures:krb5kdc-fixture", "peppa"
dependencies {
testCompile project(':x-pack:plugin:core')
diff --git a/x-pack/qa/oidc-op-tests/build.gradle b/x-pack/qa/oidc-op-tests/build.gradle
index 9328447597e..13f2ef4927d 100644
--- a/x-pack/qa/oidc-op-tests/build.gradle
+++ b/x-pack/qa/oidc-op-tests/build.gradle
@@ -10,7 +10,7 @@ dependencies {
testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
testCompile project(path: xpackModule('security'), configuration: 'testArtifacts')
}
-testFixtures.useFixture ":x-pack:test:idp-fixture"
+testFixtures.useFixture ":x-pack:test:idp-fixture", "oidc-provider"
String ephemeralPort;
task setupPorts {
diff --git a/x-pack/qa/openldap-tests/build.gradle b/x-pack/qa/openldap-tests/build.gradle
index 9fc5a9b3b31..805023b5413 100644
--- a/x-pack/qa/openldap-tests/build.gradle
+++ b/x-pack/qa/openldap-tests/build.gradle
@@ -7,7 +7,7 @@ dependencies {
testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
}
-testFixtures.useFixture ":x-pack:test:idp-fixture"
+testFixtures.useFixture ":x-pack:test:idp-fixture", "openldap"
Project idpFixtureProject = xpackProject("test:idp-fixture")
String outputDir = "${project.buildDir}/generated-resources/${project.name}"
diff --git a/x-pack/qa/third-party/active-directory/build.gradle b/x-pack/qa/third-party/active-directory/build.gradle
index 2d4af2b46bb..b76b25b08ea 100644
--- a/x-pack/qa/third-party/active-directory/build.gradle
+++ b/x-pack/qa/third-party/active-directory/build.gradle
@@ -15,7 +15,7 @@ processTestResources {
compileTestJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked"
-// we have to repeat these patterns because the security test resources are effectively in the src of this project
+// we have to repeat these patterns because the security test resources are effectively in the src of this project
forbiddenPatterns {
exclude '**/*.key'
exclude '**/*.p12'
From f06aa0c6c00d31578c44f0bdc122b132e76387df Mon Sep 17 00:00:00 2001
From: Henning Andersen <33268011+henningandersen@users.noreply.github.com>
Date: Mon, 23 Sep 2019 13:31:41 +0200
Subject: [PATCH 04/94] Fix G1 GC default IHOP (#46169)
G1 GC was set up to use an `InitiatingHeapOccupancyPercent` of 75. This
could leave used memory at a very high level for an extended duration,
triggering the real memory circuit breaker even at low activity levels.
The value is a threshold for old generation usage relative to total heap
size and thus it should leave room for the new generation. The default in
G1 is to allow up to 60 percent for the new generation, which could mean that the
threshold was effectively at 135% heap usage. GC would still kick in, of course, and
eventually enough mixed collections would take place that adaptive adjustment
of the IHOP kicks in.
The JVM has adaptive setting of the IHOP, but this does not kick in
until it has sampled a few collections. A newly started, relatively
quiet server with primarily new generation activity could thus
experience heap above 95% frequently for a duration.
The changes here are two-fold:
1. Use 30% default for IHOP (the JVM default of 45 could still mean a
105% heap usage threshold and did not reliably keep heap usage from hitting
the circuit breaker at low activity)
2. Set G1ReservePercent=25. This is used by the adaptive IHOP mechanism,
meaning old/mixed GC should kick in no later than at 75% heap. This
ensures IHOP stays compatible with the real memory circuit breaker also
after being adjusted by adaptive IHOP.
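A quick check of the arithmetic above (a sketch; the 60% figure is G1's default
ceiling for the new generation, and 95% is the real memory circuit breaker's
default limit):

[source,java]
----
// IHOP is a threshold on old-generation occupancy relative to total heap, so
// the effective whole-heap threshold is roughly IHOP + max new-gen share.
class IhopMath {
    public static void main(String[] args) {
        double maxNewGenShare = 0.60;              // assumed G1 default
        System.out.println(0.75 + maxNewGenShare); // 1.35 -> the "135%" above
        System.out.println(0.45 + maxNewGenShare); // 1.05 -> the "105%" above
        System.out.println(0.30 + maxNewGenShare); // 0.90 -> below the 95% breaker
    }
}
----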
---
distribution/src/config/jvm.options | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options
index 699664cb254..075106100ea 100644
--- a/distribution/src/config/jvm.options
+++ b/distribution/src/config/jvm.options
@@ -43,7 +43,8 @@
# 10-:-XX:-UseConcMarkSweepGC
# 10-:-XX:-UseCMSInitiatingOccupancyOnly
# 10-:-XX:+UseG1GC
-# 10-:-XX:InitiatingHeapOccupancyPercent=75
+# 10-:-XX:G1ReservePercent=25
+# 10-:-XX:InitiatingHeapOccupancyPercent=30
## DNS cache policy
# cache ttl in seconds for positive DNS lookups noting that this overrides the
From 2da040601be092f3817317ad4cbe4a131e3f29db Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 23 Sep 2019 15:01:47 +0200
Subject: [PATCH 05/94] Fix Bug in Snapshot Status Response Timestamps (#46919)
(#46970)
Fixing a corner case where the snapshot total time calculation was off when
getting the `SnapshotStatus` of an in-progress snapshot.
Closes #46913
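The fix boils down to one computation (a self-contained sketch; the names follow
the diff below):

[source,java]
----
class SnapshotTimes {
    // endTime is 0L while a snapshot is still running. Previously the duration
    // was always endTime - startTime, which went negative for in-progress
    // snapshots; now the current time stands in for the missing end timestamp.
    static long snapshotDurationMillis(long startTime, long endTime, long nowMillis) {
        long time = (endTime == 0 ? nowMillis : endTime) - startTime;
        assert time >= 0 : "negative total time [" + time + "]";
        return time;
    }
}
----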
---
.../admin/cluster/snapshots/status/SnapshotStats.java | 4 ++++
.../admin/cluster/snapshots/status/SnapshotStatus.java | 1 +
.../snapshots/status/TransportSnapshotsStatusAction.java | 7 ++++++-
.../admin/cluster/snapshots/status/SnapshotStatsTests.java | 5 +++--
4 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java
index 16410eefbf0..c242d01ed74 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java
@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.cluster.snapshots.status;
import org.elasticsearch.Version;
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@@ -70,6 +71,7 @@ public class SnapshotStats implements Writeable, ToXContentObject {
long incrementalSize, long totalSize, long processedSize) {
this.startTime = startTime;
this.time = time;
+ assert time >= 0 : "Tried to initialize snapshot stats with negative total time [" + time + "]";
this.incrementalFileCount = incrementalFileCount;
this.totalFileCount = totalFileCount;
this.processedFileCount = processedFileCount;
@@ -323,6 +325,8 @@ public class SnapshotStats implements Writeable, ToXContentObject {
// Update duration
time = endTime - startTime;
}
+ assert time >= 0
+ : "Update with [" + Strings.toString(stats) + "][" + updateTimestamps + "] resulted in negative total time [" + time + "]";
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java
index 293a14d731b..9e4318bf1e8 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java
@@ -103,6 +103,7 @@ public class SnapshotStatus implements ToXContentObject, Writeable {
this.shards = Objects.requireNonNull(shards);
this.includeGlobalState = includeGlobalState;
shardsStats = new SnapshotShardsStats(shards);
+ assert time >= 0 : "time must be >= 0 but received [" + time + "]";
updateShardStats(startTime, time);
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
index 063f051b136..1d0c3ed4d8c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
@@ -238,9 +238,14 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction
+ final long endTime = snapshotInfo.endTime();
+ assert endTime >= startTime || (endTime == 0L && snapshotInfo.state().completed() == false)
+ : "Inconsistent timestamps found in SnapshotInfo [" + snapshotInfo + "]";
builder.add(new SnapshotStatus(new Snapshot(repositoryName, snapshotId), state,
Collections.unmodifiableList(shardStatusBuilder), snapshotInfo.includeGlobalState(),
- startTime, snapshotInfo.endTime() - startTime));
+ startTime,
+ // Use current time to calculate overall runtime for in-progress snapshots that have endTime == 0
+ (endTime == 0 ? threadPool.absoluteTimeInMillis() : endTime) - startTime));
}
}
}
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatsTests.java
index 2822a9661fd..76f35bcdcc3 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatsTests.java
@@ -28,8 +28,9 @@ public class SnapshotStatsTests extends AbstractXContentTestCase
@Override
protected SnapshotStats createTestInstance() {
- long startTime = randomNonNegativeLong();
- long time = randomNonNegativeLong();
+ // Using less than half of Long.MAX_VALUE for random time values to avoid long overflow in tests that add the two time values
+ long startTime = randomLongBetween(0, Long.MAX_VALUE / 2 - 1);
+ long time = randomLongBetween(0, Long.MAX_VALUE / 2 - 1);
int incrementalFileCount = randomIntBetween(0, Integer.MAX_VALUE);
int totalFileCount = randomIntBetween(0, Integer.MAX_VALUE);
int processedFileCount = randomIntBetween(0, Integer.MAX_VALUE);
From b09aba4c55eeb2a3a3c8cad1705d4d98b84f2ef1 Mon Sep 17 00:00:00 2001
From: James Rodewig
Date: Mon, 23 Sep 2019 09:18:01 -0400
Subject: [PATCH 06/94] [DOCS] Reformat rollover index API docs (#46778)
---
docs/reference/indices/create-index.asciidoc | 2 +
.../reference/indices/rollover-index.asciidoc | 237 +++++++++++++-----
2 files changed, 174 insertions(+), 65 deletions(-)
diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc
index fef6ff96a52..afb7ab91232 100644
--- a/docs/reference/indices/create-index.asciidoc
+++ b/docs/reference/indices/create-index.asciidoc
@@ -35,6 +35,7 @@ creating an index, you can specify the following:
--
(Optional, string) Name of the index you wish to create.
+// tag::index-name-reqs[]
Index names must meet the following criteria:
- Lowercase only
@@ -43,6 +44,7 @@ Index names must meet the following criteria:
- Cannot start with `-`, `_`, `+`
- Cannot be `.` or `..`
- Cannot be longer than 255 bytes (note it is bytes, so multi-byte characters will count towards the 255 limit faster)
+// end::index-name-reqs[]
--
diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc
index 8372de55024..294c38790e1 100644
--- a/docs/reference/indices/rollover-index.asciidoc
+++ b/docs/reference/indices/rollover-index.asciidoc
@@ -1,5 +1,37 @@
[[indices-rollover-index]]
-=== Rollover Index
+=== Rollover index API
+++++
+Rollover index
+++++
+
+Assigns an <> to a new index
+when the alias's existing index meets a condition you provide.
+
+[source,console]
+----
+POST /alias1/_rollover/twitter
+{
+ "conditions": {
+ "max_age": "7d",
+ "max_docs": 1000,
+ "max_size": "5gb"
+ }
+}
+----
+// TEST[s/^/PUT my_old_index_name\nPUT my_old_index_name\/_alias\/alias1\n/]
+
+
+[[rollover-index-api-request]]
+==== {api-request-title}
+
+
+`POST /<alias>/_rollover/<target-index>`
+
+`POST /<alias>/_rollover/`
+
+
+[[rollover-index-api-desc]]
+==== {api-description-title}
The rollover index API rolls an <> to a new index when
the existing index meets a condition you provide. You can use this API to retire
@@ -24,17 +56,102 @@ from the original (rolled-over) index.
In this scenario, the write index will have its rollover alias' `is_write_index` set to `false`, while the newly created index
will now have the rollover alias pointing to it as the write index with `is_write_index` as `true`.
-The available conditions are:
-[[index-rollover-conditions]]
-.`conditions` parameters
-[options="header"]
-|===
-| Name | Description
-| max_age | The maximum age of the index
-| max_docs | The maximum number of documents the index should contain. This does not add documents multiple times for replicas
-| max_size | The maximum estimated size of the primary shard of the index
-|===
+[[rollover-wait-active-shards]]
+===== Wait for active shards
+
+Because the rollover operation creates a new index to rollover to, the
+<> setting on
+index creation applies to the rollover action.
+
+
+[[rollover-index-api-path-params]]
+==== {api-path-parms-title}
+
+`<alias>`::
+(Required, string)
+Name of the existing index alias
+to assign to the target index.
+
+
+`<target-index>`::
++
+--
+(Optional*, string)
+Name of the target index to create and assign the index alias.
+
+include::{docdir}/indices/create-index.asciidoc[tag=index-name-reqs]
+
+*This parameter is not required
+if the alias is assigned to an index name that ends with `-` and a number,
+such as `logs-000001`.
+In this case,
+the name of the new index follows the same pattern,
+incrementing the number.
+For example,
+`logs-000001` increments to `logs-000002`.
+This number is zero-padded with a length of 6,
+regardless of the prior index name.
+
+If the existing index for the alias does not match this pattern,
+this parameter is required.
+--
+
+
+[[rollover-index-api-query-params]]
+==== {api-query-parms-title}
+
+`dry_run`::
+(Optional, boolean)
+If `true`,
+the request checks whether the index matches provided conditions
+but does not perform a rollover.
+Defaults to `false`.
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=include-type-name]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+
+
+[[rollover-index-api-request-body]]
+==== {api-request-body-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=aliases]
+
+`conditions`::
++
+--
+(Required, object)
+Set of conditions the index alias's existing index must meet to roll over.
+
+Parameters include:
+
+`max_age`::
+(Optional, <>)
+Maximum age of the index.
+
+`max_docs`::
+(Optional, integer)
+Maximum number of documents in the index.
+This number does *not* include documents in replica shards.
+
+`max_size`::
+(Optional, <>)
+Maximum estimated size of the primary shard of the index.
+--
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=mappings]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=settings]
+
+
+[[rollover-index-api-example]]
+==== {api-examples-title}
+
+[[rollover-index-basic-ex]]
+===== Basic example
[source,console]
--------------------------------------------------
@@ -63,7 +180,7 @@ POST /logs_write/_rollover <2>
contains 1,000 or more documents, or has an index size at least around 5GB, then the `logs-000002` index is created
and the `logs_write` alias is updated to point to `logs-000002`.
-The above request might return the following response:
+The API returns the following response:
[source,console-result]
--------------------------------------------------
@@ -86,8 +203,41 @@ The above request might return the following response:
<2> Whether the rollover was dry run.
<3> The result of each condition.
-[float]
-==== Naming the new index
+[[rollover-index-settings-ex]]
+===== Specify settings for the target index
+
+The settings, mappings, and aliases for the new index are taken from any
+matching <>. Additionally, you can specify
+`settings`, `mappings`, and `aliases` in the body of the request, just like the
+<> API. Values specified in the request
+override any values set in matching index templates. For example, the following
+`rollover` request overrides the `index.number_of_shards` setting:
+
+[source,console]
+--------------------------------------------------
+PUT /logs-000001
+{
+ "aliases": {
+ "logs_write": {}
+ }
+}
+
+POST /logs_write/_rollover
+{
+ "conditions" : {
+ "max_age": "7d",
+ "max_docs": 1000,
+ "max_size": "5gb"
+ },
+ "settings": {
+ "index.number_of_shards": 2
+ }
+}
+--------------------------------------------------
+
+
+[[rollover-index-specify-index-ex]]
+===== Specify a target index name
If the name of the existing index ends with `-` and a number -- e.g.
`logs-000001` -- then the name of the new index will follow the same pattern,
@@ -110,8 +260,9 @@ POST /my_alias/_rollover/my_new_index_name
--------------------------------------------------
// TEST[s/^/PUT my_old_index_name\nPUT my_old_index_name\/_alias\/my_alias\n/]
-[float]
-==== Using date math with the rollover API
+
+[[_using_date_math_with_the_rollover_api]]
+===== Use date math with a rollover
It can be useful to use <> to name the
rollover index according to the date that the index rolled over, e.g.
@@ -187,53 +338,15 @@ GET /%3Clogs-%7Bnow%2Fd%7D-*%3E%2C%3Clogs-%7Bnow%2Fd-1d%7D-*%3E%2C%3Clogs-%7Bnow
// TEST[continued]
// TEST[s/now/2016.10.31||/]
-[float]
-==== Defining the new index
-The settings, mappings, and aliases for the new index are taken from any
-matching <>. Additionally, you can specify
-`settings`, `mappings`, and `aliases` in the body of the request, just like the
-<> API. Values specified in the request
-override any values set in matching index templates. For example, the following
-`rollover` request overrides the `index.number_of_shards` setting:
-
-[source,console]
---------------------------------------------------
-PUT /logs-000001
-{
- "aliases": {
- "logs_write": {}
- }
-}
-
-POST /logs_write/_rollover
-{
- "conditions" : {
- "max_age": "7d",
- "max_docs": 1000,
- "max_size": "5gb"
- },
- "settings": {
- "index.number_of_shards": 2
- }
-}
---------------------------------------------------
-
-[float]
-==== Dry run
+[[rollover-index-api-dry-run-ex]]
+===== Dry run
The rollover API supports `dry_run` mode, where request conditions can be
-checked without performing the actual rollover:
+checked without performing the actual rollover.
[source,console]
--------------------------------------------------
-PUT /logs-000001
-{
- "aliases": {
- "logs_write": {}
- }
-}
-
POST /logs_write/_rollover?dry_run
{
"conditions" : {
@@ -243,17 +356,11 @@ POST /logs_write/_rollover?dry_run
}
}
--------------------------------------------------
+// TEST[s/^/PUT logs-000001\nPUT logs-000001\/_alias\/logs_write\n/]
-[float]
-==== Wait For Active Shards
-
-Because the rollover operation creates a new index to rollover to, the
-<> setting on
-index creation applies to the rollover action as well.
[[indices-rollover-is-write-index]]
-[float]
-==== Write Index Alias Behavior
+===== Roll over a write index
The rollover alias when rolling over a write index that has `is_write_index` explicitly set to `true` is not
swapped during rollover actions. Since having an alias point to multiple indices is ambiguous in distinguishing
From d4d1182677ce9c2715b4064fed5c34b73a8a5021 Mon Sep 17 00:00:00 2001
From: Luca Cavanna
Date: Mon, 23 Sep 2019 17:00:37 +0200
Subject: [PATCH 07/94] update _common.json format (#46872)
The API specs now use an object for the documentation field. _common.json was not updated yet. This commit updates _common.json and its corresponding parser.
Closes #46744
Co-Authored-By: Tomas Della Vedova
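The parser requirement is simply "interpret `params`, skip any other top-level
object". A standalone illustration using Jackson's streaming API (an assumption
for illustration only; the real code goes through the `XContentParser` wrapper
shown in the diff below):

[source,java]
----
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

class CommonSpecSketch {
    // Collects the keys of the "params" object and skips everything else,
    // including the new top-level "documentation" object.
    static Set<String> parseGlobalParams(String json) throws IOException {
        Set<String> params = new HashSet<>();
        try (JsonParser parser = new JsonFactory().createParser(json)) {
            parser.nextToken(); // enter the root object
            while (parser.nextToken() != JsonToken.END_OBJECT) {
                String field = parser.getCurrentName();
                parser.nextToken(); // move to the field's value
                if ("params".equals(field)) {
                    while (parser.nextToken() != JsonToken.END_OBJECT) {
                        params.add(parser.getCurrentName());
                        parser.nextToken();    // each param's value is an object
                        parser.skipChildren();
                    }
                } else {
                    parser.skipChildren();     // e.g. "documentation"
                }
            }
        }
        return params;
    }
}
----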
---
.../resources/rest-api-spec/api/_common.json | 6 +-
.../restspec/ClientYamlSuiteRestSpec.java | 66 +++++++++++--------
.../restspec/ClientYamlSuiteRestApiTests.java | 45 +++++++++++++
3 files changed, 87 insertions(+), 30 deletions(-)
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/_common.json b/rest-api-spec/src/main/resources/rest-api-spec/api/_common.json
index 69a1f8fb8ce..1505db774f0 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/_common.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/_common.json
@@ -1,6 +1,8 @@
{
- "description": "Parameters that are accepted by all API endpoints.",
- "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html",
+ "documentation" : {
+ "description": "Parameters that are accepted by all API endpoints.",
+ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html"
+ },
"params": {
"pretty": {
"type": "boolean",
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java
index 70665ad5d9b..f0d1b13d98d 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java
@@ -18,6 +18,12 @@
*/
package org.elasticsearch.test.rest.yaml.restspec;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+
import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;
@@ -30,12 +36,6 @@ import java.util.Map;
import java.util.Set;
import java.util.stream.Stream;
-import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
-import org.elasticsearch.common.xcontent.NamedXContentRegistry;
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.json.JsonXContent;
-
/**
* Holds the specification used to turn {@code do} actions in the YAML suite into REST api calls.
*/
@@ -43,7 +43,7 @@ public class ClientYamlSuiteRestSpec {
private final Set globalParameters = new HashSet<>();
private final Map restApiMap = new HashMap<>();
- private ClientYamlSuiteRestSpec() {}
+ ClientYamlSuiteRestSpec() {}
private void addApi(ClientYamlSuiteRestApi restApi) {
ClientYamlSuiteRestApi previous = restApiMap.putIfAbsent(restApi.getName(), restApi);
@@ -99,27 +99,7 @@ public class ClientYamlSuiteRestSpec {
JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) {
String filename = jsonFile.getFileName().toString();
if (filename.equals("_common.json")) {
- String currentFieldName = null;
- while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
- if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
- currentFieldName = parser.currentName();
- } else if (parser.currentToken() == XContentParser.Token.START_OBJECT
- && "params".equals(currentFieldName)) {
- while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
- String param = parser.currentName();
- if (restSpec.globalParameters.contains(param)) {
- throw new IllegalArgumentException("Found duplicate global param [" + param + "]");
- }
- restSpec.globalParameters.add(param);
- parser.nextToken();
- if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
- throw new IllegalArgumentException("Expected params field in rest api definition to " +
- "contain an object");
- }
- parser.skipChildren();
- }
- }
- }
+ parseCommonSpec(parser, restSpec);
} else {
ClientYamlSuiteRestApi restApi = restApiParser.parse(jsonFile.toString(), parser);
String expectedApiName = filename.substring(0, filename.lastIndexOf('.'));
@@ -134,4 +114,34 @@ public class ClientYamlSuiteRestSpec {
throw new UncheckedIOException("Can't parse rest spec file: [" + jsonFile + "]", ex);
}
}
+
+ static void parseCommonSpec(XContentParser parser, ClientYamlSuiteRestSpec restSpec) throws IOException {
+ String currentFieldName = null;
+ parser.nextToken();
+ assert parser.currentToken() == XContentParser.Token.START_OBJECT;
+ while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+ if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ if ("params".equals(currentFieldName)) {
+ while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
+ String param = parser.currentName();
+ if (restSpec.globalParameters.contains(param)) {
+ throw new IllegalArgumentException("Found duplicate global param [" + param + "]");
+ }
+ restSpec.globalParameters.add(param);
+ parser.nextToken();
+ if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+ throw new IllegalArgumentException("Expected params field in rest api definition to " +
+ "contain an object");
+ }
+ parser.skipChildren();
+ }
+ } else {
+ parser.skipChildren();
+ }
+ }
+ }
+
+ }
}
diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiTests.java
index a3c6544137a..e2b9a4cddb4 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiTests.java
@@ -29,6 +29,18 @@ import java.util.List;
public class ClientYamlSuiteRestApiTests extends ESTestCase {
+ public void testParseCommonSpec() throws IOException {
+ XContentParser parser = createParser(YamlXContent.yamlXContent, COMMON_SPEC);
+ ClientYamlSuiteRestSpec restSpec = new ClientYamlSuiteRestSpec();
+ ClientYamlSuiteRestSpec.parseCommonSpec(parser, restSpec);
+ assertTrue(restSpec.isGlobalParameter("pretty"));
+ assertTrue(restSpec.isGlobalParameter("human"));
+ assertTrue(restSpec.isGlobalParameter("error_trace"));
+ assertTrue(restSpec.isGlobalParameter("source"));
+ assertTrue(restSpec.isGlobalParameter("filter_path"));
+ assertFalse(restSpec.isGlobalParameter("unknown"));
+ }
+
public void testPathMatching() throws IOException {
XContentParser parser = createParser(YamlXContent.yamlXContent, REST_SPEC_API);
ClientYamlSuiteRestApi restApi = new ClientYamlSuiteRestApiParser().parse("index.json", parser);
@@ -66,6 +78,39 @@ public class ClientYamlSuiteRestApiTests extends ESTestCase {
}
}
+ private static final String COMMON_SPEC = "{\n"+
+ " \"documentation\" : {\n"+
+ " \"description\": \"Parameters that are accepted by all API endpoints.\",\n"+
+ " \"url\": \"https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html\"\n"+
+ " },\n"+
+ " \"params\": {\n"+
+ " \"pretty\": {\n"+
+ " \"type\": \"boolean\",\n"+
+ " \"description\": \"Pretty format the returned JSON response.\",\n"+
+ " \"default\": false\n"+
+ " },\n"+
+ " \"human\": {\n"+
+ " \"type\": \"boolean\",\n"+
+ " \"description\": \"Return human readable values for statistics.\",\n"+
+ " \"default\": true\n"+
+ " },\n"+
+ " \"error_trace\": {\n"+
+ " \"type\": \"boolean\",\n"+
+ " \"description\": \"Include the stack trace of returned errors.\",\n"+
+ " \"default\": false\n"+
+ " },\n"+
+ " \"source\": {\n"+
+ " \"type\": \"string\",\n"+
+ " \"description\": \"The URL-encoded request definition." +
+ " Useful for libraries that do not accept a request body for non-POST requests.\"\n"+
+ " },\n"+
+ " \"filter_path\": {\n"+
+ " \"type\": \"list\",\n"+
+ " \"description\": \"A comma-separated list of filters used to reduce the response.\"\n"+
+ " }\n"+
+ " }\n"+
+ "}\n";
+
private static final String REST_SPEC_API = "{\n" +
" \"index\":{\n" +
" \"documentation\":{\n" +
From a815f8b930255b03e69dc422163a1086ec869256 Mon Sep 17 00:00:00 2001
From: Lisa Cawley
Date: Mon, 23 Sep 2019 08:45:01 -0700
Subject: [PATCH 08/94] [DOCS] Group rollup and transform content (#46882)
---
docs/reference/data-rollup-transform.asciidoc | 16 ++++++++++++
docs/reference/index.asciidoc | 4 +--
docs/reference/rollup/api-quickref.asciidoc | 11 +++++---
docs/reference/rollup/index.asciidoc | 25 ++++++++-----------
docs/reference/rollup/overview.asciidoc | 13 ++++++----
.../rollup/rollup-agg-limitations.asciidoc | 4 +--
.../rollup/rollup-getting-started.asciidoc | 13 ++++++----
.../rollup/rollup-search-limitations.asciidoc | 12 ++++-----
.../rollup/understanding-groups.asciidoc | 6 ++---
9 files changed, 63 insertions(+), 41 deletions(-)
create mode 100644 docs/reference/data-rollup-transform.asciidoc
diff --git a/docs/reference/data-rollup-transform.asciidoc b/docs/reference/data-rollup-transform.asciidoc
new file mode 100644
index 00000000000..5fe08d6f0d5
--- /dev/null
+++ b/docs/reference/data-rollup-transform.asciidoc
@@ -0,0 +1,16 @@
+[[data-rollup-transform]]
+= Roll up or transform your data
+
+[partintro]
+--
+
+{es} offers the following methods for manipulating your data:
+
+* <>
++
+include::rollup/index.asciidoc[tag=rollup-intro]
+* {stack-ov}/ml-dataframes.html[Transforming your data]
+
+--
+
+include::rollup/index.asciidoc[]
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc
index 8eaac30aa50..e0cbe106681 100644
--- a/docs/reference/index.asciidoc
+++ b/docs/reference/index.asciidoc
@@ -50,10 +50,10 @@ include::sql/index.asciidoc[]
include::monitoring/index.asciidoc[]
-include::rollup/index.asciidoc[]
-
include::frozen-indices.asciidoc[]
+include::data-rollup-transform.asciidoc[]
+
include::high-availability.asciidoc[]
include::security/index.asciidoc[]
diff --git a/docs/reference/rollup/api-quickref.asciidoc b/docs/reference/rollup/api-quickref.asciidoc
index d6be3e4e5b6..8a64d9df17f 100644
--- a/docs/reference/rollup/api-quickref.asciidoc
+++ b/docs/reference/rollup/api-quickref.asciidoc
@@ -1,7 +1,10 @@
[role="xpack"]
[testenv="basic"]
[[rollup-api-quickref]]
-== API Quick Reference
+=== {rollup-cap} API quick reference
+++++
+API quick reference
+++++
experimental[]
@@ -15,7 +18,7 @@ Most rollup endpoints have the following base:
[float]
[[rollup-api-jobs]]
-=== /job/
+==== /job/
* {ref}/rollup-put-job.html[PUT /_rollup/job/+++]: Create a {rollup-job}
* {ref}/rollup-get-job.html[GET /_rollup/job]: List {rollup-jobs}
@@ -26,13 +29,13 @@ Most rollup endpoints have the following base:
[float]
[[rollup-api-data]]
-=== /data/
+==== /data/
* {ref}/rollup-get-rollup-caps.html[GET /_rollup/data//_rollup_caps+++]: Get Rollup Capabilities
* {ref}/rollup-get-rollup-index-caps.html[GET //_rollup/data/+++]: Get Rollup Index Capabilities
[float]
[[rollup-api-index]]
-=== //
+==== //
* {ref}/rollup-search.html[GET //_rollup_search]: Search rollup data
diff --git a/docs/reference/rollup/index.asciidoc b/docs/reference/rollup/index.asciidoc
index 64dc233f82f..99180e2f32d 100644
--- a/docs/reference/rollup/index.asciidoc
+++ b/docs/reference/rollup/index.asciidoc
@@ -1,10 +1,7 @@
[role="xpack"]
[testenv="basic"]
[[xpack-rollup]]
-= Rolling up historical data
-
-[partintro]
---
+== Rolling up historical data
experimental[]
@@ -12,20 +9,20 @@ Keeping historical data around for analysis is extremely useful but often avoide
archiving massive amounts of data. Retention periods are thus driven by financial realities rather than by the
usefulness of extensive historical data.
-The Rollup feature in {xpack} provides a means to summarize and store historical data so that it can still be used
-for analysis, but at a fraction of the storage cost of raw data.
+// tag::rollup-intro[]
+The {stack} {rollup-features} provide a means to summarize and store historical
+data so that it can still be used for analysis, but at a fraction of the storage
+cost of raw data.
+// end::rollup-intro[]
-
-* <>
-* <>
-* <>
-* <>
+* <>
+* <>
+* <>
+* <>
* <>
-* <>
+* <>
---
-
include::overview.asciidoc[]
include::api-quickref.asciidoc[]
include::rollup-getting-started.asciidoc[]
diff --git a/docs/reference/rollup/overview.asciidoc b/docs/reference/rollup/overview.asciidoc
index 90c5e20a850..843cd5c0584 100644
--- a/docs/reference/rollup/overview.asciidoc
+++ b/docs/reference/rollup/overview.asciidoc
@@ -1,7 +1,10 @@
[role="xpack"]
[testenv="basic"]
[[rollup-overview]]
-== Overview
+=== {rollup-cap} overview
+++++
+Overview
+++++
experimental[]
@@ -23,7 +26,7 @@ reading often diminishes with time. It's not useless -- it could easily contrib
value often leads to deletion rather than paying the fixed storage cost.
[float]
-=== Rollup store historical data at reduced granularity
+==== Rollup stores historical data at reduced granularity
That's where Rollup comes into play. The Rollup functionality summarizes old, high-granularity data into a reduced
granularity format for long-term storage. By "rolling" the data up into a single summary document, historical data
@@ -39,7 +42,7 @@ automates this process of summarizing historical data.
Details about setting up and configuring Rollup are covered in <>
[float]
-=== Rollup uses standard query DSL
+==== Rollup uses standard query DSL
The Rollup feature exposes a new search endpoint (`/_rollup_search` vs the standard `/_search`) which knows how to search
over rolled-up data. Importantly, this endpoint accepts 100% normal {es} Query DSL. Your application does not need to learn
@@ -53,7 +56,7 @@ But if your queries, aggregations and dashboards only use the available function
data is trivial.
[float]
-=== Rollup merges "live" and "rolled" data
+==== Rollup merges "live" and "rolled" data
A useful feature of Rollup is the ability to query both "live", realtime data in addition to historical "rolled" data
in a single query.
@@ -67,7 +70,7 @@ It will take the results from both data sources and merge them together. If the
"rolled" data, live data is preferred to increase accuracy.
[float]
-=== Rollup is multi-interval aware
+==== Rollup is multi-interval aware
Finally, Rollup is capable of intelligently utilizing the best interval available. If you've worked with summarizing
features of other products, you'll find that they can be limiting. If you configure rollups at daily intervals... your
diff --git a/docs/reference/rollup/rollup-agg-limitations.asciidoc b/docs/reference/rollup/rollup-agg-limitations.asciidoc
index 9f8b6f66ade..6f9f949bf8b 100644
--- a/docs/reference/rollup/rollup-agg-limitations.asciidoc
+++ b/docs/reference/rollup/rollup-agg-limitations.asciidoc
@@ -1,7 +1,7 @@
[role="xpack"]
[testenv="basic"]
[[rollup-agg-limitations]]
-== Rollup Aggregation Limitations
+=== {rollup-cap} aggregation limitations
experimental[]
@@ -9,7 +9,7 @@ There are some limitations to how fields can be rolled up / aggregated. This pa
you are aware of them.
[float]
-=== Limited aggregation components
+==== Limited aggregation components
The Rollup functionality allows fields to be grouped with the following aggregations:
diff --git a/docs/reference/rollup/rollup-getting-started.asciidoc b/docs/reference/rollup/rollup-getting-started.asciidoc
index 27f9d9cd406..3b57e968a9e 100644
--- a/docs/reference/rollup/rollup-getting-started.asciidoc
+++ b/docs/reference/rollup/rollup-getting-started.asciidoc
@@ -1,7 +1,10 @@
[role="xpack"]
[testenv="basic"]
[[rollup-getting-started]]
-== Getting Started
+=== Getting started with {rollups}
+++++
+Getting started
+++++
experimental[]
@@ -23,7 +26,7 @@ look like this:
// NOTCONSOLE
[float]
-=== Creating a Rollup Job
+==== Creating a rollup job
We'd like to rollup these documents into hourly summaries, which will allow us to generate reports and dashboards with any time interval
one hour or greater. A rollup job might look like this:
@@ -103,7 +106,7 @@ After you execute the above command and create the job, you'll receive the follo
----
[float]
-=== Starting the job
+==== Starting the job
After the job is created, it will be sitting in an inactive state. Jobs need to be started before they begin processing data (this allows
you to stop them later as a way to temporarily pause, without deleting the configuration).
@@ -117,7 +120,7 @@ POST _rollup/job/sensor/_start
// TEST[setup:sensor_rollup_job]
[float]
-=== Searching the Rolled results
+==== Searching the rolled results
After the job has run and processed some data, we can use the <> endpoint to do some searching. The Rollup feature is designed
so that you can use the same Query DSL syntax that you are accustomed to... it just happens to run on the rolled up data instead.
@@ -292,7 +295,7 @@ In addition to being more complicated (date histogram and a terms aggregation, p
the date_histogram uses a `7d` interval instead of `60m`.
[float]
-=== Conclusion
+==== Conclusion
This quickstart should have provided a concise overview of the core functionality that Rollup exposes. There are more tips and things
to consider when setting up Rollups, which you can find throughout the rest of this section. You may also explore the <>
diff --git a/docs/reference/rollup/rollup-search-limitations.asciidoc b/docs/reference/rollup/rollup-search-limitations.asciidoc
index d55787f3cec..f6315e12a30 100644
--- a/docs/reference/rollup/rollup-search-limitations.asciidoc
+++ b/docs/reference/rollup/rollup-search-limitations.asciidoc
@@ -1,7 +1,7 @@
[role="xpack"]
[testenv="basic"]
[[rollup-search-limitations]]
-== Rollup Search Limitations
+=== {rollup-cap} search limitations
experimental[]
@@ -11,7 +11,7 @@ live data is thrown away, you will always lose some flexibility.
This page highlights the major limitations so that you are aware of them.
[float]
-=== Only one Rollup index per search
+==== Only one {rollup} index per search
When using the <> endpoint, the `index` parameter accepts one or more indices. These can be a mix of regular, non-rollup
indices and rollup indices. However, only one rollup index can be specified. The exact list of rules for the `index` parameter are as
@@ -33,7 +33,7 @@ may be able to open this up to multiple rollup jobs.
[float]
[[aggregate-stored-only]]
-=== Can only aggregate what's been stored
+==== Can only aggregate what's been stored
A perhaps obvious limitation, but rollups can only aggregate on data that has been stored in the rollups. If you don't configure the
rollup job to store metrics about the `price` field, you won't be able to use the `price` field in any query or aggregation.
@@ -81,7 +81,7 @@ The response will tell you that the field and aggregation were not possible, bec
// TESTRESPONSE[s/"stack_trace": \.\.\./"stack_trace": $body.$_path/]
[float]
-=== Interval Granularity
+==== Interval granularity
Rollups are stored at a certain granularity, as defined by the `date_histogram` group in the configuration. This means you
can only search/aggregate the rollup data with an interval that is greater than or equal to the configured rollup interval.
@@ -111,7 +111,7 @@ That said, if multiple jobs are present in a single rollup index with varying in
with the largest interval to satisfy the search request.
[float]
-=== Limited querying components
+==== Limited querying components
The Rollup functionality allows queries in the search request, but with a limited subset of components. The queries currently allowed are:
@@ -128,7 +128,7 @@ If you attempt to use an unsupported query, or the query references a field that
thrown. We expect the list of supported queries to grow over time as more are implemented.
[float]
-=== Timezones
+==== Timezones
Rollup documents are stored in the timezone of the `date_histogram` group configuration in the job. If no timezone is specified, the default
is to roll up timestamps in `UTC`.
diff --git a/docs/reference/rollup/understanding-groups.asciidoc b/docs/reference/rollup/understanding-groups.asciidoc
index a59c19fbf5c..eb1b47e8a16 100644
--- a/docs/reference/rollup/understanding-groups.asciidoc
+++ b/docs/reference/rollup/understanding-groups.asciidoc
@@ -1,7 +1,7 @@
[role="xpack"]
[testenv="basic"]
[[rollup-understanding-groups]]
-== Understanding Groups
+=== Understanding groups
experimental[]
@@ -121,7 +121,7 @@ Ultimately, when configuring `groups` for a job, think in terms of how you might
then include those in the config. Because Rollup Search allows any order or combination of the grouped fields, you just need to decide
if a field is useful for aggregating later, and how you might wish to use it (terms, histogram, etc.).
-=== Grouping Limitations with heterogeneous indices
+==== Grouping limitations with heterogeneous indices
There was previously a limitation in how Rollup could handle indices that had heterogeneous mappings (multiple, unrelated/non-overlapping
mappings). The recommendation at the time was to configure a separate job per data "type". For example, you might configure a separate
@@ -192,7 +192,7 @@ PUT _rollup/job/combined
--------------------------------------------------
// NOTCONSOLE
-=== Doc counts and overlapping jobs
+==== Doc counts and overlapping jobs
There was previously an issue with document counts on "overlapping" job configurations, driven by the same internal implementation detail.
If there were two Rollup jobs saving to the same index, where one job is a "subset" of another job, it was possible that document counts
From 199fff8a55fc31d23512abaf102a0270538e202d Mon Sep 17 00:00:00 2001
From: Eray
Date: Mon, 23 Sep 2019 19:46:39 +0300
Subject: [PATCH 09/94] Allow max_children only in top level nested sort
(#46731)
This commit restricts the use of max_children to the top-level nested sort, since it is silently ignored on all other levels.
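
For illustration, a sketch of the accepted and the now-rejected shapes (the
field paths here are made up for the example):

    // accepted: max_children on the top-level nested sort
    SortBuilders.fieldSort("parent.field")
        .setNestedSort(new NestedSortBuilder("parent").setMaxChildren(10));

    // rejected after this change: max_children on an inner level, where it
    // was silently ignored before
    SortBuilders.fieldSort("parent.child.field")
        .setNestedSort(new NestedSortBuilder("parent")
            .setNestedSort(new NestedSortBuilder("parent.child").setMaxChildren(10)));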
---
.../search/sort/FieldSortBuilder.java | 17 +++++++++--
.../search/sort/GeoDistanceSortBuilder.java | 2 ++
.../search/sort/ScriptSortBuilder.java | 2 ++
.../search/sort/FieldSortIT.java | 30 +++++++++++++++++++
4 files changed, 48 insertions(+), 3 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
index 4b52aa82a37..a4e5f4c5262 100644
--- a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
@@ -409,16 +409,15 @@ public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
throw new QueryShardException(context,
"max_children is only supported on last level of nested sort");
}
- // new nested sorts takes priority
+ validateMaxChildrenExistOnlyInTopLevelNestedSort(context, nestedSort);
nested = resolveNested(context, nestedSort);
} else {
nested = resolveNested(context, nestedPath, nestedFilter);
}
}
-
IndexFieldData<?> fieldData = context.getForField(fieldType);
if (fieldData instanceof IndexNumericFieldData == false
- && (sortMode == SortMode.SUM || sortMode == SortMode.AVG || sortMode == SortMode.MEDIAN)) {
+ && (sortMode == SortMode.SUM || sortMode == SortMode.AVG || sortMode == SortMode.MEDIAN)) {
throw new QueryShardException(context, "we only support AVG, MEDIAN and SUM on number based fields");
}
final SortField field;
@@ -437,6 +436,18 @@ public class FieldSortBuilder extends SortBuilder {
}
}
+ /**
+ * Throws an exception if {@code max_children} is specified anywhere other than the top level of the nested sort.
+ */
+ static void validateMaxChildrenExistOnlyInTopLevelNestedSort(QueryShardContext context, NestedSortBuilder nestedSort) {
+ for (NestedSortBuilder child = nestedSort.getNestedSort(); child != null; child = child.getNestedSort()) {
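+ // Integer.MAX_VALUE is the default, i.e. max_children was not set on this level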
+ if (child.getMaxChildren() != Integer.MAX_VALUE) {
+ throw new QueryShardException(context,
+ "max_children is only supported on top level of nested sort");
+ }
+ }
+ }
+
@Override
public boolean equals(Object other) {
if (this == other) {
diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
index 1bc6af2c966..9ec51753dac 100644
--- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
@@ -66,6 +66,7 @@ import java.util.Locale;
import java.util.Objects;
import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder;
+import static org.elasticsearch.search.sort.FieldSortBuilder.validateMaxChildrenExistOnlyInTopLevelNestedSort;
import static org.elasticsearch.search.sort.NestedSortBuilder.NESTED_FIELD;
/**
@@ -630,6 +631,7 @@ public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder>
"max_children is only supported on last level of nested sort");
}
// the new nested sort takes priority
+ validateMaxChildrenExistOnlyInTopLevelNestedSort(context, nestedSort);
nested = resolveNested(context, nestedSort);
} else {
nested = resolveNested(context, nestedPath, nestedFilter);
diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
index 8d5690d8583..4ebb8f2689c 100644
--- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
@@ -60,6 +60,7 @@ import java.util.Locale;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.search.sort.FieldSortBuilder.validateMaxChildrenExistOnlyInTopLevelNestedSort;
import static org.elasticsearch.search.sort.NestedSortBuilder.NESTED_FIELD;
/**
@@ -325,6 +326,7 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {
"max_children is only supported on last level of nested sort");
}
// the new nested sort takes priority
+ validateMaxChildrenExistOnlyInTopLevelNestedSort(context, nestedSort);
nested = resolveNested(context, nestedSort);
} else {
nested = resolveNested(context, nestedPath, nestedFilter);
diff --git a/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java
index d3f21867ab1..ae435d23ed0 100644
--- a/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java
+++ b/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java
@@ -1421,6 +1421,20 @@ public class FieldSortIT extends ESIntegTestCase {
.endObject()
.endObject()
.endObject()
+ .startObject("bar")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "text")
+ .field("fielddata", true)
+ .startObject("fields")
+ .startObject("sub")
+ .field("type", "keyword")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
.endObject()
.endObject()
.endObject()
@@ -1471,6 +1485,22 @@ public class FieldSortIT extends ESIntegTestCase {
assertThat(hits[0].getSortValues()[0], is("bar"));
assertThat(hits[1].getSortValues()[0], is("abc"));
+ {
+ SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class,
+ () -> client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders
+ .fieldSort("nested.bar.foo")
+ .setNestedSort(new NestedSortBuilder("nested")
+ .setNestedSort(new NestedSortBuilder("nested.bar")
+ .setMaxChildren(1)))
+ .order(SortOrder.DESC))
+ .get()
+ );
+ assertThat(exc.toString(),
+ containsString("max_children is only supported on top level of nested sort"));
+ }
+
// We sort on nested sub field
searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
From a61050378308ec892c823f8cb19e8f364371205c Mon Sep 17 00:00:00 2001
From: Costin Leau
Date: Mon, 23 Sep 2019 18:59:46 +0300
Subject: [PATCH 10/94] SQL: Add PIVOT support (#46489)
Add initial PIVOT support for transforming a regular table into a
statistics table around an arbitrary pivoting column:
SELECT * FROM
(SELECT languages, country, salary FROM mp)
PIVOT (AVG(salary) FOR country IN ('NL', 'DE', 'ES', 'RO', 'US'))
In the current implementation PIVOT allows only one aggregation; however,
this restriction is likely to be lifted in the future. Also, not all
aggregations work yet; in particular, MatrixStats is not yet supported.
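
Conceptually, the query above is close to the following grouping query, with
each country in the IN list transposed into its own result column (a rough
equivalence for illustration, not the literal execution plan):

    SELECT languages, country, AVG(salary) FROM mp
    WHERE country IN ('NL', 'DE', 'ES', 'RO', 'US')
    GROUP BY languages, country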
(cherry picked from commit d91263746a222915c570d4a662ec48c1d6b4f583)
---
.../xpack/sql/qa/jdbc/FetchSizeTestCase.java | 57 +-
.../sql/qa/src/main/resources/pivot.csv-spec | 206 ++
x-pack/plugin/sql/src/main/antlr/SqlBase.g4 | 25 +-
.../plugin/sql/src/main/antlr/SqlBase.tokens | 372 +--
.../sql/src/main/antlr/SqlBaseLexer.tokens | 370 +--
.../xpack/sql/analysis/analyzer/Analyzer.java | 51 +-
.../xpack/sql/analysis/analyzer/Verifier.java | 111 +-
...ionCursor.java => CompositeAggCursor.java} | 93 +-
...ggsRowSet.java => CompositeAggRowSet.java} | 40 +-
.../sql/execution/search/PivotCursor.java | 74 +
.../sql/execution/search/PivotRowSet.java | 139 +
.../xpack/sql/execution/search/Querier.java | 40 +-
...Set.java => SchemaCompositeAggRowSet.java} | 6 +-
.../search/SchemaDelegatingRowSet.java | 52 +
.../sql/execution/search/SourceGenerator.java | 4 +
.../search/extractor/BucketExtractors.java | 1 +
.../search/extractor/PivotExtractor.java | 71 +
.../xpack/sql/expression/Alias.java | 2 +-
.../xpack/sql/expression/Attribute.java | 25 +-
.../xpack/sql/expression/ExpressionId.java | 4 +
.../xpack/sql/expression/Expressions.java | 27 +
.../xpack/sql/expression/FieldAttribute.java | 15 +-
.../xpack/sql/expression/Literal.java | 2 +-
.../sql/expression/LiteralAttribute.java | 10 +-
.../sql/expression/UnresolvedAttribute.java | 2 +-
.../expression/function/ScoreAttribute.java | 4 +-
.../function/aggregate/AggregateFunction.java | 6 +-
.../aggregate/AggregateFunctionAttribute.java | 5 +-
.../grouping/GroupingFunctionAttribute.java | 4 +-
.../scalar/ScalarFunctionAttribute.java | 4 +-
.../xpack/sql/optimizer/Optimizer.java | 53 +-
.../xpack/sql/parser/LogicalPlanBuilder.java | 51 +-
.../xpack/sql/parser/SqlBaseBaseListener.java | 48 +
.../xpack/sql/parser/SqlBaseBaseVisitor.java | 28 +
.../xpack/sql/parser/SqlBaseLexer.java | 873 +++----
.../xpack/sql/parser/SqlBaseListener.java | 40 +
.../xpack/sql/parser/SqlBaseParser.java | 2258 ++++++++++-------
.../xpack/sql/parser/SqlBaseVisitor.java | 24 +
.../xpack/sql/plan/logical/Aggregate.java | 2 +-
.../xpack/sql/plan/logical/Pivot.java | 142 ++
.../xpack/sql/plan/physical/PivotExec.java | 63 +
.../xpack/sql/planner/Mapper.java | 7 +
.../xpack/sql/planner/QueryFolder.java | 352 +--
.../xpack/sql/planner/Verifier.java | 20 +-
.../querydsl/container/PivotColumnRef.java | 51 +
.../querydsl/container/QueryContainer.java | 44 +-
.../xpack/sql/session/Cursors.java | 6 +-
.../xpack/sql/session/ListCursor.java | 2 +-
.../analyzer/VerifierErrorMessagesTests.java | 55 +-
.../CompositeAggregationCursorTests.java | 18 +-
.../xpack/sql/optimizer/OptimizerTests.java | 26 +-
.../planner/PostOptimizerVerifierTests.java | 77 +
.../xpack/sql/planner/QueryFolderTests.java | 17 +
53 files changed, 3966 insertions(+), 2113 deletions(-)
create mode 100644 x-pack/plugin/sql/qa/src/main/resources/pivot.csv-spec
rename x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/{CompositeAggregationCursor.java => CompositeAggCursor.java} (72%)
rename x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/{CompositeAggsRowSet.java => CompositeAggRowSet.java} (70%)
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotCursor.java
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotRowSet.java
rename x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/{SchemaCompositeAggsRowSet.java => SchemaCompositeAggRowSet.java} (77%)
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaDelegatingRowSet.java
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/PivotExtractor.java
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Pivot.java
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/PivotExec.java
create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/PivotColumnRef.java
create mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/PostOptimizerVerifierTests.java
diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java
index 3da3c0ba73b..f12f069a3b3 100644
--- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java
+++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java
@@ -149,4 +149,59 @@ public class FetchSizeTestCase extends JdbcIntegrationTestCase {
assertTrue("No more entries left after row " + rs.getRow(), (i+j == 23 || rs.next()));
}
}
-}
+
+ /**
+ * Explicit pagination test for PIVOT.
+ * Checks that the paging properly consumes the necessary number of aggregation buckets and that the
+ * page size affects the result, not the intermediate query.
+ */
+ public void testPivotPaging() throws Exception {
+ Request request = new Request("PUT", "/test_pivot/_bulk");
+ request.addParameter("refresh", "true");
+ StringBuilder bulk = new StringBuilder();
+ String[] continent = new String[] { "AF", "AS", "EU", "NA", "SA", "AQ", "AU" };
+ for (int i = 0; i <= 100; i++) {
+ bulk.append("{\"index\":{}}\n");
+ bulk.append("{\"item\":").append(i % 10)
+ .append(", \"entry\":").append(i)
+ .append(", \"amount\" : ").append(randomInt(999))
+ .append(", \"location\" : \"").append(continent[i % (continent.length)]).append("\"")
+ .append("}\n");
+ }
+ request.setJsonEntity(bulk.toString());
+ assertEquals(200, client().performRequest(request).getStatusLine().getStatusCode());
+
+ try (Connection c = esJdbc();
+ Statement s = c.createStatement()) {
+
+ String query = "SELECT * FROM "
+ + "(SELECT item, amount, location FROM test_pivot)"
+ + " PIVOT (AVG(amount) FOR location IN ( 'AF', 'AS', 'EU', 'NA', 'SA', 'AQ', 'AU') )";
+ // set size smaller than an agg page
+ s.setFetchSize(3);
+ try (ResultSet rs = s.executeQuery(query)) {
+ assertEquals(8, rs.getMetaData().getColumnCount());
+ for (int i = 0; i < 10; i++) {
+ assertTrue(rs.next());
+ // the fetch size snaps to one pivot row (the initial 3 is too small: a pivot page spans the number of pivot entries + 1 buckets)
+ assertEquals(1, rs.getFetchSize());
+ assertEquals(Long.valueOf(i), rs.getObject("item"));
+ }
+ assertFalse(rs.next());
+ }
+
+ // now try a fetch size that fits two pivot rows (more than 8 * 2 buckets) - each page should hold 2 rows
+ s.setFetchSize(20);
+ try (ResultSet rs = s.executeQuery(query)) {
+ for (int i = 0; i < 10; i++) {
+ assertTrue(rs.next());
+ // each page now spans two pivot rows
+ assertEquals(2, rs.getFetchSize());
+ assertEquals(Long.valueOf(i), rs.getObject("item"));
+ }
+ assertFalse(rs.next());
+ }
+ }
+ assertNoSearchContexts();
+ }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/qa/src/main/resources/pivot.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/pivot.csv-spec
new file mode 100644
index 00000000000..c7e47a4304b
--- /dev/null
+++ b/x-pack/plugin/sql/qa/src/main/resources/pivot.csv-spec
@@ -0,0 +1,206 @@
+averageWithOneValue
+schema::languages:bt|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('F'));
+
+ languages | 'F'
+---------------+------------------
+null |62140.666666666664
+1 |47073.25
+2 |50684.4
+3 |53660.0
+4 |49291.5
+5 |46705.555555555555
+;
+
+averageWithAliasAndOneValue
+schema::languages:bt|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) AS "AVG" FOR gender IN ('F'));
+
+ languages | 'F'
+---------------+------------------
+null |62140.666666666664
+1 |47073.25
+2 |50684.4
+3 |53660.0
+4 |49291.5
+5 |46705.555555555555
+;
+
+averageWithAliasedValue
+schema::languages:bt|XX:d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('F' AS "XX"));
+
+ languages | XX
+---------------+------------------
+null |62140.666666666664
+1 |47073.25
+2 |50684.4
+3 |53660.0
+4 |49291.5
+5 |46705.555555555555
+;
+
+averageWithTwoValues
+schema::languages:bt|'M':d|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('M', 'F'));
+
+ languages | 'M' | 'F'
+---------------+-----------------+------------------
+null |48396.28571428572|62140.666666666664
+1 |49767.22222222222|47073.25
+2 |44103.90909090909|50684.4
+3 |51741.90909090909|53660.0
+4 |47058.90909090909|49291.5
+5 |39052.875 |46705.555555555555
+;
+
+averageWithTwoValuesAndAlias
+schema::languages:bt|XY:d|XX:d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('M' AS "XY", 'F' "XX"));
+
+ languages | XY | XX
+---------------+-----------------+------------------
+null |48396.28571428572|62140.666666666664
+1 |49767.22222222222|47073.25
+2 |44103.90909090909|50684.4
+3 |51741.90909090909|53660.0
+4 |47058.90909090909|49291.5
+5 |39052.875 |46705.555555555555
+;
+
+averageWithThreeValuesIncludingNull
+schema::languages:bt|'M':d|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('M', 'F'));
+
+ languages | 'M' | 'F'
+---------------+-----------------+------------------
+null |48396.28571428572|62140.666666666664
+1 |49767.22222222222|47073.25
+2 |44103.90909090909|50684.4
+3 |51741.90909090909|53660.0
+4 |47058.90909090909|49291.5
+5 |39052.875 |46705.555555555555
+;
+
+
+averageWithOneValueAndLimit
+schema::languages:bt|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('F')) LIMIT 3;
+
+ languages | 'F'
+---------------+------------------
+null |62140.666666666664
+1 |47073.25
+2 |50684.4
+;
+
+averageWithTwoValuesAndLimit
+schema::languages:bt|'M':d|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('M', 'F')) LIMIT 3;
+
+ languages | 'M' | 'F'
+---------------+-----------------+------------------
+null |48396.28571428572|62140.666666666664
+1 |49767.22222222222|47073.25
+2 |44103.90909090909|50684.4
+;
+
+
+averageWithTwoValuesAndTinyLimit
+schema::languages:bt|'M':d|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('M', 'F')) LIMIT 1;
+
+ languages | 'M' | 'F'
+---------------+-----------------+------------------
+null |48396.28571428572|62140.666666666664
+;
+
+
+averageWithTwoValuesAndSmallLimit
+schema::languages:bt|'M':d|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('M', 'F')) LIMIT 2;
+
+ languages | 'M' | 'F'
+---------------+-----------------+------------------
+null |48396.28571428572|62140.666666666664
+1 |49767.22222222222|47073.25
+;
+
+averageWithOneValueAndOrder
+schema::languages:bt|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('F')) ORDER BY languages DESC LIMIT 4;
+
+ languages | 'F'
+---------------+------------------
+5 |46705.555555555555
+4 |49291.5
+3 |53660.0
+2 |50684.4
+;
+
+averageWithTwoValuesAndOrderDesc
+schema::languages:bt|'M':d|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('M', 'F')) ORDER BY languages DESC;
+
+ languages | 'M' | 'F'
+---------------+-----------------+------------------
+5 |39052.875 |46705.555555555555
+4 |47058.90909090909|49291.5
+3 |51741.90909090909|53660.0
+2 |44103.90909090909|50684.4
+1 |49767.22222222222|47073.25
+null |48396.28571428572|62140.666666666664
+;
+
+averageWithTwoValuesAndOrderDescAndLimit
+schema::languages:bt|'M':d|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('M', 'F')) ORDER BY languages DESC LIMIT 2;
+
+ languages | 'M' | 'F'
+---------------+-----------------+------------------
+5 |39052.875 |46705.555555555555
+4 |47058.90909090909|49291.5
+;
+
+averageWithTwoValuesAndOrderAsc
+schema::languages:bt|'M':d|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('M', 'F')) ORDER BY languages ASC;
+
+ languages | 'M' | 'F'
+---------------+-----------------+------------------
+null |48396.28571428572|62140.666666666664
+1 |49767.22222222222|47073.25
+2 |44103.90909090909|50684.4
+3 |51741.90909090909|53660.0
+4 |47058.90909090909|49291.5
+5 |39052.875 |46705.555555555555
+;
+
+
+sumWithoutSubquery
+schema::birth_date:ts|emp_no:i|first_name:s|gender:s|hire_date:ts|last_name:s|1:i|2:i
+SELECT * FROM test_emp PIVOT (SUM(salary) FOR languages IN (1, 2)) LIMIT 5;
+
+ birth_date | emp_no | first_name | gender | hire_date | last_name | 1 | 2
+---------------------+---------------+---------------+---------------+---------------------+---------------+---------------+---------------
+null |10041 |Uri |F |1989-11-12 00:00:00.0|Lenart |56415 |null
+null |10043 |Yishay |M |1990-10-20 00:00:00.0|Tzvieli |34341 |null
+null |10044 |Mingsen |F |1994-05-21 00:00:00.0|Casley |39728 |null
+1952-04-19 00:00:00.0|10009 |Sumant |F |1985-02-18 00:00:00.0|Peac |66174 |null
+1953-01-07 00:00:00.0|10067 |Claudi |M |1987-03-04 00:00:00.0|Stavenow |null |52044
+1953-01-23 00:00:00.0|10019 |Lillian |null |1999-04-30 00:00:00.0|Haddadi |73717 |null
+;
+
+averageWithOneValueAndMath
+schema::languages:bt|'F':d
+SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (ROUND(AVG(salary) / 2) FOR gender IN ('F'));
+
+ languages | 'F'
+---------------+---------------
+null |31070.0
+1 |23537.0
+2 |25342.0
+3 |26830.0
+4 |24646.0
+5 |23353.0
+;
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4
index 76af159be90..86c11952498 100644
--- a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4
+++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4
@@ -90,7 +90,7 @@ orderBy
;
querySpecification
- : SELECT setQuantifier? selectItem (',' selectItem)*
+ : SELECT setQuantifier? selectItems
fromClause?
(WHERE where=booleanExpression)?
(GROUP BY groupBy)?
@@ -98,7 +98,7 @@ querySpecification
;
fromClause
- : FROM relation (',' relation)*
+ : FROM relation (',' relation)* pivotClause?
;
groupBy
@@ -123,6 +123,10 @@ setQuantifier
| ALL
;
+selectItems
+ : selectItem (',' selectItem)*
+ ;
+
selectItem
: expression (AS? identifier)? #selectExpression
;
@@ -154,6 +158,18 @@ relationPrimary
| '(' relation ')' (AS? qualifiedName)? #aliasedRelation
;
+pivotClause
+ : PIVOT '(' aggs=pivotArgs FOR column=qualifiedName IN '(' vals=pivotArgs ')' ')'
+ ;
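+// e.g. an input matched by pivotClause: PIVOT (AVG(salary) AS a FOR gender IN ('M', 'F' AS "XX"))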
+
+pivotArgs
+ : namedValueExpression (',' namedValueExpression)*
+ ;
+
+namedValueExpression
+ : valueExpression (AS? identifier)?
+ ;
+
expression
: booleanExpression
;
@@ -343,6 +359,7 @@ whenClause
;
// http://developer.mimer.se/validator/sql-reserved-words.tml
+// https://developer.mimer.com/wp-content/uploads/standard-sql-reserved-words-summary.pdf
nonReserved
: ANALYZE | ANALYZED
| CATALOGS | COLUMNS | CURRENT_DATE | CURRENT_TIME | CURRENT_TIMESTAMP
@@ -355,7 +372,7 @@ nonReserved
| LAST | LIMIT
| MAPPED | MINUTE | MONTH
| OPTIMIZED
- | PARSED | PHYSICAL | PLAN
+ | PARSED | PHYSICAL | PIVOT | PLAN
| QUERY
| RLIKE
| SCHEMAS | SECOND | SHOW | SYS
@@ -397,6 +414,7 @@ EXPLAIN: 'EXPLAIN';
EXTRACT: 'EXTRACT';
FALSE: 'FALSE';
FIRST: 'FIRST';
+FOR: 'FOR';
FORMAT: 'FORMAT';
FROM: 'FROM';
FROZEN: 'FROZEN';
@@ -434,6 +452,7 @@ ORDER: 'ORDER';
OUTER: 'OUTER';
PARSED: 'PARSED';
PHYSICAL: 'PHYSICAL';
+PIVOT: 'PIVOT';
PLAN: 'PLAN';
RIGHT: 'RIGHT';
RLIKE: 'RLIKE';
diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens b/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens
index 7eeec75f9c9..9771af465bb 100644
--- a/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens
+++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens
@@ -35,105 +35,107 @@ EXPLAIN=34
EXTRACT=35
FALSE=36
FIRST=37
-FORMAT=38
-FROM=39
-FROZEN=40
-FULL=41
-FUNCTIONS=42
-GRAPHVIZ=43
-GROUP=44
-HAVING=45
-HOUR=46
-HOURS=47
-IN=48
-INCLUDE=49
-INNER=50
-INTERVAL=51
-IS=52
-JOIN=53
-LAST=54
-LEFT=55
-LIKE=56
-LIMIT=57
-MAPPED=58
-MATCH=59
-MINUTE=60
-MINUTES=61
-MONTH=62
-MONTHS=63
-NATURAL=64
-NOT=65
-NULL=66
-NULLS=67
-ON=68
-OPTIMIZED=69
-OR=70
-ORDER=71
-OUTER=72
-PARSED=73
-PHYSICAL=74
-PLAN=75
-RIGHT=76
-RLIKE=77
-QUERY=78
-SCHEMAS=79
-SECOND=80
-SECONDS=81
-SELECT=82
-SHOW=83
-SYS=84
-TABLE=85
-TABLES=86
-TEXT=87
-THEN=88
-TRUE=89
-TO=90
-TYPE=91
-TYPES=92
-USING=93
-VERIFY=94
-WHEN=95
-WHERE=96
-WITH=97
-YEAR=98
-YEARS=99
-ESCAPE_ESC=100
-FUNCTION_ESC=101
-LIMIT_ESC=102
-DATE_ESC=103
-TIME_ESC=104
-TIMESTAMP_ESC=105
-GUID_ESC=106
-ESC_END=107
-EQ=108
-NULLEQ=109
-NEQ=110
-LT=111
-LTE=112
-GT=113
-GTE=114
-PLUS=115
-MINUS=116
-ASTERISK=117
-SLASH=118
-PERCENT=119
-CAST_OP=120
-CONCAT=121
-DOT=122
-PARAM=123
-STRING=124
-INTEGER_VALUE=125
-DECIMAL_VALUE=126
-IDENTIFIER=127
-DIGIT_IDENTIFIER=128
-TABLE_IDENTIFIER=129
-QUOTED_IDENTIFIER=130
-BACKQUOTED_IDENTIFIER=131
-SIMPLE_COMMENT=132
-BRACKETED_COMMENT=133
-WS=134
-UNRECOGNIZED=135
-DELIMITER=136
+FOR=38
+FORMAT=39
+FROM=40
+FROZEN=41
+FULL=42
+FUNCTIONS=43
+GRAPHVIZ=44
+GROUP=45
+HAVING=46
+HOUR=47
+HOURS=48
+IN=49
+INCLUDE=50
+INNER=51
+INTERVAL=52
+IS=53
+JOIN=54
+LAST=55
+LEFT=56
+LIKE=57
+LIMIT=58
+MAPPED=59
+MATCH=60
+MINUTE=61
+MINUTES=62
+MONTH=63
+MONTHS=64
+NATURAL=65
+NOT=66
+NULL=67
+NULLS=68
+ON=69
+OPTIMIZED=70
+OR=71
+ORDER=72
+OUTER=73
+PARSED=74
+PHYSICAL=75
+PIVOT=76
+PLAN=77
+RIGHT=78
+RLIKE=79
+QUERY=80
+SCHEMAS=81
+SECOND=82
+SECONDS=83
+SELECT=84
+SHOW=85
+SYS=86
+TABLE=87
+TABLES=88
+TEXT=89
+THEN=90
+TRUE=91
+TO=92
+TYPE=93
+TYPES=94
+USING=95
+VERIFY=96
+WHEN=97
+WHERE=98
+WITH=99
+YEAR=100
+YEARS=101
+ESCAPE_ESC=102
+FUNCTION_ESC=103
+LIMIT_ESC=104
+DATE_ESC=105
+TIME_ESC=106
+TIMESTAMP_ESC=107
+GUID_ESC=108
+ESC_END=109
+EQ=110
+NULLEQ=111
+NEQ=112
+LT=113
+LTE=114
+GT=115
+GTE=116
+PLUS=117
+MINUS=118
+ASTERISK=119
+SLASH=120
+PERCENT=121
+CAST_OP=122
+CONCAT=123
+DOT=124
+PARAM=125
+STRING=126
+INTEGER_VALUE=127
+DECIMAL_VALUE=128
+IDENTIFIER=129
+DIGIT_IDENTIFIER=130
+TABLE_IDENTIFIER=131
+QUOTED_IDENTIFIER=132
+BACKQUOTED_IDENTIFIER=133
+SIMPLE_COMMENT=134
+BRACKETED_COMMENT=135
+WS=136
+UNRECOGNIZED=137
+DELIMITER=138
'('=1
')'=2
','=3
@@ -171,88 +173,90 @@ DELIMITER=136
'EXTRACT'=35
'FALSE'=36
'FIRST'=37
-'FORMAT'=38
-'FROM'=39
-'FROZEN'=40
-'FULL'=41
-'FUNCTIONS'=42
-'GRAPHVIZ'=43
-'GROUP'=44
-'HAVING'=45
-'HOUR'=46
-'HOURS'=47
-'IN'=48
-'INCLUDE'=49
-'INNER'=50
-'INTERVAL'=51
-'IS'=52
-'JOIN'=53
-'LAST'=54
-'LEFT'=55
-'LIKE'=56
-'LIMIT'=57
-'MAPPED'=58
-'MATCH'=59
-'MINUTE'=60
-'MINUTES'=61
-'MONTH'=62
-'MONTHS'=63
-'NATURAL'=64
-'NOT'=65
-'NULL'=66
-'NULLS'=67
-'ON'=68
-'OPTIMIZED'=69
-'OR'=70
-'ORDER'=71
-'OUTER'=72
-'PARSED'=73
-'PHYSICAL'=74
-'PLAN'=75
-'RIGHT'=76
-'RLIKE'=77
-'QUERY'=78
-'SCHEMAS'=79
-'SECOND'=80
-'SECONDS'=81
-'SELECT'=82
-'SHOW'=83
-'SYS'=84
-'TABLE'=85
-'TABLES'=86
-'TEXT'=87
-'THEN'=88
-'TRUE'=89
-'TO'=90
-'TYPE'=91
-'TYPES'=92
-'USING'=93
-'VERIFY'=94
-'WHEN'=95
-'WHERE'=96
-'WITH'=97
-'YEAR'=98
-'YEARS'=99
-'{ESCAPE'=100
-'{FN'=101
-'{LIMIT'=102
-'{D'=103
-'{T'=104
-'{TS'=105
-'{GUID'=106
-'}'=107
-'='=108
-'<=>'=109
-'<'=111
-'<='=112
-'>'=113
-'>='=114
-'+'=115
-'-'=116
-'*'=117
-'/'=118
-'%'=119
-'::'=120
-'||'=121
-'.'=122
-'?'=123
+'FOR'=38
+'FORMAT'=39
+'FROM'=40
+'FROZEN'=41
+'FULL'=42
+'FUNCTIONS'=43
+'GRAPHVIZ'=44
+'GROUP'=45
+'HAVING'=46
+'HOUR'=47
+'HOURS'=48
+'IN'=49
+'INCLUDE'=50
+'INNER'=51
+'INTERVAL'=52
+'IS'=53
+'JOIN'=54
+'LAST'=55
+'LEFT'=56
+'LIKE'=57
+'LIMIT'=58
+'MAPPED'=59
+'MATCH'=60
+'MINUTE'=61
+'MINUTES'=62
+'MONTH'=63
+'MONTHS'=64
+'NATURAL'=65
+'NOT'=66
+'NULL'=67
+'NULLS'=68
+'ON'=69
+'OPTIMIZED'=70
+'OR'=71
+'ORDER'=72
+'OUTER'=73
+'PARSED'=74
+'PHYSICAL'=75
+'PIVOT'=76
+'PLAN'=77
+'RIGHT'=78
+'RLIKE'=79
+'QUERY'=80
+'SCHEMAS'=81
+'SECOND'=82
+'SECONDS'=83
+'SELECT'=84
+'SHOW'=85
+'SYS'=86
+'TABLE'=87
+'TABLES'=88
+'TEXT'=89
+'THEN'=90
+'TRUE'=91
+'TO'=92
+'TYPE'=93
+'TYPES'=94
+'USING'=95
+'VERIFY'=96
+'WHEN'=97
+'WHERE'=98
+'WITH'=99
+'YEAR'=100
+'YEARS'=101
+'{ESCAPE'=102
+'{FN'=103
+'{LIMIT'=104
+'{D'=105
+'{T'=106
+'{TS'=107
+'{GUID'=108
+'}'=109
+'='=110
+'<=>'=111
+'<'=113
+'<='=114
+'>'=115
+'>='=116
+'+'=117
+'-'=118
+'*'=119
+'/'=120
+'%'=121
+'::'=122
+'||'=123
+'.'=124
+'?'=125
diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens b/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens
index 603e67fec88..adb6142e865 100644
--- a/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens
+++ b/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens
@@ -35,104 +35,106 @@ EXPLAIN=34
EXTRACT=35
FALSE=36
FIRST=37
-FORMAT=38
-FROM=39
-FROZEN=40
-FULL=41
-FUNCTIONS=42
-GRAPHVIZ=43
-GROUP=44
-HAVING=45
-HOUR=46
-HOURS=47
-IN=48
-INCLUDE=49
-INNER=50
-INTERVAL=51
-IS=52
-JOIN=53
-LAST=54
-LEFT=55
-LIKE=56
-LIMIT=57
-MAPPED=58
-MATCH=59
-MINUTE=60
-MINUTES=61
-MONTH=62
-MONTHS=63
-NATURAL=64
-NOT=65
-NULL=66
-NULLS=67
-ON=68
-OPTIMIZED=69
-OR=70
-ORDER=71
-OUTER=72
-PARSED=73
-PHYSICAL=74
-PLAN=75
-RIGHT=76
-RLIKE=77
-QUERY=78
-SCHEMAS=79
-SECOND=80
-SECONDS=81
-SELECT=82
-SHOW=83
-SYS=84
-TABLE=85
-TABLES=86
-TEXT=87
-THEN=88
-TRUE=89
-TO=90
-TYPE=91
-TYPES=92
-USING=93
-VERIFY=94
-WHEN=95
-WHERE=96
-WITH=97
-YEAR=98
-YEARS=99
-ESCAPE_ESC=100
-FUNCTION_ESC=101
-LIMIT_ESC=102
-DATE_ESC=103
-TIME_ESC=104
-TIMESTAMP_ESC=105
-GUID_ESC=106
-ESC_END=107
-EQ=108
-NULLEQ=109
-NEQ=110
-LT=111
-LTE=112
-GT=113
-GTE=114
-PLUS=115
-MINUS=116
-ASTERISK=117
-SLASH=118
-PERCENT=119
-CAST_OP=120
-CONCAT=121
-DOT=122
-PARAM=123
-STRING=124
-INTEGER_VALUE=125
-DECIMAL_VALUE=126
-IDENTIFIER=127
-DIGIT_IDENTIFIER=128
-TABLE_IDENTIFIER=129
-QUOTED_IDENTIFIER=130
-BACKQUOTED_IDENTIFIER=131
-SIMPLE_COMMENT=132
-BRACKETED_COMMENT=133
-WS=134
-UNRECOGNIZED=135
+FOR=38
+FORMAT=39
+FROM=40
+FROZEN=41
+FULL=42
+FUNCTIONS=43
+GRAPHVIZ=44
+GROUP=45
+HAVING=46
+HOUR=47
+HOURS=48
+IN=49
+INCLUDE=50
+INNER=51
+INTERVAL=52
+IS=53
+JOIN=54
+LAST=55
+LEFT=56
+LIKE=57
+LIMIT=58
+MAPPED=59
+MATCH=60
+MINUTE=61
+MINUTES=62
+MONTH=63
+MONTHS=64
+NATURAL=65
+NOT=66
+NULL=67
+NULLS=68
+ON=69
+OPTIMIZED=70
+OR=71
+ORDER=72
+OUTER=73
+PARSED=74
+PHYSICAL=75
+PIVOT=76
+PLAN=77
+RIGHT=78
+RLIKE=79
+QUERY=80
+SCHEMAS=81
+SECOND=82
+SECONDS=83
+SELECT=84
+SHOW=85
+SYS=86
+TABLE=87
+TABLES=88
+TEXT=89
+THEN=90
+TRUE=91
+TO=92
+TYPE=93
+TYPES=94
+USING=95
+VERIFY=96
+WHEN=97
+WHERE=98
+WITH=99
+YEAR=100
+YEARS=101
+ESCAPE_ESC=102
+FUNCTION_ESC=103
+LIMIT_ESC=104
+DATE_ESC=105
+TIME_ESC=106
+TIMESTAMP_ESC=107
+GUID_ESC=108
+ESC_END=109
+EQ=110
+NULLEQ=111
+NEQ=112
+LT=113
+LTE=114
+GT=115
+GTE=116
+PLUS=117
+MINUS=118
+ASTERISK=119
+SLASH=120
+PERCENT=121
+CAST_OP=122
+CONCAT=123
+DOT=124
+PARAM=125
+STRING=126
+INTEGER_VALUE=127
+DECIMAL_VALUE=128
+IDENTIFIER=129
+DIGIT_IDENTIFIER=130
+TABLE_IDENTIFIER=131
+QUOTED_IDENTIFIER=132
+BACKQUOTED_IDENTIFIER=133
+SIMPLE_COMMENT=134
+BRACKETED_COMMENT=135
+WS=136
+UNRECOGNIZED=137
'('=1
')'=2
','=3
@@ -170,88 +172,90 @@ UNRECOGNIZED=135
'EXTRACT'=35
'FALSE'=36
'FIRST'=37
-'FORMAT'=38
-'FROM'=39
-'FROZEN'=40
-'FULL'=41
-'FUNCTIONS'=42
-'GRAPHVIZ'=43
-'GROUP'=44
-'HAVING'=45
-'HOUR'=46
-'HOURS'=47
-'IN'=48
-'INCLUDE'=49
-'INNER'=50
-'INTERVAL'=51
-'IS'=52
-'JOIN'=53
-'LAST'=54
-'LEFT'=55
-'LIKE'=56
-'LIMIT'=57
-'MAPPED'=58
-'MATCH'=59
-'MINUTE'=60
-'MINUTES'=61
-'MONTH'=62
-'MONTHS'=63
-'NATURAL'=64
-'NOT'=65
-'NULL'=66
-'NULLS'=67
-'ON'=68
-'OPTIMIZED'=69
-'OR'=70
-'ORDER'=71
-'OUTER'=72
-'PARSED'=73
-'PHYSICAL'=74
-'PLAN'=75
-'RIGHT'=76
-'RLIKE'=77
-'QUERY'=78
-'SCHEMAS'=79
-'SECOND'=80
-'SECONDS'=81
-'SELECT'=82
-'SHOW'=83
-'SYS'=84
-'TABLE'=85
-'TABLES'=86
-'TEXT'=87
-'THEN'=88
-'TRUE'=89
-'TO'=90
-'TYPE'=91
-'TYPES'=92
-'USING'=93
-'VERIFY'=94
-'WHEN'=95
-'WHERE'=96
-'WITH'=97
-'YEAR'=98
-'YEARS'=99
-'{ESCAPE'=100
-'{FN'=101
-'{LIMIT'=102
-'{D'=103
-'{T'=104
-'{TS'=105
-'{GUID'=106
-'}'=107
-'='=108
-'<=>'=109
-'<'=111
-'<='=112
-'>'=113
-'>='=114
-'+'=115
-'-'=116
-'*'=117
-'/'=118
-'%'=119
-'::'=120
-'||'=121
-'.'=122
-'?'=123
+'FOR'=38
+'FORMAT'=39
+'FROM'=40
+'FROZEN'=41
+'FULL'=42
+'FUNCTIONS'=43
+'GRAPHVIZ'=44
+'GROUP'=45
+'HAVING'=46
+'HOUR'=47
+'HOURS'=48
+'IN'=49
+'INCLUDE'=50
+'INNER'=51
+'INTERVAL'=52
+'IS'=53
+'JOIN'=54
+'LAST'=55
+'LEFT'=56
+'LIKE'=57
+'LIMIT'=58
+'MAPPED'=59
+'MATCH'=60
+'MINUTE'=61
+'MINUTES'=62
+'MONTH'=63
+'MONTHS'=64
+'NATURAL'=65
+'NOT'=66
+'NULL'=67
+'NULLS'=68
+'ON'=69
+'OPTIMIZED'=70
+'OR'=71
+'ORDER'=72
+'OUTER'=73
+'PARSED'=74
+'PHYSICAL'=75
+'PIVOT'=76
+'PLAN'=77
+'RIGHT'=78
+'RLIKE'=79
+'QUERY'=80
+'SCHEMAS'=81
+'SECOND'=82
+'SECONDS'=83
+'SELECT'=84
+'SHOW'=85
+'SYS'=86
+'TABLE'=87
+'TABLES'=88
+'TEXT'=89
+'THEN'=90
+'TRUE'=91
+'TO'=92
+'TYPE'=93
+'TYPES'=94
+'USING'=95
+'VERIFY'=96
+'WHEN'=97
+'WHERE'=98
+'WITH'=99
+'YEAR'=100
+'YEARS'=101
+'{ESCAPE'=102
+'{FN'=103
+'{LIMIT'=104
+'{D'=105
+'{T'=106
+'{TS'=107
+'{GUID'=108
+'}'=109
+'='=110
+'<=>'=111
+'<'=113
+'<='=114
+'>'=115
+'>='=116
+'+'=117
+'-'=118
+'*'=119
+'/'=120
+'%'=121
+'::'=122
+'||'=123
+'.'=124
+'?'=125
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java
index 901318258c0..5fdd1f9124d 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java
@@ -40,6 +40,7 @@ import org.elasticsearch.xpack.sql.plan.logical.Join;
import org.elasticsearch.xpack.sql.plan.logical.LocalRelation;
import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.plan.logical.OrderBy;
+import org.elasticsearch.xpack.sql.plan.logical.Pivot;
import org.elasticsearch.xpack.sql.plan.logical.Project;
import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias;
import org.elasticsearch.xpack.sql.plan.logical.UnaryPlan;
@@ -419,7 +420,7 @@ public class Analyzer extends RuleExecutor<LogicalPlan> {
return result;
}
- private List<NamedExpression> expandStar(UnresolvedStar us, List<Attribute> output) {
+ static List<NamedExpression> expandStar(UnresolvedStar us, List<Attribute> output) {
List<NamedExpression> expanded = new ArrayList<>();
// a qualifier is specified - since this is a star, it should be a CompoundDataType
@@ -460,24 +461,7 @@ public class Analyzer extends RuleExecutor {
}
}
} else {
- // add only primitives
- // but filter out multi fields (allow only the top-level value)
- Set seenMultiFields = new LinkedHashSet<>();
-
- for (Attribute a : output) {
- if (!DataTypes.isUnsupported(a.dataType()) && a.dataType().isPrimitive()) {
- if (a instanceof FieldAttribute) {
- FieldAttribute fa = (FieldAttribute) a;
- // skip nested fields and seen multi-fields
- if (!fa.isNested() && !seenMultiFields.contains(fa.parent())) {
- expanded.add(a);
- seenMultiFields.add(a);
- }
- } else {
- expanded.add(a);
- }
- }
- }
+ expanded.addAll(Expressions.onlyPrimitiveFieldAttributes(output));
}
return expanded;
@@ -954,12 +938,24 @@ public class Analyzer extends RuleExecutor {
}
return a;
}
+ if (plan instanceof Pivot) {
+ Pivot p = (Pivot) plan;
+ if (p.childrenResolved()) {
+ if (hasUnresolvedAliases(p.values())) {
+ p = new Pivot(p.source(), p.child(), p.column(), assignAliases(p.values()), p.aggregates());
+ }
+ if (hasUnresolvedAliases(p.aggregates())) {
+ p = new Pivot(p.source(), p.child(), p.column(), p.values(), assignAliases(p.aggregates()));
+ }
+ }
+ return p;
+ }
return plan;
}
private boolean hasUnresolvedAliases(List<? extends NamedExpression> expressions) {
- return expressions != null && expressions.stream().anyMatch(e -> e instanceof UnresolvedAlias);
+ return expressions != null && Expressions.anyMatch(expressions, e -> e instanceof UnresolvedAlias);
}
private List<NamedExpression> assignAliases(List<? extends NamedExpression> exprs) {
@@ -1277,13 +1273,20 @@ public class Analyzer extends RuleExecutor {
protected LogicalPlan rule(LogicalPlan plan) {
if (plan instanceof Project) {
Project p = (Project) plan;
- return new Project(p.source(), p.child(), cleanSecondaryAliases(p.projections()));
+ return new Project(p.source(), p.child(), cleanChildrenAliases(p.projections()));
}
if (plan instanceof Aggregate) {
Aggregate a = (Aggregate) plan;
- // clean group expressions
- return new Aggregate(a.source(), a.child(), cleanAllAliases(a.groupings()), cleanSecondaryAliases(a.aggregates()));
+ // aliases inside GROUP BY are irrelevant, so remove all of them
+ // however aggregations are important (ultimately a projection)
+ return new Aggregate(a.source(), a.child(), cleanAllAliases(a.groupings()), cleanChildrenAliases(a.aggregates()));
+ }
+
+ if (plan instanceof Pivot) {
+ Pivot p = (Pivot) plan;
+ return new Pivot(p.source(), p.child(), trimAliases(p.column()), cleanChildrenAliases(p.values()),
+ cleanChildrenAliases(p.aggregates()));
}
return plan.transformExpressionsOnly(e -> {
@@ -1294,7 +1297,7 @@ public class Analyzer extends RuleExecutor {
});
}
- private List<NamedExpression> cleanSecondaryAliases(List<? extends NamedExpression> args) {
+ private List<NamedExpression> cleanChildrenAliases(List<? extends NamedExpression> args) {
List<NamedExpression> cleaned = new ArrayList<>(args.size());
for (NamedExpression ne : args) {
cleaned.add((NamedExpression) trimNonTopLevelAliases(ne));
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java
index d5a4cb436e6..5c4b89209fa 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java
@@ -13,6 +13,8 @@ import org.elasticsearch.xpack.sql.expression.Exists;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.Expressions;
import org.elasticsearch.xpack.sql.expression.FieldAttribute;
+import org.elasticsearch.xpack.sql.expression.Literal;
+import org.elasticsearch.xpack.sql.expression.NamedExpression;
import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute;
import org.elasticsearch.xpack.sql.expression.function.Function;
import org.elasticsearch.xpack.sql.expression.function.FunctionAttribute;
@@ -33,13 +35,16 @@ import org.elasticsearch.xpack.sql.plan.logical.Limit;
import org.elasticsearch.xpack.sql.plan.logical.LocalRelation;
import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.plan.logical.OrderBy;
+import org.elasticsearch.xpack.sql.plan.logical.Pivot;
import org.elasticsearch.xpack.sql.plan.logical.Project;
import org.elasticsearch.xpack.sql.plan.logical.command.Command;
import org.elasticsearch.xpack.sql.stats.FeatureMetric;
import org.elasticsearch.xpack.sql.stats.Metrics;
import org.elasticsearch.xpack.sql.tree.Node;
import org.elasticsearch.xpack.sql.type.DataType;
+import org.elasticsearch.xpack.sql.type.DataTypes;
import org.elasticsearch.xpack.sql.type.EsField;
+import org.elasticsearch.xpack.sql.util.Holder;
import org.elasticsearch.xpack.sql.util.StringUtils;
import java.util.ArrayList;
@@ -64,6 +69,7 @@ import static org.elasticsearch.xpack.sql.stats.FeatureMetric.LOCAL;
import static org.elasticsearch.xpack.sql.stats.FeatureMetric.ORDERBY;
import static org.elasticsearch.xpack.sql.stats.FeatureMetric.WHERE;
import static org.elasticsearch.xpack.sql.type.DataType.GEO_SHAPE;
+import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine;
/**
* The verifier has the role of checking the analyzed tree for failures and build a list of failures following this check.
@@ -237,6 +243,7 @@ public final class Verifier {
checkForScoreInsideFunctions(p, localFailures);
checkNestedUsedInGroupByOrHaving(p, localFailures);
checkForGeoFunctionsOnDocValues(p, localFailures);
+ checkPivot(p, localFailures);
// everything checks out
// mark the plan as analyzed
@@ -464,20 +471,39 @@ public final class Verifier {
private static boolean checkGroupByInexactField(LogicalPlan p, Set<Failure> localFailures) {
if (p instanceof Aggregate) {
- Aggregate a = (Aggregate) p;
-
- // The grouping can not be an aggregate function or an inexact field (e.g. text without a keyword)
- a.groupings().forEach(e -> e.forEachUp(c -> {
- EsField.Exact exact = c.getExactInfo();
- if (exact.hasExact() == false) {
- localFailures.add(fail(c, "Field [" + c.sourceText() + "] of data type [" + c.dataType().typeName + "] " +
- "cannot be used for grouping; " + exact.errorMsg()));
- }
- }, FieldAttribute.class));
+ return onlyExactFields(((Aggregate) p).groupings(), localFailures);
}
return true;
}
+ // The grouping cannot be an aggregate function or an inexact field (e.g. text without a keyword)
+ private static boolean onlyExactFields(List<Expression> expressions, Set<Failure> localFailures) {
+ Holder<Boolean> onlyExact = new Holder<>(Boolean.TRUE);
+
+ expressions.forEach(e -> e.forEachUp(c -> {
+ EsField.Exact exact = c.getExactInfo();
+ if (exact.hasExact() == false) {
+ localFailures.add(fail(c, "Field [{}] of data type [{}] cannot be used for grouping; {}", c.sourceText(),
+ c.dataType().typeName, exact.errorMsg()));
+ onlyExact.set(Boolean.FALSE);
+ }
+ }, FieldAttribute.class));
+
+ return onlyExact.get();
+ }
+
+ private static boolean onlyRawFields(Iterable<? extends Expression> expressions, Set<Failure> localFailures) {
+ Holder<Boolean> onlyRaw = new Holder<>(Boolean.TRUE);
+
+ expressions.forEach(e -> e.forEachDown(c -> {
+ if (c instanceof Function || c instanceof FunctionAttribute) {
+ localFailures.add(fail(c, "No functions allowed (yet); encountered [{}]", c.sourceText()));
+ onlyRaw.set(Boolean.FALSE);
+ }
+ }));
+ return onlyRaw.get();
+ }
+
private static boolean checkGroupByTime(LogicalPlan p, Set<Failure> localFailures) {
if (p instanceof Aggregate) {
Aggregate a = (Aggregate) p;
@@ -625,8 +651,9 @@ public final class Verifier {
Project proj = (Project) p;
proj.projections().forEach(e -> e.forEachDown(f ->
localFailures.add(fail(f, "[{}] needs to be part of the grouping", Expressions.name(f))), GroupingFunction.class));
- } else if (p instanceof Aggregate) {
- // if it does have a GROUP BY, check if the groupings contain the grouping functions (Histograms)
+ }
+ // if it does have a GROUP BY, check if the groupings contain the grouping functions (Histograms)
+ else if (p instanceof Aggregate) {
Aggregate a = (Aggregate) p;
a.aggregates().forEach(agg -> agg.forEachDown(e -> {
if (a.groupings().size() == 0
@@ -749,4 +776,62 @@ public final class Verifier {
}
}, FieldAttribute.class)), OrderBy.class);
}
-}
+
+ private static void checkPivot(LogicalPlan p, Set<Failure> localFailures) {
+ p.forEachDown(pv -> {
+ // check only exact fields are used inside PIVOTing
+ if (onlyExactFields(combine(pv.groupingSet(), pv.column()), localFailures) == false
+ || onlyRawFields(pv.groupingSet(), localFailures) == false) {
+ // if that is not the case, no need to do further validation since the declaration is fundamentally wrong
+ return;
+ }
+
+ // check values
+ DataType colType = pv.column().dataType();
+ for (NamedExpression v : pv.values()) {
+ // check all values are foldable
+ Expression ex = v instanceof Alias ? ((Alias) v).child() : v;
+ if (ex instanceof Literal == false) {
+ localFailures.add(fail(v, "Non-literal [{}] found inside PIVOT values", v.name()));
+ }
+ else if (ex.foldable() && ex.fold() == null) {
+ localFailures.add(fail(v, "Null not allowed as a PIVOT value", v.name()));
+ }
+ // and that their type is compatible with that of the column
+ else if (DataTypes.areTypesCompatible(colType, v.dataType()) == false) {
+ localFailures.add(fail(v, "Literal [{}] of type [{}] does not match type [{}] of PIVOT column [{}]", v.name(),
+ v.dataType().typeName, colType.typeName, pv.column().sourceText()));
+ }
+ }
+
+ // check aggregate function, in particular formulas that might hide literals or scalars
+ pv.aggregates().forEach(a -> {
+ Holder<Boolean> hasAggs = new Holder<>(Boolean.FALSE);
+ List<Expression> aggs = a.collectFirstChildren(c -> {
+ // skip aggregate functions
+ if (Functions.isAggregate(c)) {
+ hasAggs.set(Boolean.TRUE);
+ return true;
+ }
+ if (c.children().isEmpty()) {
+ return true;
+ }
+ return false;
+ });
+
+ if (Boolean.FALSE.equals(hasAggs.get())) {
+ localFailures.add(fail(a, "No aggregate function found in PIVOT at [{}]", a.sourceText()));
+ }
+ // check mixture of Agg and column (wrapped in scalar)
+ else {
+ for (Expression agg : aggs) {
+ if (agg instanceof FieldAttribute) {
+ localFailures.add(fail(a, "Non-aggregate function found in PIVOT at [{}]", a.sourceText()));
+ }
+ }
+ }
+ });
+
+ }, Pivot.class);
+ }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggCursor.java
similarity index 72%
rename from x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java
rename to x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggCursor.java
index 41b5e1199ef..616ca01c38f 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggCursor.java
@@ -37,14 +37,14 @@ import java.util.BitSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
-import java.util.function.Consumer;
-import java.util.function.Function;
+import java.util.function.BiFunction;
+import java.util.function.Supplier;
/**
* Cursor for composite aggregation (GROUP BY).
* Stores the query that gets updated/slides across requests.
*/
-public class CompositeAggregationCursor implements Cursor {
+public class CompositeAggCursor implements Cursor {
private final Logger log = LogManager.getLogger(getClass());
@@ -57,7 +57,7 @@ public class CompositeAggregationCursor implements Cursor {
private final int limit;
private final boolean includeFrozen;
- CompositeAggregationCursor(byte[] next, List<BucketExtractor> exts, BitSet mask, int remainingLimit, boolean includeFrozen,
+ CompositeAggCursor(byte[] next, List<BucketExtractor> exts, BitSet mask, int remainingLimit, boolean includeFrozen,
String... indices) {
this.indices = indices;
this.nextQuery = next;
@@ -67,7 +67,7 @@ public class CompositeAggregationCursor implements Cursor {
this.includeFrozen = includeFrozen;
}
- public CompositeAggregationCursor(StreamInput in) throws IOException {
+ public CompositeAggCursor(StreamInput in) throws IOException {
indices = in.readStringArray();
nextQuery = in.readByteArray();
limit = in.readVInt();
@@ -86,7 +86,6 @@ public class CompositeAggregationCursor implements Cursor {
out.writeNamedWriteableList(extractors);
out.writeByteArray(mask.toByteArray());
out.writeBoolean(includeFrozen);
-
}
@Override
@@ -133,16 +132,17 @@ public class CompositeAggregationCursor implements Cursor {
log.trace("About to execute composite query {} on {}", StringUtils.toString(query), indices);
}
- SearchRequest search = Querier.prepareRequest(client, query, cfg.pageTimeout(), includeFrozen, indices);
+ SearchRequest request = Querier.prepareRequest(client, query, cfg.pageTimeout(), includeFrozen, indices);
- client.search(search, new ActionListener<SearchResponse>() {
+ client.search(request, new ActionListener<SearchResponse>() {
@Override
- public void onResponse(SearchResponse r) {
- handle(r, search.source(), ba -> new CompositeAggsRowSet(extractors, mask, r, limit, ba),
- () -> client.search(search, this),
- p -> listener.onResponse(p),
- e -> listener.onFailure(e),
- Schema.EMPTY, includeFrozen, indices);
+ public void onResponse(SearchResponse response) {
+ handle(response, request.source(),
+ makeRowSet(response),
+ makeCursor(),
+ () -> client.search(request, this),
+ listener,
+ Schema.EMPTY);
}
@Override
@@ -152,40 +152,55 @@ public class CompositeAggregationCursor implements Cursor {
});
}
- static void handle(SearchResponse response, SearchSourceBuilder source, Function<byte[], CompositeAggsRowSet> makeRowSet,
- Runnable retry, Consumer<Page> onPage, Consumer<Exception> onFailure,
- Schema schema, boolean includeFrozen, String[] indices) {
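+ // extension points, overridden by PivotCursor to plug in pivot-specific row sets and cursors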
+ protected Supplier<CompositeAggRowSet> makeRowSet(SearchResponse response) {
+ return () -> new CompositeAggRowSet(extractors, mask, response, limit);
+ }
+
+ protected BiFunction<byte[], CompositeAggRowSet, CompositeAggCursor> makeCursor() {
+ return (q, r) -> new CompositeAggCursor(q, r.extractors(), r.mask(), r.remainingData(), includeFrozen, indices);
+ }
+
+ static void handle(SearchResponse response, SearchSourceBuilder source,
+ Supplier<CompositeAggRowSet> makeRowSet,
+ BiFunction<byte[], CompositeAggRowSet, CompositeAggCursor> makeCursor,
+ Runnable retry,
+ ActionListener<Page> listener,
+ Schema schema) {
// there are some results
if (response.getAggregations().asList().isEmpty() == false) {
// retry
- if (CompositeAggregationCursor.shouldRetryDueToEmptyPage(response)) {
- CompositeAggregationCursor.updateCompositeAfterKey(response, source);
+ if (shouldRetryDueToEmptyPage(response)) {
+ updateCompositeAfterKey(response, source);
retry.run();
return;
}
try {
- boolean hasAfterKey = updateCompositeAfterKey(response, source);
- byte[] queryAsBytes = hasAfterKey ? serializeQuery(source) : null;
- CompositeAggsRowSet rowSet = makeRowSet.apply(queryAsBytes);
+ CompositeAggRowSet rowSet = makeRowSet.get();
+ Map<String, Object> afterKey = rowSet.afterKey();
+
+ byte[] queryAsBytes = null;
+ if (afterKey != null) {
+ updateSourceAfterKey(afterKey, source);
+ queryAsBytes = serializeQuery(source);
+ }
Cursor next = rowSet.remainingData() == 0
? Cursor.EMPTY
- : new CompositeAggregationCursor(queryAsBytes, rowSet.extractors(), rowSet.mask(),
- rowSet.remainingData(), includeFrozen, indices);
- onPage.accept(new Page(rowSet, next));
+ : makeCursor.apply(queryAsBytes, rowSet);
+ listener.onResponse(new Page(rowSet, next));
} catch (Exception ex) {
- onFailure.accept(ex);
+ listener.onFailure(ex);
}
}
// no results
else {
- onPage.accept(Page.last(Rows.empty(schema)));
+ listener.onResponse(Page.last(Rows.empty(schema)));
}
}
- static boolean shouldRetryDueToEmptyPage(SearchResponse response) {
+ private static boolean shouldRetryDueToEmptyPage(SearchResponse response) {
CompositeAggregation composite = getComposite(response);
// if there are no buckets but a next page, go fetch it instead of sending an empty response to the client
return composite != null && composite.getBuckets().isEmpty() && composite.afterKey() != null && !composite.afterKey().isEmpty();
@@ -204,25 +219,22 @@ public class CompositeAggregationCursor implements Cursor {
throw new SqlIllegalArgumentException("Unrecognized root group found; {}", agg.getClass());
}
- static boolean updateCompositeAfterKey(SearchResponse r, SearchSourceBuilder next) {
+ private static void updateCompositeAfterKey(SearchResponse r, SearchSourceBuilder search) {
CompositeAggregation composite = getComposite(r);
if (composite == null) {
throw new SqlIllegalArgumentException("Invalid server response; no group-by detected");
}
- Map<String, Object> afterKey = composite.afterKey();
- // a null after-key means done
- if (afterKey == null) {
- return false;
+ updateSourceAfterKey(composite.afterKey(), search);
}
- AggregationBuilder aggBuilder = next.aggregations().getAggregatorFactories().iterator().next();
+ private static void updateSourceAfterKey(Map<String, Object> afterKey, SearchSourceBuilder search) {
+ AggregationBuilder aggBuilder = search.aggregations().getAggregatorFactories().iterator().next();
// update after-key with the new value
if (aggBuilder instanceof CompositeAggregationBuilder) {
CompositeAggregationBuilder comp = (CompositeAggregationBuilder) aggBuilder;
comp.aggregateAfter(afterKey);
- return true;
} else {
throw new SqlIllegalArgumentException("Invalid client request; expected a group-by but instead got {}", aggBuilder);
}
@@ -240,7 +252,7 @@ public class CompositeAggregationCursor implements Cursor {
/**
* Serializes the search source to a byte array.
*/
- static byte[] serializeQuery(SearchSourceBuilder source) throws IOException {
+ private static byte[] serializeQuery(SearchSourceBuilder source) throws IOException {
if (source == null) {
return new byte[0];
}
@@ -259,7 +271,7 @@ public class CompositeAggregationCursor implements Cursor {
@Override
public int hashCode() {
- return Objects.hash(Arrays.hashCode(indices), Arrays.hashCode(nextQuery), extractors, limit);
+ return Objects.hash(Arrays.hashCode(indices), Arrays.hashCode(nextQuery), extractors, limit, mask, includeFrozen);
}
@Override
@@ -267,15 +279,16 @@ public class CompositeAggregationCursor implements Cursor {
if (obj == null || obj.getClass() != getClass()) {
return false;
}
- CompositeAggregationCursor other = (CompositeAggregationCursor) obj;
+ CompositeAggCursor other = (CompositeAggCursor) obj;
return Arrays.equals(indices, other.indices)
&& Arrays.equals(nextQuery, other.nextQuery)
&& Objects.equals(extractors, other.extractors)
- && Objects.equals(limit, other.limit);
+ && Objects.equals(limit, other.limit)
+ && Objects.equals(mask, other.mask)
+ && Objects.equals(includeFrozen, other.includeFrozen);
}
@Override
public String toString() {
return "cursor for composite on index [" + Arrays.toString(indices) + "]";
}
-}
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggRowSet.java
similarity index 70%
rename from x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java
rename to x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggRowSet.java
index dd6b85279cb..1262e80e066 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggRowSet.java
@@ -12,50 +12,50 @@ import org.elasticsearch.xpack.sql.session.RowSet;
import java.util.BitSet;
import java.util.List;
+import java.util.Map;
import static java.util.Collections.emptyList;
/**
* {@link RowSet} specific to (GROUP BY) aggregation.
*/
-class CompositeAggsRowSet extends ResultRowSet<BucketExtractor> {
+class CompositeAggRowSet extends ResultRowSet<BucketExtractor> {
- private final List<? extends CompositeAggregation.Bucket> buckets;
- private final int remainingData;
- private final int size;
- private int row = 0;
+ final List<? extends CompositeAggregation.Bucket> buckets;
- CompositeAggsRowSet(List<BucketExtractor> exts, BitSet mask, SearchResponse response, int limit, byte[] next) {
+ Map<String, Object> afterKey;
+ int remainingData;
+ int size;
+ int row = 0;
+
+ CompositeAggRowSet(List<BucketExtractor> exts, BitSet mask, SearchResponse response, int limit) {
super(exts, mask);
- CompositeAggregation composite = CompositeAggregationCursor.getComposite(response);
+ CompositeAggregation composite = CompositeAggCursor.getComposite(response);
if (composite != null) {
buckets = composite.getBuckets();
+ afterKey = composite.afterKey();
} else {
buckets = emptyList();
+ afterKey = null;
}
// page size
size = limit == -1 ? buckets.size() : Math.min(buckets.size(), limit);
+ remainingData = remainingData(afterKey != null, size, limit);
+ }
- if (next == null) {
- remainingData = 0;
+ static int remainingData(boolean hasNextPage, int size, int limit) {
+ if (hasNextPage == false) {
+ return 0;
} else {
- // Compute remaining limit
-
- // If the limit is -1 then we have a local sorting (sort on aggregate function) that requires all the buckets
- // to be processed so we stop only when all data is exhausted.
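+ // a limit of -1 means local sorting (sort on aggregate functions), which needs all buckets, so we stop only when the data is exhausted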
int remainingLimit = (limit == -1) ? limit : ((limit - size) >= 0 ? (limit - size) : 0);
// if the computed limit is zero, or the size is zero it means either there's nothing left or the limit has been reached
// note that a composite agg might be valid but return zero groups (since these can be filtered with HAVING/bucket selector)
// however the Querier takes care of that and keeps making requests until either the query is invalid or at least one response
// is returned.
- if (size == 0 || remainingLimit == 0) {
- remainingData = 0;
- } else {
- remainingData = remainingLimit;
- }
+ return size == 0 ? size : remainingLimit;
}
}
@@ -91,4 +91,8 @@ class CompositeAggsRowSet extends ResultRowSet<BucketExtractor> {
int remainingData() {
return remainingData;
}
+
+ Map<String, Object> afterKey() {
+ return afterKey;
+ }
}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotCursor.java
new file mode 100644
index 00000000000..a815602d950
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotCursor.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.sql.execution.search;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor;
+import org.elasticsearch.xpack.sql.type.Schema;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.List;
+import java.util.Map;
+import java.util.function.BiFunction;
+import java.util.function.Supplier;
+
+public class PivotCursor extends CompositeAggCursor {
+
+ public static final String NAME = "p";
+
+ private final Map<String, Object> previousKey;
+
+ PivotCursor(Map<String, Object> previousKey, byte[] next, List<BucketExtractor> exts, BitSet mask, int remainingLimit,
+ boolean includeFrozen,
+ String... indices) {
+ super(next, exts, mask, remainingLimit, includeFrozen, indices);
+ this.previousKey = previousKey;
+ }
+
+ public PivotCursor(StreamInput in) throws IOException {
+ super(in);
+ previousKey = in.readBoolean() == true ? in.readMap() : null;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ if (previousKey != null) {
+ out.writeBoolean(true);
+ out.writeMap(previousKey);
+ } else {
+ out.writeBoolean(false);
+ }
+ }
+
+ @Override
+ public String getWriteableName() {
+ return NAME;
+ }
+
+ @Override
+ protected Supplier<CompositeAggRowSet> makeRowSet(SearchResponse response) {
+ return () -> new PivotRowSet(Schema.EMPTY, extractors(), mask(), response, limit(), previousKey);
+ }
+
+ @Override
+ protected BiFunction<byte[], CompositeAggRowSet, CompositeAggCursor> makeCursor() {
+ return (q, r) -> {
+ Map<String, Object> lastAfterKey = r instanceof PivotRowSet ? ((PivotRowSet) r).lastAfterKey() : null;
+ return new PivotCursor(lastAfterKey, q, r.extractors(), r.mask(), r.remainingData(), includeFrozen(), indices());
+ };
+ }
+
+ @Override
+ public String toString() {
+ return "pivot for index [" + Arrays.toString(indices()) + "]";
+ }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotRowSet.java
new file mode 100644
index 00000000000..6839e7275ae
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotRowSet.java
@@ -0,0 +1,139 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.sql.execution.search;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation;
+import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor;
+import org.elasticsearch.xpack.sql.type.Schema;
+
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+
+import static java.util.Collections.emptyList;
+
+class PivotRowSet extends SchemaCompositeAggRowSet {
+
+ private final List<Object[]> data;
+ private final Map<String, Object> lastAfterKey;
+
+ PivotRowSet(Schema schema, List<BucketExtractor> exts, BitSet mask, SearchResponse response, int limit,
+ Map<String, Object> previousLastKey) {
+ super(schema, exts, mask, response, limit);
+
+ data = buckets.isEmpty() ? emptyList() : new ArrayList<>();
+
+ // the last page contains no data, handle that to avoid NPEs and such
+ if (buckets.isEmpty()) {
+ lastAfterKey = null;
+ return;
+ }
+
+ // consume buckets until all pivot columns are initialized or the next grouping starts
+ // to determine a group, find all group-by extractors (CompositeKeyExtractor)
+ // extract their values and keep iterating through the buckets as long as the result is the same
+
+ Map<String, Object> currentRowGroupKey = null;
+ Map<String, Object> lastCompletedGroupKey = null;
+ Object[] currentRow = new Object[columnCount()];
+
+ for (int bucketIndex = 0; bucketIndex < buckets.size(); bucketIndex++) {
+ CompositeAggregation.Bucket bucket = buckets.get(bucketIndex);
+ Map<String, Object> key = bucket.getKey();
+
+ // does the bucket belong to the same group?
+ if (currentRowGroupKey == null || sameCompositeKey(currentRowGroupKey, key)) {
+ currentRowGroupKey = key;
+ }
+ // done computing row
+ else {
+ // be sure to remember the last consumed group before changing to the new one
+ lastCompletedGroupKey = currentRowGroupKey;
+ currentRowGroupKey = key;
+ // save the data
+ data.add(currentRow);
+ // create a new row
+ currentRow = new Object[columnCount()];
+ }
+
+ for (int columnIndex = 0; columnIndex < currentRow.length; columnIndex++) {
+ BucketExtractor extractor = userExtractor(columnIndex);
+ Object value = extractor.extract(bucket);
+
+ // rerun the bucket through all the extractors but update only the non-null components
+ // since the pivot extractors will react only when encountering the matching group
+ if (currentRow[columnIndex] == null && value != null) {
+ currentRow[columnIndex] = value;
+ }
+ }
+ }
+
+ // add the last group if any of the following matches:
+ // a. the last key has been sent before (it's the last page)
+ if ((previousLastKey != null && sameCompositeKey(previousLastKey, currentRowGroupKey))) {
+ data.add(currentRow);
+ afterKey = null;
+ }
+ // b. all the values are initialized (there might be another page but no need to ask for the group again)
+ // c. or no data was added (typically because there's a null value, such as the group itself)
+ else if (hasNull(currentRow) == false || data.isEmpty()) {
+ data.add(currentRow);
+ afterKey = currentRowGroupKey;
+ }
+ // otherwise we can't tell whether it's complete or not
+ // so discard the last group and ask for it on the next page
+ else {
+ afterKey = lastCompletedGroupKey;
+ }
+
+ // lastly initialize the size and remainingData
+ size = data.size();
+ remainingData = remainingData(afterKey != null, size, limit);
+ lastAfterKey = currentRowGroupKey;
+ }
+
+ private boolean hasNull(Object[] currentRow) {
+ for (Object object : currentRow) {
+ if (object == null) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // compares the equality of two composite keys WITHOUT the last group
+ // this method relies on the internal map implementation, which preserves key position,
+ // hence the comparison happens against the current key (not the previous one, which might
+ // have a different order due to serialization)
+ static boolean sameCompositeKey(Map<String, Object> previous, Map<String, Object> current) {
+ int keys = current.size() - 1;
+ int keyIndex = 0;
+ for (Entry<String, Object> entry : current.entrySet()) {
+ if (keyIndex++ >= keys) {
+ return true;
+ }
+ if (Objects.equals(entry.getValue(), previous.get(entry.getKey())) == false) {
+ return false;
+ }
+ }
+ // there's no other key, it's the same group
+ return true;
+ }
+
+ @Override
+ protected Object getColumn(int column) {
+ return data.get(row)[column];
+ }
+
+ Map<String, Object> lastAfterKey() {
+ return lastAfterKey;
+ }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java
index 9e0d4f3a691..333d320e908 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java
@@ -36,6 +36,7 @@ import org.elasticsearch.xpack.sql.execution.search.extractor.ConstantExtractor;
import org.elasticsearch.xpack.sql.execution.search.extractor.FieldHitExtractor;
import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor;
import org.elasticsearch.xpack.sql.execution.search.extractor.MetricAggExtractor;
+import org.elasticsearch.xpack.sql.execution.search.extractor.PivotExtractor;
import org.elasticsearch.xpack.sql.execution.search.extractor.TopHitsAggExtractor;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.ExpressionId;
@@ -50,6 +51,7 @@ import org.elasticsearch.xpack.sql.querydsl.container.ComputedRef;
import org.elasticsearch.xpack.sql.querydsl.container.GlobalCountRef;
import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef;
import org.elasticsearch.xpack.sql.querydsl.container.MetricAggRef;
+import org.elasticsearch.xpack.sql.querydsl.container.PivotColumnRef;
import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer;
import org.elasticsearch.xpack.sql.querydsl.container.ScriptFieldRef;
import org.elasticsearch.xpack.sql.querydsl.container.SearchHitFieldRef;
@@ -71,9 +73,12 @@ import java.util.BitSet;
import java.util.Comparator;
import java.util.LinkedHashSet;
import java.util.List;
+import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BiFunction;
+import java.util.function.Supplier;
import static java.util.Collections.singletonList;
import static org.elasticsearch.action.ActionListener.wrap;
@@ -320,21 +325,39 @@ public class Querier {
*/
static class CompositeActionListener extends BaseAggActionListener {
+ private final boolean isPivot;
+
CompositeActionListener(ActionListener<SchemaRowSet> listener, Client client, Configuration cfg,
List<Attribute> output, QueryContainer query, SearchRequest request) {
super(listener, client, cfg, output, query, request);
+
+ isPivot = query.fields().stream().anyMatch(t -> t.v1() instanceof PivotColumnRef);
}
@Override
protected void handleResponse(SearchResponse response, ActionListener<SchemaRowSet> listener) {
- CompositeAggregationCursor.handle(response, request.source(),
- ba -> new SchemaCompositeAggsRowSet(schema, initBucketExtractors(response), mask, response,
- query.sortingColumns().isEmpty() ? query.limit() : -1, ba),
+ Supplier<CompositeAggRowSet> makeRowSet = isPivot ?
+ () -> new PivotRowSet(schema, initBucketExtractors(response), mask, response,
+ query.sortingColumns().isEmpty() ? query.limit() : -1, null) :
+ () -> new SchemaCompositeAggRowSet(schema, initBucketExtractors(response), mask, response,
+ query.sortingColumns().isEmpty() ? query.limit() : -1);
+
+ BiFunction<byte[], CompositeAggRowSet, CompositeAggCursor> makeCursor = isPivot ?
+ (q, r) -> {
+ Map<String, Object> lastAfterKey = r instanceof PivotRowSet ? ((PivotRowSet) r).lastAfterKey() : null;
+ return new PivotCursor(lastAfterKey, q, r.extractors(), r.mask(), r.remainingData(), query.shouldIncludeFrozen(),
+ request.indices());
+ } :
+ (q, r) -> new CompositeAggCursor(q, r.extractors(), r.mask(), r.remainingData(), query.shouldIncludeFrozen(),
+ request.indices());
+
+ CompositeAggCursor.handle(response, request.source(),
+ makeRowSet,
+ makeCursor,
() -> client.search(request, this),
- p -> listener.onResponse(p),
- e -> listener.onFailure(e),
- schema, query.shouldIncludeFrozen(), request.indices());
+ listener,
+ schema);
}
}
@@ -380,6 +403,11 @@ public class Querier {
return new TopHitsAggExtractor(r.name(), r.fieldDataType(), cfg.zoneId());
}
+ if (ref instanceof PivotColumnRef) {
+ PivotColumnRef r = (PivotColumnRef) ref;
+ return new PivotExtractor(createExtractor(r.pivot(), totalCount), createExtractor(r.agg(), totalCount), r.value());
+ }
+
if (ref == GlobalCountRef.INSTANCE) {
return totalCount;
}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggRowSet.java
similarity index 77%
rename from x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java
rename to x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggRowSet.java
index 7eeb8b28f15..eb4d568f557 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggRowSet.java
@@ -18,12 +18,12 @@ import java.util.List;
* Extension of the {@link RowSet} over a composite agg, extending it to provide its schema.
* Used for the initial response.
*/
-class SchemaCompositeAggsRowSet extends CompositeAggsRowSet implements SchemaRowSet {
+class SchemaCompositeAggRowSet extends CompositeAggRowSet implements SchemaRowSet {
private final Schema schema;
- SchemaCompositeAggsRowSet(Schema schema, List<BucketExtractor> exts, BitSet mask, SearchResponse r, int limitAggs, byte[] next) {
- super(exts, mask, r, limitAggs, next);
+ SchemaCompositeAggRowSet(Schema schema, List<BucketExtractor> exts, BitSet mask, SearchResponse r, int limitAggs) {
+ super(exts, mask, r, limitAggs);
this.schema = schema;
}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaDelegatingRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaDelegatingRowSet.java
new file mode 100644
index 00000000000..ccfe1ad55f2
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaDelegatingRowSet.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.sql.execution.search;
+
+import org.elasticsearch.xpack.sql.session.RowSet;
+import org.elasticsearch.xpack.sql.session.SchemaRowSet;
+import org.elasticsearch.xpack.sql.type.Schema;
+
+class SchemaDelegatingRowSet implements SchemaRowSet {
+
+ private final Schema schema;
+ private final RowSet delegate;
+
+ SchemaDelegatingRowSet(Schema schema, RowSet delegate) {
+ this.schema = schema;
+ this.delegate = delegate;
+ }
+
+ @Override
+ public Schema schema() {
+ return schema;
+ }
+
+ @Override
+ public boolean hasCurrentRow() {
+ return delegate.hasCurrentRow();
+ }
+
+ @Override
+ public boolean advanceRow() {
+ return delegate.advanceRow();
+ }
+
+ @Override
+ public int size() {
+ return delegate.size();
+ }
+
+ @Override
+ public void reset() {
+ delegate.reset();
+ }
+
+ @Override
+ public Object column(int index) {
+ return delegate.column(index);
+ }
+}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java
index 4e343c1e54f..868dd2dcfff 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java
@@ -76,6 +76,10 @@ public abstract class SourceGenerator {
// set page size
if (size != null) {
int sz = container.limit() > 0 ? Math.min(container.limit(), size) : size;
+ // now take into account the minimum page size (if set)
+ // that is, return the multiple of the minimum page size closest to the set size
+ int minSize = container.minPageSize();
+ sz = minSize > 0 ? (Math.max(sz / minSize, 1) * minSize) : sz;
if (source.size() == -1) {
source.size(sz);
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/BucketExtractors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/BucketExtractors.java
index 221662b79c1..bcbbce8e457 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/BucketExtractors.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/BucketExtractors.java
@@ -26,6 +26,7 @@ public final class BucketExtractors {
entries.add(new Entry(BucketExtractor.class, MetricAggExtractor.NAME, MetricAggExtractor::new));
entries.add(new Entry(BucketExtractor.class, TopHitsAggExtractor.NAME, TopHitsAggExtractor::new));
entries.add(new Entry(BucketExtractor.class, ConstantExtractor.NAME, ConstantExtractor::new));
+ entries.add(new Entry(BucketExtractor.class, PivotExtractor.NAME, PivotExtractor::new));
return entries;
}
}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/PivotExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/PivotExtractor.java
new file mode 100644
index 00000000000..e7c1b8dfa30
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/PivotExtractor.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.sql.execution.search.extractor;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
+
+import java.io.IOException;
+import java.util.Objects;
+
+public class PivotExtractor implements BucketExtractor {
+
+ static final String NAME = "pv";
+
+ private final BucketExtractor groupExtractor;
+ private final BucketExtractor metricExtractor;
+ private final Object value;
+
+ public PivotExtractor(BucketExtractor groupExtractor, BucketExtractor metricExtractor, Object value) {
+ this.groupExtractor = groupExtractor;
+ this.metricExtractor = metricExtractor;
+ this.value = value;
+ }
+
+ PivotExtractor(StreamInput in) throws IOException {
+ groupExtractor = in.readNamedWriteable(BucketExtractor.class);
+ metricExtractor = in.readNamedWriteable(BucketExtractor.class);
+ value = in.readGenericValue();
+ }
+
+ @Override
+ public String getWriteableName() {
+ return NAME;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeNamedWriteable(groupExtractor);
+ out.writeNamedWriteable(metricExtractor);
+ out.writeGenericValue(value);
+ }
+
+ @Override
+ public Object extract(Bucket bucket) {
+ if (Objects.equals(value, groupExtractor.extract(bucket))) {
+ return metricExtractor.extract(bucket);
+ }
+ return null;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(groupExtractor, metricExtractor, value);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || obj.getClass() != getClass()) {
+ return false;
+ }
+ PivotExtractor other = (PivotExtractor) obj;
+ return Objects.equals(groupExtractor, other.groupExtractor)
+ && Objects.equals(metricExtractor, other.metricExtractor)
+ && Objects.equals(value, other.value);
+ }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Alias.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Alias.java
index f4c8526bf47..4ebc030c281 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Alias.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Alias.java
@@ -108,7 +108,7 @@ public class Alias extends NamedExpression {
Attribute attr = Expressions.attribute(c);
if (attr != null) {
- return attr.clone(source(), name(), qualifier, child.nullable(), id(), synthetic());
+ return attr.clone(source(), name(), child.dataType(), qualifier, child.nullable(), id(), synthetic());
}
else {
// TODO: WE need to fix this fake Field
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java
index 2f8b6633249..9f6b54badaf 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java
@@ -9,6 +9,7 @@ import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate;
import org.elasticsearch.xpack.sql.tree.NodeInfo;
import org.elasticsearch.xpack.sql.tree.Source;
+import org.elasticsearch.xpack.sql.type.DataType;
import java.util.List;
import java.util.Objects;
@@ -87,19 +88,33 @@ public abstract class Attribute extends NamedExpression {
}
public Attribute withLocation(Source source) {
- return Objects.equals(source(), source) ? this : clone(source, name(), qualifier(), nullable(), id(), synthetic());
+ return Objects.equals(source(), source) ? this : clone(source, name(), dataType(), qualifier(), nullable(), id(), synthetic());
}
public Attribute withQualifier(String qualifier) {
- return Objects.equals(qualifier(), qualifier) ? this : clone(source(), name(), qualifier, nullable(), id(), synthetic());
+ return Objects.equals(qualifier(), qualifier) ? this : clone(source(), name(), dataType(), qualifier, nullable(), id(),
+ synthetic());
+ }
+
+ public Attribute withName(String name) {
+ return Objects.equals(name(), name) ? this : clone(source(), name, dataType(), qualifier(), nullable(), id(), synthetic());
}
public Attribute withNullability(Nullability nullability) {
- return Objects.equals(nullable(), nullability) ? this : clone(source(), name(), qualifier(), nullability, id(), synthetic());
+ return Objects.equals(nullable(), nullability) ? this : clone(source(), name(), dataType(), qualifier(), nullability, id(),
+ synthetic());
}
- protected abstract Attribute clone(Source source, String name, String qualifier, Nullability nullability, ExpressionId id,
- boolean synthetic);
+ public Attribute withDataType(DataType type) {
+ return Objects.equals(dataType(), type) ? this : clone(source(), name(), type, qualifier(), nullable(), id(), synthetic());
+ }
+
+ public Attribute withId(ExpressionId id) {
+ return clone(source(), name(), dataType(), qualifier(), nullable(), id, synthetic());
+ }
+
+ protected abstract Attribute clone(Source source, String name, DataType type, String qualifier, Nullability nullability,
+ ExpressionId id, boolean synthetic);
@Override
public Attribute toAttribute() {
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionId.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionId.java
index 55f947a20ac..cbc622a615c 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionId.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionId.java
@@ -25,6 +25,10 @@ public class ExpressionId {
this.id = COUNTER.incrementAndGet();
}
+ public ExpressionId(long id) {
+ this.id = id;
+ }
+
@Override
public int hashCode() {
return Objects.hash(id);
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java
index ca5e4b75756..0515d4f11b4 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java
@@ -8,10 +8,13 @@ package org.elasticsearch.xpack.sql.expression;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
import org.elasticsearch.xpack.sql.type.DataType;
+import org.elasticsearch.xpack.sql.type.DataTypes;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.LinkedHashSet;
import java.util.List;
+import java.util.Set;
import java.util.function.Predicate;
import static java.util.Collections.emptyList;
@@ -134,6 +137,30 @@ public final class Expressions {
return true;
}
+ public static List<Attribute> onlyPrimitiveFieldAttributes(Collection<Attribute> attributes) {
+ List<Attribute> filtered = new ArrayList<>();
+ // add only primitives
+ // but filter out multi fields (allow only the top-level value)
+ Set<Attribute> seenMultiFields = new LinkedHashSet<>();
+
+ for (Attribute a : attributes) {
+ if (!DataTypes.isUnsupported(a.dataType()) && a.dataType().isPrimitive()) {
+ if (a instanceof FieldAttribute) {
+ FieldAttribute fa = (FieldAttribute) a;
+ // skip nested fields and seen multi-fields
+ if (!fa.isNested() && !seenMultiFields.contains(fa.parent())) {
+ filtered.add(a);
+ seenMultiFields.add(a);
+ }
+ } else {
+ filtered.add(a);
+ }
+ }
+ }
+
+ return filtered;
+ }
+
public static Pipe pipe(Expression e) {
if (e instanceof NamedExpression) {
return ((NamedExpression) e).asPipe();
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java
index cb86e2742b2..c0cd9a95eb6 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java
@@ -36,10 +36,15 @@ public class FieldAttribute extends TypedAttribute {
public FieldAttribute(Source source, FieldAttribute parent, String name, EsField field) {
this(source, parent, name, field, null, Nullability.TRUE, null, false);
}
+
+ public FieldAttribute(Source source, FieldAttribute parent, String name, EsField field, String qualifier, Nullability nullability,
+ ExpressionId id, boolean synthetic) {
+ this(source, parent, name, field.getDataType(), field, qualifier, nullability, id, synthetic);
+ }
- public FieldAttribute(Source source, FieldAttribute parent, String name, EsField field, String qualifier,
+ public FieldAttribute(Source source, FieldAttribute parent, String name, DataType type, EsField field, String qualifier,
Nullability nullability, ExpressionId id, boolean synthetic) {
- super(source, name, field.getDataType(), qualifier, nullability, id, synthetic);
+ super(source, name, type, qualifier, nullability, id, synthetic);
this.path = parent != null ? parent.name() : StringUtils.EMPTY;
this.parent = parent;
this.field = field;
@@ -57,7 +62,7 @@ public class FieldAttribute extends TypedAttribute {
@Override
protected NodeInfo info() {
- return NodeInfo.create(this, FieldAttribute::new, parent, name(), field, qualifier(), nullable(), id(), synthetic());
+ return NodeInfo.create(this, FieldAttribute::new, parent, name(), dataType(), field, qualifier(), nullable(), id(), synthetic());
}
public FieldAttribute parent() {
@@ -103,8 +108,8 @@ public class FieldAttribute extends TypedAttribute {
}
@Override
- protected Attribute clone(Source source, String name, String qualifier, Nullability nullability,
- ExpressionId id, boolean synthetic) {
+ protected Attribute clone(Source source, String name, DataType type, String qualifier,
+ Nullability nullability, ExpressionId id, boolean synthetic) {
FieldAttribute qualifiedParent = parent != null ? (FieldAttribute) parent.withQualifier(qualifier) : null;
return new FieldAttribute(source, qualifiedParent, name, field, qualifier, nullability, id, synthetic);
}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java
index b4ccd7eb9ff..b22483bda36 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java
@@ -77,7 +77,7 @@ public class Literal extends NamedExpression {
@Override
public Attribute toAttribute() {
- return new LiteralAttribute(source(), name(), null, nullable(), id(), false, dataType, this);
+ return new LiteralAttribute(source(), name(), dataType, null, nullable(), id(), false, this);
}
@Override
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java
index 1305240b609..506f3f8a073 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java
@@ -14,8 +14,8 @@ public class LiteralAttribute extends TypedAttribute {
private final Literal literal;
- public LiteralAttribute(Source source, String name, String qualifier, Nullability nullability, ExpressionId id, boolean synthetic,
- DataType dataType, Literal literal) {
+ public LiteralAttribute(Source source, String name, DataType dataType, String qualifier, Nullability nullability, ExpressionId id,
+ boolean synthetic, Literal literal) {
super(source, name, dataType, qualifier, nullability, id, synthetic);
this.literal = literal;
}
@@ -23,13 +23,13 @@ public class LiteralAttribute extends TypedAttribute {
@Override
protected NodeInfo info() {
return NodeInfo.create(this, LiteralAttribute::new,
- name(), qualifier(), nullable(), id(), synthetic(), dataType(), literal);
+ name(), dataType(), qualifier(), nullable(), id(), synthetic(), literal);
}
@Override
- protected LiteralAttribute clone(Source source, String name, String qualifier, Nullability nullability,
+ protected LiteralAttribute clone(Source source, String name, DataType dataType, String qualifier, Nullability nullability,
ExpressionId id, boolean synthetic) {
- return new LiteralAttribute(source, name, qualifier, nullability, id, synthetic, dataType(), literal);
+ return new LiteralAttribute(source, name, dataType, qualifier, nullability, id, synthetic, literal);
}
@Override
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttribute.java
index 476c69fea09..add7f702e04 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttribute.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttribute.java
@@ -65,7 +65,7 @@ public class UnresolvedAttribute extends Attribute implements Unresolvable {
}
@Override
- protected Attribute clone(Source source, String name, String qualifier, Nullability nullability,
+ protected Attribute clone(Source source, String name, DataType dataType, String qualifier, Nullability nullability,
ExpressionId id, boolean synthetic) {
return this;
}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/ScoreAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/ScoreAttribute.java
index bcd0aab16c6..7d93db3d862 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/ScoreAttribute.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/ScoreAttribute.java
@@ -41,9 +41,9 @@ public class ScoreAttribute extends FunctionAttribute {
}
@Override
- protected Attribute clone(Source source, String name, String qualifier, Nullability nullability,
+ protected Attribute clone(Source source, String name, DataType dataType, String qualifier, Nullability nullability,
ExpressionId id, boolean synthetic) {
- return new ScoreAttribute(source, name, dataType(), qualifier, nullability, id, synthetic);
+ return new ScoreAttribute(source, name, dataType, qualifier, nullability, id, synthetic);
}
@Override
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java
index 177f598dc9a..59b4f345a4a 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java
@@ -28,7 +28,7 @@ import static java.util.Collections.singletonList;
public abstract class AggregateFunction extends Function {
private final Expression field;
- private final List<Expression> parameters;
+ private final List<? extends Expression> parameters;
private AggregateFunctionAttribute lazyAttribute;
@@ -36,7 +36,7 @@ public abstract class AggregateFunction extends Function {
this(source, field, emptyList());
}
- protected AggregateFunction(Source source, Expression field, List<Expression> parameters) {
+ protected AggregateFunction(Source source, Expression field, List<? extends Expression> parameters) {
super(source, CollectionUtils.combine(singletonList(field), parameters));
this.field = field;
this.parameters = parameters;
@@ -46,7 +46,7 @@ public abstract class AggregateFunction extends Function {
return field;
}
- public List<Expression> parameters() {
+ public List<? extends Expression> parameters() {
return parameters;
}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunctionAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunctionAttribute.java
index 96f072acda5..0bd0c9199bc 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunctionAttribute.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunctionAttribute.java
@@ -60,10 +60,11 @@ public class AggregateFunctionAttribute extends FunctionAttribute {
}
@Override
- protected Attribute clone(Source source, String name, String qualifier, Nullability nullability, ExpressionId id, boolean synthetic) {
+ protected Attribute clone(Source source, String name, DataType dataType, String qualifier, Nullability nullability, ExpressionId id,
+ boolean synthetic) {
// this is highly correlated with QueryFolder$FoldAggregate#addFunction (regarding the function name within the querydsl)
// that is the functionId is actually derived from the expression id to easily track it across contexts
- return new AggregateFunctionAttribute(source, name, dataType(), qualifier, nullability, id, synthetic, functionId(), innerId,
+ return new AggregateFunctionAttribute(source, name, dataType, qualifier, nullability, id, synthetic, functionId(), innerId,
propertyPath);
}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/GroupingFunctionAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/GroupingFunctionAttribute.java
index c33c893141b..2fed4cf3060 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/GroupingFunctionAttribute.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/GroupingFunctionAttribute.java
@@ -37,11 +37,11 @@ public class GroupingFunctionAttribute extends FunctionAttribute {
}
@Override
- protected Attribute clone(Source source, String name, String qualifier, Nullability nullability,
+ protected Attribute clone(Source source, String name, DataType dataType, String qualifier, Nullability nullability,
ExpressionId id, boolean synthetic) {
// this is highly correlated with QueryFolder$FoldAggregate#addFunction (regarding the function name within the querydsl)
// that is the functionId is actually derived from the expression id to easily track it across contexts
- return new GroupingFunctionAttribute(source, name, dataType(), qualifier, nullability, id, synthetic, functionId());
+ return new GroupingFunctionAttribute(source, name, dataType, qualifier, nullability, id, synthetic, functionId());
}
public GroupingFunctionAttribute withFunctionId(String functionId, String propertyPath) {
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunctionAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunctionAttribute.java
index 6a0980c2690..67324ba466c 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunctionAttribute.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunctionAttribute.java
@@ -66,9 +66,9 @@ public class ScalarFunctionAttribute extends FunctionAttribute {
}
@Override
- protected Attribute clone(Source source, String name, String qualifier, Nullability nullability,
+ protected Attribute clone(Source source, String name, DataType dataType, String qualifier, Nullability nullability,
ExpressionId id, boolean synthetic) {
- return new ScalarFunctionAttribute(source, name, dataType(), qualifier, nullability,
+ return new ScalarFunctionAttribute(source, name, dataType, qualifier, nullability,
id, synthetic, functionId(), script, orderBy, pipe);
}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java
index 6689a33b162..e702c4ecdbb 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java
@@ -19,6 +19,7 @@ import org.elasticsearch.xpack.sql.expression.Literal;
import org.elasticsearch.xpack.sql.expression.NamedExpression;
import org.elasticsearch.xpack.sql.expression.Nullability;
import org.elasticsearch.xpack.sql.expression.Order;
+import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute;
import org.elasticsearch.xpack.sql.expression.function.Function;
import org.elasticsearch.xpack.sql.expression.function.FunctionAttribute;
import org.elasticsearch.xpack.sql.expression.function.Functions;
@@ -72,6 +73,7 @@ import org.elasticsearch.xpack.sql.plan.logical.Limit;
import org.elasticsearch.xpack.sql.plan.logical.LocalRelation;
import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.plan.logical.OrderBy;
+import org.elasticsearch.xpack.sql.plan.logical.Pivot;
import org.elasticsearch.xpack.sql.plan.logical.Project;
import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias;
import org.elasticsearch.xpack.sql.plan.logical.UnaryPlan;
@@ -96,6 +98,7 @@ import java.util.Map.Entry;
import java.util.Set;
import java.util.function.Consumer;
+import static java.util.Collections.singletonList;
import static org.elasticsearch.xpack.sql.expression.Expressions.equalsAsAttribute;
import static org.elasticsearch.xpack.sql.expression.Literal.FALSE;
import static org.elasticsearch.xpack.sql.expression.Literal.TRUE;
@@ -120,6 +123,9 @@ public class Optimizer extends RuleExecutor<LogicalPlan> {
@Override
protected Iterable<RuleExecutor<LogicalPlan>.Batch> batches() {
+ Batch pivot = new Batch("Pivot Rewrite", Limiter.ONCE,
+ new RewritePivot());
+
Batch operators = new Batch("Operator Optimization",
new PruneDuplicatesInGroupBy(),
// combining
@@ -170,9 +176,40 @@ public class Optimizer extends RuleExecutor {
CleanAliases.INSTANCE,
new SetAsOptimized());
- return Arrays.asList(operators, aggregate, local, label);
+ return Arrays.asList(pivot, operators, aggregate, local, label);
}
+ static class RewritePivot extends OptimizerRule<Pivot> {
+
+ @Override
+ protected LogicalPlan rule(Pivot plan) {
+ // 1. add the IN filter
+ List<Expression> rawValues = new ArrayList<>(plan.values().size());
+ for (NamedExpression namedExpression : plan.values()) {
+ // everything should have resolved to an alias
+ if (namedExpression instanceof Alias) {
+ rawValues.add(((Alias) namedExpression).child());
+ }
+ // TODO: this should be removed when refactoring NamedExpression
+ else if (namedExpression instanceof Literal) {
+ rawValues.add(namedExpression);
+ }
+ // TODO: NamedExpression refactoring should remove this
+ else if (namedExpression.foldable()) {
+ rawValues.add(Literal.of(namedExpression.name(), namedExpression));
+ }
+ // TODO: same as above
+ else {
+ UnresolvedAttribute attr = new UnresolvedAttribute(namedExpression.source(), namedExpression.name(), null,
+ "Unexpected alias");
+ return new Pivot(plan.source(), plan.child(), plan.column(), singletonList(attr), plan.aggregates());
+ }
+ }
+ Filter filter = new Filter(plan.source(), plan.child(), new In(plan.source(), plan.column(), rawValues));
+ // 2. preserve the PIVOT
+ return new Pivot(plan.source(), filter, plan.column(), plan.values(), plan.aggregates());
+ }
+ }
static class PruneDuplicatesInGroupBy extends OptimizerRule<Aggregate> {
@@ -1038,7 +1075,14 @@ public class Optimizer extends RuleExecutor {
Aggregate a = (Aggregate) child;
return new Aggregate(a.source(), a.child(), a.groupings(), combineProjections(project.projections(), a.aggregates()));
}
-
+ // if the pivot custom columns are not used, convert the project + pivot into a GROUP BY/Aggregate
+ if (child instanceof Pivot) {
+ Pivot p = (Pivot) child;
+ if (project.outputSet().subsetOf(p.groupingSet())) {
+ return new Aggregate(p.source(), p.child(), new ArrayList<>(project.projections()), project.projections());
+ }
+ }
+ // TODO: add rule for combining Agg/Pivot with underlying project
return project;
}
@@ -1172,7 +1216,7 @@ public class Optimizer extends RuleExecutor {
return Literal.of(in, null);
}
- } else if (e instanceof Alias == false
+ } else if (e instanceof Alias == false
&& e.nullable() == Nullability.TRUE
&& Expressions.anyMatch(e.children(), Expressions::isNull)) {
return Literal.of(e, null);
@@ -1976,7 +2020,8 @@ public class Optimizer extends RuleExecutor {
}
} else if (n.foldable()) {
values.add(n.fold());
- } else {
+ }
+ else {
// not everything is foldable, bail-out early
return values;
}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java
index 429e572878f..9a663994ccf 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java
@@ -8,11 +8,13 @@ package org.elasticsearch.xpack.sql.parser;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.tree.TerminalNode;
+import org.elasticsearch.xpack.sql.expression.Alias;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.Literal;
import org.elasticsearch.xpack.sql.expression.NamedExpression;
import org.elasticsearch.xpack.sql.expression.Order;
import org.elasticsearch.xpack.sql.expression.UnresolvedAlias;
+import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute;
import org.elasticsearch.xpack.sql.parser.SqlBaseParser.AliasedQueryContext;
import org.elasticsearch.xpack.sql.parser.SqlBaseParser.AliasedRelationContext;
import org.elasticsearch.xpack.sql.parser.SqlBaseParser.FromClauseContext;
@@ -22,7 +24,10 @@ import org.elasticsearch.xpack.sql.parser.SqlBaseParser.JoinCriteriaContext;
import org.elasticsearch.xpack.sql.parser.SqlBaseParser.JoinRelationContext;
import org.elasticsearch.xpack.sql.parser.SqlBaseParser.LimitClauseContext;
import org.elasticsearch.xpack.sql.parser.SqlBaseParser.NamedQueryContext;
+import org.elasticsearch.xpack.sql.parser.SqlBaseParser.NamedValueExpressionContext;
import org.elasticsearch.xpack.sql.parser.SqlBaseParser.OrderByContext;
+import org.elasticsearch.xpack.sql.parser.SqlBaseParser.PivotArgsContext;
+import org.elasticsearch.xpack.sql.parser.SqlBaseParser.PivotClauseContext;
import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryContext;
import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryNoWithContext;
import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QuerySpecificationContext;
@@ -39,20 +44,22 @@ import org.elasticsearch.xpack.sql.plan.logical.Limit;
import org.elasticsearch.xpack.sql.plan.logical.LocalRelation;
import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.plan.logical.OrderBy;
+import org.elasticsearch.xpack.sql.plan.logical.Pivot;
import org.elasticsearch.xpack.sql.plan.logical.Project;
import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias;
import org.elasticsearch.xpack.sql.plan.logical.UnresolvedRelation;
import org.elasticsearch.xpack.sql.plan.logical.With;
import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue;
import org.elasticsearch.xpack.sql.session.SingletonExecutable;
+import org.elasticsearch.xpack.sql.tree.Source;
import org.elasticsearch.xpack.sql.type.DataType;
+import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import static java.util.Collections.emptyList;
-import static java.util.stream.Collectors.toList;
abstract class LogicalPlanBuilder extends ExpressionBuilder {
@@ -119,14 +126,8 @@ abstract class LogicalPlanBuilder extends ExpressionBuilder {
query = new Filter(source(ctx), query, expression(ctx.where));
}
- List<NamedExpression> selectTarget = emptyList();
-
- // SELECT a, b, c ...
- if (!ctx.selectItem().isEmpty()) {
- selectTarget = expressions(ctx.selectItem()).stream()
- .map(e -> (e instanceof NamedExpression) ? (NamedExpression) e : new UnresolvedAlias(e.source(), e))
- .collect(toList());
- }
+ List<NamedExpression> selectTarget = ctx.selectItems().isEmpty() ? emptyList() : visitList(ctx.selectItems().selectItem(),
+ NamedExpression.class);
// GROUP BY
GroupByContext groupByCtx = ctx.groupBy();
@@ -142,7 +143,7 @@ abstract class LogicalPlanBuilder extends ExpressionBuilder {
query = new Aggregate(source(ctx.GROUP(), endSource), query, groupBy, selectTarget);
}
else if (!selectTarget.isEmpty()) {
- query = new Project(source(ctx.selectItem(0)), query, selectTarget);
+ query = new Project(source(ctx.selectItems()), query, selectTarget);
}
// HAVING
@@ -160,9 +161,37 @@ abstract class LogicalPlanBuilder extends ExpressionBuilder {
public LogicalPlan visitFromClause(FromClauseContext ctx) {
// if there are multiple FROM clauses, convert each pair into an inner join
List<LogicalPlan> plans = plans(ctx.relation());
- return plans.stream()
+ LogicalPlan plan = plans.stream()
.reduce((left, right) -> new Join(source(ctx), left, right, Join.JoinType.IMPLICIT, null))
.get();
+
+ // PIVOT
+ if (ctx.pivotClause() != null) {
+ PivotClauseContext pivotClause = ctx.pivotClause();
+ UnresolvedAttribute column = new UnresolvedAttribute(source(pivotClause.column), visitQualifiedName(pivotClause.column));
+ List<NamedExpression> values = namedValues(pivotClause.aggs);
+ if (values.size() > 1) {
+ throw new ParsingException(source(pivotClause.aggs), "PIVOT currently supports only one aggregation, found [{}]",
+ values.size());
+ }
+ plan = new Pivot(source(pivotClause), plan, column, namedValues(pivotClause.vals), namedValues(pivotClause.aggs));
+ }
+ return plan;
+ }
+
+ private List<NamedExpression> namedValues(PivotArgsContext args) {
+ if (args == null || args.isEmpty()) {
+ return emptyList();
+ }
+ List<NamedExpression> values = new ArrayList<>();
+
+ for (NamedValueExpressionContext value : args.namedValueExpression()) {
+ Expression exp = expression(value.valueExpression());
+ String alias = visitIdentifier(value.identifier());
+ Source source = source(value);
+ values.add(alias != null ? new Alias(source, alias, exp) : new UnresolvedAlias(source, exp));
+ }
+ return values;
}
@Override
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java
index 9e8dd6cd6af..15531de7036 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java
@@ -311,6 +311,18 @@ class SqlBaseBaseListener implements SqlBaseListener {
* The default implementation does nothing.
*/
@Override public void exitSetQuantifier(SqlBaseParser.SetQuantifierContext ctx) { }
+ /**
+ * {@inheritDoc}
+ *
+ * The default implementation does nothing.
+ */
+ @Override public void enterSelectItems(SqlBaseParser.SelectItemsContext ctx) { }
+ /**
+ * {@inheritDoc}
+ *
+ * The default implementation does nothing.
+ */
+ @Override public void exitSelectItems(SqlBaseParser.SelectItemsContext ctx) { }
/**
* {@inheritDoc}
*
@@ -407,6 +419,42 @@ class SqlBaseBaseListener implements SqlBaseListener {
* The default implementation does nothing.
*/
@Override public void exitAliasedRelation(SqlBaseParser.AliasedRelationContext ctx) { }
+ /**
+ * {@inheritDoc}
+ *
+ * The default implementation does nothing.
+ */
+ @Override public void enterPivotClause(SqlBaseParser.PivotClauseContext ctx) { }
+ /**
+ * {@inheritDoc}
+ *
+ * The default implementation does nothing.
+ */
+ @Override public void exitPivotClause(SqlBaseParser.PivotClauseContext ctx) { }
+ /**
+ * {@inheritDoc}
+ *
+ * The default implementation does nothing.
+ */
+ @Override public void enterPivotArgs(SqlBaseParser.PivotArgsContext ctx) { }
+ /**
+ * {@inheritDoc}
+ *
+ * The default implementation does nothing.
+ */
+ @Override public void exitPivotArgs(SqlBaseParser.PivotArgsContext ctx) { }
+ /**
+ * {@inheritDoc}
+ *
+ * The default implementation does nothing.
+ */
+ @Override public void enterNamedValueExpression(SqlBaseParser.NamedValueExpressionContext ctx) { }
+ /**
+ * {@inheritDoc}
+ *
+ * The default implementation does nothing.
+ */
+ @Override public void exitNamedValueExpression(SqlBaseParser.NamedValueExpressionContext ctx) { }
/**
* {@inheritDoc}
*
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java
index 199fb407698..dc05e66c1e7 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java
@@ -186,6 +186,13 @@ class SqlBaseBaseVisitor<T> extends AbstractParseTreeVisitor<T> implements SqlBa
* {@link #visitChildren} on {@code ctx}.
*/
@Override public T visitSetQuantifier(SqlBaseParser.SetQuantifierContext ctx) { return visitChildren(ctx); }
+ /**
+ * {@inheritDoc}
+ *
+ * The default implementation returns the result of calling
+ * {@link #visitChildren} on {@code ctx}.
+ */
+ @Override public T visitSelectItems(SqlBaseParser.SelectItemsContext ctx) { return visitChildren(ctx); }
/**
* {@inheritDoc}
*
@@ -242,6 +249,27 @@ class SqlBaseBaseVisitor<T> extends AbstractParseTreeVisitor<T> implements SqlBa
* {@link #visitChildren} on {@code ctx}.
*/
@Override public T visitAliasedRelation(SqlBaseParser.AliasedRelationContext ctx) { return visitChildren(ctx); }
+ /**
+ * {@inheritDoc}
+ *
+ * The default implementation returns the result of calling
+ * {@link #visitChildren} on {@code ctx}.
+ */
+ @Override public T visitPivotClause(SqlBaseParser.PivotClauseContext ctx) { return visitChildren(ctx); }
+ /**
+ * {@inheritDoc}
+ *
+ * The default implementation returns the result of calling
+ * {@link #visitChildren} on {@code ctx}.
+ */
+ @Override public T visitPivotArgs(SqlBaseParser.PivotArgsContext ctx) { return visitChildren(ctx); }
+ /**
+ * {@inheritDoc}
+ *
+ * The default implementation returns the result of calling
+ * {@link #visitChildren} on {@code ctx}.
+ */
+ @Override public T visitNamedValueExpression(SqlBaseParser.NamedValueExpressionContext ctx) { return visitChildren(ctx); }
/**
* {@inheritDoc}
*
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java
index de8afac1526..cba3c1ee9a3 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java
@@ -22,21 +22,22 @@ class SqlBaseLexer extends Lexer {
COLUMNS=18, CONVERT=19, CURRENT_DATE=20, CURRENT_TIME=21, CURRENT_TIMESTAMP=22,
DAY=23, DAYS=24, DEBUG=25, DESC=26, DESCRIBE=27, DISTINCT=28, ELSE=29,
END=30, ESCAPE=31, EXECUTABLE=32, EXISTS=33, EXPLAIN=34, EXTRACT=35, FALSE=36,
- FIRST=37, FORMAT=38, FROM=39, FROZEN=40, FULL=41, FUNCTIONS=42, GRAPHVIZ=43,
- GROUP=44, HAVING=45, HOUR=46, HOURS=47, IN=48, INCLUDE=49, INNER=50, INTERVAL=51,
- IS=52, JOIN=53, LAST=54, LEFT=55, LIKE=56, LIMIT=57, MAPPED=58, MATCH=59,
- MINUTE=60, MINUTES=61, MONTH=62, MONTHS=63, NATURAL=64, NOT=65, NULL=66,
- NULLS=67, ON=68, OPTIMIZED=69, OR=70, ORDER=71, OUTER=72, PARSED=73, PHYSICAL=74,
- PLAN=75, RIGHT=76, RLIKE=77, QUERY=78, SCHEMAS=79, SECOND=80, SECONDS=81,
- SELECT=82, SHOW=83, SYS=84, TABLE=85, TABLES=86, TEXT=87, THEN=88, TRUE=89,
- TO=90, TYPE=91, TYPES=92, USING=93, VERIFY=94, WHEN=95, WHERE=96, WITH=97,
- YEAR=98, YEARS=99, ESCAPE_ESC=100, FUNCTION_ESC=101, LIMIT_ESC=102, DATE_ESC=103,
- TIME_ESC=104, TIMESTAMP_ESC=105, GUID_ESC=106, ESC_END=107, EQ=108, NULLEQ=109,
- NEQ=110, LT=111, LTE=112, GT=113, GTE=114, PLUS=115, MINUS=116, ASTERISK=117,
- SLASH=118, PERCENT=119, CAST_OP=120, CONCAT=121, DOT=122, PARAM=123, STRING=124,
- INTEGER_VALUE=125, DECIMAL_VALUE=126, IDENTIFIER=127, DIGIT_IDENTIFIER=128,
- TABLE_IDENTIFIER=129, QUOTED_IDENTIFIER=130, BACKQUOTED_IDENTIFIER=131,
- SIMPLE_COMMENT=132, BRACKETED_COMMENT=133, WS=134, UNRECOGNIZED=135;
+ FIRST=37, FOR=38, FORMAT=39, FROM=40, FROZEN=41, FULL=42, FUNCTIONS=43,
+ GRAPHVIZ=44, GROUP=45, HAVING=46, HOUR=47, HOURS=48, IN=49, INCLUDE=50,
+ INNER=51, INTERVAL=52, IS=53, JOIN=54, LAST=55, LEFT=56, LIKE=57, LIMIT=58,
+ MAPPED=59, MATCH=60, MINUTE=61, MINUTES=62, MONTH=63, MONTHS=64, NATURAL=65,
+ NOT=66, NULL=67, NULLS=68, ON=69, OPTIMIZED=70, OR=71, ORDER=72, OUTER=73,
+ PARSED=74, PHYSICAL=75, PIVOT=76, PLAN=77, RIGHT=78, RLIKE=79, QUERY=80,
+ SCHEMAS=81, SECOND=82, SECONDS=83, SELECT=84, SHOW=85, SYS=86, TABLE=87,
+ TABLES=88, TEXT=89, THEN=90, TRUE=91, TO=92, TYPE=93, TYPES=94, USING=95,
+ VERIFY=96, WHEN=97, WHERE=98, WITH=99, YEAR=100, YEARS=101, ESCAPE_ESC=102,
+ FUNCTION_ESC=103, LIMIT_ESC=104, DATE_ESC=105, TIME_ESC=106, TIMESTAMP_ESC=107,
+ GUID_ESC=108, ESC_END=109, EQ=110, NULLEQ=111, NEQ=112, LT=113, LTE=114,
+ GT=115, GTE=116, PLUS=117, MINUS=118, ASTERISK=119, SLASH=120, PERCENT=121,
+ CAST_OP=122, CONCAT=123, DOT=124, PARAM=125, STRING=126, INTEGER_VALUE=127,
+ DECIMAL_VALUE=128, IDENTIFIER=129, DIGIT_IDENTIFIER=130, TABLE_IDENTIFIER=131,
+ QUOTED_IDENTIFIER=132, BACKQUOTED_IDENTIFIER=133, SIMPLE_COMMENT=134,
+ BRACKETED_COMMENT=135, WS=136, UNRECOGNIZED=137;
public static String[] modeNames = {
"DEFAULT_MODE"
};
@@ -46,21 +47,22 @@ class SqlBaseLexer extends Lexer {
"AS", "ASC", "BETWEEN", "BY", "CASE", "CAST", "CATALOG", "CATALOGS", "COLUMNS",
"CONVERT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "DAY",
"DAYS", "DEBUG", "DESC", "DESCRIBE", "DISTINCT", "ELSE", "END", "ESCAPE",
- "EXECUTABLE", "EXISTS", "EXPLAIN", "EXTRACT", "FALSE", "FIRST", "FORMAT",
- "FROM", "FROZEN", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP", "HAVING",
- "HOUR", "HOURS", "IN", "INCLUDE", "INNER", "INTERVAL", "IS", "JOIN", "LAST",
- "LEFT", "LIKE", "LIMIT", "MAPPED", "MATCH", "MINUTE", "MINUTES", "MONTH",
- "MONTHS", "NATURAL", "NOT", "NULL", "NULLS", "ON", "OPTIMIZED", "OR",
- "ORDER", "OUTER", "PARSED", "PHYSICAL", "PLAN", "RIGHT", "RLIKE", "QUERY",
- "SCHEMAS", "SECOND", "SECONDS", "SELECT", "SHOW", "SYS", "TABLE", "TABLES",
- "TEXT", "THEN", "TRUE", "TO", "TYPE", "TYPES", "USING", "VERIFY", "WHEN",
- "WHERE", "WITH", "YEAR", "YEARS", "ESCAPE_ESC", "FUNCTION_ESC", "LIMIT_ESC",
- "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC", "GUID_ESC", "ESC_END", "EQ",
- "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK",
- "SLASH", "PERCENT", "CAST_OP", "CONCAT", "DOT", "PARAM", "STRING", "INTEGER_VALUE",
- "DECIMAL_VALUE", "IDENTIFIER", "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER",
- "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", "EXPONENT", "DIGIT", "LETTER",
- "SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", "UNRECOGNIZED"
+ "EXECUTABLE", "EXISTS", "EXPLAIN", "EXTRACT", "FALSE", "FIRST", "FOR",
+ "FORMAT", "FROM", "FROZEN", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP",
+ "HAVING", "HOUR", "HOURS", "IN", "INCLUDE", "INNER", "INTERVAL", "IS",
+ "JOIN", "LAST", "LEFT", "LIKE", "LIMIT", "MAPPED", "MATCH", "MINUTE",
+ "MINUTES", "MONTH", "MONTHS", "NATURAL", "NOT", "NULL", "NULLS", "ON",
+ "OPTIMIZED", "OR", "ORDER", "OUTER", "PARSED", "PHYSICAL", "PIVOT", "PLAN",
+ "RIGHT", "RLIKE", "QUERY", "SCHEMAS", "SECOND", "SECONDS", "SELECT", "SHOW",
+ "SYS", "TABLE", "TABLES", "TEXT", "THEN", "TRUE", "TO", "TYPE", "TYPES",
+ "USING", "VERIFY", "WHEN", "WHERE", "WITH", "YEAR", "YEARS", "ESCAPE_ESC",
+ "FUNCTION_ESC", "LIMIT_ESC", "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC",
+ "GUID_ESC", "ESC_END", "EQ", "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE",
+ "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CAST_OP", "CONCAT",
+ "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", "IDENTIFIER",
+ "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER",
+ "EXPONENT", "DIGIT", "LETTER", "SIMPLE_COMMENT", "BRACKETED_COMMENT",
+ "WS", "UNRECOGNIZED"
};
private static final String[] _LITERAL_NAMES = {
@@ -69,40 +71,40 @@ class SqlBaseLexer extends Lexer {
"'CATALOG'", "'CATALOGS'", "'COLUMNS'", "'CONVERT'", "'CURRENT_DATE'",
"'CURRENT_TIME'", "'CURRENT_TIMESTAMP'", "'DAY'", "'DAYS'", "'DEBUG'",
"'DESC'", "'DESCRIBE'", "'DISTINCT'", "'ELSE'", "'END'", "'ESCAPE'", "'EXECUTABLE'",
- "'EXISTS'", "'EXPLAIN'", "'EXTRACT'", "'FALSE'", "'FIRST'", "'FORMAT'",
+ "'EXISTS'", "'EXPLAIN'", "'EXTRACT'", "'FALSE'", "'FIRST'", "'FOR'", "'FORMAT'",
"'FROM'", "'FROZEN'", "'FULL'", "'FUNCTIONS'", "'GRAPHVIZ'", "'GROUP'",
"'HAVING'", "'HOUR'", "'HOURS'", "'IN'", "'INCLUDE'", "'INNER'", "'INTERVAL'",
"'IS'", "'JOIN'", "'LAST'", "'LEFT'", "'LIKE'", "'LIMIT'", "'MAPPED'",
"'MATCH'", "'MINUTE'", "'MINUTES'", "'MONTH'", "'MONTHS'", "'NATURAL'",
"'NOT'", "'NULL'", "'NULLS'", "'ON'", "'OPTIMIZED'", "'OR'", "'ORDER'",
- "'OUTER'", "'PARSED'", "'PHYSICAL'", "'PLAN'", "'RIGHT'", "'RLIKE'", "'QUERY'",
- "'SCHEMAS'", "'SECOND'", "'SECONDS'", "'SELECT'", "'SHOW'", "'SYS'", "'TABLE'",
- "'TABLES'", "'TEXT'", "'THEN'", "'TRUE'", "'TO'", "'TYPE'", "'TYPES'",
- "'USING'", "'VERIFY'", "'WHEN'", "'WHERE'", "'WITH'", "'YEAR'", "'YEARS'",
- "'{ESCAPE'", "'{FN'", "'{LIMIT'", "'{D'", "'{T'", "'{TS'", "'{GUID'",
- "'}'", "'='", "'<=>'", null, "'<'", "'<='", "'>'", "'>='", "'+'", "'-'",
- "'*'", "'/'", "'%'", "'::'", "'||'", "'.'", "'?'"
+ "'OUTER'", "'PARSED'", "'PHYSICAL'", "'PIVOT'", "'PLAN'", "'RIGHT'", "'RLIKE'",
+ "'QUERY'", "'SCHEMAS'", "'SECOND'", "'SECONDS'", "'SELECT'", "'SHOW'",
+ "'SYS'", "'TABLE'", "'TABLES'", "'TEXT'", "'THEN'", "'TRUE'", "'TO'",
+ "'TYPE'", "'TYPES'", "'USING'", "'VERIFY'", "'WHEN'", "'WHERE'", "'WITH'",
+ "'YEAR'", "'YEARS'", "'{ESCAPE'", "'{FN'", "'{LIMIT'", "'{D'", "'{T'",
+ "'{TS'", "'{GUID'", "'}'", "'='", "'<=>'", null, "'<'", "'<='", "'>'",
+ "'>='", "'+'", "'-'", "'*'", "'/'", "'%'", "'::'", "'||'", "'.'", "'?'"
};
private static final String[] _SYMBOLIC_NAMES = {
null, null, null, null, null, "ALL", "ANALYZE", "ANALYZED", "AND", "ANY",
"AS", "ASC", "BETWEEN", "BY", "CASE", "CAST", "CATALOG", "CATALOGS", "COLUMNS",
"CONVERT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "DAY",
"DAYS", "DEBUG", "DESC", "DESCRIBE", "DISTINCT", "ELSE", "END", "ESCAPE",
- "EXECUTABLE", "EXISTS", "EXPLAIN", "EXTRACT", "FALSE", "FIRST", "FORMAT",
- "FROM", "FROZEN", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP", "HAVING",
- "HOUR", "HOURS", "IN", "INCLUDE", "INNER", "INTERVAL", "IS", "JOIN", "LAST",
- "LEFT", "LIKE", "LIMIT", "MAPPED", "MATCH", "MINUTE", "MINUTES", "MONTH",
- "MONTHS", "NATURAL", "NOT", "NULL", "NULLS", "ON", "OPTIMIZED", "OR",
- "ORDER", "OUTER", "PARSED", "PHYSICAL", "PLAN", "RIGHT", "RLIKE", "QUERY",
- "SCHEMAS", "SECOND", "SECONDS", "SELECT", "SHOW", "SYS", "TABLE", "TABLES",
- "TEXT", "THEN", "TRUE", "TO", "TYPE", "TYPES", "USING", "VERIFY", "WHEN",
- "WHERE", "WITH", "YEAR", "YEARS", "ESCAPE_ESC", "FUNCTION_ESC", "LIMIT_ESC",
- "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC", "GUID_ESC", "ESC_END", "EQ",
- "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK",
- "SLASH", "PERCENT", "CAST_OP", "CONCAT", "DOT", "PARAM", "STRING", "INTEGER_VALUE",
- "DECIMAL_VALUE", "IDENTIFIER", "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER",
- "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", "SIMPLE_COMMENT", "BRACKETED_COMMENT",
- "WS", "UNRECOGNIZED"
+ "EXECUTABLE", "EXISTS", "EXPLAIN", "EXTRACT", "FALSE", "FIRST", "FOR",
+ "FORMAT", "FROM", "FROZEN", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP",
+ "HAVING", "HOUR", "HOURS", "IN", "INCLUDE", "INNER", "INTERVAL", "IS",
+ "JOIN", "LAST", "LEFT", "LIKE", "LIMIT", "MAPPED", "MATCH", "MINUTE",
+ "MINUTES", "MONTH", "MONTHS", "NATURAL", "NOT", "NULL", "NULLS", "ON",
+ "OPTIMIZED", "OR", "ORDER", "OUTER", "PARSED", "PHYSICAL", "PIVOT", "PLAN",
+ "RIGHT", "RLIKE", "QUERY", "SCHEMAS", "SECOND", "SECONDS", "SELECT", "SHOW",
+ "SYS", "TABLE", "TABLES", "TEXT", "THEN", "TRUE", "TO", "TYPE", "TYPES",
+ "USING", "VERIFY", "WHEN", "WHERE", "WITH", "YEAR", "YEARS", "ESCAPE_ESC",
+ "FUNCTION_ESC", "LIMIT_ESC", "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC",
+ "GUID_ESC", "ESC_END", "EQ", "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE",
+ "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CAST_OP", "CONCAT",
+ "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", "IDENTIFIER",
+ "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER",
+ "SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", "UNRECOGNIZED"
};
public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);
@@ -159,7 +161,7 @@ class SqlBaseLexer extends Lexer {
public ATN getATN() { return _ATN; }
public static final String _serializedATN =
- "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\u0089\u0471\b\1\4"+
+ "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\u008b\u047f\b\1\4"+
"\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n"+
"\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22"+
"\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31"+
@@ -175,384 +177,391 @@ class SqlBaseLexer extends Lexer {
"\4w\tw\4x\tx\4y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080\t"+
"\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083\4\u0084\t\u0084"+
"\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087\t\u0087\4\u0088\t\u0088\4\u0089"+
- "\t\u0089\4\u008a\t\u008a\4\u008b\t\u008b\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3"+
- "\5\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b"+
- "\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\f\3"+
- "\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3"+
- "\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3"+
- "\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3"+
- "\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3"+
- "\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3"+
- "\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3"+
- "\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3"+
- "\27\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3"+
- "\32\3\32\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3"+
- "\34\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3"+
- "\36\3\36\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3"+
- "!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3$\3"+
- "$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3%\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3"+
- "\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3)\3)\3)\3)\3)\3)\3)\3*\3*\3*\3*\3*\3+\3"+
- "+\3+\3+\3+\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3"+
- "-\3.\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3\60\3\60\3"+
- "\61\3\61\3\61\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\63\3\63\3\63\3"+
- "\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65\3"+
- "\65\3\66\3\66\3\66\3\66\3\66\3\67\3\67\3\67\3\67\3\67\38\38\38\38\38\3"+
- "9\39\39\39\39\3:\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3;\3;\3<\3<\3<\3<\3<\3"+
- "<\3=\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3>\3>\3>\3?\3?\3?\3?\3?\3?\3@\3"+
- "@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3A\3A\3A\3B\3B\3B\3B\3C\3C\3C\3C\3C\3"+
- "D\3D\3D\3D\3D\3D\3E\3E\3E\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3G\3G\3G\3H\3"+
- "H\3H\3H\3H\3H\3I\3I\3I\3I\3I\3I\3J\3J\3J\3J\3J\3J\3J\3K\3K\3K\3K\3K\3"+
- "K\3K\3K\3K\3L\3L\3L\3L\3L\3M\3M\3M\3M\3M\3M\3N\3N\3N\3N\3N\3N\3O\3O\3"+
- "O\3O\3O\3O\3P\3P\3P\3P\3P\3P\3P\3P\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\3"+
- "R\3R\3R\3R\3S\3S\3S\3S\3S\3S\3S\3T\3T\3T\3T\3T\3U\3U\3U\3U\3V\3V\3V\3"+
- "V\3V\3V\3W\3W\3W\3W\3W\3W\3W\3X\3X\3X\3X\3X\3Y\3Y\3Y\3Y\3Y\3Z\3Z\3Z\3"+
- "Z\3Z\3[\3[\3[\3\\\3\\\3\\\3\\\3\\\3]\3]\3]\3]\3]\3]\3^\3^\3^\3^\3^\3^"+
- "\3_\3_\3_\3_\3_\3_\3_\3`\3`\3`\3`\3`\3a\3a\3a\3a\3a\3a\3b\3b\3b\3b\3b"+
- "\3c\3c\3c\3c\3c\3d\3d\3d\3d\3d\3d\3e\3e\3e\3e\3e\3e\3e\3e\3f\3f\3f\3f"+
- "\3g\3g\3g\3g\3g\3g\3g\3h\3h\3h\3i\3i\3i\3j\3j\3j\3j\3k\3k\3k\3k\3k\3k"+
- "\3l\3l\3m\3m\3n\3n\3n\3n\3o\3o\3o\3o\5o\u03af\no\3p\3p\3q\3q\3q\3r\3r"+
- "\3s\3s\3s\3t\3t\3u\3u\3v\3v\3w\3w\3x\3x\3y\3y\3y\3z\3z\3z\3{\3{\3|\3|"+
- "\3}\3}\3}\3}\7}\u03d3\n}\f}\16}\u03d6\13}\3}\3}\3~\6~\u03db\n~\r~\16~"+
- "\u03dc\3\177\6\177\u03e0\n\177\r\177\16\177\u03e1\3\177\3\177\7\177\u03e6"+
- "\n\177\f\177\16\177\u03e9\13\177\3\177\3\177\6\177\u03ed\n\177\r\177\16"+
- "\177\u03ee\3\177\6\177\u03f2\n\177\r\177\16\177\u03f3\3\177\3\177\7\177"+
- "\u03f8\n\177\f\177\16\177\u03fb\13\177\5\177\u03fd\n\177\3\177\3\177\3"+
- "\177\3\177\6\177\u0403\n\177\r\177\16\177\u0404\3\177\3\177\5\177\u0409"+
- "\n\177\3\u0080\3\u0080\5\u0080\u040d\n\u0080\3\u0080\3\u0080\3\u0080\7"+
- "\u0080\u0412\n\u0080\f\u0080\16\u0080\u0415\13\u0080\3\u0081\3\u0081\3"+
- "\u0081\3\u0081\6\u0081\u041b\n\u0081\r\u0081\16\u0081\u041c\3\u0082\3"+
- "\u0082\3\u0082\6\u0082\u0422\n\u0082\r\u0082\16\u0082\u0423\3\u0083\3"+
- "\u0083\3\u0083\3\u0083\7\u0083\u042a\n\u0083\f\u0083\16\u0083\u042d\13"+
- "\u0083\3\u0083\3\u0083\3\u0084\3\u0084\3\u0084\3\u0084\7\u0084\u0435\n"+
- "\u0084\f\u0084\16\u0084\u0438\13\u0084\3\u0084\3\u0084\3\u0085\3\u0085"+
- "\5\u0085\u043e\n\u0085\3\u0085\6\u0085\u0441\n\u0085\r\u0085\16\u0085"+
- "\u0442\3\u0086\3\u0086\3\u0087\3\u0087\3\u0088\3\u0088\3\u0088\3\u0088"+
- "\7\u0088\u044d\n\u0088\f\u0088\16\u0088\u0450\13\u0088\3\u0088\5\u0088"+
- "\u0453\n\u0088\3\u0088\5\u0088\u0456\n\u0088\3\u0088\3\u0088\3\u0089\3"+
- "\u0089\3\u0089\3\u0089\3\u0089\7\u0089\u045f\n\u0089\f\u0089\16\u0089"+
- "\u0462\13\u0089\3\u0089\3\u0089\3\u0089\3\u0089\3\u0089\3\u008a\6\u008a"+
- "\u046a\n\u008a\r\u008a\16\u008a\u046b\3\u008a\3\u008a\3\u008b\3\u008b"+
- "\3\u0460\2\u008c\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31"+
- "\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65"+
- "\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64"+
- "g\65i\66k\67m8o9q:s;u{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089"+
- "F\u008bG\u008dH\u008fI\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009d"+
- "P\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00afY\u00b1"+
- "Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5"+
- "d\u00c7e\u00c9f\u00cbg\u00cdh\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9"+
- "n\u00dbo\u00ddp\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9v\u00ebw\u00ed"+
- "x\u00efy\u00f1z\u00f3{\u00f5|\u00f7}\u00f9~\u00fb\177\u00fd\u0080\u00ff"+
- "\u0081\u0101\u0082\u0103\u0083\u0105\u0084\u0107\u0085\u0109\2\u010b\2"+
- "\u010d\2\u010f\u0086\u0111\u0087\u0113\u0088\u0115\u0089\3\2\13\3\2))"+
- "\4\2BBaa\3\2$$\3\2bb\4\2--//\3\2\62;\3\2C\\\4\2\f\f\17\17\5\2\13\f\17"+
- "\17\"\"\u0491\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2"+
- "\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2"+
- "\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2"+
- "\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2"+
- "\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3"+
- "\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2"+
- "\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2"+
- "S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3"+
- "\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2"+
- "\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2"+
- "y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081\3\2\2\2\2\u0083"+
- "\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2\2\2\u0089\3\2\2\2\2\u008b\3\2\2"+
- "\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093\3\2\2\2\2\u0095"+
- "\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2\2\2\u009b\3\2\2\2\2\u009d\3\2\2"+
- "\2\2\u009f\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7"+
- "\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af\3\2\2"+
- "\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2\2\2\u00b7\3\2\2\2\2\u00b9"+
- "\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2"+
- "\2\2\u00c3\3\2\2\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb"+
- "\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1\3\2\2\2\2\u00d3\3\2\2"+
- "\2\2\u00d5\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9\3\2\2\2\2\u00db\3\2\2\2\2\u00dd"+
- "\3\2\2\2\2\u00df\3\2\2\2\2\u00e1\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5\3\2\2"+
- "\2\2\u00e7\3\2\2\2\2\u00e9\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed\3\2\2\2\2\u00ef"+
- "\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2\2\2\u00f5\3\2\2\2\2\u00f7\3\2\2"+
- "\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd\3\2\2\2\2\u00ff\3\2\2\2\2\u0101"+
- "\3\2\2\2\2\u0103\3\2\2\2\2\u0105\3\2\2\2\2\u0107\3\2\2\2\2\u010f\3\2\2"+
- "\2\2\u0111\3\2\2\2\2\u0113\3\2\2\2\2\u0115\3\2\2\2\3\u0117\3\2\2\2\5\u0119"+
- "\3\2\2\2\7\u011b\3\2\2\2\t\u011d\3\2\2\2\13\u011f\3\2\2\2\r\u0123\3\2"+
- "\2\2\17\u012b\3\2\2\2\21\u0134\3\2\2\2\23\u0138\3\2\2\2\25\u013c\3\2\2"+
- "\2\27\u013f\3\2\2\2\31\u0143\3\2\2\2\33\u014b\3\2\2\2\35\u014e\3\2\2\2"+
- "\37\u0153\3\2\2\2!\u0158\3\2\2\2#\u0160\3\2\2\2%\u0169\3\2\2\2\'\u0171"+
- "\3\2\2\2)\u0179\3\2\2\2+\u0186\3\2\2\2-\u0193\3\2\2\2/\u01a5\3\2\2\2\61"+
- "\u01a9\3\2\2\2\63\u01ae\3\2\2\2\65\u01b4\3\2\2\2\67\u01b9\3\2\2\29\u01c2"+
- "\3\2\2\2;\u01cb\3\2\2\2=\u01d0\3\2\2\2?\u01d4\3\2\2\2A\u01db\3\2\2\2C"+
- "\u01e6\3\2\2\2E\u01ed\3\2\2\2G\u01f5\3\2\2\2I\u01fd\3\2\2\2K\u0203\3\2"+
- "\2\2M\u0209\3\2\2\2O\u0210\3\2\2\2Q\u0215\3\2\2\2S\u021c\3\2\2\2U\u0221"+
- "\3\2\2\2W\u022b\3\2\2\2Y\u0234\3\2\2\2[\u023a\3\2\2\2]\u0241\3\2\2\2_"+
- "\u0246\3\2\2\2a\u024c\3\2\2\2c\u024f\3\2\2\2e\u0257\3\2\2\2g\u025d\3\2"+
- "\2\2i\u0266\3\2\2\2k\u0269\3\2\2\2m\u026e\3\2\2\2o\u0273\3\2\2\2q\u0278"+
- "\3\2\2\2s\u027d\3\2\2\2u\u0283\3\2\2\2w\u028a\3\2\2\2y\u0290\3\2\2\2{"+
- "\u0297\3\2\2\2}\u029f\3\2\2\2\177\u02a5\3\2\2\2\u0081\u02ac\3\2\2\2\u0083"+
- "\u02b4\3\2\2\2\u0085\u02b8\3\2\2\2\u0087\u02bd\3\2\2\2\u0089\u02c3\3\2"+
- "\2\2\u008b\u02c6\3\2\2\2\u008d\u02d0\3\2\2\2\u008f\u02d3\3\2\2\2\u0091"+
- "\u02d9\3\2\2\2\u0093\u02df\3\2\2\2\u0095\u02e6\3\2\2\2\u0097\u02ef\3\2"+
- "\2\2\u0099\u02f4\3\2\2\2\u009b\u02fa\3\2\2\2\u009d\u0300\3\2\2\2\u009f"+
- "\u0306\3\2\2\2\u00a1\u030e\3\2\2\2\u00a3\u0315\3\2\2\2\u00a5\u031d\3\2"+
- "\2\2\u00a7\u0324\3\2\2\2\u00a9\u0329\3\2\2\2\u00ab\u032d\3\2\2\2\u00ad"+
- "\u0333\3\2\2\2\u00af\u033a\3\2\2\2\u00b1\u033f\3\2\2\2\u00b3\u0344\3\2"+
- "\2\2\u00b5\u0349\3\2\2\2\u00b7\u034c\3\2\2\2\u00b9\u0351\3\2\2\2\u00bb"+
- "\u0357\3\2\2\2\u00bd\u035d\3\2\2\2\u00bf\u0364\3\2\2\2\u00c1\u0369\3\2"+
- "\2\2\u00c3\u036f\3\2\2\2\u00c5\u0374\3\2\2\2\u00c7\u0379\3\2\2\2\u00c9"+
- "\u037f\3\2\2\2\u00cb\u0387\3\2\2\2\u00cd\u038b\3\2\2\2\u00cf\u0392\3\2"+
- "\2\2\u00d1\u0395\3\2\2\2\u00d3\u0398\3\2\2\2\u00d5\u039c\3\2\2\2\u00d7"+
- "\u03a2\3\2\2\2\u00d9\u03a4\3\2\2\2\u00db\u03a6\3\2\2\2\u00dd\u03ae\3\2"+
- "\2\2\u00df\u03b0\3\2\2\2\u00e1\u03b2\3\2\2\2\u00e3\u03b5\3\2\2\2\u00e5"+
- "\u03b7\3\2\2\2\u00e7\u03ba\3\2\2\2\u00e9\u03bc\3\2\2\2\u00eb\u03be\3\2"+
- "\2\2\u00ed\u03c0\3\2\2\2\u00ef\u03c2\3\2\2\2\u00f1\u03c4\3\2\2\2\u00f3"+
- "\u03c7\3\2\2\2\u00f5\u03ca\3\2\2\2\u00f7\u03cc\3\2\2\2\u00f9\u03ce\3\2"+
- "\2\2\u00fb\u03da\3\2\2\2\u00fd\u0408\3\2\2\2\u00ff\u040c\3\2\2\2\u0101"+
- "\u0416\3\2\2\2\u0103\u0421\3\2\2\2\u0105\u0425\3\2\2\2\u0107\u0430\3\2"+
- "\2\2\u0109\u043b\3\2\2\2\u010b\u0444\3\2\2\2\u010d\u0446\3\2\2\2\u010f"+
- "\u0448\3\2\2\2\u0111\u0459\3\2\2\2\u0113\u0469\3\2\2\2\u0115\u046f\3\2"+
- "\2\2\u0117\u0118\7*\2\2\u0118\4\3\2\2\2\u0119\u011a\7+\2\2\u011a\6\3\2"+
- "\2\2\u011b\u011c\7.\2\2\u011c\b\3\2\2\2\u011d\u011e\7<\2\2\u011e\n\3\2"+
- "\2\2\u011f\u0120\7C\2\2\u0120\u0121\7N\2\2\u0121\u0122\7N\2\2\u0122\f"+
- "\3\2\2\2\u0123\u0124\7C\2\2\u0124\u0125\7P\2\2\u0125\u0126\7C\2\2\u0126"+
- "\u0127\7N\2\2\u0127\u0128\7[\2\2\u0128\u0129\7\\\2\2\u0129\u012a\7G\2"+
- "\2\u012a\16\3\2\2\2\u012b\u012c\7C\2\2\u012c\u012d\7P\2\2\u012d\u012e"+
- "\7C\2\2\u012e\u012f\7N\2\2\u012f\u0130\7[\2\2\u0130\u0131\7\\\2\2\u0131"+
- "\u0132\7G\2\2\u0132\u0133\7F\2\2\u0133\20\3\2\2\2\u0134\u0135\7C\2\2\u0135"+
- "\u0136\7P\2\2\u0136\u0137\7F\2\2\u0137\22\3\2\2\2\u0138\u0139\7C\2\2\u0139"+
- "\u013a\7P\2\2\u013a\u013b\7[\2\2\u013b\24\3\2\2\2\u013c\u013d\7C\2\2\u013d"+
- "\u013e\7U\2\2\u013e\26\3\2\2\2\u013f\u0140\7C\2\2\u0140\u0141\7U\2\2\u0141"+
- "\u0142\7E\2\2\u0142\30\3\2\2\2\u0143\u0144\7D\2\2\u0144\u0145\7G\2\2\u0145"+
- "\u0146\7V\2\2\u0146\u0147\7Y\2\2\u0147\u0148\7G\2\2\u0148\u0149\7G\2\2"+
- "\u0149\u014a\7P\2\2\u014a\32\3\2\2\2\u014b\u014c\7D\2\2\u014c\u014d\7"+
- "[\2\2\u014d\34\3\2\2\2\u014e\u014f\7E\2\2\u014f\u0150\7C\2\2\u0150\u0151"+
- "\7U\2\2\u0151\u0152\7G\2\2\u0152\36\3\2\2\2\u0153\u0154\7E\2\2\u0154\u0155"+
- "\7C\2\2\u0155\u0156\7U\2\2\u0156\u0157\7V\2\2\u0157 \3\2\2\2\u0158\u0159"+
- "\7E\2\2\u0159\u015a\7C\2\2\u015a\u015b\7V\2\2\u015b\u015c\7C\2\2\u015c"+
- "\u015d\7N\2\2\u015d\u015e\7Q\2\2\u015e\u015f\7I\2\2\u015f\"\3\2\2\2\u0160"+
- "\u0161\7E\2\2\u0161\u0162\7C\2\2\u0162\u0163\7V\2\2\u0163\u0164\7C\2\2"+
- "\u0164\u0165\7N\2\2\u0165\u0166\7Q\2\2\u0166\u0167\7I\2\2\u0167\u0168"+
- "\7U\2\2\u0168$\3\2\2\2\u0169\u016a\7E\2\2\u016a\u016b\7Q\2\2\u016b\u016c"+
- "\7N\2\2\u016c\u016d\7W\2\2\u016d\u016e\7O\2\2\u016e\u016f\7P\2\2\u016f"+
- "\u0170\7U\2\2\u0170&\3\2\2\2\u0171\u0172\7E\2\2\u0172\u0173\7Q\2\2\u0173"+
- "\u0174\7P\2\2\u0174\u0175\7X\2\2\u0175\u0176\7G\2\2\u0176\u0177\7T\2\2"+
- "\u0177\u0178\7V\2\2\u0178(\3\2\2\2\u0179\u017a\7E\2\2\u017a\u017b\7W\2"+
- "\2\u017b\u017c\7T\2\2\u017c\u017d\7T\2\2\u017d\u017e\7G\2\2\u017e\u017f"+
- "\7P\2\2\u017f\u0180\7V\2\2\u0180\u0181\7a\2\2\u0181\u0182\7F\2\2\u0182"+
- "\u0183\7C\2\2\u0183\u0184\7V\2\2\u0184\u0185\7G\2\2\u0185*\3\2\2\2\u0186"+
- "\u0187\7E\2\2\u0187\u0188\7W\2\2\u0188\u0189\7T\2\2\u0189\u018a\7T\2\2"+
- "\u018a\u018b\7G\2\2\u018b\u018c\7P\2\2\u018c\u018d\7V\2\2\u018d\u018e"+
- "\7a\2\2\u018e\u018f\7V\2\2\u018f\u0190\7K\2\2\u0190\u0191\7O\2\2\u0191"+
- "\u0192\7G\2\2\u0192,\3\2\2\2\u0193\u0194\7E\2\2\u0194\u0195\7W\2\2\u0195"+
- "\u0196\7T\2\2\u0196\u0197\7T\2\2\u0197\u0198\7G\2\2\u0198\u0199\7P\2\2"+
- "\u0199\u019a\7V\2\2\u019a\u019b\7a\2\2\u019b\u019c\7V\2\2\u019c\u019d"+
- "\7K\2\2\u019d\u019e\7O\2\2\u019e\u019f\7G\2\2\u019f\u01a0\7U\2\2\u01a0"+
- "\u01a1\7V\2\2\u01a1\u01a2\7C\2\2\u01a2\u01a3\7O\2\2\u01a3\u01a4\7R\2\2"+
- "\u01a4.\3\2\2\2\u01a5\u01a6\7F\2\2\u01a6\u01a7\7C\2\2\u01a7\u01a8\7[\2"+
- "\2\u01a8\60\3\2\2\2\u01a9\u01aa\7F\2\2\u01aa\u01ab\7C\2\2\u01ab\u01ac"+
- "\7[\2\2\u01ac\u01ad\7U\2\2\u01ad\62\3\2\2\2\u01ae\u01af\7F\2\2\u01af\u01b0"+
- "\7G\2\2\u01b0\u01b1\7D\2\2\u01b1\u01b2\7W\2\2\u01b2\u01b3\7I\2\2\u01b3"+
- "\64\3\2\2\2\u01b4\u01b5\7F\2\2\u01b5\u01b6\7G\2\2\u01b6\u01b7\7U\2\2\u01b7"+
- "\u01b8\7E\2\2\u01b8\66\3\2\2\2\u01b9\u01ba\7F\2\2\u01ba\u01bb\7G\2\2\u01bb"+
- "\u01bc\7U\2\2\u01bc\u01bd\7E\2\2\u01bd\u01be\7T\2\2\u01be\u01bf\7K\2\2"+
- "\u01bf\u01c0\7D\2\2\u01c0\u01c1\7G\2\2\u01c18\3\2\2\2\u01c2\u01c3\7F\2"+
- "\2\u01c3\u01c4\7K\2\2\u01c4\u01c5\7U\2\2\u01c5\u01c6\7V\2\2\u01c6\u01c7"+
- "\7K\2\2\u01c7\u01c8\7P\2\2\u01c8\u01c9\7E\2\2\u01c9\u01ca\7V\2\2\u01ca"+
- ":\3\2\2\2\u01cb\u01cc\7G\2\2\u01cc\u01cd\7N\2\2\u01cd\u01ce\7U\2\2\u01ce"+
- "\u01cf\7G\2\2\u01cf<\3\2\2\2\u01d0\u01d1\7G\2\2\u01d1\u01d2\7P\2\2\u01d2"+
- "\u01d3\7F\2\2\u01d3>\3\2\2\2\u01d4\u01d5\7G\2\2\u01d5\u01d6\7U\2\2\u01d6"+
- "\u01d7\7E\2\2\u01d7\u01d8\7C\2\2\u01d8\u01d9\7R\2\2\u01d9\u01da\7G\2\2"+
- "\u01da@\3\2\2\2\u01db\u01dc\7G\2\2\u01dc\u01dd\7Z\2\2\u01dd\u01de\7G\2"+
- "\2\u01de\u01df\7E\2\2\u01df\u01e0\7W\2\2\u01e0\u01e1\7V\2\2\u01e1\u01e2"+
- "\7C\2\2\u01e2\u01e3\7D\2\2\u01e3\u01e4\7N\2\2\u01e4\u01e5\7G\2\2\u01e5"+
- "B\3\2\2\2\u01e6\u01e7\7G\2\2\u01e7\u01e8\7Z\2\2\u01e8\u01e9\7K\2\2\u01e9"+
- "\u01ea\7U\2\2\u01ea\u01eb\7V\2\2\u01eb\u01ec\7U\2\2\u01ecD\3\2\2\2\u01ed"+
- "\u01ee\7G\2\2\u01ee\u01ef\7Z\2\2\u01ef\u01f0\7R\2\2\u01f0\u01f1\7N\2\2"+
- "\u01f1\u01f2\7C\2\2\u01f2\u01f3\7K\2\2\u01f3\u01f4\7P\2\2\u01f4F\3\2\2"+
- "\2\u01f5\u01f6\7G\2\2\u01f6\u01f7\7Z\2\2\u01f7\u01f8\7V\2\2\u01f8\u01f9"+
- "\7T\2\2\u01f9\u01fa\7C\2\2\u01fa\u01fb\7E\2\2\u01fb\u01fc\7V\2\2\u01fc"+
- "H\3\2\2\2\u01fd\u01fe\7H\2\2\u01fe\u01ff\7C\2\2\u01ff\u0200\7N\2\2\u0200"+
- "\u0201\7U\2\2\u0201\u0202\7G\2\2\u0202J\3\2\2\2\u0203\u0204\7H\2\2\u0204"+
- "\u0205\7K\2\2\u0205\u0206\7T\2\2\u0206\u0207\7U\2\2\u0207\u0208\7V\2\2"+
- "\u0208L\3\2\2\2\u0209\u020a\7H\2\2\u020a\u020b\7Q\2\2\u020b\u020c\7T\2"+
- "\2\u020c\u020d\7O\2\2\u020d\u020e\7C\2\2\u020e\u020f\7V\2\2\u020fN\3\2"+
- "\2\2\u0210\u0211\7H\2\2\u0211\u0212\7T\2\2\u0212\u0213\7Q\2\2\u0213\u0214"+
- "\7O\2\2\u0214P\3\2\2\2\u0215\u0216\7H\2\2\u0216\u0217\7T\2\2\u0217\u0218"+
- "\7Q\2\2\u0218\u0219\7\\\2\2\u0219\u021a\7G\2\2\u021a\u021b\7P\2\2\u021b"+
- "R\3\2\2\2\u021c\u021d\7H\2\2\u021d\u021e\7W\2\2\u021e\u021f\7N\2\2\u021f"+
- "\u0220\7N\2\2\u0220T\3\2\2\2\u0221\u0222\7H\2\2\u0222\u0223\7W\2\2\u0223"+
- "\u0224\7P\2\2\u0224\u0225\7E\2\2\u0225\u0226\7V\2\2\u0226\u0227\7K\2\2"+
- "\u0227\u0228\7Q\2\2\u0228\u0229\7P\2\2\u0229\u022a\7U\2\2\u022aV\3\2\2"+
- "\2\u022b\u022c\7I\2\2\u022c\u022d\7T\2\2\u022d\u022e\7C\2\2\u022e\u022f"+
- "\7R\2\2\u022f\u0230\7J\2\2\u0230\u0231\7X\2\2\u0231\u0232\7K\2\2\u0232"+
- "\u0233\7\\\2\2\u0233X\3\2\2\2\u0234\u0235\7I\2\2\u0235\u0236\7T\2\2\u0236"+
- "\u0237\7Q\2\2\u0237\u0238\7W\2\2\u0238\u0239\7R\2\2\u0239Z\3\2\2\2\u023a"+
- "\u023b\7J\2\2\u023b\u023c\7C\2\2\u023c\u023d\7X\2\2\u023d\u023e\7K\2\2"+
- "\u023e\u023f\7P\2\2\u023f\u0240\7I\2\2\u0240\\\3\2\2\2\u0241\u0242\7J"+
- "\2\2\u0242\u0243\7Q\2\2\u0243\u0244\7W\2\2\u0244\u0245\7T\2\2\u0245^\3"+
- "\2\2\2\u0246\u0247\7J\2\2\u0247\u0248\7Q\2\2\u0248\u0249\7W\2\2\u0249"+
- "\u024a\7T\2\2\u024a\u024b\7U\2\2\u024b`\3\2\2\2\u024c\u024d\7K\2\2\u024d"+
- "\u024e\7P\2\2\u024eb\3\2\2\2\u024f\u0250\7K\2\2\u0250\u0251\7P\2\2\u0251"+
- "\u0252\7E\2\2\u0252\u0253\7N\2\2\u0253\u0254\7W\2\2\u0254\u0255\7F\2\2"+
- "\u0255\u0256\7G\2\2\u0256d\3\2\2\2\u0257\u0258\7K\2\2\u0258\u0259\7P\2"+
- "\2\u0259\u025a\7P\2\2\u025a\u025b\7G\2\2\u025b\u025c\7T\2\2\u025cf\3\2"+
- "\2\2\u025d\u025e\7K\2\2\u025e\u025f\7P\2\2\u025f\u0260\7V\2\2\u0260\u0261"+
- "\7G\2\2\u0261\u0262\7T\2\2\u0262\u0263\7X\2\2\u0263\u0264\7C\2\2\u0264"+
- "\u0265\7N\2\2\u0265h\3\2\2\2\u0266\u0267\7K\2\2\u0267\u0268\7U\2\2\u0268"+
- "j\3\2\2\2\u0269\u026a\7L\2\2\u026a\u026b\7Q\2\2\u026b\u026c\7K\2\2\u026c"+
- "\u026d\7P\2\2\u026dl\3\2\2\2\u026e\u026f\7N\2\2\u026f\u0270\7C\2\2\u0270"+
- "\u0271\7U\2\2\u0271\u0272\7V\2\2\u0272n\3\2\2\2\u0273\u0274\7N\2\2\u0274"+
- "\u0275\7G\2\2\u0275\u0276\7H\2\2\u0276\u0277\7V\2\2\u0277p\3\2\2\2\u0278"+
- "\u0279\7N\2\2\u0279\u027a\7K\2\2\u027a\u027b\7M\2\2\u027b\u027c\7G\2\2"+
- "\u027cr\3\2\2\2\u027d\u027e\7N\2\2\u027e\u027f\7K\2\2\u027f\u0280\7O\2"+
- "\2\u0280\u0281\7K\2\2\u0281\u0282\7V\2\2\u0282t\3\2\2\2\u0283\u0284\7"+
- "O\2\2\u0284\u0285\7C\2\2\u0285\u0286\7R\2\2\u0286\u0287\7R\2\2\u0287\u0288"+
- "\7G\2\2\u0288\u0289\7F\2\2\u0289v\3\2\2\2\u028a\u028b\7O\2\2\u028b\u028c"+
- "\7C\2\2\u028c\u028d\7V\2\2\u028d\u028e\7E\2\2\u028e\u028f\7J\2\2\u028f"+
- "x\3\2\2\2\u0290\u0291\7O\2\2\u0291\u0292\7K\2\2\u0292\u0293\7P\2\2\u0293"+
- "\u0294\7W\2\2\u0294\u0295\7V\2\2\u0295\u0296\7G\2\2\u0296z\3\2\2\2\u0297"+
- "\u0298\7O\2\2\u0298\u0299\7K\2\2\u0299\u029a\7P\2\2\u029a\u029b\7W\2\2"+
- "\u029b\u029c\7V\2\2\u029c\u029d\7G\2\2\u029d\u029e\7U\2\2\u029e|\3\2\2"+
- "\2\u029f\u02a0\7O\2\2\u02a0\u02a1\7Q\2\2\u02a1\u02a2\7P\2\2\u02a2\u02a3"+
- "\7V\2\2\u02a3\u02a4\7J\2\2\u02a4~\3\2\2\2\u02a5\u02a6\7O\2\2\u02a6\u02a7"+
- "\7Q\2\2\u02a7\u02a8\7P\2\2\u02a8\u02a9\7V\2\2\u02a9\u02aa\7J\2\2\u02aa"+
- "\u02ab\7U\2\2\u02ab\u0080\3\2\2\2\u02ac\u02ad\7P\2\2\u02ad\u02ae\7C\2"+
- "\2\u02ae\u02af\7V\2\2\u02af\u02b0\7W\2\2\u02b0\u02b1\7T\2\2\u02b1\u02b2"+
- "\7C\2\2\u02b2\u02b3\7N\2\2\u02b3\u0082\3\2\2\2\u02b4\u02b5\7P\2\2\u02b5"+
- "\u02b6\7Q\2\2\u02b6\u02b7\7V\2\2\u02b7\u0084\3\2\2\2\u02b8\u02b9\7P\2"+
- "\2\u02b9\u02ba\7W\2\2\u02ba\u02bb\7N\2\2\u02bb\u02bc\7N\2\2\u02bc\u0086"+
- "\3\2\2\2\u02bd\u02be\7P\2\2\u02be\u02bf\7W\2\2\u02bf\u02c0\7N\2\2\u02c0"+
- "\u02c1\7N\2\2\u02c1\u02c2\7U\2\2\u02c2\u0088\3\2\2\2\u02c3\u02c4\7Q\2"+
- "\2\u02c4\u02c5\7P\2\2\u02c5\u008a\3\2\2\2\u02c6\u02c7\7Q\2\2\u02c7\u02c8"+
- "\7R\2\2\u02c8\u02c9\7V\2\2\u02c9\u02ca\7K\2\2\u02ca\u02cb\7O\2\2\u02cb"+
- "\u02cc\7K\2\2\u02cc\u02cd\7\\\2\2\u02cd\u02ce\7G\2\2\u02ce\u02cf\7F\2"+
- "\2\u02cf\u008c\3\2\2\2\u02d0\u02d1\7Q\2\2\u02d1\u02d2\7T\2\2\u02d2\u008e"+
- "\3\2\2\2\u02d3\u02d4\7Q\2\2\u02d4\u02d5\7T\2\2\u02d5\u02d6\7F\2\2\u02d6"+
- "\u02d7\7G\2\2\u02d7\u02d8\7T\2\2\u02d8\u0090\3\2\2\2\u02d9\u02da\7Q\2"+
- "\2\u02da\u02db\7W\2\2\u02db\u02dc\7V\2\2\u02dc\u02dd\7G\2\2\u02dd\u02de"+
- "\7T\2\2\u02de\u0092\3\2\2\2\u02df\u02e0\7R\2\2\u02e0\u02e1\7C\2\2\u02e1"+
- "\u02e2\7T\2\2\u02e2\u02e3\7U\2\2\u02e3\u02e4\7G\2\2\u02e4\u02e5\7F\2\2"+
- "\u02e5\u0094\3\2\2\2\u02e6\u02e7\7R\2\2\u02e7\u02e8\7J\2\2\u02e8\u02e9"+
- "\7[\2\2\u02e9\u02ea\7U\2\2\u02ea\u02eb\7K\2\2\u02eb\u02ec\7E\2\2\u02ec"+
- "\u02ed\7C\2\2\u02ed\u02ee\7N\2\2\u02ee\u0096\3\2\2\2\u02ef\u02f0\7R\2"+
- "\2\u02f0\u02f1\7N\2\2\u02f1\u02f2\7C\2\2\u02f2\u02f3\7P\2\2\u02f3\u0098"+
- "\3\2\2\2\u02f4\u02f5\7T\2\2\u02f5\u02f6\7K\2\2\u02f6\u02f7\7I\2\2\u02f7"+
- "\u02f8\7J\2\2\u02f8\u02f9\7V\2\2\u02f9\u009a\3\2\2\2\u02fa\u02fb\7T\2"+
- "\2\u02fb\u02fc\7N\2\2\u02fc\u02fd\7K\2\2\u02fd\u02fe\7M\2\2\u02fe\u02ff"+
- "\7G\2\2\u02ff\u009c\3\2\2\2\u0300\u0301\7S\2\2\u0301\u0302\7W\2\2\u0302"+
- "\u0303\7G\2\2\u0303\u0304\7T\2\2\u0304\u0305\7[\2\2\u0305\u009e\3\2\2"+
- "\2\u0306\u0307\7U\2\2\u0307\u0308\7E\2\2\u0308\u0309\7J\2\2\u0309\u030a"+
- "\7G\2\2\u030a\u030b\7O\2\2\u030b\u030c\7C\2\2\u030c\u030d\7U\2\2\u030d"+
- "\u00a0\3\2\2\2\u030e\u030f\7U\2\2\u030f\u0310\7G\2\2\u0310\u0311\7E\2"+
- "\2\u0311\u0312\7Q\2\2\u0312\u0313\7P\2\2\u0313\u0314\7F\2\2\u0314\u00a2"+
- "\3\2\2\2\u0315\u0316\7U\2\2\u0316\u0317\7G\2\2\u0317\u0318\7E\2\2\u0318"+
- "\u0319\7Q\2\2\u0319\u031a\7P\2\2\u031a\u031b\7F\2\2\u031b\u031c\7U\2\2"+
- "\u031c\u00a4\3\2\2\2\u031d\u031e\7U\2\2\u031e\u031f\7G\2\2\u031f\u0320"+
- "\7N\2\2\u0320\u0321\7G\2\2\u0321\u0322\7E\2\2\u0322\u0323\7V\2\2\u0323"+
- "\u00a6\3\2\2\2\u0324\u0325\7U\2\2\u0325\u0326\7J\2\2\u0326\u0327\7Q\2"+
- "\2\u0327\u0328\7Y\2\2\u0328\u00a8\3\2\2\2\u0329\u032a\7U\2\2\u032a\u032b"+
- "\7[\2\2\u032b\u032c\7U\2\2\u032c\u00aa\3\2\2\2\u032d\u032e\7V\2\2\u032e"+
- "\u032f\7C\2\2\u032f\u0330\7D\2\2\u0330\u0331\7N\2\2\u0331\u0332\7G\2\2"+
- "\u0332\u00ac\3\2\2\2\u0333\u0334\7V\2\2\u0334\u0335\7C\2\2\u0335\u0336"+
- "\7D\2\2\u0336\u0337\7N\2\2\u0337\u0338\7G\2\2\u0338\u0339\7U\2\2\u0339"+
- "\u00ae\3\2\2\2\u033a\u033b\7V\2\2\u033b\u033c\7G\2\2\u033c\u033d\7Z\2"+
- "\2\u033d\u033e\7V\2\2\u033e\u00b0\3\2\2\2\u033f\u0340\7V\2\2\u0340\u0341"+
- "\7J\2\2\u0341\u0342\7G\2\2\u0342\u0343\7P\2\2\u0343\u00b2\3\2\2\2\u0344"+
- "\u0345\7V\2\2\u0345\u0346\7T\2\2\u0346\u0347\7W\2\2\u0347\u0348\7G\2\2"+
- "\u0348\u00b4\3\2\2\2\u0349\u034a\7V\2\2\u034a\u034b\7Q\2\2\u034b\u00b6"+
- "\3\2\2\2\u034c\u034d\7V\2\2\u034d\u034e\7[\2\2\u034e\u034f\7R\2\2\u034f"+
- "\u0350\7G\2\2\u0350\u00b8\3\2\2\2\u0351\u0352\7V\2\2\u0352\u0353\7[\2"+
- "\2\u0353\u0354\7R\2\2\u0354\u0355\7G\2\2\u0355\u0356\7U\2\2\u0356\u00ba"+
- "\3\2\2\2\u0357\u0358\7W\2\2\u0358\u0359\7U\2\2\u0359\u035a\7K\2\2\u035a"+
- "\u035b\7P\2\2\u035b\u035c\7I\2\2\u035c\u00bc\3\2\2\2\u035d\u035e\7X\2"+
- "\2\u035e\u035f\7G\2\2\u035f\u0360\7T\2\2\u0360\u0361\7K\2\2\u0361\u0362"+
- "\7H\2\2\u0362\u0363\7[\2\2\u0363\u00be\3\2\2\2\u0364\u0365\7Y\2\2\u0365"+
- "\u0366\7J\2\2\u0366\u0367\7G\2\2\u0367\u0368\7P\2\2\u0368\u00c0\3\2\2"+
- "\2\u0369\u036a\7Y\2\2\u036a\u036b\7J\2\2\u036b\u036c\7G\2\2\u036c\u036d"+
- "\7T\2\2\u036d\u036e\7G\2\2\u036e\u00c2\3\2\2\2\u036f\u0370\7Y\2\2\u0370"+
- "\u0371\7K\2\2\u0371\u0372\7V\2\2\u0372\u0373\7J\2\2\u0373\u00c4\3\2\2"+
- "\2\u0374\u0375\7[\2\2\u0375\u0376\7G\2\2\u0376\u0377\7C\2\2\u0377\u0378"+
- "\7T\2\2\u0378\u00c6\3\2\2\2\u0379\u037a\7[\2\2\u037a\u037b\7G\2\2\u037b"+
- "\u037c\7C\2\2\u037c\u037d\7T\2\2\u037d\u037e\7U\2\2\u037e\u00c8\3\2\2"+
- "\2\u037f\u0380\7}\2\2\u0380\u0381\7G\2\2\u0381\u0382\7U\2\2\u0382\u0383"+
- "\7E\2\2\u0383\u0384\7C\2\2\u0384\u0385\7R\2\2\u0385\u0386\7G\2\2\u0386"+
- "\u00ca\3\2\2\2\u0387\u0388\7}\2\2\u0388\u0389\7H\2\2\u0389\u038a\7P\2"+
- "\2\u038a\u00cc\3\2\2\2\u038b\u038c\7}\2\2\u038c\u038d\7N\2\2\u038d\u038e"+
- "\7K\2\2\u038e\u038f\7O\2\2\u038f\u0390\7K\2\2\u0390\u0391\7V\2\2\u0391"+
- "\u00ce\3\2\2\2\u0392\u0393\7}\2\2\u0393\u0394\7F\2\2\u0394\u00d0\3\2\2"+
- "\2\u0395\u0396\7}\2\2\u0396\u0397\7V\2\2\u0397\u00d2\3\2\2\2\u0398\u0399"+
- "\7}\2\2\u0399\u039a\7V\2\2\u039a\u039b\7U\2\2\u039b\u00d4\3\2\2\2\u039c"+
- "\u039d\7}\2\2\u039d\u039e\7I\2\2\u039e\u039f\7W\2\2\u039f\u03a0\7K\2\2"+
- "\u03a0\u03a1\7F\2\2\u03a1\u00d6\3\2\2\2\u03a2\u03a3\7\177\2\2\u03a3\u00d8"+
- "\3\2\2\2\u03a4\u03a5\7?\2\2\u03a5\u00da\3\2\2\2\u03a6\u03a7\7>\2\2\u03a7"+
- "\u03a8\7?\2\2\u03a8\u03a9\7@\2\2\u03a9\u00dc\3\2\2\2\u03aa\u03ab\7>\2"+
- "\2\u03ab\u03af\7@\2\2\u03ac\u03ad\7#\2\2\u03ad\u03af\7?\2\2\u03ae\u03aa"+
- "\3\2\2\2\u03ae\u03ac\3\2\2\2\u03af\u00de\3\2\2\2\u03b0\u03b1\7>\2\2\u03b1"+
- "\u00e0\3\2\2\2\u03b2\u03b3\7>\2\2\u03b3\u03b4\7?\2\2\u03b4\u00e2\3\2\2"+
- "\2\u03b5\u03b6\7@\2\2\u03b6\u00e4\3\2\2\2\u03b7\u03b8\7@\2\2\u03b8\u03b9"+
- "\7?\2\2\u03b9\u00e6\3\2\2\2\u03ba\u03bb\7-\2\2\u03bb\u00e8\3\2\2\2\u03bc"+
- "\u03bd\7/\2\2\u03bd\u00ea\3\2\2\2\u03be\u03bf\7,\2\2\u03bf\u00ec\3\2\2"+
- "\2\u03c0\u03c1\7\61\2\2\u03c1\u00ee\3\2\2\2\u03c2\u03c3\7\'\2\2\u03c3"+
- "\u00f0\3\2\2\2\u03c4\u03c5\7<\2\2\u03c5\u03c6\7<\2\2\u03c6\u00f2\3\2\2"+
- "\2\u03c7\u03c8\7~\2\2\u03c8\u03c9\7~\2\2\u03c9\u00f4\3\2\2\2\u03ca\u03cb"+
- "\7\60\2\2\u03cb\u00f6\3\2\2\2\u03cc\u03cd\7A\2\2\u03cd\u00f8\3\2\2\2\u03ce"+
- "\u03d4\7)\2\2\u03cf\u03d3\n\2\2\2\u03d0\u03d1\7)\2\2\u03d1\u03d3\7)\2"+
- "\2\u03d2\u03cf\3\2\2\2\u03d2\u03d0\3\2\2\2\u03d3\u03d6\3\2\2\2\u03d4\u03d2"+
- "\3\2\2\2\u03d4\u03d5\3\2\2\2\u03d5\u03d7\3\2\2\2\u03d6\u03d4\3\2\2\2\u03d7"+
- "\u03d8\7)\2\2\u03d8\u00fa\3\2\2\2\u03d9\u03db\5\u010b\u0086\2\u03da\u03d9"+
- "\3\2\2\2\u03db\u03dc\3\2\2\2\u03dc\u03da\3\2\2\2\u03dc\u03dd\3\2\2\2\u03dd"+
- "\u00fc\3\2\2\2\u03de\u03e0\5\u010b\u0086\2\u03df\u03de\3\2\2\2\u03e0\u03e1"+
- "\3\2\2\2\u03e1\u03df\3\2\2\2\u03e1\u03e2\3\2\2\2\u03e2\u03e3\3\2\2\2\u03e3"+
- "\u03e7\5\u00f5{\2\u03e4\u03e6\5\u010b\u0086\2\u03e5\u03e4\3\2\2\2\u03e6"+
- "\u03e9\3\2\2\2\u03e7\u03e5\3\2\2\2\u03e7\u03e8\3\2\2\2\u03e8\u0409\3\2"+
- "\2\2\u03e9\u03e7\3\2\2\2\u03ea\u03ec\5\u00f5{\2\u03eb\u03ed\5\u010b\u0086"+
- "\2\u03ec\u03eb\3\2\2\2\u03ed\u03ee\3\2\2\2\u03ee\u03ec\3\2\2\2\u03ee\u03ef"+
- "\3\2\2\2\u03ef\u0409\3\2\2\2\u03f0\u03f2\5\u010b\u0086\2\u03f1\u03f0\3"+
- "\2\2\2\u03f2\u03f3\3\2\2\2\u03f3\u03f1\3\2\2\2\u03f3\u03f4\3\2\2\2\u03f4"+
- "\u03fc\3\2\2\2\u03f5\u03f9\5\u00f5{\2\u03f6\u03f8\5\u010b\u0086\2\u03f7"+
- "\u03f6\3\2\2\2\u03f8\u03fb\3\2\2\2\u03f9\u03f7\3\2\2\2\u03f9\u03fa\3\2"+
- "\2\2\u03fa\u03fd\3\2\2\2\u03fb\u03f9\3\2\2\2\u03fc\u03f5\3\2\2\2\u03fc"+
- "\u03fd\3\2\2\2\u03fd\u03fe\3\2\2\2\u03fe\u03ff\5\u0109\u0085\2\u03ff\u0409"+
- "\3\2\2\2\u0400\u0402\5\u00f5{\2\u0401\u0403\5\u010b\u0086\2\u0402\u0401"+
- "\3\2\2\2\u0403\u0404\3\2\2\2\u0404\u0402\3\2\2\2\u0404\u0405\3\2\2\2\u0405"+
- "\u0406\3\2\2\2\u0406\u0407\5\u0109\u0085\2\u0407\u0409\3\2\2\2\u0408\u03df"+
- "\3\2\2\2\u0408\u03ea\3\2\2\2\u0408\u03f1\3\2\2\2\u0408\u0400\3\2\2\2\u0409"+
- "\u00fe\3\2\2\2\u040a\u040d\5\u010d\u0087\2\u040b\u040d\7a\2\2\u040c\u040a"+
- "\3\2\2\2\u040c\u040b\3\2\2\2\u040d\u0413\3\2\2\2\u040e\u0412\5\u010d\u0087"+
- "\2\u040f\u0412\5\u010b\u0086\2\u0410\u0412\t\3\2\2\u0411\u040e\3\2\2\2"+
- "\u0411\u040f\3\2\2\2\u0411\u0410\3\2\2\2\u0412\u0415\3\2\2\2\u0413\u0411"+
- "\3\2\2\2\u0413\u0414\3\2\2\2\u0414\u0100\3\2\2\2\u0415\u0413\3\2\2\2\u0416"+
- "\u041a\5\u010b\u0086\2\u0417\u041b\5\u010d\u0087\2\u0418\u041b\5\u010b"+
- "\u0086\2\u0419\u041b\t\3\2\2\u041a\u0417\3\2\2\2\u041a\u0418\3\2\2\2\u041a"+
- "\u0419\3\2\2\2\u041b\u041c\3\2\2\2\u041c\u041a\3\2\2\2\u041c\u041d\3\2"+
- "\2\2\u041d\u0102\3\2\2\2\u041e\u0422\5\u010d\u0087\2\u041f\u0422\5\u010b"+
- "\u0086\2\u0420\u0422\7a\2\2\u0421\u041e\3\2\2\2\u0421\u041f\3\2\2\2\u0421"+
- "\u0420\3\2\2\2\u0422\u0423\3\2\2\2\u0423\u0421\3\2\2\2\u0423\u0424\3\2"+
- "\2\2\u0424\u0104\3\2\2\2\u0425\u042b\7$\2\2\u0426\u042a\n\4\2\2\u0427"+
- "\u0428\7$\2\2\u0428\u042a\7$\2\2\u0429\u0426\3\2\2\2\u0429\u0427\3\2\2"+
- "\2\u042a\u042d\3\2\2\2\u042b\u0429\3\2\2\2\u042b\u042c\3\2\2\2\u042c\u042e"+
- "\3\2\2\2\u042d\u042b\3\2\2\2\u042e\u042f\7$\2\2\u042f\u0106\3\2\2\2\u0430"+
- "\u0436\7b\2\2\u0431\u0435\n\5\2\2\u0432\u0433\7b\2\2\u0433\u0435\7b\2"+
- "\2\u0434\u0431\3\2\2\2\u0434\u0432\3\2\2\2\u0435\u0438\3\2\2\2\u0436\u0434"+
- "\3\2\2\2\u0436\u0437\3\2\2\2\u0437\u0439\3\2\2\2\u0438\u0436\3\2\2\2\u0439"+
- "\u043a\7b\2\2\u043a\u0108\3\2\2\2\u043b\u043d\7G\2\2\u043c\u043e\t\6\2"+
- "\2\u043d\u043c\3\2\2\2\u043d\u043e\3\2\2\2\u043e\u0440\3\2\2\2\u043f\u0441"+
- "\5\u010b\u0086\2\u0440\u043f\3\2\2\2\u0441\u0442\3\2\2\2\u0442\u0440\3"+
- "\2\2\2\u0442\u0443\3\2\2\2\u0443\u010a\3\2\2\2\u0444\u0445\t\7\2\2\u0445"+
- "\u010c\3\2\2\2\u0446\u0447\t\b\2\2\u0447\u010e\3\2\2\2\u0448\u0449\7/"+
- "\2\2\u0449\u044a\7/\2\2\u044a\u044e\3\2\2\2\u044b\u044d\n\t\2\2\u044c"+
- "\u044b\3\2\2\2\u044d\u0450\3\2\2\2\u044e\u044c\3\2\2\2\u044e\u044f\3\2"+
- "\2\2\u044f\u0452\3\2\2\2\u0450\u044e\3\2\2\2\u0451\u0453\7\17\2\2\u0452"+
- "\u0451\3\2\2\2\u0452\u0453\3\2\2\2\u0453\u0455\3\2\2\2\u0454\u0456\7\f"+
- "\2\2\u0455\u0454\3\2\2\2\u0455\u0456\3\2\2\2\u0456\u0457\3\2\2\2\u0457"+
- "\u0458\b\u0088\2\2\u0458\u0110\3\2\2\2\u0459\u045a\7\61\2\2\u045a\u045b"+
- "\7,\2\2\u045b\u0460\3\2\2\2\u045c\u045f\5\u0111\u0089\2\u045d\u045f\13"+
- "\2\2\2\u045e\u045c\3\2\2\2\u045e\u045d\3\2\2\2\u045f\u0462\3\2\2\2\u0460"+
- "\u0461\3\2\2\2\u0460\u045e\3\2\2\2\u0461\u0463\3\2\2\2\u0462\u0460\3\2"+
- "\2\2\u0463\u0464\7,\2\2\u0464\u0465\7\61\2\2\u0465\u0466\3\2\2\2\u0466"+
- "\u0467\b\u0089\2\2\u0467\u0112\3\2\2\2\u0468\u046a\t\n\2\2\u0469\u0468"+
- "\3\2\2\2\u046a\u046b\3\2\2\2\u046b\u0469\3\2\2\2\u046b\u046c\3\2\2\2\u046c"+
- "\u046d\3\2\2\2\u046d\u046e\b\u008a\2\2\u046e\u0114\3\2\2\2\u046f\u0470"+
- "\13\2\2\2\u0470\u0116\3\2\2\2\"\2\u03ae\u03d2\u03d4\u03dc\u03e1\u03e7"+
- "\u03ee\u03f3\u03f9\u03fc\u0404\u0408\u040c\u0411\u0413\u041a\u041c\u0421"+
- "\u0423\u0429\u042b\u0434\u0436\u043d\u0442\u044e\u0452\u0455\u045e\u0460"+
- "\u046b\3\2\3\2";
+ "\t\u0089\4\u008a\t\u008a\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d"+
+ "\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3"+
+ "\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\n\3\n"+
+ "\3\n\3\n\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3"+
+ "\r\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3"+
+ "\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3"+
+ "\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3"+
+ "\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3"+
+ "\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3"+
+ "\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3"+
+ "\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3"+
+ "\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\34\3"+
+ "\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3"+
+ "\35\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3 \3 \3 \3"+
+ " \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3"+
+ "\"\3#\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3%\3"+
+ "&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3(\3)\3)\3)\3)\3)\3"+
+ "*\3*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3-\3"+
+ "-\3-\3-\3-\3-\3-\3-\3-\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3/\3/\3\60\3\60"+
+ "\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\61\3\62\3\62\3\62\3\63\3\63"+
+ "\3\63\3\63\3\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65"+
+ "\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\66\3\66\3\66\3\67\3\67\3\67\3\67"+
+ "\3\67\38\38\38\38\38\39\39\39\39\39\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3;\3"+
+ "<\3<\3<\3<\3<\3<\3<\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3>\3>\3?\3?\3?\3"+
+ "?\3?\3?\3?\3?\3@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3A\3A\3B\3B\3B\3B\3B\3"+
+ "B\3B\3B\3C\3C\3C\3C\3D\3D\3D\3D\3D\3E\3E\3E\3E\3E\3E\3F\3F\3F\3G\3G\3"+
+ "G\3G\3G\3G\3G\3G\3G\3G\3H\3H\3H\3I\3I\3I\3I\3I\3I\3J\3J\3J\3J\3J\3J\3"+
+ "K\3K\3K\3K\3K\3K\3K\3L\3L\3L\3L\3L\3L\3L\3L\3L\3M\3M\3M\3M\3M\3M\3N\3"+
+ "N\3N\3N\3N\3O\3O\3O\3O\3O\3O\3P\3P\3P\3P\3P\3P\3Q\3Q\3Q\3Q\3Q\3Q\3R\3"+
+ "R\3R\3R\3R\3R\3R\3R\3S\3S\3S\3S\3S\3S\3S\3T\3T\3T\3T\3T\3T\3T\3T\3U\3"+
+ "U\3U\3U\3U\3U\3U\3V\3V\3V\3V\3V\3W\3W\3W\3W\3X\3X\3X\3X\3X\3X\3Y\3Y\3"+
+ "Y\3Y\3Y\3Y\3Y\3Z\3Z\3Z\3Z\3Z\3[\3[\3[\3[\3[\3\\\3\\\3\\\3\\\3\\\3]\3]"+
+ "\3]\3^\3^\3^\3^\3^\3_\3_\3_\3_\3_\3_\3`\3`\3`\3`\3`\3`\3a\3a\3a\3a\3a"+
+ "\3a\3a\3b\3b\3b\3b\3b\3c\3c\3c\3c\3c\3c\3d\3d\3d\3d\3d\3e\3e\3e\3e\3e"+
+ "\3f\3f\3f\3f\3f\3f\3g\3g\3g\3g\3g\3g\3g\3g\3h\3h\3h\3h\3i\3i\3i\3i\3i"+
+ "\3i\3i\3j\3j\3j\3k\3k\3k\3l\3l\3l\3l\3m\3m\3m\3m\3m\3m\3n\3n\3o\3o\3p"+
+ "\3p\3p\3p\3q\3q\3q\3q\5q\u03bd\nq\3r\3r\3s\3s\3s\3t\3t\3u\3u\3u\3v\3v"+
+ "\3w\3w\3x\3x\3y\3y\3z\3z\3{\3{\3{\3|\3|\3|\3}\3}\3~\3~\3\177\3\177\3\177"+
+ "\3\177\7\177\u03e1\n\177\f\177\16\177\u03e4\13\177\3\177\3\177\3\u0080"+
+ "\6\u0080\u03e9\n\u0080\r\u0080\16\u0080\u03ea\3\u0081\6\u0081\u03ee\n"+
+ "\u0081\r\u0081\16\u0081\u03ef\3\u0081\3\u0081\7\u0081\u03f4\n\u0081\f"+
+ "\u0081\16\u0081\u03f7\13\u0081\3\u0081\3\u0081\6\u0081\u03fb\n\u0081\r"+
+ "\u0081\16\u0081\u03fc\3\u0081\6\u0081\u0400\n\u0081\r\u0081\16\u0081\u0401"+
+ "\3\u0081\3\u0081\7\u0081\u0406\n\u0081\f\u0081\16\u0081\u0409\13\u0081"+
+ "\5\u0081\u040b\n\u0081\3\u0081\3\u0081\3\u0081\3\u0081\6\u0081\u0411\n"+
+ "\u0081\r\u0081\16\u0081\u0412\3\u0081\3\u0081\5\u0081\u0417\n\u0081\3"+
+ "\u0082\3\u0082\5\u0082\u041b\n\u0082\3\u0082\3\u0082\3\u0082\7\u0082\u0420"+
+ "\n\u0082\f\u0082\16\u0082\u0423\13\u0082\3\u0083\3\u0083\3\u0083\3\u0083"+
+ "\6\u0083\u0429\n\u0083\r\u0083\16\u0083\u042a\3\u0084\3\u0084\3\u0084"+
+ "\6\u0084\u0430\n\u0084\r\u0084\16\u0084\u0431\3\u0085\3\u0085\3\u0085"+
+ "\3\u0085\7\u0085\u0438\n\u0085\f\u0085\16\u0085\u043b\13\u0085\3\u0085"+
+ "\3\u0085\3\u0086\3\u0086\3\u0086\3\u0086\7\u0086\u0443\n\u0086\f\u0086"+
+ "\16\u0086\u0446\13\u0086\3\u0086\3\u0086\3\u0087\3\u0087\5\u0087\u044c"+
+ "\n\u0087\3\u0087\6\u0087\u044f\n\u0087\r\u0087\16\u0087\u0450\3\u0088"+
+ "\3\u0088\3\u0089\3\u0089\3\u008a\3\u008a\3\u008a\3\u008a\7\u008a\u045b"+
+ "\n\u008a\f\u008a\16\u008a\u045e\13\u008a\3\u008a\5\u008a\u0461\n\u008a"+
+ "\3\u008a\5\u008a\u0464\n\u008a\3\u008a\3\u008a\3\u008b\3\u008b\3\u008b"+
+ "\3\u008b\3\u008b\7\u008b\u046d\n\u008b\f\u008b\16\u008b\u0470\13\u008b"+
+ "\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b\3\u008c\6\u008c\u0478\n\u008c"+
+ "\r\u008c\16\u008c\u0479\3\u008c\3\u008c\3\u008d\3\u008d\3\u046e\2\u008e"+
+ "\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20"+
+ "\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65\34\67\359\36;\37"+
+ "= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k\67m8o"+
+ "9q:s;u{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089F\u008bG\u008dH"+
+ "\u008fI\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009dP\u009fQ\u00a1"+
+ "R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00afY\u00b1Z\u00b3[\u00b5"+
+ "\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5d\u00c7e\u00c9"+
+ "f\u00cbg\u00cdh\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9n\u00dbo\u00dd"+
+ "p\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9v\u00ebw\u00edx\u00efy\u00f1"+
+ "z\u00f3{\u00f5|\u00f7}\u00f9~\u00fb\177\u00fd\u0080\u00ff\u0081\u0101"+
+ "\u0082\u0103\u0083\u0105\u0084\u0107\u0085\u0109\u0086\u010b\u0087\u010d"+
+ "\2\u010f\2\u0111\2\u0113\u0088\u0115\u0089\u0117\u008a\u0119\u008b\3\2"+
+ "\13\3\2))\4\2BBaa\3\2$$\3\2bb\4\2--//\3\2\62;\3\2C\\\4\2\f\f\17\17\5\2"+
+ "\13\f\17\17\"\"\u049f\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2"+
+ "\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25"+
+ "\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2"+
+ "\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2"+
+ "\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3"+
+ "\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2"+
+ "\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2"+
+ "Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3"+
+ "\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2"+
+ "\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2"+
+ "w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081\3\2\2"+
+ "\2\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2\2\2\u0089\3\2\2\2\2\u008b"+
+ "\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093\3\2\2"+
+ "\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2\2\2\u009b\3\2\2\2\2\u009d"+
+ "\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2"+
+ "\2\2\u00a7\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af"+
+ "\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2\2\2\u00b7\3\2\2"+
+ "\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1"+
+ "\3\2\2\2\2\u00c3\3\2\2\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2"+
+ "\2\2\u00cb\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1\3\2\2\2\2\u00d3"+
+ "\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9\3\2\2\2\2\u00db\3\2\2"+
+ "\2\2\u00dd\3\2\2\2\2\u00df\3\2\2\2\2\u00e1\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5"+
+ "\3\2\2\2\2\u00e7\3\2\2\2\2\u00e9\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed\3\2\2"+
+ "\2\2\u00ef\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2\2\2\u00f5\3\2\2\2\2\u00f7"+
+ "\3\2\2\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd\3\2\2\2\2\u00ff\3\2\2"+
+ "\2\2\u0101\3\2\2\2\2\u0103\3\2\2\2\2\u0105\3\2\2\2\2\u0107\3\2\2\2\2\u0109"+
+ "\3\2\2\2\2\u010b\3\2\2\2\2\u0113\3\2\2\2\2\u0115\3\2\2\2\2\u0117\3\2\2"+
+ "\2\2\u0119\3\2\2\2\3\u011b\3\2\2\2\5\u011d\3\2\2\2\7\u011f\3\2\2\2\t\u0121"+
+ "\3\2\2\2\13\u0123\3\2\2\2\r\u0127\3\2\2\2\17\u012f\3\2\2\2\21\u0138\3"+
+ "\2\2\2\23\u013c\3\2\2\2\25\u0140\3\2\2\2\27\u0143\3\2\2\2\31\u0147\3\2"+
+ "\2\2\33\u014f\3\2\2\2\35\u0152\3\2\2\2\37\u0157\3\2\2\2!\u015c\3\2\2\2"+
+ "#\u0164\3\2\2\2%\u016d\3\2\2\2\'\u0175\3\2\2\2)\u017d\3\2\2\2+\u018a\3"+
+ "\2\2\2-\u0197\3\2\2\2/\u01a9\3\2\2\2\61\u01ad\3\2\2\2\63\u01b2\3\2\2\2"+
+ "\65\u01b8\3\2\2\2\67\u01bd\3\2\2\29\u01c6\3\2\2\2;\u01cf\3\2\2\2=\u01d4"+
+ "\3\2\2\2?\u01d8\3\2\2\2A\u01df\3\2\2\2C\u01ea\3\2\2\2E\u01f1\3\2\2\2G"+
+ "\u01f9\3\2\2\2I\u0201\3\2\2\2K\u0207\3\2\2\2M\u020d\3\2\2\2O\u0211\3\2"+
+ "\2\2Q\u0218\3\2\2\2S\u021d\3\2\2\2U\u0224\3\2\2\2W\u0229\3\2\2\2Y\u0233"+
+ "\3\2\2\2[\u023c\3\2\2\2]\u0242\3\2\2\2_\u0249\3\2\2\2a\u024e\3\2\2\2c"+
+ "\u0254\3\2\2\2e\u0257\3\2\2\2g\u025f\3\2\2\2i\u0265\3\2\2\2k\u026e\3\2"+
+ "\2\2m\u0271\3\2\2\2o\u0276\3\2\2\2q\u027b\3\2\2\2s\u0280\3\2\2\2u\u0285"+
+ "\3\2\2\2w\u028b\3\2\2\2y\u0292\3\2\2\2{\u0298\3\2\2\2}\u029f\3\2\2\2\177"+
+ "\u02a7\3\2\2\2\u0081\u02ad\3\2\2\2\u0083\u02b4\3\2\2\2\u0085\u02bc\3\2"+
+ "\2\2\u0087\u02c0\3\2\2\2\u0089\u02c5\3\2\2\2\u008b\u02cb\3\2\2\2\u008d"+
+ "\u02ce\3\2\2\2\u008f\u02d8\3\2\2\2\u0091\u02db\3\2\2\2\u0093\u02e1\3\2"+
+ "\2\2\u0095\u02e7\3\2\2\2\u0097\u02ee\3\2\2\2\u0099\u02f7\3\2\2\2\u009b"+
+ "\u02fd\3\2\2\2\u009d\u0302\3\2\2\2\u009f\u0308\3\2\2\2\u00a1\u030e\3\2"+
+ "\2\2\u00a3\u0314\3\2\2\2\u00a5\u031c\3\2\2\2\u00a7\u0323\3\2\2\2\u00a9"+
+ "\u032b\3\2\2\2\u00ab\u0332\3\2\2\2\u00ad\u0337\3\2\2\2\u00af\u033b\3\2"+
+ "\2\2\u00b1\u0341\3\2\2\2\u00b3\u0348\3\2\2\2\u00b5\u034d\3\2\2\2\u00b7"+
+ "\u0352\3\2\2\2\u00b9\u0357\3\2\2\2\u00bb\u035a\3\2\2\2\u00bd\u035f\3\2"+
+ "\2\2\u00bf\u0365\3\2\2\2\u00c1\u036b\3\2\2\2\u00c3\u0372\3\2\2\2\u00c5"+
+ "\u0377\3\2\2\2\u00c7\u037d\3\2\2\2\u00c9\u0382\3\2\2\2\u00cb\u0387\3\2"+
+ "\2\2\u00cd\u038d\3\2\2\2\u00cf\u0395\3\2\2\2\u00d1\u0399\3\2\2\2\u00d3"+
+ "\u03a0\3\2\2\2\u00d5\u03a3\3\2\2\2\u00d7\u03a6\3\2\2\2\u00d9\u03aa\3\2"+
+ "\2\2\u00db\u03b0\3\2\2\2\u00dd\u03b2\3\2\2\2\u00df\u03b4\3\2\2\2\u00e1"+
+ "\u03bc\3\2\2\2\u00e3\u03be\3\2\2\2\u00e5\u03c0\3\2\2\2\u00e7\u03c3\3\2"+
+ "\2\2\u00e9\u03c5\3\2\2\2\u00eb\u03c8\3\2\2\2\u00ed\u03ca\3\2\2\2\u00ef"+
+ "\u03cc\3\2\2\2\u00f1\u03ce\3\2\2\2\u00f3\u03d0\3\2\2\2\u00f5\u03d2\3\2"+
+ "\2\2\u00f7\u03d5\3\2\2\2\u00f9\u03d8\3\2\2\2\u00fb\u03da\3\2\2\2\u00fd"+
+ "\u03dc\3\2\2\2\u00ff\u03e8\3\2\2\2\u0101\u0416\3\2\2\2\u0103\u041a\3\2"+
+ "\2\2\u0105\u0424\3\2\2\2\u0107\u042f\3\2\2\2\u0109\u0433\3\2\2\2\u010b"+
+ "\u043e\3\2\2\2\u010d\u0449\3\2\2\2\u010f\u0452\3\2\2\2\u0111\u0454\3\2"+
+ "\2\2\u0113\u0456\3\2\2\2\u0115\u0467\3\2\2\2\u0117\u0477\3\2\2\2\u0119"+
+ "\u047d\3\2\2\2\u011b\u011c\7*\2\2\u011c\4\3\2\2\2\u011d\u011e\7+\2\2\u011e"+
+ "\6\3\2\2\2\u011f\u0120\7.\2\2\u0120\b\3\2\2\2\u0121\u0122\7<\2\2\u0122"+
+ "\n\3\2\2\2\u0123\u0124\7C\2\2\u0124\u0125\7N\2\2\u0125\u0126\7N\2\2\u0126"+
+ "\f\3\2\2\2\u0127\u0128\7C\2\2\u0128\u0129\7P\2\2\u0129\u012a\7C\2\2\u012a"+
+ "\u012b\7N\2\2\u012b\u012c\7[\2\2\u012c\u012d\7\\\2\2\u012d\u012e\7G\2"+
+ "\2\u012e\16\3\2\2\2\u012f\u0130\7C\2\2\u0130\u0131\7P\2\2\u0131\u0132"+
+ "\7C\2\2\u0132\u0133\7N\2\2\u0133\u0134\7[\2\2\u0134\u0135\7\\\2\2\u0135"+
+ "\u0136\7G\2\2\u0136\u0137\7F\2\2\u0137\20\3\2\2\2\u0138\u0139\7C\2\2\u0139"+
+ "\u013a\7P\2\2\u013a\u013b\7F\2\2\u013b\22\3\2\2\2\u013c\u013d\7C\2\2\u013d"+
+ "\u013e\7P\2\2\u013e\u013f\7[\2\2\u013f\24\3\2\2\2\u0140\u0141\7C\2\2\u0141"+
+ "\u0142\7U\2\2\u0142\26\3\2\2\2\u0143\u0144\7C\2\2\u0144\u0145\7U\2\2\u0145"+
+ "\u0146\7E\2\2\u0146\30\3\2\2\2\u0147\u0148\7D\2\2\u0148\u0149\7G\2\2\u0149"+
+ "\u014a\7V\2\2\u014a\u014b\7Y\2\2\u014b\u014c\7G\2\2\u014c\u014d\7G\2\2"+
+ "\u014d\u014e\7P\2\2\u014e\32\3\2\2\2\u014f\u0150\7D\2\2\u0150\u0151\7"+
+ "[\2\2\u0151\34\3\2\2\2\u0152\u0153\7E\2\2\u0153\u0154\7C\2\2\u0154\u0155"+
+ "\7U\2\2\u0155\u0156\7G\2\2\u0156\36\3\2\2\2\u0157\u0158\7E\2\2\u0158\u0159"+
+ "\7C\2\2\u0159\u015a\7U\2\2\u015a\u015b\7V\2\2\u015b \3\2\2\2\u015c\u015d"+
+ "\7E\2\2\u015d\u015e\7C\2\2\u015e\u015f\7V\2\2\u015f\u0160\7C\2\2\u0160"+
+ "\u0161\7N\2\2\u0161\u0162\7Q\2\2\u0162\u0163\7I\2\2\u0163\"\3\2\2\2\u0164"+
+ "\u0165\7E\2\2\u0165\u0166\7C\2\2\u0166\u0167\7V\2\2\u0167\u0168\7C\2\2"+
+ "\u0168\u0169\7N\2\2\u0169\u016a\7Q\2\2\u016a\u016b\7I\2\2\u016b\u016c"+
+ "\7U\2\2\u016c$\3\2\2\2\u016d\u016e\7E\2\2\u016e\u016f\7Q\2\2\u016f\u0170"+
+ "\7N\2\2\u0170\u0171\7W\2\2\u0171\u0172\7O\2\2\u0172\u0173\7P\2\2\u0173"+
+ "\u0174\7U\2\2\u0174&\3\2\2\2\u0175\u0176\7E\2\2\u0176\u0177\7Q\2\2\u0177"+
+ "\u0178\7P\2\2\u0178\u0179\7X\2\2\u0179\u017a\7G\2\2\u017a\u017b\7T\2\2"+
+ "\u017b\u017c\7V\2\2\u017c(\3\2\2\2\u017d\u017e\7E\2\2\u017e\u017f\7W\2"+
+ "\2\u017f\u0180\7T\2\2\u0180\u0181\7T\2\2\u0181\u0182\7G\2\2\u0182\u0183"+
+ "\7P\2\2\u0183\u0184\7V\2\2\u0184\u0185\7a\2\2\u0185\u0186\7F\2\2\u0186"+
+ "\u0187\7C\2\2\u0187\u0188\7V\2\2\u0188\u0189\7G\2\2\u0189*\3\2\2\2\u018a"+
+ "\u018b\7E\2\2\u018b\u018c\7W\2\2\u018c\u018d\7T\2\2\u018d\u018e\7T\2\2"+
+ "\u018e\u018f\7G\2\2\u018f\u0190\7P\2\2\u0190\u0191\7V\2\2\u0191\u0192"+
+ "\7a\2\2\u0192\u0193\7V\2\2\u0193\u0194\7K\2\2\u0194\u0195\7O\2\2\u0195"+
+ "\u0196\7G\2\2\u0196,\3\2\2\2\u0197\u0198\7E\2\2\u0198\u0199\7W\2\2\u0199"+
+ "\u019a\7T\2\2\u019a\u019b\7T\2\2\u019b\u019c\7G\2\2\u019c\u019d\7P\2\2"+
+ "\u019d\u019e\7V\2\2\u019e\u019f\7a\2\2\u019f\u01a0\7V\2\2\u01a0\u01a1"+
+ "\7K\2\2\u01a1\u01a2\7O\2\2\u01a2\u01a3\7G\2\2\u01a3\u01a4\7U\2\2\u01a4"+
+ "\u01a5\7V\2\2\u01a5\u01a6\7C\2\2\u01a6\u01a7\7O\2\2\u01a7\u01a8\7R\2\2"+
+ "\u01a8.\3\2\2\2\u01a9\u01aa\7F\2\2\u01aa\u01ab\7C\2\2\u01ab\u01ac\7[\2"+
+ "\2\u01ac\60\3\2\2\2\u01ad\u01ae\7F\2\2\u01ae\u01af\7C\2\2\u01af\u01b0"+
+ "\7[\2\2\u01b0\u01b1\7U\2\2\u01b1\62\3\2\2\2\u01b2\u01b3\7F\2\2\u01b3\u01b4"+
+ "\7G\2\2\u01b4\u01b5\7D\2\2\u01b5\u01b6\7W\2\2\u01b6\u01b7\7I\2\2\u01b7"+
+ "\64\3\2\2\2\u01b8\u01b9\7F\2\2\u01b9\u01ba\7G\2\2\u01ba\u01bb\7U\2\2\u01bb"+
+ "\u01bc\7E\2\2\u01bc\66\3\2\2\2\u01bd\u01be\7F\2\2\u01be\u01bf\7G\2\2\u01bf"+
+ "\u01c0\7U\2\2\u01c0\u01c1\7E\2\2\u01c1\u01c2\7T\2\2\u01c2\u01c3\7K\2\2"+
+ "\u01c3\u01c4\7D\2\2\u01c4\u01c5\7G\2\2\u01c58\3\2\2\2\u01c6\u01c7\7F\2"+
+ "\2\u01c7\u01c8\7K\2\2\u01c8\u01c9\7U\2\2\u01c9\u01ca\7V\2\2\u01ca\u01cb"+
+ "\7K\2\2\u01cb\u01cc\7P\2\2\u01cc\u01cd\7E\2\2\u01cd\u01ce\7V\2\2\u01ce"+
+ ":\3\2\2\2\u01cf\u01d0\7G\2\2\u01d0\u01d1\7N\2\2\u01d1\u01d2\7U\2\2\u01d2"+
+ "\u01d3\7G\2\2\u01d3<\3\2\2\2\u01d4\u01d5\7G\2\2\u01d5\u01d6\7P\2\2\u01d6"+
+ "\u01d7\7F\2\2\u01d7>\3\2\2\2\u01d8\u01d9\7G\2\2\u01d9\u01da\7U\2\2\u01da"+
+ "\u01db\7E\2\2\u01db\u01dc\7C\2\2\u01dc\u01dd\7R\2\2\u01dd\u01de\7G\2\2"+
+ "\u01de@\3\2\2\2\u01df\u01e0\7G\2\2\u01e0\u01e1\7Z\2\2\u01e1\u01e2\7G\2"+
+ "\2\u01e2\u01e3\7E\2\2\u01e3\u01e4\7W\2\2\u01e4\u01e5\7V\2\2\u01e5\u01e6"+
+ "\7C\2\2\u01e6\u01e7\7D\2\2\u01e7\u01e8\7N\2\2\u01e8\u01e9\7G\2\2\u01e9"+
+ "B\3\2\2\2\u01ea\u01eb\7G\2\2\u01eb\u01ec\7Z\2\2\u01ec\u01ed\7K\2\2\u01ed"+
+ "\u01ee\7U\2\2\u01ee\u01ef\7V\2\2\u01ef\u01f0\7U\2\2\u01f0D\3\2\2\2\u01f1"+
+ "\u01f2\7G\2\2\u01f2\u01f3\7Z\2\2\u01f3\u01f4\7R\2\2\u01f4\u01f5\7N\2\2"+
+ "\u01f5\u01f6\7C\2\2\u01f6\u01f7\7K\2\2\u01f7\u01f8\7P\2\2\u01f8F\3\2\2"+
+ "\2\u01f9\u01fa\7G\2\2\u01fa\u01fb\7Z\2\2\u01fb\u01fc\7V\2\2\u01fc\u01fd"+
+ "\7T\2\2\u01fd\u01fe\7C\2\2\u01fe\u01ff\7E\2\2\u01ff\u0200\7V\2\2\u0200"+
+ "H\3\2\2\2\u0201\u0202\7H\2\2\u0202\u0203\7C\2\2\u0203\u0204\7N\2\2\u0204"+
+ "\u0205\7U\2\2\u0205\u0206\7G\2\2\u0206J\3\2\2\2\u0207\u0208\7H\2\2\u0208"+
+ "\u0209\7K\2\2\u0209\u020a\7T\2\2\u020a\u020b\7U\2\2\u020b\u020c\7V\2\2"+
+ "\u020cL\3\2\2\2\u020d\u020e\7H\2\2\u020e\u020f\7Q\2\2\u020f\u0210\7T\2"+
+ "\2\u0210N\3\2\2\2\u0211\u0212\7H\2\2\u0212\u0213\7Q\2\2\u0213\u0214\7"+
+ "T\2\2\u0214\u0215\7O\2\2\u0215\u0216\7C\2\2\u0216\u0217\7V\2\2\u0217P"+
+ "\3\2\2\2\u0218\u0219\7H\2\2\u0219\u021a\7T\2\2\u021a\u021b\7Q\2\2\u021b"+
+ "\u021c\7O\2\2\u021cR\3\2\2\2\u021d\u021e\7H\2\2\u021e\u021f\7T\2\2\u021f"+
+ "\u0220\7Q\2\2\u0220\u0221\7\\\2\2\u0221\u0222\7G\2\2\u0222\u0223\7P\2"+
+ "\2\u0223T\3\2\2\2\u0224\u0225\7H\2\2\u0225\u0226\7W\2\2\u0226\u0227\7"+
+ "N\2\2\u0227\u0228\7N\2\2\u0228V\3\2\2\2\u0229\u022a\7H\2\2\u022a\u022b"+
+ "\7W\2\2\u022b\u022c\7P\2\2\u022c\u022d\7E\2\2\u022d\u022e\7V\2\2\u022e"+
+ "\u022f\7K\2\2\u022f\u0230\7Q\2\2\u0230\u0231\7P\2\2\u0231\u0232\7U\2\2"+
+ "\u0232X\3\2\2\2\u0233\u0234\7I\2\2\u0234\u0235\7T\2\2\u0235\u0236\7C\2"+
+ "\2\u0236\u0237\7R\2\2\u0237\u0238\7J\2\2\u0238\u0239\7X\2\2\u0239\u023a"+
+ "\7K\2\2\u023a\u023b\7\\\2\2\u023bZ\3\2\2\2\u023c\u023d\7I\2\2\u023d\u023e"+
+ "\7T\2\2\u023e\u023f\7Q\2\2\u023f\u0240\7W\2\2\u0240\u0241\7R\2\2\u0241"+
+ "\\\3\2\2\2\u0242\u0243\7J\2\2\u0243\u0244\7C\2\2\u0244\u0245\7X\2\2\u0245"+
+ "\u0246\7K\2\2\u0246\u0247\7P\2\2\u0247\u0248\7I\2\2\u0248^\3\2\2\2\u0249"+
+ "\u024a\7J\2\2\u024a\u024b\7Q\2\2\u024b\u024c\7W\2\2\u024c\u024d\7T\2\2"+
+ "\u024d`\3\2\2\2\u024e\u024f\7J\2\2\u024f\u0250\7Q\2\2\u0250\u0251\7W\2"+
+ "\2\u0251\u0252\7T\2\2\u0252\u0253\7U\2\2\u0253b\3\2\2\2\u0254\u0255\7"+
+ "K\2\2\u0255\u0256\7P\2\2\u0256d\3\2\2\2\u0257\u0258\7K\2\2\u0258\u0259"+
+ "\7P\2\2\u0259\u025a\7E\2\2\u025a\u025b\7N\2\2\u025b\u025c\7W\2\2\u025c"+
+ "\u025d\7F\2\2\u025d\u025e\7G\2\2\u025ef\3\2\2\2\u025f\u0260\7K\2\2\u0260"+
+ "\u0261\7P\2\2\u0261\u0262\7P\2\2\u0262\u0263\7G\2\2\u0263\u0264\7T\2\2"+
+ "\u0264h\3\2\2\2\u0265\u0266\7K\2\2\u0266\u0267\7P\2\2\u0267\u0268\7V\2"+
+ "\2\u0268\u0269\7G\2\2\u0269\u026a\7T\2\2\u026a\u026b\7X\2\2\u026b\u026c"+
+ "\7C\2\2\u026c\u026d\7N\2\2\u026dj\3\2\2\2\u026e\u026f\7K\2\2\u026f\u0270"+
+ "\7U\2\2\u0270l\3\2\2\2\u0271\u0272\7L\2\2\u0272\u0273\7Q\2\2\u0273\u0274"+
+ "\7K\2\2\u0274\u0275\7P\2\2\u0275n\3\2\2\2\u0276\u0277\7N\2\2\u0277\u0278"+
+ "\7C\2\2\u0278\u0279\7U\2\2\u0279\u027a\7V\2\2\u027ap\3\2\2\2\u027b\u027c"+
+ "\7N\2\2\u027c\u027d\7G\2\2\u027d\u027e\7H\2\2\u027e\u027f\7V\2\2\u027f"+
+ "r\3\2\2\2\u0280\u0281\7N\2\2\u0281\u0282\7K\2\2\u0282\u0283\7M\2\2\u0283"+
+ "\u0284\7G\2\2\u0284t\3\2\2\2\u0285\u0286\7N\2\2\u0286\u0287\7K\2\2\u0287"+
+ "\u0288\7O\2\2\u0288\u0289\7K\2\2\u0289\u028a\7V\2\2\u028av\3\2\2\2\u028b"+
+ "\u028c\7O\2\2\u028c\u028d\7C\2\2\u028d\u028e\7R\2\2\u028e\u028f\7R\2\2"+
+ "\u028f\u0290\7G\2\2\u0290\u0291\7F\2\2\u0291x\3\2\2\2\u0292\u0293\7O\2"+
+ "\2\u0293\u0294\7C\2\2\u0294\u0295\7V\2\2\u0295\u0296\7E\2\2\u0296\u0297"+
+ "\7J\2\2\u0297z\3\2\2\2\u0298\u0299\7O\2\2\u0299\u029a\7K\2\2\u029a\u029b"+
+ "\7P\2\2\u029b\u029c\7W\2\2\u029c\u029d\7V\2\2\u029d\u029e\7G\2\2\u029e"+
+ "|\3\2\2\2\u029f\u02a0\7O\2\2\u02a0\u02a1\7K\2\2\u02a1\u02a2\7P\2\2\u02a2"+
+ "\u02a3\7W\2\2\u02a3\u02a4\7V\2\2\u02a4\u02a5\7G\2\2\u02a5\u02a6\7U\2\2"+
+ "\u02a6~\3\2\2\2\u02a7\u02a8\7O\2\2\u02a8\u02a9\7Q\2\2\u02a9\u02aa\7P\2"+
+ "\2\u02aa\u02ab\7V\2\2\u02ab\u02ac\7J\2\2\u02ac\u0080\3\2\2\2\u02ad\u02ae"+
+ "\7O\2\2\u02ae\u02af\7Q\2\2\u02af\u02b0\7P\2\2\u02b0\u02b1\7V\2\2\u02b1"+
+ "\u02b2\7J\2\2\u02b2\u02b3\7U\2\2\u02b3\u0082\3\2\2\2\u02b4\u02b5\7P\2"+
+ "\2\u02b5\u02b6\7C\2\2\u02b6\u02b7\7V\2\2\u02b7\u02b8\7W\2\2\u02b8\u02b9"+
+ "\7T\2\2\u02b9\u02ba\7C\2\2\u02ba\u02bb\7N\2\2\u02bb\u0084\3\2\2\2\u02bc"+
+ "\u02bd\7P\2\2\u02bd\u02be\7Q\2\2\u02be\u02bf\7V\2\2\u02bf\u0086\3\2\2"+
+ "\2\u02c0\u02c1\7P\2\2\u02c1\u02c2\7W\2\2\u02c2\u02c3\7N\2\2\u02c3\u02c4"+
+ "\7N\2\2\u02c4\u0088\3\2\2\2\u02c5\u02c6\7P\2\2\u02c6\u02c7\7W\2\2\u02c7"+
+ "\u02c8\7N\2\2\u02c8\u02c9\7N\2\2\u02c9\u02ca\7U\2\2\u02ca\u008a\3\2\2"+
+ "\2\u02cb\u02cc\7Q\2\2\u02cc\u02cd\7P\2\2\u02cd\u008c\3\2\2\2\u02ce\u02cf"+
+ "\7Q\2\2\u02cf\u02d0\7R\2\2\u02d0\u02d1\7V\2\2\u02d1\u02d2\7K\2\2\u02d2"+
+ "\u02d3\7O\2\2\u02d3\u02d4\7K\2\2\u02d4\u02d5\7\\\2\2\u02d5\u02d6\7G\2"+
+ "\2\u02d6\u02d7\7F\2\2\u02d7\u008e\3\2\2\2\u02d8\u02d9\7Q\2\2\u02d9\u02da"+
+ "\7T\2\2\u02da\u0090\3\2\2\2\u02db\u02dc\7Q\2\2\u02dc\u02dd\7T\2\2\u02dd"+
+ "\u02de\7F\2\2\u02de\u02df\7G\2\2\u02df\u02e0\7T\2\2\u02e0\u0092\3\2\2"+
+ "\2\u02e1\u02e2\7Q\2\2\u02e2\u02e3\7W\2\2\u02e3\u02e4\7V\2\2\u02e4\u02e5"+
+ "\7G\2\2\u02e5\u02e6\7T\2\2\u02e6\u0094\3\2\2\2\u02e7\u02e8\7R\2\2\u02e8"+
+ "\u02e9\7C\2\2\u02e9\u02ea\7T\2\2\u02ea\u02eb\7U\2\2\u02eb\u02ec\7G\2\2"+
+ "\u02ec\u02ed\7F\2\2\u02ed\u0096\3\2\2\2\u02ee\u02ef\7R\2\2\u02ef\u02f0"+
+ "\7J\2\2\u02f0\u02f1\7[\2\2\u02f1\u02f2\7U\2\2\u02f2\u02f3\7K\2\2\u02f3"+
+ "\u02f4\7E\2\2\u02f4\u02f5\7C\2\2\u02f5\u02f6\7N\2\2\u02f6\u0098\3\2\2"+
+ "\2\u02f7\u02f8\7R\2\2\u02f8\u02f9\7K\2\2\u02f9\u02fa\7X\2\2\u02fa\u02fb"+
+ "\7Q\2\2\u02fb\u02fc\7V\2\2\u02fc\u009a\3\2\2\2\u02fd\u02fe\7R\2\2\u02fe"+
+ "\u02ff\7N\2\2\u02ff\u0300\7C\2\2\u0300\u0301\7P\2\2\u0301\u009c\3\2\2"+
+ "\2\u0302\u0303\7T\2\2\u0303\u0304\7K\2\2\u0304\u0305\7I\2\2\u0305\u0306"+
+ "\7J\2\2\u0306\u0307\7V\2\2\u0307\u009e\3\2\2\2\u0308\u0309\7T\2\2\u0309"+
+ "\u030a\7N\2\2\u030a\u030b\7K\2\2\u030b\u030c\7M\2\2\u030c\u030d\7G\2\2"+
+ "\u030d\u00a0\3\2\2\2\u030e\u030f\7S\2\2\u030f\u0310\7W\2\2\u0310\u0311"+
+ "\7G\2\2\u0311\u0312\7T\2\2\u0312\u0313\7[\2\2\u0313\u00a2\3\2\2\2\u0314"+
+ "\u0315\7U\2\2\u0315\u0316\7E\2\2\u0316\u0317\7J\2\2\u0317\u0318\7G\2\2"+
+ "\u0318\u0319\7O\2\2\u0319\u031a\7C\2\2\u031a\u031b\7U\2\2\u031b\u00a4"+
+ "\3\2\2\2\u031c\u031d\7U\2\2\u031d\u031e\7G\2\2\u031e\u031f\7E\2\2\u031f"+
+ "\u0320\7Q\2\2\u0320\u0321\7P\2\2\u0321\u0322\7F\2\2\u0322\u00a6\3\2\2"+
+ "\2\u0323\u0324\7U\2\2\u0324\u0325\7G\2\2\u0325\u0326\7E\2\2\u0326\u0327"+
+ "\7Q\2\2\u0327\u0328\7P\2\2\u0328\u0329\7F\2\2\u0329\u032a\7U\2\2\u032a"+
+ "\u00a8\3\2\2\2\u032b\u032c\7U\2\2\u032c\u032d\7G\2\2\u032d\u032e\7N\2"+
+ "\2\u032e\u032f\7G\2\2\u032f\u0330\7E\2\2\u0330\u0331\7V\2\2\u0331\u00aa"+
+ "\3\2\2\2\u0332\u0333\7U\2\2\u0333\u0334\7J\2\2\u0334\u0335\7Q\2\2\u0335"+
+ "\u0336\7Y\2\2\u0336\u00ac\3\2\2\2\u0337\u0338\7U\2\2\u0338\u0339\7[\2"+
+ "\2\u0339\u033a\7U\2\2\u033a\u00ae\3\2\2\2\u033b\u033c\7V\2\2\u033c\u033d"+
+ "\7C\2\2\u033d\u033e\7D\2\2\u033e\u033f\7N\2\2\u033f\u0340\7G\2\2\u0340"+
+ "\u00b0\3\2\2\2\u0341\u0342\7V\2\2\u0342\u0343\7C\2\2\u0343\u0344\7D\2"+
+ "\2\u0344\u0345\7N\2\2\u0345\u0346\7G\2\2\u0346\u0347\7U\2\2\u0347\u00b2"+
+ "\3\2\2\2\u0348\u0349\7V\2\2\u0349\u034a\7G\2\2\u034a\u034b\7Z\2\2\u034b"+
+ "\u034c\7V\2\2\u034c\u00b4\3\2\2\2\u034d\u034e\7V\2\2\u034e\u034f\7J\2"+
+ "\2\u034f\u0350\7G\2\2\u0350\u0351\7P\2\2\u0351\u00b6\3\2\2\2\u0352\u0353"+
+ "\7V\2\2\u0353\u0354\7T\2\2\u0354\u0355\7W\2\2\u0355\u0356\7G\2\2\u0356"+
+ "\u00b8\3\2\2\2\u0357\u0358\7V\2\2\u0358\u0359\7Q\2\2\u0359\u00ba\3\2\2"+
+ "\2\u035a\u035b\7V\2\2\u035b\u035c\7[\2\2\u035c\u035d\7R\2\2\u035d\u035e"+
+ "\7G\2\2\u035e\u00bc\3\2\2\2\u035f\u0360\7V\2\2\u0360\u0361\7[\2\2\u0361"+
+ "\u0362\7R\2\2\u0362\u0363\7G\2\2\u0363\u0364\7U\2\2\u0364\u00be\3\2\2"+
+ "\2\u0365\u0366\7W\2\2\u0366\u0367\7U\2\2\u0367\u0368\7K\2\2\u0368\u0369"+
+ "\7P\2\2\u0369\u036a\7I\2\2\u036a\u00c0\3\2\2\2\u036b\u036c\7X\2\2\u036c"+
+ "\u036d\7G\2\2\u036d\u036e\7T\2\2\u036e\u036f\7K\2\2\u036f\u0370\7H\2\2"+
+ "\u0370\u0371\7[\2\2\u0371\u00c2\3\2\2\2\u0372\u0373\7Y\2\2\u0373\u0374"+
+ "\7J\2\2\u0374\u0375\7G\2\2\u0375\u0376\7P\2\2\u0376\u00c4\3\2\2\2\u0377"+
+ "\u0378\7Y\2\2\u0378\u0379\7J\2\2\u0379\u037a\7G\2\2\u037a\u037b\7T\2\2"+
+ "\u037b\u037c\7G\2\2\u037c\u00c6\3\2\2\2\u037d\u037e\7Y\2\2\u037e\u037f"+
+ "\7K\2\2\u037f\u0380\7V\2\2\u0380\u0381\7J\2\2\u0381\u00c8\3\2\2\2\u0382"+
+ "\u0383\7[\2\2\u0383\u0384\7G\2\2\u0384\u0385\7C\2\2\u0385\u0386\7T\2\2"+
+ "\u0386\u00ca\3\2\2\2\u0387\u0388\7[\2\2\u0388\u0389\7G\2\2\u0389\u038a"+
+ "\7C\2\2\u038a\u038b\7T\2\2\u038b\u038c\7U\2\2\u038c\u00cc\3\2\2\2\u038d"+
+ "\u038e\7}\2\2\u038e\u038f\7G\2\2\u038f\u0390\7U\2\2\u0390\u0391\7E\2\2"+
+ "\u0391\u0392\7C\2\2\u0392\u0393\7R\2\2\u0393\u0394\7G\2\2\u0394\u00ce"+
+ "\3\2\2\2\u0395\u0396\7}\2\2\u0396\u0397\7H\2\2\u0397\u0398\7P\2\2\u0398"+
+ "\u00d0\3\2\2\2\u0399\u039a\7}\2\2\u039a\u039b\7N\2\2\u039b\u039c\7K\2"+
+ "\2\u039c\u039d\7O\2\2\u039d\u039e\7K\2\2\u039e\u039f\7V\2\2\u039f\u00d2"+
+ "\3\2\2\2\u03a0\u03a1\7}\2\2\u03a1\u03a2\7F\2\2\u03a2\u00d4\3\2\2\2\u03a3"+
+ "\u03a4\7}\2\2\u03a4\u03a5\7V\2\2\u03a5\u00d6\3\2\2\2\u03a6\u03a7\7}\2"+
+ "\2\u03a7\u03a8\7V\2\2\u03a8\u03a9\7U\2\2\u03a9\u00d8\3\2\2\2\u03aa\u03ab"+
+ "\7}\2\2\u03ab\u03ac\7I\2\2\u03ac\u03ad\7W\2\2\u03ad\u03ae\7K\2\2\u03ae"+
+ "\u03af\7F\2\2\u03af\u00da\3\2\2\2\u03b0\u03b1\7\177\2\2\u03b1\u00dc\3"+
+ "\2\2\2\u03b2\u03b3\7?\2\2\u03b3\u00de\3\2\2\2\u03b4\u03b5\7>\2\2\u03b5"+
+ "\u03b6\7?\2\2\u03b6\u03b7\7@\2\2\u03b7\u00e0\3\2\2\2\u03b8\u03b9\7>\2"+
+ "\2\u03b9\u03bd\7@\2\2\u03ba\u03bb\7#\2\2\u03bb\u03bd\7?\2\2\u03bc\u03b8"+
+ "\3\2\2\2\u03bc\u03ba\3\2\2\2\u03bd\u00e2\3\2\2\2\u03be\u03bf\7>\2\2\u03bf"+
+ "\u00e4\3\2\2\2\u03c0\u03c1\7>\2\2\u03c1\u03c2\7?\2\2\u03c2\u00e6\3\2\2"+
+ "\2\u03c3\u03c4\7@\2\2\u03c4\u00e8\3\2\2\2\u03c5\u03c6\7@\2\2\u03c6\u03c7"+
+ "\7?\2\2\u03c7\u00ea\3\2\2\2\u03c8\u03c9\7-\2\2\u03c9\u00ec\3\2\2\2\u03ca"+
+ "\u03cb\7/\2\2\u03cb\u00ee\3\2\2\2\u03cc\u03cd\7,\2\2\u03cd\u00f0\3\2\2"+
+ "\2\u03ce\u03cf\7\61\2\2\u03cf\u00f2\3\2\2\2\u03d0\u03d1\7\'\2\2\u03d1"+
+ "\u00f4\3\2\2\2\u03d2\u03d3\7<\2\2\u03d3\u03d4\7<\2\2\u03d4\u00f6\3\2\2"+
+ "\2\u03d5\u03d6\7~\2\2\u03d6\u03d7\7~\2\2\u03d7\u00f8\3\2\2\2\u03d8\u03d9"+
+ "\7\60\2\2\u03d9\u00fa\3\2\2\2\u03da\u03db\7A\2\2\u03db\u00fc\3\2\2\2\u03dc"+
+ "\u03e2\7)\2\2\u03dd\u03e1\n\2\2\2\u03de\u03df\7)\2\2\u03df\u03e1\7)\2"+
+ "\2\u03e0\u03dd\3\2\2\2\u03e0\u03de\3\2\2\2\u03e1\u03e4\3\2\2\2\u03e2\u03e0"+
+ "\3\2\2\2\u03e2\u03e3\3\2\2\2\u03e3\u03e5\3\2\2\2\u03e4\u03e2\3\2\2\2\u03e5"+
+ "\u03e6\7)\2\2\u03e6\u00fe\3\2\2\2\u03e7\u03e9\5\u010f\u0088\2\u03e8\u03e7"+
+ "\3\2\2\2\u03e9\u03ea\3\2\2\2\u03ea\u03e8\3\2\2\2\u03ea\u03eb\3\2\2\2\u03eb"+
+ "\u0100\3\2\2\2\u03ec\u03ee\5\u010f\u0088\2\u03ed\u03ec\3\2\2\2\u03ee\u03ef"+
+ "\3\2\2\2\u03ef\u03ed\3\2\2\2\u03ef\u03f0\3\2\2\2\u03f0\u03f1\3\2\2\2\u03f1"+
+ "\u03f5\5\u00f9}\2\u03f2\u03f4\5\u010f\u0088\2\u03f3\u03f2\3\2\2\2\u03f4"+
+ "\u03f7\3\2\2\2\u03f5\u03f3\3\2\2\2\u03f5\u03f6\3\2\2\2\u03f6\u0417\3\2"+
+ "\2\2\u03f7\u03f5\3\2\2\2\u03f8\u03fa\5\u00f9}\2\u03f9\u03fb\5\u010f\u0088"+
+ "\2\u03fa\u03f9\3\2\2\2\u03fb\u03fc\3\2\2\2\u03fc\u03fa\3\2\2\2\u03fc\u03fd"+
+ "\3\2\2\2\u03fd\u0417\3\2\2\2\u03fe\u0400\5\u010f\u0088\2\u03ff\u03fe\3"+
+ "\2\2\2\u0400\u0401\3\2\2\2\u0401\u03ff\3\2\2\2\u0401\u0402\3\2\2\2\u0402"+
+ "\u040a\3\2\2\2\u0403\u0407\5\u00f9}\2\u0404\u0406\5\u010f\u0088\2\u0405"+
+ "\u0404\3\2\2\2\u0406\u0409\3\2\2\2\u0407\u0405\3\2\2\2\u0407\u0408\3\2"+
+ "\2\2\u0408\u040b\3\2\2\2\u0409\u0407\3\2\2\2\u040a\u0403\3\2\2\2\u040a"+
+ "\u040b\3\2\2\2\u040b\u040c\3\2\2\2\u040c\u040d\5\u010d\u0087\2\u040d\u0417"+
+ "\3\2\2\2\u040e\u0410\5\u00f9}\2\u040f\u0411\5\u010f\u0088\2\u0410\u040f"+
+ "\3\2\2\2\u0411\u0412\3\2\2\2\u0412\u0410\3\2\2\2\u0412\u0413\3\2\2\2\u0413"+
+ "\u0414\3\2\2\2\u0414\u0415\5\u010d\u0087\2\u0415\u0417\3\2\2\2\u0416\u03ed"+
+ "\3\2\2\2\u0416\u03f8\3\2\2\2\u0416\u03ff\3\2\2\2\u0416\u040e\3\2\2\2\u0417"+
+ "\u0102\3\2\2\2\u0418\u041b\5\u0111\u0089\2\u0419\u041b\7a\2\2\u041a\u0418"+
+ "\3\2\2\2\u041a\u0419\3\2\2\2\u041b\u0421\3\2\2\2\u041c\u0420\5\u0111\u0089"+
+ "\2\u041d\u0420\5\u010f\u0088\2\u041e\u0420\t\3\2\2\u041f\u041c\3\2\2\2"+
+ "\u041f\u041d\3\2\2\2\u041f\u041e\3\2\2\2\u0420\u0423\3\2\2\2\u0421\u041f"+
+ "\3\2\2\2\u0421\u0422\3\2\2\2\u0422\u0104\3\2\2\2\u0423\u0421\3\2\2\2\u0424"+
+ "\u0428\5\u010f\u0088\2\u0425\u0429\5\u0111\u0089\2\u0426\u0429\5\u010f"+
+ "\u0088\2\u0427\u0429\t\3\2\2\u0428\u0425\3\2\2\2\u0428\u0426\3\2\2\2\u0428"+
+ "\u0427\3\2\2\2\u0429\u042a\3\2\2\2\u042a\u0428\3\2\2\2\u042a\u042b\3\2"+
+ "\2\2\u042b\u0106\3\2\2\2\u042c\u0430\5\u0111\u0089\2\u042d\u0430\5\u010f"+
+ "\u0088\2\u042e\u0430\7a\2\2\u042f\u042c\3\2\2\2\u042f\u042d\3\2\2\2\u042f"+
+ "\u042e\3\2\2\2\u0430\u0431\3\2\2\2\u0431\u042f\3\2\2\2\u0431\u0432\3\2"+
+ "\2\2\u0432\u0108\3\2\2\2\u0433\u0439\7$\2\2\u0434\u0438\n\4\2\2\u0435"+
+ "\u0436\7$\2\2\u0436\u0438\7$\2\2\u0437\u0434\3\2\2\2\u0437\u0435\3\2\2"+
+ "\2\u0438\u043b\3\2\2\2\u0439\u0437\3\2\2\2\u0439\u043a\3\2\2\2\u043a\u043c"+
+ "\3\2\2\2\u043b\u0439\3\2\2\2\u043c\u043d\7$\2\2\u043d\u010a\3\2\2\2\u043e"+
+ "\u0444\7b\2\2\u043f\u0443\n\5\2\2\u0440\u0441\7b\2\2\u0441\u0443\7b\2"+
+ "\2\u0442\u043f\3\2\2\2\u0442\u0440\3\2\2\2\u0443\u0446\3\2\2\2\u0444\u0442"+
+ "\3\2\2\2\u0444\u0445\3\2\2\2\u0445\u0447\3\2\2\2\u0446\u0444\3\2\2\2\u0447"+
+ "\u0448\7b\2\2\u0448\u010c\3\2\2\2\u0449\u044b\7G\2\2\u044a\u044c\t\6\2"+
+ "\2\u044b\u044a\3\2\2\2\u044b\u044c\3\2\2\2\u044c\u044e\3\2\2\2\u044d\u044f"+
+ "\5\u010f\u0088\2\u044e\u044d\3\2\2\2\u044f\u0450\3\2\2\2\u0450\u044e\3"+
+ "\2\2\2\u0450\u0451\3\2\2\2\u0451\u010e\3\2\2\2\u0452\u0453\t\7\2\2\u0453"+
+ "\u0110\3\2\2\2\u0454\u0455\t\b\2\2\u0455\u0112\3\2\2\2\u0456\u0457\7/"+
+ "\2\2\u0457\u0458\7/\2\2\u0458\u045c\3\2\2\2\u0459\u045b\n\t\2\2\u045a"+
+ "\u0459\3\2\2\2\u045b\u045e\3\2\2\2\u045c\u045a\3\2\2\2\u045c\u045d\3\2"+
+ "\2\2\u045d\u0460\3\2\2\2\u045e\u045c\3\2\2\2\u045f\u0461\7\17\2\2\u0460"+
+ "\u045f\3\2\2\2\u0460\u0461\3\2\2\2\u0461\u0463\3\2\2\2\u0462\u0464\7\f"+
+ "\2\2\u0463\u0462\3\2\2\2\u0463\u0464\3\2\2\2\u0464\u0465\3\2\2\2\u0465"+
+ "\u0466\b\u008a\2\2\u0466\u0114\3\2\2\2\u0467\u0468\7\61\2\2\u0468\u0469"+
+ "\7,\2\2\u0469\u046e\3\2\2\2\u046a\u046d\5\u0115\u008b\2\u046b\u046d\13"+
+ "\2\2\2\u046c\u046a\3\2\2\2\u046c\u046b\3\2\2\2\u046d\u0470\3\2\2\2\u046e"+
+ "\u046f\3\2\2\2\u046e\u046c\3\2\2\2\u046f\u0471\3\2\2\2\u0470\u046e\3\2"+
+ "\2\2\u0471\u0472\7,\2\2\u0472\u0473\7\61\2\2\u0473\u0474\3\2\2\2\u0474"+
+ "\u0475\b\u008b\2\2\u0475\u0116\3\2\2\2\u0476\u0478\t\n\2\2\u0477\u0476"+
+ "\3\2\2\2\u0478\u0479\3\2\2\2\u0479\u0477\3\2\2\2\u0479\u047a\3\2\2\2\u047a"+
+ "\u047b\3\2\2\2\u047b\u047c\b\u008c\2\2\u047c\u0118\3\2\2\2\u047d\u047e"+
+ "\13\2\2\2\u047e\u011a\3\2\2\2\"\2\u03bc\u03e0\u03e2\u03ea\u03ef\u03f5"+
+ "\u03fc\u0401\u0407\u040a\u0412\u0416\u041a\u041f\u0421\u0428\u042a\u042f"+
+ "\u0431\u0437\u0439\u0442\u0444\u044b\u0450\u045c\u0460\u0463\u046c\u046e"+
+ "\u0479\3\2\3\2";
public static final ATN _ATN =
new ATNDeserializer().deserialize(_serializedATN.toCharArray());
static {
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java
index c0845b7adb5..671368342e8 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java
@@ -283,6 +283,16 @@ interface SqlBaseListener extends ParseTreeListener {
* @param ctx the parse tree
*/
void exitSetQuantifier(SqlBaseParser.SetQuantifierContext ctx);
+ /**
+ * Enter a parse tree produced by {@link SqlBaseParser#selectItems}.
+ * @param ctx the parse tree
+ */
+ void enterSelectItems(SqlBaseParser.SelectItemsContext ctx);
+ /**
+ * Exit a parse tree produced by {@link SqlBaseParser#selectItems}.
+ * @param ctx the parse tree
+ */
+ void exitSelectItems(SqlBaseParser.SelectItemsContext ctx);
/**
* Enter a parse tree produced by the {@code selectExpression}
* labeled alternative in {@link SqlBaseParser#selectItem}.
@@ -371,6 +381,36 @@ interface SqlBaseListener extends ParseTreeListener {
* @param ctx the parse tree
*/
void exitAliasedRelation(SqlBaseParser.AliasedRelationContext ctx);
+ /**
+ * Enter a parse tree produced by {@link SqlBaseParser#pivotClause}.
+ * @param ctx the parse tree
+ */
+ void enterPivotClause(SqlBaseParser.PivotClauseContext ctx);
+ /**
+ * Exit a parse tree produced by {@link SqlBaseParser#pivotClause}.
+ * @param ctx the parse tree
+ */
+ void exitPivotClause(SqlBaseParser.PivotClauseContext ctx);
+ /**
+ * Enter a parse tree produced by {@link SqlBaseParser#pivotArgs}.
+ * @param ctx the parse tree
+ */
+ void enterPivotArgs(SqlBaseParser.PivotArgsContext ctx);
+ /**
+ * Exit a parse tree produced by {@link SqlBaseParser#pivotArgs}.
+ * @param ctx the parse tree
+ */
+ void exitPivotArgs(SqlBaseParser.PivotArgsContext ctx);
+ /**
+ * Enter a parse tree produced by {@link SqlBaseParser#namedValueExpression}.
+ * @param ctx the parse tree
+ */
+ void enterNamedValueExpression(SqlBaseParser.NamedValueExpressionContext ctx);
+ /**
+ * Exit a parse tree produced by {@link SqlBaseParser#namedValueExpression}.
+ * @param ctx the parse tree
+ */
+ void exitNamedValueExpression(SqlBaseParser.NamedValueExpressionContext ctx);
/**
* Enter a parse tree produced by {@link SqlBaseParser#expression}.
* @param ctx the parse tree
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java
index 76e0f4654df..63cc1bd7a3f 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java
@@ -22,51 +22,54 @@ class SqlBaseParser extends Parser {
COLUMNS=18, CONVERT=19, CURRENT_DATE=20, CURRENT_TIME=21, CURRENT_TIMESTAMP=22,
DAY=23, DAYS=24, DEBUG=25, DESC=26, DESCRIBE=27, DISTINCT=28, ELSE=29,
END=30, ESCAPE=31, EXECUTABLE=32, EXISTS=33, EXPLAIN=34, EXTRACT=35, FALSE=36,
- FIRST=37, FORMAT=38, FROM=39, FROZEN=40, FULL=41, FUNCTIONS=42, GRAPHVIZ=43,
- GROUP=44, HAVING=45, HOUR=46, HOURS=47, IN=48, INCLUDE=49, INNER=50, INTERVAL=51,
- IS=52, JOIN=53, LAST=54, LEFT=55, LIKE=56, LIMIT=57, MAPPED=58, MATCH=59,
- MINUTE=60, MINUTES=61, MONTH=62, MONTHS=63, NATURAL=64, NOT=65, NULL=66,
- NULLS=67, ON=68, OPTIMIZED=69, OR=70, ORDER=71, OUTER=72, PARSED=73, PHYSICAL=74,
- PLAN=75, RIGHT=76, RLIKE=77, QUERY=78, SCHEMAS=79, SECOND=80, SECONDS=81,
- SELECT=82, SHOW=83, SYS=84, TABLE=85, TABLES=86, TEXT=87, THEN=88, TRUE=89,
- TO=90, TYPE=91, TYPES=92, USING=93, VERIFY=94, WHEN=95, WHERE=96, WITH=97,
- YEAR=98, YEARS=99, ESCAPE_ESC=100, FUNCTION_ESC=101, LIMIT_ESC=102, DATE_ESC=103,
- TIME_ESC=104, TIMESTAMP_ESC=105, GUID_ESC=106, ESC_END=107, EQ=108, NULLEQ=109,
- NEQ=110, LT=111, LTE=112, GT=113, GTE=114, PLUS=115, MINUS=116, ASTERISK=117,
- SLASH=118, PERCENT=119, CAST_OP=120, CONCAT=121, DOT=122, PARAM=123, STRING=124,
- INTEGER_VALUE=125, DECIMAL_VALUE=126, IDENTIFIER=127, DIGIT_IDENTIFIER=128,
- TABLE_IDENTIFIER=129, QUOTED_IDENTIFIER=130, BACKQUOTED_IDENTIFIER=131,
- SIMPLE_COMMENT=132, BRACKETED_COMMENT=133, WS=134, UNRECOGNIZED=135, DELIMITER=136;
+ FIRST=37, FOR=38, FORMAT=39, FROM=40, FROZEN=41, FULL=42, FUNCTIONS=43,
+ GRAPHVIZ=44, GROUP=45, HAVING=46, HOUR=47, HOURS=48, IN=49, INCLUDE=50,
+ INNER=51, INTERVAL=52, IS=53, JOIN=54, LAST=55, LEFT=56, LIKE=57, LIMIT=58,
+ MAPPED=59, MATCH=60, MINUTE=61, MINUTES=62, MONTH=63, MONTHS=64, NATURAL=65,
+ NOT=66, NULL=67, NULLS=68, ON=69, OPTIMIZED=70, OR=71, ORDER=72, OUTER=73,
+ PARSED=74, PHYSICAL=75, PIVOT=76, PLAN=77, RIGHT=78, RLIKE=79, QUERY=80,
+ SCHEMAS=81, SECOND=82, SECONDS=83, SELECT=84, SHOW=85, SYS=86, TABLE=87,
+ TABLES=88, TEXT=89, THEN=90, TRUE=91, TO=92, TYPE=93, TYPES=94, USING=95,
+ VERIFY=96, WHEN=97, WHERE=98, WITH=99, YEAR=100, YEARS=101, ESCAPE_ESC=102,
+ FUNCTION_ESC=103, LIMIT_ESC=104, DATE_ESC=105, TIME_ESC=106, TIMESTAMP_ESC=107,
+ GUID_ESC=108, ESC_END=109, EQ=110, NULLEQ=111, NEQ=112, LT=113, LTE=114,
+ GT=115, GTE=116, PLUS=117, MINUS=118, ASTERISK=119, SLASH=120, PERCENT=121,
+ CAST_OP=122, CONCAT=123, DOT=124, PARAM=125, STRING=126, INTEGER_VALUE=127,
+ DECIMAL_VALUE=128, IDENTIFIER=129, DIGIT_IDENTIFIER=130, TABLE_IDENTIFIER=131,
+ QUOTED_IDENTIFIER=132, BACKQUOTED_IDENTIFIER=133, SIMPLE_COMMENT=134,
+ BRACKETED_COMMENT=135, WS=136, UNRECOGNIZED=137, DELIMITER=138;
public static final int
RULE_singleStatement = 0, RULE_singleExpression = 1, RULE_statement = 2,
RULE_query = 3, RULE_queryNoWith = 4, RULE_limitClause = 5, RULE_queryTerm = 6,
RULE_orderBy = 7, RULE_querySpecification = 8, RULE_fromClause = 9, RULE_groupBy = 10,
RULE_groupingElement = 11, RULE_groupingExpressions = 12, RULE_namedQuery = 13,
- RULE_setQuantifier = 14, RULE_selectItem = 15, RULE_relation = 16, RULE_joinRelation = 17,
- RULE_joinType = 18, RULE_joinCriteria = 19, RULE_relationPrimary = 20,
- RULE_expression = 21, RULE_booleanExpression = 22, RULE_matchQueryOptions = 23,
- RULE_predicated = 24, RULE_predicate = 25, RULE_likePattern = 26, RULE_pattern = 27,
- RULE_patternEscape = 28, RULE_valueExpression = 29, RULE_primaryExpression = 30,
- RULE_builtinDateTimeFunction = 31, RULE_castExpression = 32, RULE_castTemplate = 33,
- RULE_convertTemplate = 34, RULE_extractExpression = 35, RULE_extractTemplate = 36,
- RULE_functionExpression = 37, RULE_functionTemplate = 38, RULE_functionName = 39,
- RULE_constant = 40, RULE_comparisonOperator = 41, RULE_booleanValue = 42,
- RULE_interval = 43, RULE_intervalField = 44, RULE_dataType = 45, RULE_qualifiedName = 46,
- RULE_identifier = 47, RULE_tableIdentifier = 48, RULE_quoteIdentifier = 49,
- RULE_unquoteIdentifier = 50, RULE_number = 51, RULE_string = 52, RULE_whenClause = 53,
- RULE_nonReserved = 54;
+ RULE_setQuantifier = 14, RULE_selectItems = 15, RULE_selectItem = 16,
+ RULE_relation = 17, RULE_joinRelation = 18, RULE_joinType = 19, RULE_joinCriteria = 20,
+ RULE_relationPrimary = 21, RULE_pivotClause = 22, RULE_pivotArgs = 23,
+ RULE_namedValueExpression = 24, RULE_expression = 25, RULE_booleanExpression = 26,
+ RULE_matchQueryOptions = 27, RULE_predicated = 28, RULE_predicate = 29,
+ RULE_likePattern = 30, RULE_pattern = 31, RULE_patternEscape = 32, RULE_valueExpression = 33,
+ RULE_primaryExpression = 34, RULE_builtinDateTimeFunction = 35, RULE_castExpression = 36,
+ RULE_castTemplate = 37, RULE_convertTemplate = 38, RULE_extractExpression = 39,
+ RULE_extractTemplate = 40, RULE_functionExpression = 41, RULE_functionTemplate = 42,
+ RULE_functionName = 43, RULE_constant = 44, RULE_comparisonOperator = 45,
+ RULE_booleanValue = 46, RULE_interval = 47, RULE_intervalField = 48, RULE_dataType = 49,
+ RULE_qualifiedName = 50, RULE_identifier = 51, RULE_tableIdentifier = 52,
+ RULE_quoteIdentifier = 53, RULE_unquoteIdentifier = 54, RULE_number = 55,
+ RULE_string = 56, RULE_whenClause = 57, RULE_nonReserved = 58;
public static final String[] ruleNames = {
"singleStatement", "singleExpression", "statement", "query", "queryNoWith",
"limitClause", "queryTerm", "orderBy", "querySpecification", "fromClause",
"groupBy", "groupingElement", "groupingExpressions", "namedQuery", "setQuantifier",
- "selectItem", "relation", "joinRelation", "joinType", "joinCriteria",
- "relationPrimary", "expression", "booleanExpression", "matchQueryOptions",
- "predicated", "predicate", "likePattern", "pattern", "patternEscape",
- "valueExpression", "primaryExpression", "builtinDateTimeFunction", "castExpression",
- "castTemplate", "convertTemplate", "extractExpression", "extractTemplate",
- "functionExpression", "functionTemplate", "functionName", "constant",
- "comparisonOperator", "booleanValue", "interval", "intervalField", "dataType",
- "qualifiedName", "identifier", "tableIdentifier", "quoteIdentifier", "unquoteIdentifier",
+ "selectItems", "selectItem", "relation", "joinRelation", "joinType", "joinCriteria",
+ "relationPrimary", "pivotClause", "pivotArgs", "namedValueExpression",
+ "expression", "booleanExpression", "matchQueryOptions", "predicated",
+ "predicate", "likePattern", "pattern", "patternEscape", "valueExpression",
+ "primaryExpression", "builtinDateTimeFunction", "castExpression", "castTemplate",
+ "convertTemplate", "extractExpression", "extractTemplate", "functionExpression",
+ "functionTemplate", "functionName", "constant", "comparisonOperator",
+ "booleanValue", "interval", "intervalField", "dataType", "qualifiedName",
+ "identifier", "tableIdentifier", "quoteIdentifier", "unquoteIdentifier",
"number", "string", "whenClause", "nonReserved"
};
@@ -76,40 +79,40 @@ class SqlBaseParser extends Parser {
"'CATALOG'", "'CATALOGS'", "'COLUMNS'", "'CONVERT'", "'CURRENT_DATE'",
"'CURRENT_TIME'", "'CURRENT_TIMESTAMP'", "'DAY'", "'DAYS'", "'DEBUG'",
"'DESC'", "'DESCRIBE'", "'DISTINCT'", "'ELSE'", "'END'", "'ESCAPE'", "'EXECUTABLE'",
- "'EXISTS'", "'EXPLAIN'", "'EXTRACT'", "'FALSE'", "'FIRST'", "'FORMAT'",
+ "'EXISTS'", "'EXPLAIN'", "'EXTRACT'", "'FALSE'", "'FIRST'", "'FOR'", "'FORMAT'",
"'FROM'", "'FROZEN'", "'FULL'", "'FUNCTIONS'", "'GRAPHVIZ'", "'GROUP'",
"'HAVING'", "'HOUR'", "'HOURS'", "'IN'", "'INCLUDE'", "'INNER'", "'INTERVAL'",
"'IS'", "'JOIN'", "'LAST'", "'LEFT'", "'LIKE'", "'LIMIT'", "'MAPPED'",
"'MATCH'", "'MINUTE'", "'MINUTES'", "'MONTH'", "'MONTHS'", "'NATURAL'",
"'NOT'", "'NULL'", "'NULLS'", "'ON'", "'OPTIMIZED'", "'OR'", "'ORDER'",
- "'OUTER'", "'PARSED'", "'PHYSICAL'", "'PLAN'", "'RIGHT'", "'RLIKE'", "'QUERY'",
- "'SCHEMAS'", "'SECOND'", "'SECONDS'", "'SELECT'", "'SHOW'", "'SYS'", "'TABLE'",
- "'TABLES'", "'TEXT'", "'THEN'", "'TRUE'", "'TO'", "'TYPE'", "'TYPES'",
- "'USING'", "'VERIFY'", "'WHEN'", "'WHERE'", "'WITH'", "'YEAR'", "'YEARS'",
- "'{ESCAPE'", "'{FN'", "'{LIMIT'", "'{D'", "'{T'", "'{TS'", "'{GUID'",
- "'}'", "'='", "'<=>'", null, "'<'", "'<='", "'>'", "'>='", "'+'", "'-'",
- "'*'", "'/'", "'%'", "'::'", "'||'", "'.'", "'?'"
+ "'OUTER'", "'PARSED'", "'PHYSICAL'", "'PIVOT'", "'PLAN'", "'RIGHT'", "'RLIKE'",
+ "'QUERY'", "'SCHEMAS'", "'SECOND'", "'SECONDS'", "'SELECT'", "'SHOW'",
+ "'SYS'", "'TABLE'", "'TABLES'", "'TEXT'", "'THEN'", "'TRUE'", "'TO'",
+ "'TYPE'", "'TYPES'", "'USING'", "'VERIFY'", "'WHEN'", "'WHERE'", "'WITH'",
+ "'YEAR'", "'YEARS'", "'{ESCAPE'", "'{FN'", "'{LIMIT'", "'{D'", "'{T'",
+ "'{TS'", "'{GUID'", "'}'", "'='", "'<=>'", null, "'<'", "'<='", "'>'",
+ "'>='", "'+'", "'-'", "'*'", "'/'", "'%'", "'::'", "'||'", "'.'", "'?'"
};
private static final String[] _SYMBOLIC_NAMES = {
null, null, null, null, null, "ALL", "ANALYZE", "ANALYZED", "AND", "ANY",
"AS", "ASC", "BETWEEN", "BY", "CASE", "CAST", "CATALOG", "CATALOGS", "COLUMNS",
"CONVERT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "DAY",
"DAYS", "DEBUG", "DESC", "DESCRIBE", "DISTINCT", "ELSE", "END", "ESCAPE",
- "EXECUTABLE", "EXISTS", "EXPLAIN", "EXTRACT", "FALSE", "FIRST", "FORMAT",
- "FROM", "FROZEN", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP", "HAVING",
- "HOUR", "HOURS", "IN", "INCLUDE", "INNER", "INTERVAL", "IS", "JOIN", "LAST",
- "LEFT", "LIKE", "LIMIT", "MAPPED", "MATCH", "MINUTE", "MINUTES", "MONTH",
- "MONTHS", "NATURAL", "NOT", "NULL", "NULLS", "ON", "OPTIMIZED", "OR",
- "ORDER", "OUTER", "PARSED", "PHYSICAL", "PLAN", "RIGHT", "RLIKE", "QUERY",
- "SCHEMAS", "SECOND", "SECONDS", "SELECT", "SHOW", "SYS", "TABLE", "TABLES",
- "TEXT", "THEN", "TRUE", "TO", "TYPE", "TYPES", "USING", "VERIFY", "WHEN",
- "WHERE", "WITH", "YEAR", "YEARS", "ESCAPE_ESC", "FUNCTION_ESC", "LIMIT_ESC",
- "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC", "GUID_ESC", "ESC_END", "EQ",
- "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK",
- "SLASH", "PERCENT", "CAST_OP", "CONCAT", "DOT", "PARAM", "STRING", "INTEGER_VALUE",
- "DECIMAL_VALUE", "IDENTIFIER", "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER",
- "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", "SIMPLE_COMMENT", "BRACKETED_COMMENT",
- "WS", "UNRECOGNIZED", "DELIMITER"
+ "EXECUTABLE", "EXISTS", "EXPLAIN", "EXTRACT", "FALSE", "FIRST", "FOR",
+ "FORMAT", "FROM", "FROZEN", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP",
+ "HAVING", "HOUR", "HOURS", "IN", "INCLUDE", "INNER", "INTERVAL", "IS",
+ "JOIN", "LAST", "LEFT", "LIKE", "LIMIT", "MAPPED", "MATCH", "MINUTE",
+ "MINUTES", "MONTH", "MONTHS", "NATURAL", "NOT", "NULL", "NULLS", "ON",
+ "OPTIMIZED", "OR", "ORDER", "OUTER", "PARSED", "PHYSICAL", "PIVOT", "PLAN",
+ "RIGHT", "RLIKE", "QUERY", "SCHEMAS", "SECOND", "SECONDS", "SELECT", "SHOW",
+ "SYS", "TABLE", "TABLES", "TEXT", "THEN", "TRUE", "TO", "TYPE", "TYPES",
+ "USING", "VERIFY", "WHEN", "WHERE", "WITH", "YEAR", "YEARS", "ESCAPE_ESC",
+ "FUNCTION_ESC", "LIMIT_ESC", "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC",
+ "GUID_ESC", "ESC_END", "EQ", "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE",
+ "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CAST_OP", "CONCAT",
+ "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", "IDENTIFIER",
+ "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER",
+ "SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", "UNRECOGNIZED", "DELIMITER"
};
public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);
@@ -190,9 +193,9 @@ class SqlBaseParser extends Parser {
try {
enterOuterAlt(_localctx, 1);
{
- setState(110);
+ setState(118);
statement();
- setState(111);
+ setState(119);
match(EOF);
}
}
@@ -237,9 +240,9 @@ class SqlBaseParser extends Parser {
try {
enterOuterAlt(_localctx, 1);
{
- setState(113);
+ setState(121);
expression();
- setState(114);
+ setState(122);
match(EOF);
}
}
@@ -606,14 +609,14 @@ class SqlBaseParser extends Parser {
enterRule(_localctx, 4, RULE_statement);
int _la;
try {
- setState(229);
+ setState(237);
_errHandler.sync(this);
switch ( getInterpreter().adaptivePredict(_input,22,_ctx) ) {
case 1:
_localctx = new StatementDefaultContext(_localctx);
enterOuterAlt(_localctx, 1);
{
- setState(116);
+ setState(124);
query();
}
break;
@@ -621,27 +624,27 @@ class SqlBaseParser extends Parser {
_localctx = new ExplainContext(_localctx);
enterOuterAlt(_localctx, 2);
{
- setState(117);
+ setState(125);
match(EXPLAIN);
- setState(131);
+ setState(139);
_errHandler.sync(this);
switch ( getInterpreter().adaptivePredict(_input,2,_ctx) ) {
case 1:
{
- setState(118);
+ setState(126);
match(T__0);
- setState(127);
+ setState(135);
_errHandler.sync(this);
_la = _input.LA(1);
- while (((((_la - 38)) & ~0x3f) == 0 && ((1L << (_la - 38)) & ((1L << (FORMAT - 38)) | (1L << (PLAN - 38)) | (1L << (VERIFY - 38)))) != 0)) {
+ while (((((_la - 39)) & ~0x3f) == 0 && ((1L << (_la - 39)) & ((1L << (FORMAT - 39)) | (1L << (PLAN - 39)) | (1L << (VERIFY - 39)))) != 0)) {
{
- setState(125);
+ setState(133);
switch (_input.LA(1)) {
case PLAN:
{
- setState(119);
+ setState(127);
match(PLAN);
- setState(120);
+ setState(128);
((ExplainContext)_localctx).type = _input.LT(1);
_la = _input.LA(1);
if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ALL) | (1L << ANALYZED) | (1L << EXECUTABLE) | (1L << MAPPED))) != 0) || _la==OPTIMIZED || _la==PARSED) ) {
@@ -653,9 +656,9 @@ class SqlBaseParser extends Parser {
break;
case FORMAT:
{
- setState(121);
+ setState(129);
match(FORMAT);
- setState(122);
+ setState(130);
((ExplainContext)_localctx).format = _input.LT(1);
_la = _input.LA(1);
if ( !(_la==GRAPHVIZ || _la==TEXT) ) {
@@ -667,9 +670,9 @@ class SqlBaseParser extends Parser {
break;
case VERIFY:
{
- setState(123);
+ setState(131);
match(VERIFY);
- setState(124);
+ setState(132);
((ExplainContext)_localctx).verify = booleanValue();
}
break;
@@ -677,16 +680,16 @@ class SqlBaseParser extends Parser {
throw new NoViableAltException(this);
}
}
- setState(129);
+ setState(137);
_errHandler.sync(this);
_la = _input.LA(1);
}
- setState(130);
+ setState(138);
match(T__1);
}
break;
}
- setState(133);
+ setState(141);
statement();
}
break;
@@ -694,27 +697,27 @@ class SqlBaseParser extends Parser {
_localctx = new DebugContext(_localctx);
enterOuterAlt(_localctx, 3);
{
- setState(134);
+ setState(142);
match(DEBUG);
- setState(146);
+ setState(154);
_errHandler.sync(this);
switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) {
case 1:
{
- setState(135);
+ setState(143);
match(T__0);
- setState(142);
+ setState(150);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==FORMAT || _la==PLAN) {
{
- setState(140);
+ setState(148);
switch (_input.LA(1)) {
case PLAN:
{
- setState(136);
+ setState(144);
match(PLAN);
- setState(137);
+ setState(145);
((DebugContext)_localctx).type = _input.LT(1);
_la = _input.LA(1);
if ( !(_la==ANALYZED || _la==OPTIMIZED) ) {
@@ -726,9 +729,9 @@ class SqlBaseParser extends Parser {
break;
case FORMAT:
{
- setState(138);
+ setState(146);
match(FORMAT);
- setState(139);
+ setState(147);
((DebugContext)_localctx).format = _input.LT(1);
_la = _input.LA(1);
if ( !(_la==GRAPHVIZ || _la==TEXT) ) {
@@ -742,16 +745,16 @@ class SqlBaseParser extends Parser {
throw new NoViableAltException(this);
}
}
- setState(144);
+ setState(152);
_errHandler.sync(this);
_la = _input.LA(1);
}
- setState(145);
+ setState(153);
match(T__1);
}
break;
}
- setState(148);
+ setState(156);
statement();
}
break;
@@ -759,26 +762,26 @@ class SqlBaseParser extends Parser {
_localctx = new ShowTablesContext(_localctx);
enterOuterAlt(_localctx, 4);
{
- setState(149);
+ setState(157);
match(SHOW);
- setState(150);
+ setState(158);
match(TABLES);
- setState(153);
+ setState(161);
_la = _input.LA(1);
if (_la==INCLUDE) {
{
- setState(151);
+ setState(159);
match(INCLUDE);
- setState(152);
+ setState(160);
match(FROZEN);
}
}
- setState(157);
+ setState(165);
switch (_input.LA(1)) {
case LIKE:
{
- setState(155);
+ setState(163);
((ShowTablesContext)_localctx).tableLike = likePattern();
}
break;
@@ -808,6 +811,7 @@ class SqlBaseParser extends Parser {
case OPTIMIZED:
case PARSED:
case PHYSICAL:
+ case PIVOT:
case PLAN:
case RLIKE:
case QUERY:
@@ -827,7 +831,7 @@ class SqlBaseParser extends Parser {
case QUOTED_IDENTIFIER:
case BACKQUOTED_IDENTIFIER:
{
- setState(156);
+ setState(164);
((ShowTablesContext)_localctx).tableIdent = tableIdentifier();
}
break;
@@ -842,33 +846,33 @@ class SqlBaseParser extends Parser {
_localctx = new ShowColumnsContext(_localctx);
enterOuterAlt(_localctx, 5);
{
- setState(159);
+ setState(167);
match(SHOW);
- setState(160);
+ setState(168);
match(COLUMNS);
- setState(163);
+ setState(171);
_la = _input.LA(1);
if (_la==INCLUDE) {
{
- setState(161);
+ setState(169);
match(INCLUDE);
- setState(162);
+ setState(170);
match(FROZEN);
}
}
- setState(165);
+ setState(173);
_la = _input.LA(1);
if ( !(_la==FROM || _la==IN) ) {
_errHandler.recoverInline(this);
} else {
consume();
}
- setState(168);
+ setState(176);
switch (_input.LA(1)) {
case LIKE:
{
- setState(166);
+ setState(174);
((ShowColumnsContext)_localctx).tableLike = likePattern();
}
break;
@@ -898,6 +902,7 @@ class SqlBaseParser extends Parser {
case OPTIMIZED:
case PARSED:
case PHYSICAL:
+ case PIVOT:
case PLAN:
case RLIKE:
case QUERY:
@@ -917,7 +922,7 @@ class SqlBaseParser extends Parser {
case QUOTED_IDENTIFIER:
case BACKQUOTED_IDENTIFIER:
{
- setState(167);
+ setState(175);
((ShowColumnsContext)_localctx).tableIdent = tableIdentifier();
}
break;
@@ -930,29 +935,29 @@ class SqlBaseParser extends Parser {
_localctx = new ShowColumnsContext(_localctx);
enterOuterAlt(_localctx, 6);
{
- setState(170);
+ setState(178);
_la = _input.LA(1);
if ( !(_la==DESC || _la==DESCRIBE) ) {
_errHandler.recoverInline(this);
} else {
consume();
}
- setState(173);
+ setState(181);
_la = _input.LA(1);
if (_la==INCLUDE) {
{
- setState(171);
+ setState(179);
match(INCLUDE);
- setState(172);
+ setState(180);
match(FROZEN);
}
}
- setState(177);
+ setState(185);
switch (_input.LA(1)) {
case LIKE:
{
- setState(175);
+ setState(183);
((ShowColumnsContext)_localctx).tableLike = likePattern();
}
break;
@@ -982,6 +987,7 @@ class SqlBaseParser extends Parser {
case OPTIMIZED:
case PARSED:
case PHYSICAL:
+ case PIVOT:
case PLAN:
case RLIKE:
case QUERY:
@@ -1001,7 +1007,7 @@ class SqlBaseParser extends Parser {
case QUOTED_IDENTIFIER:
case BACKQUOTED_IDENTIFIER:
{
- setState(176);
+ setState(184);
((ShowColumnsContext)_localctx).tableIdent = tableIdentifier();
}
break;
@@ -1014,15 +1020,15 @@ class SqlBaseParser extends Parser {
_localctx = new ShowFunctionsContext(_localctx);
enterOuterAlt(_localctx, 7);
{
- setState(179);
+ setState(187);
match(SHOW);
- setState(180);
+ setState(188);
match(FUNCTIONS);
- setState(182);
+ setState(190);
_la = _input.LA(1);
if (_la==LIKE) {
{
- setState(181);
+ setState(189);
likePattern();
}
}
@@ -1033,9 +1039,9 @@ class SqlBaseParser extends Parser {
_localctx = new ShowSchemasContext(_localctx);
enterOuterAlt(_localctx, 8);
{
- setState(184);
+ setState(192);
match(SHOW);
- setState(185);
+ setState(193);
match(SCHEMAS);
}
break;
@@ -1043,58 +1049,58 @@ class SqlBaseParser extends Parser {
_localctx = new SysTablesContext(_localctx);
enterOuterAlt(_localctx, 9);
{
- setState(186);
+ setState(194);
match(SYS);
- setState(187);
+ setState(195);
match(TABLES);
- setState(190);
+ setState(198);
_la = _input.LA(1);
if (_la==CATALOG) {
{
- setState(188);
+ setState(196);
match(CATALOG);
- setState(189);
+ setState(197);
((SysTablesContext)_localctx).clusterLike = likePattern();
}
}
- setState(194);
+ setState(202);
_errHandler.sync(this);
switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) {
case 1:
{
- setState(192);
+ setState(200);
((SysTablesContext)_localctx).tableLike = likePattern();
}
break;
case 2:
{
- setState(193);
+ setState(201);
((SysTablesContext)_localctx).tableIdent = tableIdentifier();
}
break;
}
- setState(205);
+ setState(213);
_la = _input.LA(1);
if (_la==TYPE) {
{
- setState(196);
+ setState(204);
match(TYPE);
- setState(197);
+ setState(205);
string();
- setState(202);
+ setState(210);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==T__2) {
{
{
- setState(198);
+ setState(206);
match(T__2);
- setState(199);
+ setState(207);
string();
}
}
- setState(204);
+ setState(212);
_errHandler.sync(this);
_la = _input.LA(1);
}
@@ -1107,28 +1113,28 @@ class SqlBaseParser extends Parser {
_localctx = new SysColumnsContext(_localctx);
enterOuterAlt(_localctx, 10);
{
- setState(207);
+ setState(215);
match(SYS);
- setState(208);
+ setState(216);
match(COLUMNS);
- setState(211);
+ setState(219);
_la = _input.LA(1);
if (_la==CATALOG) {
{
- setState(209);
+ setState(217);
match(CATALOG);
- setState(210);
+ setState(218);
((SysColumnsContext)_localctx).cluster = string();
}
}
- setState(216);
+ setState(224);
switch (_input.LA(1)) {
case TABLE:
{
- setState(213);
+ setState(221);
match(TABLE);
- setState(214);
+ setState(222);
((SysColumnsContext)_localctx).tableLike = likePattern();
}
break;
@@ -1158,6 +1164,7 @@ class SqlBaseParser extends Parser {
case OPTIMIZED:
case PARSED:
case PHYSICAL:
+ case PIVOT:
case PLAN:
case RLIKE:
case QUERY:
@@ -1177,7 +1184,7 @@ class SqlBaseParser extends Parser {
case QUOTED_IDENTIFIER:
case BACKQUOTED_IDENTIFIER:
{
- setState(215);
+ setState(223);
((SysColumnsContext)_localctx).tableIdent = tableIdentifier();
}
break;
@@ -1187,11 +1194,11 @@ class SqlBaseParser extends Parser {
default:
throw new NoViableAltException(this);
}
- setState(219);
+ setState(227);
_la = _input.LA(1);
if (_la==LIKE) {
{
- setState(218);
+ setState(226);
((SysColumnsContext)_localctx).columnPattern = likePattern();
}
}
@@ -1202,19 +1209,19 @@ class SqlBaseParser extends Parser {
_localctx = new SysTypesContext(_localctx);
enterOuterAlt(_localctx, 11);
{
- setState(221);
+ setState(229);
match(SYS);
- setState(222);
+ setState(230);
match(TYPES);
- setState(227);
+ setState(235);
_la = _input.LA(1);
- if (((((_la - 115)) & ~0x3f) == 0 && ((1L << (_la - 115)) & ((1L << (PLUS - 115)) | (1L << (MINUS - 115)) | (1L << (INTEGER_VALUE - 115)) | (1L << (DECIMAL_VALUE - 115)))) != 0)) {
+ if (((((_la - 117)) & ~0x3f) == 0 && ((1L << (_la - 117)) & ((1L << (PLUS - 117)) | (1L << (MINUS - 117)) | (1L << (INTEGER_VALUE - 117)) | (1L << (DECIMAL_VALUE - 117)))) != 0)) {
{
- setState(224);
+ setState(232);
_la = _input.LA(1);
if (_la==PLUS || _la==MINUS) {
{
- setState(223);
+ setState(231);
_la = _input.LA(1);
if ( !(_la==PLUS || _la==MINUS) ) {
_errHandler.recoverInline(this);
@@ -1224,7 +1231,7 @@ class SqlBaseParser extends Parser {
}
}
- setState(226);
+ setState(234);
((SysTypesContext)_localctx).type = number();
}
}
@@ -1281,34 +1288,34 @@ class SqlBaseParser extends Parser {
try {
enterOuterAlt(_localctx, 1);
{
- setState(240);
+ setState(248);
_la = _input.LA(1);
if (_la==WITH) {
{
- setState(231);
+ setState(239);
match(WITH);
- setState(232);
+ setState(240);
namedQuery();
- setState(237);
+ setState(245);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==T__2) {
{
{
- setState(233);
+ setState(241);
match(T__2);
- setState(234);
+ setState(242);
namedQuery();
}
}
- setState(239);
+ setState(247);
_errHandler.sync(this);
_la = _input.LA(1);
}
}
}
- setState(242);
+ setState(250);
queryNoWith();
}
}
@@ -1364,42 +1371,42 @@ class SqlBaseParser extends Parser {
try {
enterOuterAlt(_localctx, 1);
{
- setState(244);
+ setState(252);
queryTerm();
- setState(255);
+ setState(263);
_la = _input.LA(1);
if (_la==ORDER) {
{
- setState(245);
+ setState(253);
match(ORDER);
- setState(246);
+ setState(254);
match(BY);
- setState(247);
+ setState(255);
orderBy();
- setState(252);
+ setState(260);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==T__2) {
{
{
- setState(248);
+ setState(256);
match(T__2);
- setState(249);
+ setState(257);
orderBy();
}
}
- setState(254);
+ setState(262);
_errHandler.sync(this);
_la = _input.LA(1);
}
}
}
- setState(258);
+ setState(266);
_la = _input.LA(1);
if (_la==LIMIT || _la==LIMIT_ESC) {
{
- setState(257);
+ setState(265);
limitClause();
}
}
@@ -1448,14 +1455,14 @@ class SqlBaseParser extends Parser {
enterRule(_localctx, 10, RULE_limitClause);
int _la;
try {
- setState(265);
+ setState(273);
switch (_input.LA(1)) {
case LIMIT:
enterOuterAlt(_localctx, 1);
{
- setState(260);
+ setState(268);
match(LIMIT);
- setState(261);
+ setState(269);
((LimitClauseContext)_localctx).limit = _input.LT(1);
_la = _input.LA(1);
if ( !(_la==ALL || _la==INTEGER_VALUE) ) {
@@ -1468,9 +1475,9 @@ class SqlBaseParser extends Parser {
case LIMIT_ESC:
enterOuterAlt(_localctx, 2);
{
- setState(262);
+ setState(270);
match(LIMIT_ESC);
- setState(263);
+ setState(271);
((LimitClauseContext)_localctx).limit = _input.LT(1);
_la = _input.LA(1);
if ( !(_la==ALL || _la==INTEGER_VALUE) ) {
@@ -1478,7 +1485,7 @@ class SqlBaseParser extends Parser {
} else {
consume();
}
- setState(264);
+ setState(272);
match(ESC_END);
}
break;
@@ -1551,13 +1558,13 @@ class SqlBaseParser extends Parser {
QueryTermContext _localctx = new QueryTermContext(_ctx, getState());
enterRule(_localctx, 12, RULE_queryTerm);
try {
- setState(272);
+ setState(280);
switch (_input.LA(1)) {
case SELECT:
_localctx = new QueryPrimaryDefaultContext(_localctx);
enterOuterAlt(_localctx, 1);
{
- setState(267);
+ setState(275);
querySpecification();
}
break;
@@ -1565,11 +1572,11 @@ class SqlBaseParser extends Parser {
_localctx = new SubqueryContext(_localctx);
enterOuterAlt(_localctx, 2);
{
- setState(268);
+ setState(276);
match(T__0);
- setState(269);
+ setState(277);
queryNoWith();
- setState(270);
+ setState(278);
match(T__1);
}
break;
@@ -1625,13 +1632,13 @@ class SqlBaseParser extends Parser {
try {
enterOuterAlt(_localctx, 1);
{
- setState(274);
+ setState(282);
expression();
- setState(276);
+ setState(284);
_la = _input.LA(1);
if (_la==ASC || _la==DESC) {
{
- setState(275);
+ setState(283);
((OrderByContext)_localctx).ordering = _input.LT(1);
_la = _input.LA(1);
if ( !(_la==ASC || _la==DESC) ) {
@@ -1642,13 +1649,13 @@ class SqlBaseParser extends Parser {
}
}
- setState(280);
+ setState(288);
_la = _input.LA(1);
if (_la==NULLS) {
{
- setState(278);
+ setState(286);
match(NULLS);
- setState(279);
+ setState(287);
((OrderByContext)_localctx).nullOrdering = _input.LT(1);
_la = _input.LA(1);
if ( !(_la==FIRST || _la==LAST) ) {
@@ -1676,11 +1683,8 @@ class SqlBaseParser extends Parser {
public BooleanExpressionContext where;
public BooleanExpressionContext having;
public TerminalNode SELECT() { return getToken(SqlBaseParser.SELECT, 0); }
- public List<SelectItemContext> selectItem() {
- return getRuleContexts(SelectItemContext.class);
- }
- public SelectItemContext selectItem(int i) {
- return getRuleContext(SelectItemContext.class,i);
+ public SelectItemsContext selectItems() {
+ return getRuleContext(SelectItemsContext.class,0);
}
public SetQuantifierContext setQuantifier() {
return getRuleContext(SetQuantifierContext.class,0);
@@ -1727,75 +1731,59 @@ class SqlBaseParser extends Parser {
try {
enterOuterAlt(_localctx, 1);
{
- setState(282);
+ setState(290);
match(SELECT);
- setState(284);
+ setState(292);
_la = _input.LA(1);
if (_la==ALL || _la==DISTINCT) {
{
- setState(283);
+ setState(291);
setQuantifier();
}
}
- setState(286);
- selectItem();
- setState(291);
- _errHandler.sync(this);
- _la = _input.LA(1);
- while (_la==T__2) {
- {
- {
- setState(287);
- match(T__2);
- setState(288);
- selectItem();
- }
- }
- setState(293);
- _errHandler.sync(this);
- _la = _input.LA(1);
- }
- setState(295);
+ setState(294);
+ selectItems();
+ setState(296);
_la = _input.LA(1);
if (_la==FROM) {
{
- setState(294);
+ setState(295);
fromClause();
}
}
- setState(299);
+ setState(300);
_la = _input.LA(1);
if (_la==WHERE) {
{
- setState(297);
- match(WHERE);
setState(298);
+ match(WHERE);
+ setState(299);
((QuerySpecificationContext)_localctx).where = booleanExpression(0);
}
}
- setState(304);
+ setState(305);
_la = _input.LA(1);
if (_la==GROUP) {
{
- setState(301);
- match(GROUP);
setState(302);
- match(BY);
+ match(GROUP);
setState(303);
+ match(BY);
+ setState(304);
groupBy();
}
}
- setState(308);
+ setState(309);
_la = _input.LA(1);
if (_la==HAVING) {
{
- setState(306);
- match(HAVING);
setState(307);
+ match(HAVING);
+ setState(308);
((QuerySpecificationContext)_localctx).having = booleanExpression(0);
}
}
@@ -1821,6 +1809,9 @@ class SqlBaseParser extends Parser {
public RelationContext relation(int i) {
return getRuleContext(RelationContext.class,i);
}
+ public PivotClauseContext pivotClause() {
+ return getRuleContext(PivotClauseContext.class,0);
+ }
public FromClauseContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@@ -1847,26 +1838,35 @@ class SqlBaseParser extends Parser {
try {
enterOuterAlt(_localctx, 1);
{
- setState(310);
- match(FROM);
setState(311);
+ match(FROM);
+ setState(312);
relation();
- setState(316);
+ setState(317);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==T__2) {
{
{
- setState(312);
- match(T__2);
setState(313);
+ match(T__2);
+ setState(314);
relation();
}
}
- setState(318);
+ setState(319);
_errHandler.sync(this);
_la = _input.LA(1);
}
+ setState(321);
+ _la = _input.LA(1);
+ if (_la==PIVOT) {
+ {
+ setState(320);
+ pivotClause();
+ }
+ }
+
}
}
catch (RecognitionException re) {
@@ -1916,30 +1916,30 @@ class SqlBaseParser extends Parser {
try {
enterOuterAlt(_localctx, 1);
{
- setState(320);
+ setState(324);
_la = _input.LA(1);
if (_la==ALL || _la==DISTINCT) {
{
- setState(319);
+ setState(323);
setQuantifier();
}
}
- setState(322);
+ setState(326);
groupingElement();
- setState(327);
+ setState(331);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==T__2) {
{
{
- setState(323);
+ setState(327);
match(T__2);
- setState(324);
+ setState(328);
groupingElement();
}
}
- setState(329);
+ setState(333);
_errHandler.sync(this);
_la = _input.LA(1);
}
@@ -1994,7 +1994,7 @@ class SqlBaseParser extends Parser {
_localctx = new SingleGroupingSetContext(_localctx);
enterOuterAlt(_localctx, 1);
{
- setState(330);
+ setState(334);
groupingExpressions();
}
}
@@ -2040,47 +2040,47 @@ class SqlBaseParser extends Parser {
enterRule(_localctx, 24, RULE_groupingExpressions);
int _la;
try {
- setState(345);
+ setState(349);
_errHandler.sync(this);
switch ( getInterpreter().adaptivePredict(_input,43,_ctx) ) {
case 1:
enterOuterAlt(_localctx, 1);
{
- setState(332);
+ setState(336);
match(T__0);
- setState(341);
+ setState(345);
_la = _input.LA(1);
- if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (NOT - 65)) | (1L << (NULL - 65)) | (1L << (OPTIMIZED - 65)) | (1L << (PARSED - 65)) | (1L << (PHYSICAL - 65)) | (1L << (PLAN - 65)) | (1L << (RIGHT - 65)) | (1L << (RLIKE - 65)) | (1L << (QUERY - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SECOND - 65)) | (1L << (SHOW - 65)) | (1L << (SYS - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TRUE - 65)) | (1L << (TYPE - 65)) | (1L << (TYPES - 65)) | (1L << (VERIFY - 65)) | (1L << (YEAR - 65)) | (1L << (FUNCTION_ESC - 65)) | (1L << (DATE_ESC - 65)) | (1L << (TIME_ESC - 65)) | (1L << (TIMESTAMP_ESC - 65)) | (1L << (GUID_ESC - 65)) | (1L << (PLUS - 65)) | (1L << (MINUS - 65)) | (1L << (ASTERISK - 65)) | (1L << (PARAM - 65)) | (1L << (STRING - 65)) | (1L << (INTEGER_VALUE - 65)) | (1L << (DECIMAL_VALUE - 65)) | (1L << (IDENTIFIER - 65)) | (1L << (DIGIT_IDENTIFIER - 65)))) != 0) || _la==QUOTED_IDENTIFIER || _la==BACKQUOTED_IDENTIFIER) {
+ if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 66)) & ~0x3f) == 0 && ((1L << (_la - 66)) & ((1L << (NOT - 66)) | (1L << (NULL - 66)) | (1L << (OPTIMIZED - 66)) | (1L << (PARSED - 66)) | (1L << (PHYSICAL - 66)) | (1L << (PIVOT - 66)) | (1L << (PLAN - 66)) | (1L << (RIGHT - 66)) | (1L << (RLIKE - 66)) | (1L << (QUERY - 66)) | (1L << (SCHEMAS - 66)) | (1L << (SECOND - 66)) | (1L << (SHOW - 66)) | (1L << (SYS - 66)) | (1L << (TABLES - 66)) | (1L << (TEXT - 66)) | (1L << (TRUE - 66)) | (1L << (TYPE - 66)) | (1L << (TYPES - 66)) | (1L << (VERIFY - 66)) | (1L << (YEAR - 66)) | (1L << (FUNCTION_ESC - 66)) | (1L << (DATE_ESC - 66)) | (1L << (TIME_ESC - 66)) | (1L << (TIMESTAMP_ESC - 66)) | (1L << (GUID_ESC - 66)) | (1L << (PLUS - 66)) | (1L << (MINUS - 66)) | (1L << (ASTERISK - 66)) | (1L << (PARAM - 66)) | (1L << (STRING - 66)) | (1L << (INTEGER_VALUE - 66)) | (1L << (DECIMAL_VALUE - 66)) | (1L << (IDENTIFIER - 66)))) != 0) || ((((_la - 130)) & ~0x3f) == 0 && ((1L << (_la - 130)) & ((1L << (DIGIT_IDENTIFIER - 130)) | (1L << (QUOTED_IDENTIFIER - 130)) | (1L << (BACKQUOTED_IDENTIFIER - 130)))) != 0)) {
{
- setState(333);
+ setState(337);
expression();
- setState(338);
+ setState(342);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==T__2) {
{
{
- setState(334);
+ setState(338);
match(T__2);
- setState(335);
+ setState(339);
expression();
}
}
- setState(340);
+ setState(344);
_errHandler.sync(this);
_la = _input.LA(1);
}
}
}
- setState(343);
+ setState(347);
match(T__1);
}
break;
case 2:
enterOuterAlt(_localctx, 2);
{
- setState(344);
+ setState(348);
expression();
}
break;
@@ -2131,15 +2131,15 @@ class SqlBaseParser extends Parser {
try {
enterOuterAlt(_localctx, 1);
{
- setState(347);
- ((NamedQueryContext)_localctx).name = identifier();
- setState(348);
- match(AS);
- setState(349);
- match(T__0);
- setState(350);
- queryNoWith();
setState(351);
+ ((NamedQueryContext)_localctx).name = identifier();
+ setState(352);
+ match(AS);
+ setState(353);
+ match(T__0);
+ setState(354);
+ queryNoWith();
+ setState(355);
match(T__1);
}
}
@@ -2183,7 +2183,7 @@ class SqlBaseParser extends Parser {
try {
enterOuterAlt(_localctx, 1);
{
- setState(353);
+ setState(357);
_la = _input.LA(1);
if ( !(_la==ALL || _la==DISTINCT) ) {
_errHandler.recoverInline(this);
@@ -2203,6 +2203,70 @@ class SqlBaseParser extends Parser {
return _localctx;
}
+ public static class SelectItemsContext extends ParserRuleContext {
+ public List<SelectItemContext> selectItem() {
+ return getRuleContexts(SelectItemContext.class);
+ }
+ public SelectItemContext selectItem(int i) {
+ return getRuleContext(SelectItemContext.class,i);
+ }
+ public SelectItemsContext(ParserRuleContext parent, int invokingState) {
+ super(parent, invokingState);
+ }
+ @Override public int getRuleIndex() { return RULE_selectItems; }
+ @Override
+ public void enterRule(ParseTreeListener listener) {
+ if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSelectItems(this);
+ }
+ @Override
+ public void exitRule(ParseTreeListener listener) {
+ if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSelectItems(this);
+ }
+ @Override
+ public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+ if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor<? extends T>)visitor).visitSelectItems(this);
+ else return visitor.visitChildren(this);
+ }
+ }
+
+ public final SelectItemsContext selectItems() throws RecognitionException {
+ SelectItemsContext _localctx = new SelectItemsContext(_ctx, getState());
+ enterRule(_localctx, 30, RULE_selectItems);
+ int _la;
+ try {
+ enterOuterAlt(_localctx, 1);
+ {
+ setState(359);
+ selectItem();
+ setState(364);
+ _errHandler.sync(this);
+ _la = _input.LA(1);
+ while (_la==T__2) {
+ {
+ {
+ setState(360);
+ match(T__2);
+ setState(361);
+ selectItem();
+ }
+ }
+ setState(366);
+ _errHandler.sync(this);
+ _la = _input.LA(1);
+ }
+ }
+ }
+ catch (RecognitionException re) {
+ _localctx.exception = re;
+ _errHandler.reportError(this, re);
+ _errHandler.recover(this, re);
+ }
+ finally {
+ exitRule();
+ }
+ return _localctx;
+ }
+
public static class SelectItemContext extends ParserRuleContext {
public SelectItemContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
@@ -2240,29 +2304,29 @@ class SqlBaseParser extends Parser {
public final SelectItemContext selectItem() throws RecognitionException {
SelectItemContext _localctx = new SelectItemContext(_ctx, getState());
- enterRule(_localctx, 30, RULE_selectItem);
+ enterRule(_localctx, 32, RULE_selectItem);
int _la;
try {
_localctx = new SelectExpressionContext(_localctx);
enterOuterAlt(_localctx, 1);
{
- setState(355);
+ setState(367);
expression();
- setState(360);
+ setState(372);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,45,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,46,_ctx) ) {
case 1:
{
- setState(357);
+ setState(369);
_la = _input.LA(1);
if (_la==AS) {
{
- setState(356);
+ setState(368);
match(AS);
}
}
- setState(359);
+ setState(371);
identifier();
}
break;
@@ -2311,24 +2375,24 @@ class SqlBaseParser extends Parser {
public final RelationContext relation() throws RecognitionException {
RelationContext _localctx = new RelationContext(_ctx, getState());
- enterRule(_localctx, 32, RULE_relation);
+ enterRule(_localctx, 34, RULE_relation);
int _la;
try {
enterOuterAlt(_localctx, 1);
{
- setState(362);
+ setState(374);
relationPrimary();
- setState(366);
+ setState(378);
_errHandler.sync(this);
_la = _input.LA(1);
- while (((((_la - 41)) & ~0x3f) == 0 && ((1L << (_la - 41)) & ((1L << (FULL - 41)) | (1L << (INNER - 41)) | (1L << (JOIN - 41)) | (1L << (LEFT - 41)) | (1L << (NATURAL - 41)) | (1L << (RIGHT - 41)))) != 0)) {
+ while (((((_la - 42)) & ~0x3f) == 0 && ((1L << (_la - 42)) & ((1L << (FULL - 42)) | (1L << (INNER - 42)) | (1L << (JOIN - 42)) | (1L << (LEFT - 42)) | (1L << (NATURAL - 42)) | (1L << (RIGHT - 42)))) != 0)) {
{
{
- setState(363);
+ setState(375);
joinRelation();
}
}
- setState(368);
+ setState(380);
_errHandler.sync(this);
_la = _input.LA(1);
}
@@ -2379,10 +2443,10 @@ class SqlBaseParser extends Parser {
public final JoinRelationContext joinRelation() throws RecognitionException {
JoinRelationContext _localctx = new JoinRelationContext(_ctx, getState());
- enterRule(_localctx, 34, RULE_joinRelation);
+ enterRule(_localctx, 36, RULE_joinRelation);
int _la;
try {
- setState(380);
+ setState(392);
switch (_input.LA(1)) {
case FULL:
case INNER:
@@ -2392,18 +2456,18 @@ class SqlBaseParser extends Parser {
enterOuterAlt(_localctx, 1);
{
{
- setState(369);
+ setState(381);
joinType();
}
- setState(370);
+ setState(382);
match(JOIN);
- setState(371);
+ setState(383);
((JoinRelationContext)_localctx).right = relationPrimary();
- setState(373);
+ setState(385);
_la = _input.LA(1);
if (_la==ON || _la==USING) {
{
- setState(372);
+ setState(384);
joinCriteria();
}
}
@@ -2413,13 +2477,13 @@ class SqlBaseParser extends Parser {
case NATURAL:
enterOuterAlt(_localctx, 2);
{
- setState(375);
+ setState(387);
match(NATURAL);
- setState(376);
+ setState(388);
joinType();
- setState(377);
+ setState(389);
match(JOIN);
- setState(378);
+ setState(390);
((JoinRelationContext)_localctx).right = relationPrimary();
}
break;
@@ -2465,20 +2529,20 @@ class SqlBaseParser extends Parser {
public final JoinTypeContext joinType() throws RecognitionException {
JoinTypeContext _localctx = new JoinTypeContext(_ctx, getState());
- enterRule(_localctx, 36, RULE_joinType);
+ enterRule(_localctx, 38, RULE_joinType);
int _la;
try {
- setState(397);
+ setState(409);
switch (_input.LA(1)) {
case INNER:
case JOIN:
enterOuterAlt(_localctx, 1);
{
- setState(383);
+ setState(395);
_la = _input.LA(1);
if (_la==INNER) {
{
- setState(382);
+ setState(394);
match(INNER);
}
}
@@ -2488,13 +2552,13 @@ class SqlBaseParser extends Parser {
case LEFT:
enterOuterAlt(_localctx, 2);
{
- setState(385);
+ setState(397);
match(LEFT);
- setState(387);
+ setState(399);
_la = _input.LA(1);
if (_la==OUTER) {
{
- setState(386);
+ setState(398);
match(OUTER);
}
}
@@ -2504,13 +2568,13 @@ class SqlBaseParser extends Parser {
case RIGHT:
enterOuterAlt(_localctx, 3);
{
- setState(389);
+ setState(401);
match(RIGHT);
- setState(391);
+ setState(403);
_la = _input.LA(1);
if (_la==OUTER) {
{
- setState(390);
+ setState(402);
match(OUTER);
}
}
@@ -2520,13 +2584,13 @@ class SqlBaseParser extends Parser {
case FULL:
enterOuterAlt(_localctx, 4);
{
- setState(393);
+ setState(405);
match(FULL);
- setState(395);
+ setState(407);
_la = _input.LA(1);
if (_la==OUTER) {
{
- setState(394);
+ setState(406);
match(OUTER);
}
}
@@ -2581,46 +2645,46 @@ class SqlBaseParser extends Parser {
public final JoinCriteriaContext joinCriteria() throws RecognitionException {
JoinCriteriaContext _localctx = new JoinCriteriaContext(_ctx, getState());
- enterRule(_localctx, 38, RULE_joinCriteria);
+ enterRule(_localctx, 40, RULE_joinCriteria);
int _la;
try {
- setState(413);
+ setState(425);
switch (_input.LA(1)) {
case ON:
enterOuterAlt(_localctx, 1);
{
- setState(399);
+ setState(411);
match(ON);
- setState(400);
+ setState(412);
booleanExpression(0);
}
break;
case USING:
enterOuterAlt(_localctx, 2);
{
- setState(401);
+ setState(413);
match(USING);
- setState(402);
+ setState(414);
match(T__0);
- setState(403);
+ setState(415);
identifier();
- setState(408);
+ setState(420);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==T__2) {
{
{
- setState(404);
+ setState(416);
match(T__2);
- setState(405);
+ setState(417);
identifier();
}
}
- setState(410);
+ setState(422);
_errHandler.sync(this);
_la = _input.LA(1);
}
- setState(411);
+ setState(423);
match(T__1);
}
break;
@@ -2723,42 +2787,42 @@ class SqlBaseParser extends Parser {
public final RelationPrimaryContext relationPrimary() throws RecognitionException {
RelationPrimaryContext _localctx = new RelationPrimaryContext(_ctx, getState());
- enterRule(_localctx, 40, RULE_relationPrimary);
+ enterRule(_localctx, 42, RULE_relationPrimary);
int _la;
try {
- setState(443);
+ setState(455);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,63,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,64,_ctx) ) {
case 1:
_localctx = new TableNameContext(_localctx);
enterOuterAlt(_localctx, 1);
{
- setState(416);
+ setState(428);
_la = _input.LA(1);
if (_la==FROZEN) {
{
- setState(415);
+ setState(427);
match(FROZEN);
}
}
- setState(418);
+ setState(430);
tableIdentifier();
- setState(423);
+ setState(435);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,58,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,59,_ctx) ) {
case 1:
{
- setState(420);
+ setState(432);
_la = _input.LA(1);
if (_la==AS) {
{
- setState(419);
+ setState(431);
match(AS);
}
}
- setState(422);
+ setState(434);
qualifiedName();
}
break;
@@ -2769,27 +2833,27 @@ class SqlBaseParser extends Parser {
_localctx = new AliasedQueryContext(_localctx);
enterOuterAlt(_localctx, 2);
{
- setState(425);
+ setState(437);
match(T__0);
- setState(426);
+ setState(438);
queryNoWith();
- setState(427);
+ setState(439);
match(T__1);
- setState(432);
+ setState(444);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,60,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,61,_ctx) ) {
case 1:
{
- setState(429);
+ setState(441);
_la = _input.LA(1);
if (_la==AS) {
{
- setState(428);
+ setState(440);
match(AS);
}
}
- setState(431);
+ setState(443);
qualifiedName();
}
break;
@@ -2800,27 +2864,27 @@ class SqlBaseParser extends Parser {
_localctx = new AliasedRelationContext(_localctx);
enterOuterAlt(_localctx, 3);
{
- setState(434);
+ setState(446);
match(T__0);
- setState(435);
+ setState(447);
relation();
- setState(436);
+ setState(448);
match(T__1);
- setState(441);
+ setState(453);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,62,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,63,_ctx) ) {
case 1:
{
- setState(438);
+ setState(450);
_la = _input.LA(1);
if (_la==AS) {
{
- setState(437);
+ setState(449);
match(AS);
}
}
- setState(440);
+ setState(452);
qualifiedName();
}
break;
@@ -2840,6 +2904,211 @@ class SqlBaseParser extends Parser {
return _localctx;
}
+ public static class PivotClauseContext extends ParserRuleContext {
+ public PivotArgsContext aggs;
+ public QualifiedNameContext column;
+ public PivotArgsContext vals;
+ public TerminalNode PIVOT() { return getToken(SqlBaseParser.PIVOT, 0); }
+ public TerminalNode FOR() { return getToken(SqlBaseParser.FOR, 0); }
+ public TerminalNode IN() { return getToken(SqlBaseParser.IN, 0); }
+ public List<PivotArgsContext> pivotArgs() {
+ return getRuleContexts(PivotArgsContext.class);
+ }
+ public PivotArgsContext pivotArgs(int i) {
+ return getRuleContext(PivotArgsContext.class,i);
+ }
+ public QualifiedNameContext qualifiedName() {
+ return getRuleContext(QualifiedNameContext.class,0);
+ }
+ public PivotClauseContext(ParserRuleContext parent, int invokingState) {
+ super(parent, invokingState);
+ }
+ @Override public int getRuleIndex() { return RULE_pivotClause; }
+ @Override
+ public void enterRule(ParseTreeListener listener) {
+ if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterPivotClause(this);
+ }
+ @Override
+ public void exitRule(ParseTreeListener listener) {
+ if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitPivotClause(this);
+ }
+ @Override
+ public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+ if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor<? extends T>)visitor).visitPivotClause(this);
+ else return visitor.visitChildren(this);
+ }
+ }
+
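+ // The rule below recognizes the new pivot clause. Assuming T__0/T__1 are the
+ // implicit '(' / ')' literal tokens (consistent with their use elsewhere in
+ // this generated parser), the match() calls spell out:
+ //   PIVOT '(' aggs=pivotArgs FOR column=qualifiedName IN '(' vals=pivotArgs ')' ')'
+ // e.g. something like: ... PIVOT (SUM(salary) FOR languages IN (1, 2))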
+ public final PivotClauseContext pivotClause() throws RecognitionException {
+ PivotClauseContext _localctx = new PivotClauseContext(_ctx, getState());
+ enterRule(_localctx, 44, RULE_pivotClause);
+ try {
+ enterOuterAlt(_localctx, 1);
+ {
+ setState(457);
+ match(PIVOT);
+ setState(458);
+ match(T__0);
+ setState(459);
+ ((PivotClauseContext)_localctx).aggs = pivotArgs();
+ setState(460);
+ match(FOR);
+ setState(461);
+ ((PivotClauseContext)_localctx).column = qualifiedName();
+ setState(462);
+ match(IN);
+ setState(463);
+ match(T__0);
+ setState(464);
+ ((PivotClauseContext)_localctx).vals = pivotArgs();
+ setState(465);
+ match(T__1);
+ setState(466);
+ match(T__1);
+ }
+ }
+ catch (RecognitionException re) {
+ _localctx.exception = re;
+ _errHandler.reportError(this, re);
+ _errHandler.recover(this, re);
+ }
+ finally {
+ exitRule();
+ }
+ return _localctx;
+ }
+
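+ // pivotArgs is a comma-separated list; assuming T__2 is the implicit ','
+ // literal token, the while loop below implements:
+ //   namedValueExpression (',' namedValueExpression)*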
+ public static class PivotArgsContext extends ParserRuleContext {
+ public List<NamedValueExpressionContext> namedValueExpression() {
+ return getRuleContexts(NamedValueExpressionContext.class);
+ }
+ public NamedValueExpressionContext namedValueExpression(int i) {
+ return getRuleContext(NamedValueExpressionContext.class,i);
+ }
+ public PivotArgsContext(ParserRuleContext parent, int invokingState) {
+ super(parent, invokingState);
+ }
+ @Override public int getRuleIndex() { return RULE_pivotArgs; }
+ @Override
+ public void enterRule(ParseTreeListener listener) {
+ if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterPivotArgs(this);
+ }
+ @Override
+ public void exitRule(ParseTreeListener listener) {
+ if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitPivotArgs(this);
+ }
+ @Override
+ public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+ if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor<? extends T>)visitor).visitPivotArgs(this);
+ else return visitor.visitChildren(this);
+ }
+ }
+
+ public final PivotArgsContext pivotArgs() throws RecognitionException {
+ PivotArgsContext _localctx = new PivotArgsContext(_ctx, getState());
+ enterRule(_localctx, 46, RULE_pivotArgs);
+ int _la;
+ try {
+ enterOuterAlt(_localctx, 1);
+ {
+ setState(468);
+ namedValueExpression();
+ setState(473);
+ _errHandler.sync(this);
+ _la = _input.LA(1);
+ while (_la==T__2) {
+ {
+ {
+ setState(469);
+ match(T__2);
+ setState(470);
+ namedValueExpression();
+ }
+ }
+ setState(475);
+ _errHandler.sync(this);
+ _la = _input.LA(1);
+ }
+ }
+ }
+ catch (RecognitionException re) {
+ _localctx.exception = re;
+ _errHandler.reportError(this, re);
+ _errHandler.recover(this, re);
+ }
+ finally {
+ exitRule();
+ }
+ return _localctx;
+ }
+
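+ // namedValueExpression attaches an optional, optionally AS-prefixed alias to
+ // a value expression:
+ //   valueExpression (AS? identifier)?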
+ public static class NamedValueExpressionContext extends ParserRuleContext {
+ public ValueExpressionContext valueExpression() {
+ return getRuleContext(ValueExpressionContext.class,0);
+ }
+ public IdentifierContext identifier() {
+ return getRuleContext(IdentifierContext.class,0);
+ }
+ public TerminalNode AS() { return getToken(SqlBaseParser.AS, 0); }
+ public NamedValueExpressionContext(ParserRuleContext parent, int invokingState) {
+ super(parent, invokingState);
+ }
+ @Override public int getRuleIndex() { return RULE_namedValueExpression; }
+ @Override
+ public void enterRule(ParseTreeListener listener) {
+ if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterNamedValueExpression(this);
+ }
+ @Override
+ public void exitRule(ParseTreeListener listener) {
+ if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitNamedValueExpression(this);
+ }
+ @Override
+ public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+ if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor<? extends T>)visitor).visitNamedValueExpression(this);
+ else return visitor.visitChildren(this);
+ }
+ }
+
+ public final NamedValueExpressionContext namedValueExpression() throws RecognitionException {
+ NamedValueExpressionContext _localctx = new NamedValueExpressionContext(_ctx, getState());
+ enterRule(_localctx, 48, RULE_namedValueExpression);
+ int _la;
+ try {
+ enterOuterAlt(_localctx, 1);
+ {
+ setState(476);
+ valueExpression(0);
+ setState(481);
+ _la = _input.LA(1);
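+ // Generated lookahead: one 64-bit mask per window of 64 token types tests
+ // whether the next token can begin an identifier (including soft keywords
+ // such as PIVOT), i.e. whether an alias follows here.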
+ if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OPTIMIZED - 70)) | (1L << (PARSED - 70)) | (1L << (PHYSICAL - 70)) | (1L << (PIVOT - 70)) | (1L << (PLAN - 70)) | (1L << (RLIKE - 70)) | (1L << (QUERY - 70)) | (1L << (SCHEMAS - 70)) | (1L << (SECOND - 70)) | (1L << (SHOW - 70)) | (1L << (SYS - 70)) | (1L << (TABLES - 70)) | (1L << (TEXT - 70)) | (1L << (TYPE - 70)) | (1L << (TYPES - 70)) | (1L << (VERIFY - 70)) | (1L << (YEAR - 70)) | (1L << (IDENTIFIER - 70)) | (1L << (DIGIT_IDENTIFIER - 70)) | (1L << (QUOTED_IDENTIFIER - 70)) | (1L << (BACKQUOTED_IDENTIFIER - 70)))) != 0)) {
+ {
+ setState(478);
+ _la = _input.LA(1);
+ if (_la==AS) {
+ {
+ setState(477);
+ match(AS);
+ }
+ }
+
+ setState(480);
+ identifier();
+ }
+ }
+
+ }
+ }
+ catch (RecognitionException re) {
+ _localctx.exception = re;
+ _errHandler.reportError(this, re);
+ _errHandler.recover(this, re);
+ }
+ finally {
+ exitRule();
+ }
+ return _localctx;
+ }
+
public static class ExpressionContext extends ParserRuleContext {
public BooleanExpressionContext booleanExpression() {
return getRuleContext(BooleanExpressionContext.class,0);
@@ -2865,11 +3134,11 @@ class SqlBaseParser extends Parser {
public final ExpressionContext expression() throws RecognitionException {
ExpressionContext _localctx = new ExpressionContext(_ctx, getState());
- enterRule(_localctx, 42, RULE_expression);
+ enterRule(_localctx, 50, RULE_expression);
try {
enterOuterAlt(_localctx, 1);
{
- setState(445);
+ setState(483);
booleanExpression(0);
}
}
@@ -3071,24 +3340,24 @@ class SqlBaseParser extends Parser {
int _parentState = getState();
BooleanExpressionContext _localctx = new BooleanExpressionContext(_ctx, _parentState);
BooleanExpressionContext _prevctx = _localctx;
- int _startState = 44;
- enterRecursionRule(_localctx, 44, RULE_booleanExpression, _p);
+ int _startState = 52;
+ enterRecursionRule(_localctx, 52, RULE_booleanExpression, _p);
try {
int _alt;
enterOuterAlt(_localctx, 1);
{
- setState(478);
+ setState(516);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,64,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,68,_ctx) ) {
case 1:
{
_localctx = new LogicalNotContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(448);
+ setState(486);
match(NOT);
- setState(449);
+ setState(487);
booleanExpression(8);
}
break;
@@ -3097,13 +3366,13 @@ class SqlBaseParser extends Parser {
_localctx = new ExistsContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(450);
+ setState(488);
match(EXISTS);
- setState(451);
+ setState(489);
match(T__0);
- setState(452);
+ setState(490);
query();
- setState(453);
+ setState(491);
match(T__1);
}
break;
@@ -3112,15 +3381,15 @@ class SqlBaseParser extends Parser {
_localctx = new StringQueryContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(455);
+ setState(493);
match(QUERY);
- setState(456);
+ setState(494);
match(T__0);
- setState(457);
+ setState(495);
((StringQueryContext)_localctx).queryString = string();
- setState(458);
+ setState(496);
matchQueryOptions();
- setState(459);
+ setState(497);
match(T__1);
}
break;
@@ -3129,19 +3398,19 @@ class SqlBaseParser extends Parser {
_localctx = new MatchQueryContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(461);
+ setState(499);
match(MATCH);
- setState(462);
+ setState(500);
match(T__0);
- setState(463);
+ setState(501);
((MatchQueryContext)_localctx).singleField = qualifiedName();
- setState(464);
+ setState(502);
match(T__2);
- setState(465);
+ setState(503);
((MatchQueryContext)_localctx).queryString = string();
- setState(466);
+ setState(504);
matchQueryOptions();
- setState(467);
+ setState(505);
match(T__1);
}
break;
@@ -3150,19 +3419,19 @@ class SqlBaseParser extends Parser {
_localctx = new MultiMatchQueryContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(469);
+ setState(507);
match(MATCH);
- setState(470);
+ setState(508);
match(T__0);
- setState(471);
+ setState(509);
((MultiMatchQueryContext)_localctx).multiFields = string();
- setState(472);
+ setState(510);
match(T__2);
- setState(473);
+ setState(511);
((MultiMatchQueryContext)_localctx).queryString = string();
- setState(474);
+ setState(512);
matchQueryOptions();
- setState(475);
+ setState(513);
match(T__1);
}
break;
@@ -3171,33 +3440,33 @@ class SqlBaseParser extends Parser {
_localctx = new BooleanDefaultContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(477);
+ setState(515);
predicated();
}
break;
}
_ctx.stop = _input.LT(-1);
- setState(488);
+ setState(526);
_errHandler.sync(this);
- _alt = getInterpreter().adaptivePredict(_input,66,_ctx);
+ _alt = getInterpreter().adaptivePredict(_input,70,_ctx);
while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
if ( _alt==1 ) {
if ( _parseListeners!=null ) triggerExitRuleEvent();
_prevctx = _localctx;
{
- setState(486);
+ setState(524);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,65,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,69,_ctx) ) {
case 1:
{
_localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState));
((LogicalBinaryContext)_localctx).left = _prevctx;
pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression);
- setState(480);
+ setState(518);
if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)");
- setState(481);
+ setState(519);
((LogicalBinaryContext)_localctx).operator = match(AND);
- setState(482);
+ setState(520);
((LogicalBinaryContext)_localctx).right = booleanExpression(3);
}
break;
@@ -3206,20 +3475,20 @@ class SqlBaseParser extends Parser {
_localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState));
((LogicalBinaryContext)_localctx).left = _prevctx;
pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression);
- setState(483);
+ setState(521);
if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(484);
+ setState(522);
((LogicalBinaryContext)_localctx).operator = match(OR);
- setState(485);
+ setState(523);
((LogicalBinaryContext)_localctx).right = booleanExpression(2);
}
break;
}
}
}
- setState(490);
+ setState(528);
_errHandler.sync(this);
- _alt = getInterpreter().adaptivePredict(_input,66,_ctx);
+ _alt = getInterpreter().adaptivePredict(_input,70,_ctx);
}
}
}
@@ -3262,24 +3531,24 @@ class SqlBaseParser extends Parser {
public final MatchQueryOptionsContext matchQueryOptions() throws RecognitionException {
MatchQueryOptionsContext _localctx = new MatchQueryOptionsContext(_ctx, getState());
- enterRule(_localctx, 46, RULE_matchQueryOptions);
+ enterRule(_localctx, 54, RULE_matchQueryOptions);
int _la;
try {
enterOuterAlt(_localctx, 1);
{
- setState(495);
+ setState(533);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==T__2) {
{
{
- setState(491);
+ setState(529);
match(T__2);
- setState(492);
+ setState(530);
string();
}
}
- setState(497);
+ setState(535);
_errHandler.sync(this);
_la = _input.LA(1);
}
@@ -3324,18 +3593,18 @@ class SqlBaseParser extends Parser {
public final PredicatedContext predicated() throws RecognitionException {
PredicatedContext _localctx = new PredicatedContext(_ctx, getState());
- enterRule(_localctx, 48, RULE_predicated);
+ enterRule(_localctx, 56, RULE_predicated);
try {
enterOuterAlt(_localctx, 1);
{
- setState(498);
+ setState(536);
valueExpression(0);
- setState(500);
+ setState(538);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,68,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,72,_ctx) ) {
case 1:
{
- setState(499);
+ setState(537);
predicate();
}
break;
@@ -3402,145 +3671,145 @@ class SqlBaseParser extends Parser {
public final PredicateContext predicate() throws RecognitionException {
PredicateContext _localctx = new PredicateContext(_ctx, getState());
- enterRule(_localctx, 50, RULE_predicate);
+ enterRule(_localctx, 58, RULE_predicate);
int _la;
try {
- setState(548);
+ setState(586);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,76,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,80,_ctx) ) {
case 1:
enterOuterAlt(_localctx, 1);
{
- setState(503);
+ setState(541);
_la = _input.LA(1);
if (_la==NOT) {
{
- setState(502);
+ setState(540);
match(NOT);
}
}
- setState(505);
+ setState(543);
((PredicateContext)_localctx).kind = match(BETWEEN);
- setState(506);
+ setState(544);
((PredicateContext)_localctx).lower = valueExpression(0);
- setState(507);
+ setState(545);
match(AND);
- setState(508);
+ setState(546);
((PredicateContext)_localctx).upper = valueExpression(0);
}
break;
case 2:
enterOuterAlt(_localctx, 2);
{
- setState(511);
+ setState(549);
_la = _input.LA(1);
if (_la==NOT) {
{
- setState(510);
+ setState(548);
match(NOT);
}
}
- setState(513);
+ setState(551);
((PredicateContext)_localctx).kind = match(IN);
- setState(514);
+ setState(552);
match(T__0);
- setState(515);
+ setState(553);
valueExpression(0);
- setState(520);
+ setState(558);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==T__2) {
{
{
- setState(516);
+ setState(554);
match(T__2);
- setState(517);
+ setState(555);
valueExpression(0);
}
}
- setState(522);
+ setState(560);
_errHandler.sync(this);
_la = _input.LA(1);
}
- setState(523);
+ setState(561);
match(T__1);
}
break;
case 3:
enterOuterAlt(_localctx, 3);
{
- setState(526);
+ setState(564);
_la = _input.LA(1);
if (_la==NOT) {
{
- setState(525);
+ setState(563);
match(NOT);
}
}
- setState(528);
+ setState(566);
((PredicateContext)_localctx).kind = match(IN);
- setState(529);
+ setState(567);
match(T__0);
- setState(530);
+ setState(568);
query();
- setState(531);
+ setState(569);
match(T__1);
}
break;
case 4:
enterOuterAlt(_localctx, 4);
{
- setState(534);
+ setState(572);
_la = _input.LA(1);
if (_la==NOT) {
{
- setState(533);
+ setState(571);
match(NOT);
}
}
- setState(536);
+ setState(574);
((PredicateContext)_localctx).kind = match(LIKE);
- setState(537);
+ setState(575);
pattern();
}
break;
case 5:
enterOuterAlt(_localctx, 5);
{
- setState(539);
+ setState(577);
_la = _input.LA(1);
if (_la==NOT) {
{
- setState(538);
+ setState(576);
match(NOT);
}
}
- setState(541);
+ setState(579);
((PredicateContext)_localctx).kind = match(RLIKE);
- setState(542);
+ setState(580);
((PredicateContext)_localctx).regex = string();
}
break;
case 6:
enterOuterAlt(_localctx, 6);
{
- setState(543);
+ setState(581);
match(IS);
- setState(545);
+ setState(583);
_la = _input.LA(1);
if (_la==NOT) {
{
- setState(544);
+ setState(582);
match(NOT);
}
}
- setState(547);
+ setState(585);
((PredicateContext)_localctx).kind = match(NULL);
}
break;
@@ -3583,13 +3852,13 @@ class SqlBaseParser extends Parser {
public final LikePatternContext likePattern() throws RecognitionException {
LikePatternContext _localctx = new LikePatternContext(_ctx, getState());
- enterRule(_localctx, 52, RULE_likePattern);
+ enterRule(_localctx, 60, RULE_likePattern);
try {
enterOuterAlt(_localctx, 1);
{
- setState(550);
+ setState(588);
match(LIKE);
- setState(551);
+ setState(589);
pattern();
}
}
@@ -3633,18 +3902,18 @@ class SqlBaseParser extends Parser {
public final PatternContext pattern() throws RecognitionException {
PatternContext _localctx = new PatternContext(_ctx, getState());
- enterRule(_localctx, 54, RULE_pattern);
+ enterRule(_localctx, 62, RULE_pattern);
try {
enterOuterAlt(_localctx, 1);
{
- setState(553);
+ setState(591);
((PatternContext)_localctx).value = string();
- setState(555);
+ setState(593);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,77,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,81,_ctx) ) {
case 1:
{
- setState(554);
+ setState(592);
patternEscape();
}
break;
@@ -3690,27 +3959,27 @@ class SqlBaseParser extends Parser {
public final PatternEscapeContext patternEscape() throws RecognitionException {
PatternEscapeContext _localctx = new PatternEscapeContext(_ctx, getState());
- enterRule(_localctx, 56, RULE_patternEscape);
+ enterRule(_localctx, 64, RULE_patternEscape);
try {
- setState(563);
+ setState(601);
switch (_input.LA(1)) {
case ESCAPE:
enterOuterAlt(_localctx, 1);
{
- setState(557);
+ setState(595);
match(ESCAPE);
- setState(558);
+ setState(596);
((PatternEscapeContext)_localctx).escape = string();
}
break;
case ESCAPE_ESC:
enterOuterAlt(_localctx, 2);
{
- setState(559);
+ setState(597);
match(ESCAPE_ESC);
- setState(560);
+ setState(598);
((PatternEscapeContext)_localctx).escape = string();
- setState(561);
+ setState(599);
match(ESC_END);
}
break;
@@ -3848,14 +4117,14 @@ class SqlBaseParser extends Parser {
int _parentState = getState();
ValueExpressionContext _localctx = new ValueExpressionContext(_ctx, _parentState);
ValueExpressionContext _prevctx = _localctx;
- int _startState = 58;
- enterRecursionRule(_localctx, 58, RULE_valueExpression, _p);
+ int _startState = 66;
+ enterRecursionRule(_localctx, 66, RULE_valueExpression, _p);
int _la;
try {
int _alt;
enterOuterAlt(_localctx, 1);
{
- setState(569);
+ setState(607);
switch (_input.LA(1)) {
case T__0:
case ANALYZE:
@@ -3891,6 +4160,7 @@ class SqlBaseParser extends Parser {
case OPTIMIZED:
case PARSED:
case PHYSICAL:
+ case PIVOT:
case PLAN:
case RIGHT:
case RLIKE:
@@ -3925,7 +4195,7 @@ class SqlBaseParser extends Parser {
_ctx = _localctx;
_prevctx = _localctx;
- setState(566);
+ setState(604);
primaryExpression(0);
}
break;
@@ -3935,7 +4205,7 @@ class SqlBaseParser extends Parser {
_localctx = new ArithmeticUnaryContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(567);
+ setState(605);
((ArithmeticUnaryContext)_localctx).operator = _input.LT(1);
_la = _input.LA(1);
if ( !(_la==PLUS || _la==MINUS) ) {
@@ -3943,7 +4213,7 @@ class SqlBaseParser extends Parser {
} else {
consume();
}
- setState(568);
+ setState(606);
valueExpression(4);
}
break;
@@ -3951,33 +4221,33 @@ class SqlBaseParser extends Parser {
throw new NoViableAltException(this);
}
_ctx.stop = _input.LT(-1);
- setState(583);
+ setState(621);
_errHandler.sync(this);
- _alt = getInterpreter().adaptivePredict(_input,81,_ctx);
+ _alt = getInterpreter().adaptivePredict(_input,85,_ctx);
while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
if ( _alt==1 ) {
if ( _parseListeners!=null ) triggerExitRuleEvent();
_prevctx = _localctx;
{
- setState(581);
+ setState(619);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,80,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,84,_ctx) ) {
case 1:
{
_localctx = new ArithmeticBinaryContext(new ValueExpressionContext(_parentctx, _parentState));
((ArithmeticBinaryContext)_localctx).left = _prevctx;
pushNewRecursionContext(_localctx, _startState, RULE_valueExpression);
- setState(571);
+ setState(609);
if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)");
- setState(572);
+ setState(610);
((ArithmeticBinaryContext)_localctx).operator = _input.LT(1);
_la = _input.LA(1);
- if ( !(((((_la - 117)) & ~0x3f) == 0 && ((1L << (_la - 117)) & ((1L << (ASTERISK - 117)) | (1L << (SLASH - 117)) | (1L << (PERCENT - 117)))) != 0)) ) {
+ if ( !(((((_la - 119)) & ~0x3f) == 0 && ((1L << (_la - 119)) & ((1L << (ASTERISK - 119)) | (1L << (SLASH - 119)) | (1L << (PERCENT - 119)))) != 0)) ) {
((ArithmeticBinaryContext)_localctx).operator = (Token)_errHandler.recoverInline(this);
} else {
consume();
}
- setState(573);
+ setState(611);
((ArithmeticBinaryContext)_localctx).right = valueExpression(4);
}
break;
@@ -3986,9 +4256,9 @@ class SqlBaseParser extends Parser {
_localctx = new ArithmeticBinaryContext(new ValueExpressionContext(_parentctx, _parentState));
((ArithmeticBinaryContext)_localctx).left = _prevctx;
pushNewRecursionContext(_localctx, _startState, RULE_valueExpression);
- setState(574);
+ setState(612);
if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)");
- setState(575);
+ setState(613);
((ArithmeticBinaryContext)_localctx).operator = _input.LT(1);
_la = _input.LA(1);
if ( !(_la==PLUS || _la==MINUS) ) {
@@ -3996,7 +4266,7 @@ class SqlBaseParser extends Parser {
} else {
consume();
}
- setState(576);
+ setState(614);
((ArithmeticBinaryContext)_localctx).right = valueExpression(3);
}
break;
@@ -4005,20 +4275,20 @@ class SqlBaseParser extends Parser {
_localctx = new ComparisonContext(new ValueExpressionContext(_parentctx, _parentState));
((ComparisonContext)_localctx).left = _prevctx;
pushNewRecursionContext(_localctx, _startState, RULE_valueExpression);
- setState(577);
+ setState(615);
if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(578);
+ setState(616);
comparisonOperator();
- setState(579);
+ setState(617);
((ComparisonContext)_localctx).right = valueExpression(2);
}
break;
}
}
}
- setState(585);
+ setState(623);
_errHandler.sync(this);
- _alt = getInterpreter().adaptivePredict(_input,81,_ctx);
+ _alt = getInterpreter().adaptivePredict(_input,85,_ctx);
}
}
}
@@ -4283,23 +4553,23 @@ class SqlBaseParser extends Parser {
int _parentState = getState();
PrimaryExpressionContext _localctx = new PrimaryExpressionContext(_ctx, _parentState);
PrimaryExpressionContext _prevctx = _localctx;
- int _startState = 60;
- enterRecursionRule(_localctx, 60, RULE_primaryExpression, _p);
+ int _startState = 68;
+ enterRecursionRule(_localctx, 68, RULE_primaryExpression, _p);
int _la;
try {
int _alt;
enterOuterAlt(_localctx, 1);
{
- setState(622);
+ setState(660);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,86,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,90,_ctx) ) {
case 1:
{
_localctx = new CastContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(587);
+ setState(625);
castExpression();
}
break;
@@ -4308,7 +4578,7 @@ class SqlBaseParser extends Parser {
_localctx = new ExtractContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(588);
+ setState(626);
extractExpression();
}
break;
@@ -4317,7 +4587,7 @@ class SqlBaseParser extends Parser {
_localctx = new CurrentDateTimeFunctionContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(589);
+ setState(627);
builtinDateTimeFunction();
}
break;
@@ -4326,7 +4596,7 @@ class SqlBaseParser extends Parser {
_localctx = new ConstantDefaultContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(590);
+ setState(628);
constant();
}
break;
@@ -4335,18 +4605,18 @@ class SqlBaseParser extends Parser {
_localctx = new StarContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(594);
+ setState(632);
_la = _input.LA(1);
- if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 69)) & ~0x3f) == 0 && ((1L << (_la - 69)) & ((1L << (OPTIMIZED - 69)) | (1L << (PARSED - 69)) | (1L << (PHYSICAL - 69)) | (1L << (PLAN - 69)) | (1L << (RLIKE - 69)) | (1L << (QUERY - 69)) | (1L << (SCHEMAS - 69)) | (1L << (SECOND - 69)) | (1L << (SHOW - 69)) | (1L << (SYS - 69)) | (1L << (TABLES - 69)) | (1L << (TEXT - 69)) | (1L << (TYPE - 69)) | (1L << (TYPES - 69)) | (1L << (VERIFY - 69)) | (1L << (YEAR - 69)) | (1L << (IDENTIFIER - 69)) | (1L << (DIGIT_IDENTIFIER - 69)) | (1L << (QUOTED_IDENTIFIER - 69)) | (1L << (BACKQUOTED_IDENTIFIER - 69)))) != 0)) {
+ if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OPTIMIZED - 70)) | (1L << (PARSED - 70)) | (1L << (PHYSICAL - 70)) | (1L << (PIVOT - 70)) | (1L << (PLAN - 70)) | (1L << (RLIKE - 70)) | (1L << (QUERY - 70)) | (1L << (SCHEMAS - 70)) | (1L << (SECOND - 70)) | (1L << (SHOW - 70)) | (1L << (SYS - 70)) | (1L << (TABLES - 70)) | (1L << (TEXT - 70)) | (1L << (TYPE - 70)) | (1L << (TYPES - 70)) | (1L << (VERIFY - 70)) | (1L << (YEAR - 70)) | (1L << (IDENTIFIER - 70)) | (1L << (DIGIT_IDENTIFIER - 70)) | (1L << (QUOTED_IDENTIFIER - 70)) | (1L << (BACKQUOTED_IDENTIFIER - 70)))) != 0)) {
{
- setState(591);
+ setState(629);
qualifiedName();
- setState(592);
+ setState(630);
match(DOT);
}
}
- setState(596);
+ setState(634);
match(ASTERISK);
}
break;
@@ -4355,7 +4625,7 @@ class SqlBaseParser extends Parser {
_localctx = new FunctionContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(597);
+ setState(635);
functionExpression();
}
break;
@@ -4364,11 +4634,11 @@ class SqlBaseParser extends Parser {
_localctx = new SubqueryExpressionContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(598);
+ setState(636);
match(T__0);
- setState(599);
+ setState(637);
query();
- setState(600);
+ setState(638);
match(T__1);
}
break;
@@ -4377,7 +4647,7 @@ class SqlBaseParser extends Parser {
_localctx = new DereferenceContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(602);
+ setState(640);
qualifiedName();
}
break;
@@ -4386,11 +4656,11 @@ class SqlBaseParser extends Parser {
_localctx = new ParenthesizedExpressionContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(603);
+ setState(641);
match(T__0);
- setState(604);
+ setState(642);
expression();
- setState(605);
+ setState(643);
match(T__1);
}
break;
@@ -4399,51 +4669,51 @@ class SqlBaseParser extends Parser {
_localctx = new CaseContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
- setState(607);
+ setState(645);
match(CASE);
- setState(609);
+ setState(647);
_la = _input.LA(1);
- if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (NOT - 65)) | (1L << (NULL - 65)) | (1L << (OPTIMIZED - 65)) | (1L << (PARSED - 65)) | (1L << (PHYSICAL - 65)) | (1L << (PLAN - 65)) | (1L << (RIGHT - 65)) | (1L << (RLIKE - 65)) | (1L << (QUERY - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SECOND - 65)) | (1L << (SHOW - 65)) | (1L << (SYS - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TRUE - 65)) | (1L << (TYPE - 65)) | (1L << (TYPES - 65)) | (1L << (VERIFY - 65)) | (1L << (YEAR - 65)) | (1L << (FUNCTION_ESC - 65)) | (1L << (DATE_ESC - 65)) | (1L << (TIME_ESC - 65)) | (1L << (TIMESTAMP_ESC - 65)) | (1L << (GUID_ESC - 65)) | (1L << (PLUS - 65)) | (1L << (MINUS - 65)) | (1L << (ASTERISK - 65)) | (1L << (PARAM - 65)) | (1L << (STRING - 65)) | (1L << (INTEGER_VALUE - 65)) | (1L << (DECIMAL_VALUE - 65)) | (1L << (IDENTIFIER - 65)) | (1L << (DIGIT_IDENTIFIER - 65)))) != 0) || _la==QUOTED_IDENTIFIER || _la==BACKQUOTED_IDENTIFIER) {
+ if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 66)) & ~0x3f) == 0 && ((1L << (_la - 66)) & ((1L << (NOT - 66)) | (1L << (NULL - 66)) | (1L << (OPTIMIZED - 66)) | (1L << (PARSED - 66)) | (1L << (PHYSICAL - 66)) | (1L << (PIVOT - 66)) | (1L << (PLAN - 66)) | (1L << (RIGHT - 66)) | (1L << (RLIKE - 66)) | (1L << (QUERY - 66)) | (1L << (SCHEMAS - 66)) | (1L << (SECOND - 66)) | (1L << (SHOW - 66)) | (1L << (SYS - 66)) | (1L << (TABLES - 66)) | (1L << (TEXT - 66)) | (1L << (TRUE - 66)) | (1L << (TYPE - 66)) | (1L << (TYPES - 66)) | (1L << (VERIFY - 66)) | (1L << (YEAR - 66)) | (1L << (FUNCTION_ESC - 66)) | (1L << (DATE_ESC - 66)) | (1L << (TIME_ESC - 66)) | (1L << (TIMESTAMP_ESC - 66)) | (1L << (GUID_ESC - 66)) | (1L << (PLUS - 66)) | (1L << (MINUS - 66)) | (1L << (ASTERISK - 66)) | (1L << (PARAM - 66)) | (1L << (STRING - 66)) | (1L << (INTEGER_VALUE - 66)) | (1L << (DECIMAL_VALUE - 66)) | (1L << (IDENTIFIER - 66)))) != 0) || ((((_la - 130)) & ~0x3f) == 0 && ((1L << (_la - 130)) & ((1L << (DIGIT_IDENTIFIER - 130)) | (1L << (QUOTED_IDENTIFIER - 130)) | (1L << (BACKQUOTED_IDENTIFIER - 130)))) != 0)) {
{
- setState(608);
+ setState(646);
((CaseContext)_localctx).operand = booleanExpression(0);
}
}
- setState(612);
+ setState(650);
_errHandler.sync(this);
_la = _input.LA(1);
do {
{
{
- setState(611);
+ setState(649);
whenClause();
}
}
- setState(614);
+ setState(652);
_errHandler.sync(this);
_la = _input.LA(1);
} while ( _la==WHEN );
- setState(618);
+ setState(656);
_la = _input.LA(1);
if (_la==ELSE) {
{
- setState(616);
+ setState(654);
match(ELSE);
- setState(617);
+ setState(655);
((CaseContext)_localctx).elseClause = booleanExpression(0);
}
}
- setState(620);
+ setState(658);
match(END);
}
break;
}
_ctx.stop = _input.LT(-1);
- setState(629);
+ setState(667);
_errHandler.sync(this);
- _alt = getInterpreter().adaptivePredict(_input,87,_ctx);
+ _alt = getInterpreter().adaptivePredict(_input,91,_ctx);
while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
if ( _alt==1 ) {
if ( _parseListeners!=null ) triggerExitRuleEvent();
@@ -4452,18 +4722,18 @@ class SqlBaseParser extends Parser {
{
_localctx = new CastOperatorExpressionContext(new PrimaryExpressionContext(_parentctx, _parentState));
pushNewRecursionContext(_localctx, _startState, RULE_primaryExpression);
- setState(624);
+ setState(662);
if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)");
- setState(625);
+ setState(663);
match(CAST_OP);
- setState(626);
+ setState(664);
dataType();
}
}
}
- setState(631);
+ setState(669);
_errHandler.sync(this);
- _alt = getInterpreter().adaptivePredict(_input,87,_ctx);
+ _alt = getInterpreter().adaptivePredict(_input,91,_ctx);
}
}
}
@@ -4504,28 +4774,28 @@ class SqlBaseParser extends Parser {
public final BuiltinDateTimeFunctionContext builtinDateTimeFunction() throws RecognitionException {
BuiltinDateTimeFunctionContext _localctx = new BuiltinDateTimeFunctionContext(_ctx, getState());
- enterRule(_localctx, 62, RULE_builtinDateTimeFunction);
+ enterRule(_localctx, 70, RULE_builtinDateTimeFunction);
try {
- setState(635);
+ setState(673);
switch (_input.LA(1)) {
case CURRENT_TIMESTAMP:
enterOuterAlt(_localctx, 1);
{
- setState(632);
+ setState(670);
((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_TIMESTAMP);
}
break;
case CURRENT_DATE:
enterOuterAlt(_localctx, 2);
{
- setState(633);
+ setState(671);
((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_DATE);
}
break;
case CURRENT_TIME:
enterOuterAlt(_localctx, 3);
{
- setState(634);
+ setState(672);
((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_TIME);
}
break;
@@ -4574,44 +4844,44 @@ class SqlBaseParser extends Parser {
public final CastExpressionContext castExpression() throws RecognitionException {
CastExpressionContext _localctx = new CastExpressionContext(_ctx, getState());
- enterRule(_localctx, 64, RULE_castExpression);
+ enterRule(_localctx, 72, RULE_castExpression);
try {
- setState(647);
+ setState(685);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,89,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,93,_ctx) ) {
case 1:
enterOuterAlt(_localctx, 1);
{
- setState(637);
+ setState(675);
castTemplate();
}
break;
case 2:
enterOuterAlt(_localctx, 2);
{
- setState(638);
+ setState(676);
match(FUNCTION_ESC);
- setState(639);
+ setState(677);
castTemplate();
- setState(640);
+ setState(678);
match(ESC_END);
}
break;
case 3:
enterOuterAlt(_localctx, 3);
{
- setState(642);
+ setState(680);
convertTemplate();
}
break;
case 4:
enterOuterAlt(_localctx, 4);
{
- setState(643);
+ setState(681);
match(FUNCTION_ESC);
- setState(644);
+ setState(682);
convertTemplate();
- setState(645);
+ setState(683);
match(ESC_END);
}
break;
@@ -4658,21 +4928,21 @@ class SqlBaseParser extends Parser {
public final CastTemplateContext castTemplate() throws RecognitionException {
CastTemplateContext _localctx = new CastTemplateContext(_ctx, getState());
- enterRule(_localctx, 66, RULE_castTemplate);
+ enterRule(_localctx, 74, RULE_castTemplate);
try {
enterOuterAlt(_localctx, 1);
{
- setState(649);
+ setState(687);
match(CAST);
- setState(650);
+ setState(688);
match(T__0);
- setState(651);
+ setState(689);
expression();
- setState(652);
+ setState(690);
match(AS);
- setState(653);
+ setState(691);
dataType();
- setState(654);
+ setState(692);
match(T__1);
}
}
@@ -4716,21 +4986,21 @@ class SqlBaseParser extends Parser {
public final ConvertTemplateContext convertTemplate() throws RecognitionException {
ConvertTemplateContext _localctx = new ConvertTemplateContext(_ctx, getState());
- enterRule(_localctx, 68, RULE_convertTemplate);
+ enterRule(_localctx, 76, RULE_convertTemplate);
try {
enterOuterAlt(_localctx, 1);
{
- setState(656);
+ setState(694);
match(CONVERT);
- setState(657);
+ setState(695);
match(T__0);
- setState(658);
+ setState(696);
expression();
- setState(659);
+ setState(697);
match(T__2);
- setState(660);
+ setState(698);
dataType();
- setState(661);
+ setState(699);
match(T__1);
}
}
@@ -4772,25 +5042,25 @@ class SqlBaseParser extends Parser {
public final ExtractExpressionContext extractExpression() throws RecognitionException {
ExtractExpressionContext _localctx = new ExtractExpressionContext(_ctx, getState());
- enterRule(_localctx, 70, RULE_extractExpression);
+ enterRule(_localctx, 78, RULE_extractExpression);
try {
- setState(668);
+ setState(706);
switch (_input.LA(1)) {
case EXTRACT:
enterOuterAlt(_localctx, 1);
{
- setState(663);
+ setState(701);
extractTemplate();
}
break;
case FUNCTION_ESC:
enterOuterAlt(_localctx, 2);
{
- setState(664);
+ setState(702);
match(FUNCTION_ESC);
- setState(665);
+ setState(703);
extractTemplate();
- setState(666);
+ setState(704);
match(ESC_END);
}
break;
@@ -4840,21 +5110,21 @@ class SqlBaseParser extends Parser {
public final ExtractTemplateContext extractTemplate() throws RecognitionException {
ExtractTemplateContext _localctx = new ExtractTemplateContext(_ctx, getState());
- enterRule(_localctx, 72, RULE_extractTemplate);
+ enterRule(_localctx, 80, RULE_extractTemplate);
try {
enterOuterAlt(_localctx, 1);
{
- setState(670);
+ setState(708);
match(EXTRACT);
- setState(671);
+ setState(709);
match(T__0);
- setState(672);
+ setState(710);
((ExtractTemplateContext)_localctx).field = identifier();
- setState(673);
+ setState(711);
match(FROM);
- setState(674);
+ setState(712);
valueExpression(0);
- setState(675);
+ setState(713);
match(T__1);
}
}
@@ -4895,9 +5165,9 @@ class SqlBaseParser extends Parser {
public final FunctionExpressionContext functionExpression() throws RecognitionException {
FunctionExpressionContext _localctx = new FunctionExpressionContext(_ctx, getState());
- enterRule(_localctx, 74, RULE_functionExpression);
+ enterRule(_localctx, 82, RULE_functionExpression);
try {
- setState(682);
+ setState(720);
switch (_input.LA(1)) {
case ANALYZE:
case ANALYZED:
@@ -4926,6 +5196,7 @@ class SqlBaseParser extends Parser {
case OPTIMIZED:
case PARSED:
case PHYSICAL:
+ case PIVOT:
case PLAN:
case RIGHT:
case RLIKE:
@@ -4946,18 +5217,18 @@ class SqlBaseParser extends Parser {
case BACKQUOTED_IDENTIFIER:
enterOuterAlt(_localctx, 1);
{
- setState(677);
+ setState(715);
functionTemplate();
}
break;
case FUNCTION_ESC:
enterOuterAlt(_localctx, 2);
{
- setState(678);
+ setState(716);
match(FUNCTION_ESC);
- setState(679);
+ setState(717);
functionTemplate();
- setState(680);
+ setState(718);
match(ESC_END);
}
break;
@@ -5010,50 +5281,50 @@ class SqlBaseParser extends Parser {
public final FunctionTemplateContext functionTemplate() throws RecognitionException {
FunctionTemplateContext _localctx = new FunctionTemplateContext(_ctx, getState());
- enterRule(_localctx, 76, RULE_functionTemplate);
+ enterRule(_localctx, 84, RULE_functionTemplate);
int _la;
try {
enterOuterAlt(_localctx, 1);
{
- setState(684);
+ setState(722);
functionName();
- setState(685);
+ setState(723);
match(T__0);
- setState(697);
+ setState(735);
_la = _input.LA(1);
- if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ALL) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << DISTINCT) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (NOT - 65)) | (1L << (NULL - 65)) | (1L << (OPTIMIZED - 65)) | (1L << (PARSED - 65)) | (1L << (PHYSICAL - 65)) | (1L << (PLAN - 65)) | (1L << (RIGHT - 65)) | (1L << (RLIKE - 65)) | (1L << (QUERY - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SECOND - 65)) | (1L << (SHOW - 65)) | (1L << (SYS - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TRUE - 65)) | (1L << (TYPE - 65)) | (1L << (TYPES - 65)) | (1L << (VERIFY - 65)) | (1L << (YEAR - 65)) | (1L << (FUNCTION_ESC - 65)) | (1L << (DATE_ESC - 65)) | (1L << (TIME_ESC - 65)) | (1L << (TIMESTAMP_ESC - 65)) | (1L << (GUID_ESC - 65)) | (1L << (PLUS - 65)) | (1L << (MINUS - 65)) | (1L << (ASTERISK - 65)) | (1L << (PARAM - 65)) | (1L << (STRING - 65)) | (1L << (INTEGER_VALUE - 65)) | (1L << (DECIMAL_VALUE - 65)) | (1L << (IDENTIFIER - 65)) | (1L << (DIGIT_IDENTIFIER - 65)))) != 0) || _la==QUOTED_IDENTIFIER || _la==BACKQUOTED_IDENTIFIER) {
+ if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ALL) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << DISTINCT) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 66)) & ~0x3f) == 0 && ((1L << (_la - 66)) & ((1L << (NOT - 66)) | (1L << (NULL - 66)) | (1L << (OPTIMIZED - 66)) | (1L << (PARSED - 66)) | (1L << (PHYSICAL - 66)) | (1L << (PIVOT - 66)) | (1L << (PLAN - 66)) | (1L << (RIGHT - 66)) | (1L << (RLIKE - 66)) | (1L << (QUERY - 66)) | (1L << (SCHEMAS - 66)) | (1L << (SECOND - 66)) | (1L << (SHOW - 66)) | (1L << (SYS - 66)) | (1L << (TABLES - 66)) | (1L << (TEXT - 66)) | (1L << (TRUE - 66)) | (1L << (TYPE - 66)) | (1L << (TYPES - 66)) | (1L << (VERIFY - 66)) | (1L << (YEAR - 66)) | (1L << (FUNCTION_ESC - 66)) | (1L << (DATE_ESC - 66)) | (1L << (TIME_ESC - 66)) | (1L << (TIMESTAMP_ESC - 66)) | (1L << (GUID_ESC - 66)) | (1L << (PLUS - 66)) | (1L << (MINUS - 66)) | (1L << (ASTERISK - 66)) | (1L << (PARAM - 66)) | (1L << (STRING - 66)) | (1L << (INTEGER_VALUE - 66)) | (1L << (DECIMAL_VALUE - 66)) | (1L << (IDENTIFIER - 66)))) != 0) || ((((_la - 130)) & ~0x3f) == 0 && ((1L << (_la - 130)) & ((1L << (DIGIT_IDENTIFIER - 130)) | (1L << (QUOTED_IDENTIFIER - 130)) | (1L << (BACKQUOTED_IDENTIFIER - 130)))) != 0)) {
{
- setState(687);
+ setState(725);
_la = _input.LA(1);
if (_la==ALL || _la==DISTINCT) {
{
- setState(686);
+ setState(724);
setQuantifier();
}
}
- setState(689);
+ setState(727);
expression();
- setState(694);
+ setState(732);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==T__2) {
{
{
- setState(690);
+ setState(728);
match(T__2);
- setState(691);
+ setState(729);
expression();
}
}
- setState(696);
+ setState(734);
_errHandler.sync(this);
_la = _input.LA(1);
}
}
}
- setState(699);
+ setState(737);
match(T__1);
}
}
@@ -5095,21 +5366,21 @@ class SqlBaseParser extends Parser {
public final FunctionNameContext functionName() throws RecognitionException {
FunctionNameContext _localctx = new FunctionNameContext(_ctx, getState());
- enterRule(_localctx, 78, RULE_functionName);
+ enterRule(_localctx, 86, RULE_functionName);
try {
- setState(704);
+ setState(742);
switch (_input.LA(1)) {
case LEFT:
enterOuterAlt(_localctx, 1);
{
- setState(701);
+ setState(739);
match(LEFT);
}
break;
case RIGHT:
enterOuterAlt(_localctx, 2);
{
- setState(702);
+ setState(740);
match(RIGHT);
}
break;
@@ -5139,6 +5410,7 @@ class SqlBaseParser extends Parser {
case OPTIMIZED:
case PARSED:
case PHYSICAL:
+ case PIVOT:
case PLAN:
case RLIKE:
case QUERY:
@@ -5158,7 +5430,7 @@ class SqlBaseParser extends Parser {
case BACKQUOTED_IDENTIFIER:
enterOuterAlt(_localctx, 3);
{
- setState(703);
+ setState(741);
identifier();
}
break;
@@ -5386,16 +5658,16 @@ class SqlBaseParser extends Parser {
public final ConstantContext constant() throws RecognitionException {
ConstantContext _localctx = new ConstantContext(_ctx, getState());
- enterRule(_localctx, 80, RULE_constant);
+ enterRule(_localctx, 88, RULE_constant);
try {
int _alt;
- setState(732);
+ setState(770);
switch (_input.LA(1)) {
case NULL:
_localctx = new NullLiteralContext(_localctx);
enterOuterAlt(_localctx, 1);
{
- setState(706);
+ setState(744);
match(NULL);
}
break;
@@ -5403,7 +5675,7 @@ class SqlBaseParser extends Parser {
_localctx = new IntervalLiteralContext(_localctx);
enterOuterAlt(_localctx, 2);
{
- setState(707);
+ setState(745);
interval();
}
break;
@@ -5412,7 +5684,7 @@ class SqlBaseParser extends Parser {
_localctx = new NumericLiteralContext(_localctx);
enterOuterAlt(_localctx, 3);
{
- setState(708);
+ setState(746);
number();
}
break;
@@ -5421,7 +5693,7 @@ class SqlBaseParser extends Parser {
_localctx = new BooleanLiteralContext(_localctx);
enterOuterAlt(_localctx, 4);
{
- setState(709);
+ setState(747);
booleanValue();
}
break;
@@ -5429,7 +5701,7 @@ class SqlBaseParser extends Parser {
_localctx = new StringLiteralContext(_localctx);
enterOuterAlt(_localctx, 5);
{
- setState(711);
+ setState(749);
_errHandler.sync(this);
_alt = 1;
do {
@@ -5437,7 +5709,7 @@ class SqlBaseParser extends Parser {
case 1:
{
{
- setState(710);
+ setState(748);
match(STRING);
}
}
@@ -5445,9 +5717,9 @@ class SqlBaseParser extends Parser {
default:
throw new NoViableAltException(this);
}
- setState(713);
+ setState(751);
_errHandler.sync(this);
- _alt = getInterpreter().adaptivePredict(_input,96,_ctx);
+ _alt = getInterpreter().adaptivePredict(_input,100,_ctx);
} while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER );
}
break;
@@ -5455,7 +5727,7 @@ class SqlBaseParser extends Parser {
_localctx = new ParamLiteralContext(_localctx);
enterOuterAlt(_localctx, 6);
{
- setState(715);
+ setState(753);
match(PARAM);
}
break;
@@ -5463,11 +5735,11 @@ class SqlBaseParser extends Parser {
_localctx = new DateEscapedLiteralContext(_localctx);
enterOuterAlt(_localctx, 7);
{
- setState(716);
+ setState(754);
match(DATE_ESC);
- setState(717);
+ setState(755);
string();
- setState(718);
+ setState(756);
match(ESC_END);
}
break;
@@ -5475,11 +5747,11 @@ class SqlBaseParser extends Parser {
_localctx = new TimeEscapedLiteralContext(_localctx);
enterOuterAlt(_localctx, 8);
{
- setState(720);
+ setState(758);
match(TIME_ESC);
- setState(721);
+ setState(759);
string();
- setState(722);
+ setState(760);
match(ESC_END);
}
break;
@@ -5487,11 +5759,11 @@ class SqlBaseParser extends Parser {
_localctx = new TimestampEscapedLiteralContext(_localctx);
enterOuterAlt(_localctx, 9);
{
- setState(724);
+ setState(762);
match(TIMESTAMP_ESC);
- setState(725);
+ setState(763);
string();
- setState(726);
+ setState(764);
match(ESC_END);
}
break;
@@ -5499,11 +5771,11 @@ class SqlBaseParser extends Parser {
_localctx = new GuidEscapedLiteralContext(_localctx);
enterOuterAlt(_localctx, 10);
{
- setState(728);
+ setState(766);
match(GUID_ESC);
- setState(729);
+ setState(767);
string();
- setState(730);
+ setState(768);
match(ESC_END);
}
break;
@@ -5551,14 +5823,14 @@ class SqlBaseParser extends Parser {
public final ComparisonOperatorContext comparisonOperator() throws RecognitionException {
ComparisonOperatorContext _localctx = new ComparisonOperatorContext(_ctx, getState());
- enterRule(_localctx, 82, RULE_comparisonOperator);
+ enterRule(_localctx, 90, RULE_comparisonOperator);
int _la;
try {
enterOuterAlt(_localctx, 1);
{
- setState(734);
+ setState(772);
_la = _input.LA(1);
- if ( !(((((_la - 108)) & ~0x3f) == 0 && ((1L << (_la - 108)) & ((1L << (EQ - 108)) | (1L << (NULLEQ - 108)) | (1L << (NEQ - 108)) | (1L << (LT - 108)) | (1L << (LTE - 108)) | (1L << (GT - 108)) | (1L << (GTE - 108)))) != 0)) ) {
+ if ( !(((((_la - 110)) & ~0x3f) == 0 && ((1L << (_la - 110)) & ((1L << (EQ - 110)) | (1L << (NULLEQ - 110)) | (1L << (NEQ - 110)) | (1L << (LT - 110)) | (1L << (LTE - 110)) | (1L << (GT - 110)) | (1L << (GTE - 110)))) != 0)) ) {
_errHandler.recoverInline(this);
} else {
consume();
@@ -5600,12 +5872,12 @@ class SqlBaseParser extends Parser {
public final BooleanValueContext booleanValue() throws RecognitionException {
BooleanValueContext _localctx = new BooleanValueContext(_ctx, getState());
- enterRule(_localctx, 84, RULE_booleanValue);
+ enterRule(_localctx, 92, RULE_booleanValue);
int _la;
try {
enterOuterAlt(_localctx, 1);
{
- setState(736);
+ setState(774);
_la = _input.LA(1);
if ( !(_la==FALSE || _la==TRUE) ) {
_errHandler.recoverInline(this);
@@ -5668,18 +5940,18 @@ class SqlBaseParser extends Parser {
public final IntervalContext interval() throws RecognitionException {
IntervalContext _localctx = new IntervalContext(_ctx, getState());
- enterRule(_localctx, 86, RULE_interval);
+ enterRule(_localctx, 94, RULE_interval);
int _la;
try {
enterOuterAlt(_localctx, 1);
{
- setState(738);
+ setState(776);
match(INTERVAL);
- setState(740);
+ setState(778);
_la = _input.LA(1);
if (_la==PLUS || _la==MINUS) {
{
- setState(739);
+ setState(777);
((IntervalContext)_localctx).sign = _input.LT(1);
_la = _input.LA(1);
if ( !(_la==PLUS || _la==MINUS) ) {
@@ -5690,35 +5962,35 @@ class SqlBaseParser extends Parser {
}
}
- setState(744);
+ setState(782);
switch (_input.LA(1)) {
case INTEGER_VALUE:
case DECIMAL_VALUE:
{
- setState(742);
+ setState(780);
((IntervalContext)_localctx).valueNumeric = number();
}
break;
case PARAM:
case STRING:
{
- setState(743);
+ setState(781);
((IntervalContext)_localctx).valuePattern = string();
}
break;
default:
throw new NoViableAltException(this);
}
- setState(746);
+ setState(784);
((IntervalContext)_localctx).leading = intervalField();
- setState(749);
+ setState(787);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,100,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,104,_ctx) ) {
case 1:
{
- setState(747);
+ setState(785);
match(TO);
- setState(748);
+ setState(786);
((IntervalContext)_localctx).trailing = intervalField();
}
break;
@@ -5770,14 +6042,14 @@ class SqlBaseParser extends Parser {
public final IntervalFieldContext intervalField() throws RecognitionException {
IntervalFieldContext _localctx = new IntervalFieldContext(_ctx, getState());
- enterRule(_localctx, 88, RULE_intervalField);
+ enterRule(_localctx, 96, RULE_intervalField);
int _la;
try {
enterOuterAlt(_localctx, 1);
{
- setState(751);
+ setState(789);
_la = _input.LA(1);
- if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << DAY) | (1L << DAYS) | (1L << HOUR) | (1L << HOURS) | (1L << MINUTE) | (1L << MINUTES) | (1L << MONTH) | (1L << MONTHS))) != 0) || ((((_la - 80)) & ~0x3f) == 0 && ((1L << (_la - 80)) & ((1L << (SECOND - 80)) | (1L << (SECONDS - 80)) | (1L << (YEAR - 80)) | (1L << (YEARS - 80)))) != 0)) ) {
+ if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << DAY) | (1L << DAYS) | (1L << HOUR) | (1L << HOURS) | (1L << MINUTE) | (1L << MINUTES) | (1L << MONTH))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (MONTHS - 64)) | (1L << (SECOND - 64)) | (1L << (SECONDS - 64)) | (1L << (YEAR - 64)) | (1L << (YEARS - 64)))) != 0)) ) {
_errHandler.recoverInline(this);
} else {
consume();
@@ -5828,12 +6100,12 @@ class SqlBaseParser extends Parser {
public final DataTypeContext dataType() throws RecognitionException {
DataTypeContext _localctx = new DataTypeContext(_ctx, getState());
- enterRule(_localctx, 90, RULE_dataType);
+ enterRule(_localctx, 98, RULE_dataType);
try {
_localctx = new PrimitiveDataTypeContext(_localctx);
enterOuterAlt(_localctx, 1);
{
- setState(753);
+ setState(791);
identifier();
}
}
@@ -5880,30 +6152,30 @@ class SqlBaseParser extends Parser {
public final QualifiedNameContext qualifiedName() throws RecognitionException {
QualifiedNameContext _localctx = new QualifiedNameContext(_ctx, getState());
- enterRule(_localctx, 92, RULE_qualifiedName);
+ enterRule(_localctx, 100, RULE_qualifiedName);
try {
int _alt;
enterOuterAlt(_localctx, 1);
{
- setState(760);
+ setState(798);
_errHandler.sync(this);
- _alt = getInterpreter().adaptivePredict(_input,101,_ctx);
+ _alt = getInterpreter().adaptivePredict(_input,105,_ctx);
while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
if ( _alt==1 ) {
{
{
- setState(755);
+ setState(793);
identifier();
- setState(756);
+ setState(794);
match(DOT);
}
}
}
- setState(762);
+ setState(800);
_errHandler.sync(this);
- _alt = getInterpreter().adaptivePredict(_input,101,_ctx);
+ _alt = getInterpreter().adaptivePredict(_input,105,_ctx);
}
- setState(763);
+ setState(801);
identifier();
}
}
@@ -5946,15 +6218,15 @@ class SqlBaseParser extends Parser {
public final IdentifierContext identifier() throws RecognitionException {
IdentifierContext _localctx = new IdentifierContext(_ctx, getState());
- enterRule(_localctx, 94, RULE_identifier);
+ enterRule(_localctx, 102, RULE_identifier);
try {
- setState(767);
+ setState(805);
switch (_input.LA(1)) {
case QUOTED_IDENTIFIER:
case BACKQUOTED_IDENTIFIER:
enterOuterAlt(_localctx, 1);
{
- setState(765);
+ setState(803);
quoteIdentifier();
}
break;
@@ -5984,6 +6256,7 @@ class SqlBaseParser extends Parser {
case OPTIMIZED:
case PARSED:
case PHYSICAL:
+ case PIVOT:
case PLAN:
case RLIKE:
case QUERY:
@@ -6001,7 +6274,7 @@ class SqlBaseParser extends Parser {
case DIGIT_IDENTIFIER:
enterOuterAlt(_localctx, 2);
{
- setState(766);
+ setState(804);
unquoteIdentifier();
}
break;
@@ -6051,46 +6324,46 @@ class SqlBaseParser extends Parser {
public final TableIdentifierContext tableIdentifier() throws RecognitionException {
TableIdentifierContext _localctx = new TableIdentifierContext(_ctx, getState());
- enterRule(_localctx, 96, RULE_tableIdentifier);
+ enterRule(_localctx, 104, RULE_tableIdentifier);
int _la;
try {
- setState(781);
+ setState(819);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,105,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,109,_ctx) ) {
case 1:
enterOuterAlt(_localctx, 1);
{
- setState(772);
+ setState(810);
_la = _input.LA(1);
- if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 69)) & ~0x3f) == 0 && ((1L << (_la - 69)) & ((1L << (OPTIMIZED - 69)) | (1L << (PARSED - 69)) | (1L << (PHYSICAL - 69)) | (1L << (PLAN - 69)) | (1L << (RLIKE - 69)) | (1L << (QUERY - 69)) | (1L << (SCHEMAS - 69)) | (1L << (SECOND - 69)) | (1L << (SHOW - 69)) | (1L << (SYS - 69)) | (1L << (TABLES - 69)) | (1L << (TEXT - 69)) | (1L << (TYPE - 69)) | (1L << (TYPES - 69)) | (1L << (VERIFY - 69)) | (1L << (YEAR - 69)) | (1L << (IDENTIFIER - 69)) | (1L << (DIGIT_IDENTIFIER - 69)) | (1L << (QUOTED_IDENTIFIER - 69)) | (1L << (BACKQUOTED_IDENTIFIER - 69)))) != 0)) {
+ if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OPTIMIZED - 70)) | (1L << (PARSED - 70)) | (1L << (PHYSICAL - 70)) | (1L << (PIVOT - 70)) | (1L << (PLAN - 70)) | (1L << (RLIKE - 70)) | (1L << (QUERY - 70)) | (1L << (SCHEMAS - 70)) | (1L << (SECOND - 70)) | (1L << (SHOW - 70)) | (1L << (SYS - 70)) | (1L << (TABLES - 70)) | (1L << (TEXT - 70)) | (1L << (TYPE - 70)) | (1L << (TYPES - 70)) | (1L << (VERIFY - 70)) | (1L << (YEAR - 70)) | (1L << (IDENTIFIER - 70)) | (1L << (DIGIT_IDENTIFIER - 70)) | (1L << (QUOTED_IDENTIFIER - 70)) | (1L << (BACKQUOTED_IDENTIFIER - 70)))) != 0)) {
{
- setState(769);
+ setState(807);
((TableIdentifierContext)_localctx).catalog = identifier();
- setState(770);
+ setState(808);
match(T__3);
}
}
- setState(774);
+ setState(812);
match(TABLE_IDENTIFIER);
}
break;
case 2:
enterOuterAlt(_localctx, 2);
{
- setState(778);
+ setState(816);
_errHandler.sync(this);
- switch ( getInterpreter().adaptivePredict(_input,104,_ctx) ) {
+ switch ( getInterpreter().adaptivePredict(_input,108,_ctx) ) {
case 1:
{
- setState(775);
+ setState(813);
((TableIdentifierContext)_localctx).catalog = identifier();
- setState(776);
+ setState(814);
match(T__3);
}
break;
}
- setState(780);
+ setState(818);
((TableIdentifierContext)_localctx).name = identifier();
}
break;
@@ -6155,15 +6428,15 @@ class SqlBaseParser extends Parser {
public final QuoteIdentifierContext quoteIdentifier() throws RecognitionException {
QuoteIdentifierContext _localctx = new QuoteIdentifierContext(_ctx, getState());
- enterRule(_localctx, 98, RULE_quoteIdentifier);
+ enterRule(_localctx, 106, RULE_quoteIdentifier);
try {
- setState(785);
+ setState(823);
switch (_input.LA(1)) {
case QUOTED_IDENTIFIER:
_localctx = new QuotedIdentifierContext(_localctx);
enterOuterAlt(_localctx, 1);
{
- setState(783);
+ setState(821);
match(QUOTED_IDENTIFIER);
}
break;
@@ -6171,7 +6444,7 @@ class SqlBaseParser extends Parser {
_localctx = new BackQuotedIdentifierContext(_localctx);
enterOuterAlt(_localctx, 2);
{
- setState(784);
+ setState(822);
match(BACKQUOTED_IDENTIFIER);
}
break;
@@ -6241,15 +6514,15 @@ class SqlBaseParser extends Parser {
public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionException {
UnquoteIdentifierContext _localctx = new UnquoteIdentifierContext(_ctx, getState());
- enterRule(_localctx, 100, RULE_unquoteIdentifier);
+ enterRule(_localctx, 108, RULE_unquoteIdentifier);
try {
- setState(790);
+ setState(828);
switch (_input.LA(1)) {
case IDENTIFIER:
_localctx = new UnquotedIdentifierContext(_localctx);
enterOuterAlt(_localctx, 1);
{
- setState(787);
+ setState(825);
match(IDENTIFIER);
}
break;
@@ -6279,6 +6552,7 @@ class SqlBaseParser extends Parser {
case OPTIMIZED:
case PARSED:
case PHYSICAL:
+ case PIVOT:
case PLAN:
case RLIKE:
case QUERY:
@@ -6295,7 +6569,7 @@ class SqlBaseParser extends Parser {
_localctx = new UnquotedIdentifierContext(_localctx);
enterOuterAlt(_localctx, 2);
{
- setState(788);
+ setState(826);
nonReserved();
}
break;
@@ -6303,7 +6577,7 @@ class SqlBaseParser extends Parser {
_localctx = new DigitIdentifierContext(_localctx);
enterOuterAlt(_localctx, 3);
{
- setState(789);
+ setState(827);
match(DIGIT_IDENTIFIER);
}
break;
@@ -6370,15 +6644,15 @@ class SqlBaseParser extends Parser {
public final NumberContext number() throws RecognitionException {
NumberContext _localctx = new NumberContext(_ctx, getState());
- enterRule(_localctx, 102, RULE_number);
+ enterRule(_localctx, 110, RULE_number);
try {
- setState(794);
+ setState(832);
switch (_input.LA(1)) {
case DECIMAL_VALUE:
_localctx = new DecimalLiteralContext(_localctx);
enterOuterAlt(_localctx, 1);
{
- setState(792);
+ setState(830);
match(DECIMAL_VALUE);
}
break;
@@ -6386,7 +6660,7 @@ class SqlBaseParser extends Parser {
_localctx = new IntegerLiteralContext(_localctx);
enterOuterAlt(_localctx, 2);
{
- setState(793);
+ setState(831);
match(INTEGER_VALUE);
}
break;
@@ -6429,12 +6703,12 @@ class SqlBaseParser extends Parser {
public final StringContext string() throws RecognitionException {
StringContext _localctx = new StringContext(_ctx, getState());
- enterRule(_localctx, 104, RULE_string);
+ enterRule(_localctx, 112, RULE_string);
int _la;
try {
enterOuterAlt(_localctx, 1);
{
- setState(796);
+ setState(834);
_la = _input.LA(1);
if ( !(_la==PARAM || _la==STRING) ) {
_errHandler.recoverInline(this);
@@ -6486,17 +6760,17 @@ class SqlBaseParser extends Parser {
public final WhenClauseContext whenClause() throws RecognitionException {
WhenClauseContext _localctx = new WhenClauseContext(_ctx, getState());
- enterRule(_localctx, 106, RULE_whenClause);
+ enterRule(_localctx, 114, RULE_whenClause);
try {
enterOuterAlt(_localctx, 1);
{
- setState(798);
+ setState(836);
match(WHEN);
- setState(799);
+ setState(837);
((WhenClauseContext)_localctx).condition = expression();
- setState(800);
+ setState(838);
match(THEN);
- setState(801);
+ setState(839);
((WhenClauseContext)_localctx).result = expression();
}
}
@@ -6538,6 +6812,7 @@ class SqlBaseParser extends Parser {
public TerminalNode OPTIMIZED() { return getToken(SqlBaseParser.OPTIMIZED, 0); }
public TerminalNode PARSED() { return getToken(SqlBaseParser.PARSED, 0); }
public TerminalNode PHYSICAL() { return getToken(SqlBaseParser.PHYSICAL, 0); }
+ public TerminalNode PIVOT() { return getToken(SqlBaseParser.PIVOT, 0); }
public TerminalNode PLAN() { return getToken(SqlBaseParser.PLAN, 0); }
public TerminalNode QUERY() { return getToken(SqlBaseParser.QUERY, 0); }
public TerminalNode RLIKE() { return getToken(SqlBaseParser.RLIKE, 0); }
@@ -6572,14 +6847,14 @@ class SqlBaseParser extends Parser {
public final NonReservedContext nonReserved() throws RecognitionException {
NonReservedContext _localctx = new NonReservedContext(_ctx, getState());
- enterRule(_localctx, 108, RULE_nonReserved);
+ enterRule(_localctx, 116, RULE_nonReserved);
int _la;
try {
enterOuterAlt(_localctx, 1);
{
- setState(803);
+ setState(841);
_la = _input.LA(1);
- if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 69)) & ~0x3f) == 0 && ((1L << (_la - 69)) & ((1L << (OPTIMIZED - 69)) | (1L << (PARSED - 69)) | (1L << (PHYSICAL - 69)) | (1L << (PLAN - 69)) | (1L << (RLIKE - 69)) | (1L << (QUERY - 69)) | (1L << (SCHEMAS - 69)) | (1L << (SECOND - 69)) | (1L << (SHOW - 69)) | (1L << (SYS - 69)) | (1L << (TABLES - 69)) | (1L << (TEXT - 69)) | (1L << (TYPE - 69)) | (1L << (TYPES - 69)) | (1L << (VERIFY - 69)) | (1L << (YEAR - 69)))) != 0)) ) {
+ if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OPTIMIZED - 70)) | (1L << (PARSED - 70)) | (1L << (PHYSICAL - 70)) | (1L << (PIVOT - 70)) | (1L << (PLAN - 70)) | (1L << (RLIKE - 70)) | (1L << (QUERY - 70)) | (1L << (SCHEMAS - 70)) | (1L << (SECOND - 70)) | (1L << (SHOW - 70)) | (1L << (SYS - 70)) | (1L << (TABLES - 70)) | (1L << (TEXT - 70)) | (1L << (TYPE - 70)) | (1L << (TYPES - 70)) | (1L << (VERIFY - 70)) | (1L << (YEAR - 70)))) != 0)) ) {
_errHandler.recoverInline(this);
} else {
consume();
@@ -6599,11 +6874,11 @@ class SqlBaseParser extends Parser {
public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) {
switch (ruleIndex) {
- case 22:
+ case 26:
return booleanExpression_sempred((BooleanExpressionContext)_localctx, predIndex);
- case 29:
+ case 33:
return valueExpression_sempred((ValueExpressionContext)_localctx, predIndex);
- case 30:
+ case 34:
return primaryExpression_sempred((PrimaryExpressionContext)_localctx, predIndex);
}
return true;
@@ -6637,328 +6912,341 @@ class SqlBaseParser extends Parser {
}
public static final String _serializedATN =
- "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3\u008a\u0328\4\2\t"+
+ "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3\u008c\u034e\4\2\t"+
"\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13"+
"\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+
"\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+
"\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+
"\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4"+
",\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t"+
- "\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\3\2\3\2\3\2\3\3\3\3\3\3\3\4\3"+
- "\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4\u0080\n\4\f\4\16\4\u0083\13\4\3\4\5"+
- "\4\u0086\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4\u008f\n\4\f\4\16\4\u0092"+
- "\13\4\3\4\5\4\u0095\n\4\3\4\3\4\3\4\3\4\3\4\5\4\u009c\n\4\3\4\3\4\5\4"+
- "\u00a0\n\4\3\4\3\4\3\4\3\4\5\4\u00a6\n\4\3\4\3\4\3\4\5\4\u00ab\n\4\3\4"+
- "\3\4\3\4\5\4\u00b0\n\4\3\4\3\4\5\4\u00b4\n\4\3\4\3\4\3\4\5\4\u00b9\n\4"+
- "\3\4\3\4\3\4\3\4\3\4\3\4\5\4\u00c1\n\4\3\4\3\4\5\4\u00c5\n\4\3\4\3\4\3"+
- "\4\3\4\7\4\u00cb\n\4\f\4\16\4\u00ce\13\4\5\4\u00d0\n\4\3\4\3\4\3\4\3\4"+
- "\5\4\u00d6\n\4\3\4\3\4\3\4\5\4\u00db\n\4\3\4\5\4\u00de\n\4\3\4\3\4\3\4"+
- "\5\4\u00e3\n\4\3\4\5\4\u00e6\n\4\5\4\u00e8\n\4\3\5\3\5\3\5\3\5\7\5\u00ee"+
- "\n\5\f\5\16\5\u00f1\13\5\5\5\u00f3\n\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\6"+
- "\7\6\u00fd\n\6\f\6\16\6\u0100\13\6\5\6\u0102\n\6\3\6\5\6\u0105\n\6\3\7"+
- "\3\7\3\7\3\7\3\7\5\7\u010c\n\7\3\b\3\b\3\b\3\b\3\b\5\b\u0113\n\b\3\t\3"+
- "\t\5\t\u0117\n\t\3\t\3\t\5\t\u011b\n\t\3\n\3\n\5\n\u011f\n\n\3\n\3\n\3"+
- "\n\7\n\u0124\n\n\f\n\16\n\u0127\13\n\3\n\5\n\u012a\n\n\3\n\3\n\5\n\u012e"+
- "\n\n\3\n\3\n\3\n\5\n\u0133\n\n\3\n\3\n\5\n\u0137\n\n\3\13\3\13\3\13\3"+
- "\13\7\13\u013d\n\13\f\13\16\13\u0140\13\13\3\f\5\f\u0143\n\f\3\f\3\f\3"+
- "\f\7\f\u0148\n\f\f\f\16\f\u014b\13\f\3\r\3\r\3\16\3\16\3\16\3\16\7\16"+
- "\u0153\n\16\f\16\16\16\u0156\13\16\5\16\u0158\n\16\3\16\3\16\5\16\u015c"+
- "\n\16\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\21\3\21\5\21\u0168\n\21"+
- "\3\21\5\21\u016b\n\21\3\22\3\22\7\22\u016f\n\22\f\22\16\22\u0172\13\22"+
- "\3\23\3\23\3\23\3\23\5\23\u0178\n\23\3\23\3\23\3\23\3\23\3\23\5\23\u017f"+
- "\n\23\3\24\5\24\u0182\n\24\3\24\3\24\5\24\u0186\n\24\3\24\3\24\5\24\u018a"+
- "\n\24\3\24\3\24\5\24\u018e\n\24\5\24\u0190\n\24\3\25\3\25\3\25\3\25\3"+
- "\25\3\25\3\25\7\25\u0199\n\25\f\25\16\25\u019c\13\25\3\25\3\25\5\25\u01a0"+
- "\n\25\3\26\5\26\u01a3\n\26\3\26\3\26\5\26\u01a7\n\26\3\26\5\26\u01aa\n"+
- "\26\3\26\3\26\3\26\3\26\5\26\u01b0\n\26\3\26\5\26\u01b3\n\26\3\26\3\26"+
- "\3\26\3\26\5\26\u01b9\n\26\3\26\5\26\u01bc\n\26\5\26\u01be\n\26\3\27\3"+
- "\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3"+
- "\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3"+
- "\30\3\30\3\30\3\30\5\30\u01e1\n\30\3\30\3\30\3\30\3\30\3\30\3\30\7\30"+
- "\u01e9\n\30\f\30\16\30\u01ec\13\30\3\31\3\31\7\31\u01f0\n\31\f\31\16\31"+
- "\u01f3\13\31\3\32\3\32\5\32\u01f7\n\32\3\33\5\33\u01fa\n\33\3\33\3\33"+
- "\3\33\3\33\3\33\3\33\5\33\u0202\n\33\3\33\3\33\3\33\3\33\3\33\7\33\u0209"+
- "\n\33\f\33\16\33\u020c\13\33\3\33\3\33\3\33\5\33\u0211\n\33\3\33\3\33"+
- "\3\33\3\33\3\33\3\33\5\33\u0219\n\33\3\33\3\33\3\33\5\33\u021e\n\33\3"+
- "\33\3\33\3\33\3\33\5\33\u0224\n\33\3\33\5\33\u0227\n\33\3\34\3\34\3\34"+
- "\3\35\3\35\5\35\u022e\n\35\3\36\3\36\3\36\3\36\3\36\3\36\5\36\u0236\n"+
- "\36\3\37\3\37\3\37\3\37\5\37\u023c\n\37\3\37\3\37\3\37\3\37\3\37\3\37"+
- "\3\37\3\37\3\37\3\37\7\37\u0248\n\37\f\37\16\37\u024b\13\37\3 \3 \3 \3"+
- " \3 \3 \3 \3 \5 \u0255\n \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \5 \u0264"+
- "\n \3 \6 \u0267\n \r \16 \u0268\3 \3 \5 \u026d\n \3 \3 \5 \u0271\n \3"+
- " \3 \3 \7 \u0276\n \f \16 \u0279\13 \3!\3!\3!\5!\u027e\n!\3\"\3\"\3\""+
- "\3\"\3\"\3\"\3\"\3\"\3\"\3\"\5\"\u028a\n\"\3#\3#\3#\3#\3#\3#\3#\3$\3$"+
- "\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\5%\u029f\n%\3&\3&\3&\3&\3&\3&\3&\3\'\3"+
- "\'\3\'\3\'\3\'\5\'\u02ad\n\'\3(\3(\3(\5(\u02b2\n(\3(\3(\3(\7(\u02b7\n"+
- "(\f(\16(\u02ba\13(\5(\u02bc\n(\3(\3(\3)\3)\3)\5)\u02c3\n)\3*\3*\3*\3*"+
- "\3*\6*\u02ca\n*\r*\16*\u02cb\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3"+
- "*\3*\3*\3*\5*\u02df\n*\3+\3+\3,\3,\3-\3-\5-\u02e7\n-\3-\3-\5-\u02eb\n"+
- "-\3-\3-\3-\5-\u02f0\n-\3.\3.\3/\3/\3\60\3\60\3\60\7\60\u02f9\n\60\f\60"+
- "\16\60\u02fc\13\60\3\60\3\60\3\61\3\61\5\61\u0302\n\61\3\62\3\62\3\62"+
- "\5\62\u0307\n\62\3\62\3\62\3\62\3\62\5\62\u030d\n\62\3\62\5\62\u0310\n"+
- "\62\3\63\3\63\5\63\u0314\n\63\3\64\3\64\3\64\5\64\u0319\n\64\3\65\3\65"+
- "\5\65\u031d\n\65\3\66\3\66\3\67\3\67\3\67\3\67\3\67\38\38\38\2\5.<>9\2"+
- "\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJL"+
- "NPRTVXZ\\^`bdfhjln\2\22\b\2\7\7\t\t\"\"<ARSde\3\2}~\30\2\b\t\23\24"+
- "\26\31\33\33\"\"$$\'(+-\60\60\65\6588;<>>@@GGKMORUVXY]^``dd\u038b\2p\3"+
- "\2\2\2\4s\3\2\2\2\6\u00e7\3\2\2\2\b\u00f2\3\2\2\2\n\u00f6\3\2\2\2\f\u010b"+
- "\3\2\2\2\16\u0112\3\2\2\2\20\u0114\3\2\2\2\22\u011c\3\2\2\2\24\u0138\3"+
- "\2\2\2\26\u0142\3\2\2\2\30\u014c\3\2\2\2\32\u015b\3\2\2\2\34\u015d\3\2"+
- "\2\2\36\u0163\3\2\2\2 \u0165\3\2\2\2\"\u016c\3\2\2\2$\u017e\3\2\2\2&\u018f"+
- "\3\2\2\2(\u019f\3\2\2\2*\u01bd\3\2\2\2,\u01bf\3\2\2\2.\u01e0\3\2\2\2\60"+
- "\u01f1\3\2\2\2\62\u01f4\3\2\2\2\64\u0226\3\2\2\2\66\u0228\3\2\2\28\u022b"+
- "\3\2\2\2:\u0235\3\2\2\2<\u023b\3\2\2\2>\u0270\3\2\2\2@\u027d\3\2\2\2B"+
- "\u0289\3\2\2\2D\u028b\3\2\2\2F\u0292\3\2\2\2H\u029e\3\2\2\2J\u02a0\3\2"+
- "\2\2L\u02ac\3\2\2\2N\u02ae\3\2\2\2P\u02c2\3\2\2\2R\u02de\3\2\2\2T\u02e0"+
- "\3\2\2\2V\u02e2\3\2\2\2X\u02e4\3\2\2\2Z\u02f1\3\2\2\2\\\u02f3\3\2\2\2"+
- "^\u02fa\3\2\2\2`\u0301\3\2\2\2b\u030f\3\2\2\2d\u0313\3\2\2\2f\u0318\3"+
- "\2\2\2h\u031c\3\2\2\2j\u031e\3\2\2\2l\u0320\3\2\2\2n\u0325\3\2\2\2pq\5"+
- "\6\4\2qr\7\2\2\3r\3\3\2\2\2st\5,\27\2tu\7\2\2\3u\5\3\2\2\2v\u00e8\5\b"+
- "\5\2w\u0085\7$\2\2x\u0081\7\3\2\2yz\7M\2\2z\u0080\t\2\2\2{|\7(\2\2|\u0080"+
- "\t\3\2\2}~\7`\2\2~\u0080\5V,\2\177y\3\2\2\2\177{\3\2\2\2\177}\3\2\2\2"+
- "\u0080\u0083\3\2\2\2\u0081\177\3\2\2\2\u0081\u0082\3\2\2\2\u0082\u0084"+
- "\3\2\2\2\u0083\u0081\3\2\2\2\u0084\u0086\7\4\2\2\u0085x\3\2\2\2\u0085"+
- "\u0086\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u00e8\5\6\4\2\u0088\u0094\7\33"+
- "\2\2\u0089\u0090\7\3\2\2\u008a\u008b\7M\2\2\u008b\u008f\t\4\2\2\u008c"+
- "\u008d\7(\2\2\u008d\u008f\t\3\2\2\u008e\u008a\3\2\2\2\u008e\u008c\3\2"+
- "\2\2\u008f\u0092\3\2\2\2\u0090\u008e\3\2\2\2\u0090\u0091\3\2\2\2\u0091"+
- "\u0093\3\2\2\2\u0092\u0090\3\2\2\2\u0093\u0095\7\4\2\2\u0094\u0089\3\2"+
- "\2\2\u0094\u0095\3\2\2\2\u0095\u0096\3\2\2\2\u0096\u00e8\5\6\4\2\u0097"+
- "\u0098\7U\2\2\u0098\u009b\7X\2\2\u0099\u009a\7\63\2\2\u009a\u009c\7*\2"+
- "\2\u009b\u0099\3\2\2\2\u009b\u009c\3\2\2\2\u009c\u009f\3\2\2\2\u009d\u00a0"+
- "\5\66\34\2\u009e\u00a0\5b\62\2\u009f\u009d\3\2\2\2\u009f\u009e\3\2\2\2"+
- "\u009f\u00a0\3\2\2\2\u00a0\u00e8\3\2\2\2\u00a1\u00a2\7U\2\2\u00a2\u00a5"+
- "\7\24\2\2\u00a3\u00a4\7\63\2\2\u00a4\u00a6\7*\2\2\u00a5\u00a3\3\2\2\2"+
- "\u00a5\u00a6\3\2\2\2\u00a6\u00a7\3\2\2\2\u00a7\u00aa\t\5\2\2\u00a8\u00ab"+
- "\5\66\34\2\u00a9\u00ab\5b\62\2\u00aa\u00a8\3\2\2\2\u00aa\u00a9\3\2\2\2"+
- "\u00ab\u00e8\3\2\2\2\u00ac\u00af\t\6\2\2\u00ad\u00ae\7\63\2\2\u00ae\u00b0"+
- "\7*\2\2\u00af\u00ad\3\2\2\2\u00af\u00b0\3\2\2\2\u00b0\u00b3\3\2\2\2\u00b1"+
- "\u00b4\5\66\34\2\u00b2\u00b4\5b\62\2\u00b3\u00b1\3\2\2\2\u00b3\u00b2\3"+
- "\2\2\2\u00b4\u00e8\3\2\2\2\u00b5\u00b6\7U\2\2\u00b6\u00b8\7,\2\2\u00b7"+
- "\u00b9\5\66\34\2\u00b8\u00b7\3\2\2\2\u00b8\u00b9\3\2\2\2\u00b9\u00e8\3"+
- "\2\2\2\u00ba\u00bb\7U\2\2\u00bb\u00e8\7Q\2\2\u00bc\u00bd\7V\2\2\u00bd"+
- "\u00c0\7X\2\2\u00be\u00bf\7\22\2\2\u00bf\u00c1\5\66\34\2\u00c0\u00be\3"+
- "\2\2\2\u00c0\u00c1\3\2\2\2\u00c1\u00c4\3\2\2\2\u00c2\u00c5\5\66\34\2\u00c3"+
- "\u00c5\5b\62\2\u00c4\u00c2\3\2\2\2\u00c4\u00c3\3\2\2\2\u00c4\u00c5\3\2"+
- "\2\2\u00c5\u00cf\3\2\2\2\u00c6\u00c7\7]\2\2\u00c7\u00cc\5j\66\2\u00c8"+
- "\u00c9\7\5\2\2\u00c9\u00cb\5j\66\2\u00ca\u00c8\3\2\2\2\u00cb\u00ce\3\2"+
- "\2\2\u00cc\u00ca\3\2\2\2\u00cc\u00cd\3\2\2\2\u00cd\u00d0\3\2\2\2\u00ce"+
- "\u00cc\3\2\2\2\u00cf\u00c6\3\2\2\2\u00cf\u00d0\3\2\2\2\u00d0\u00e8\3\2"+
- "\2\2\u00d1\u00d2\7V\2\2\u00d2\u00d5\7\24\2\2\u00d3\u00d4\7\22\2\2\u00d4"+
- "\u00d6\5j\66\2\u00d5\u00d3\3\2\2\2\u00d5\u00d6\3\2\2\2\u00d6\u00da\3\2"+
- "\2\2\u00d7\u00d8\7W\2\2\u00d8\u00db\5\66\34\2\u00d9\u00db\5b\62\2\u00da"+
- "\u00d7\3\2\2\2\u00da\u00d9\3\2\2\2\u00da\u00db\3\2\2\2\u00db\u00dd\3\2"+
- "\2\2\u00dc\u00de\5\66\34\2\u00dd\u00dc\3\2\2\2\u00dd\u00de\3\2\2\2\u00de"+
- "\u00e8\3\2\2\2\u00df\u00e0\7V\2\2\u00e0\u00e5\7^\2\2\u00e1\u00e3\t\7\2"+
- "\2\u00e2\u00e1\3\2\2\2\u00e2\u00e3\3\2\2\2\u00e3\u00e4\3\2\2\2\u00e4\u00e6"+
- "\5h\65\2\u00e5\u00e2\3\2\2\2\u00e5\u00e6\3\2\2\2\u00e6\u00e8\3\2\2\2\u00e7"+
- "v\3\2\2\2\u00e7w\3\2\2\2\u00e7\u0088\3\2\2\2\u00e7\u0097\3\2\2\2\u00e7"+
- "\u00a1\3\2\2\2\u00e7\u00ac\3\2\2\2\u00e7\u00b5\3\2\2\2\u00e7\u00ba\3\2"+
- "\2\2\u00e7\u00bc\3\2\2\2\u00e7\u00d1\3\2\2\2\u00e7\u00df\3\2\2\2\u00e8"+
- "\7\3\2\2\2\u00e9\u00ea\7c\2\2\u00ea\u00ef\5\34\17\2\u00eb\u00ec\7\5\2"+
- "\2\u00ec\u00ee\5\34\17\2\u00ed\u00eb\3\2\2\2\u00ee\u00f1\3\2\2\2\u00ef"+
- "\u00ed\3\2\2\2\u00ef\u00f0\3\2\2\2\u00f0\u00f3\3\2\2\2\u00f1\u00ef\3\2"+
- "\2\2\u00f2\u00e9\3\2\2\2\u00f2\u00f3\3\2\2\2\u00f3\u00f4\3\2\2\2\u00f4"+
- "\u00f5\5\n\6\2\u00f5\t\3\2\2\2\u00f6\u0101\5\16\b\2\u00f7\u00f8\7I\2\2"+
- "\u00f8\u00f9\7\17\2\2\u00f9\u00fe\5\20\t\2\u00fa\u00fb\7\5\2\2\u00fb\u00fd"+
- "\5\20\t\2\u00fc\u00fa\3\2\2\2\u00fd\u0100\3\2\2\2\u00fe\u00fc\3\2\2\2"+
- "\u00fe\u00ff\3\2\2\2\u00ff\u0102\3\2\2\2\u0100\u00fe\3\2\2\2\u0101\u00f7"+
- "\3\2\2\2\u0101\u0102\3\2\2\2\u0102\u0104\3\2\2\2\u0103\u0105\5\f\7\2\u0104"+
- "\u0103\3\2\2\2\u0104\u0105\3\2\2\2\u0105\13\3\2\2\2\u0106\u0107\7;\2\2"+
- "\u0107\u010c\t\b\2\2\u0108\u0109\7h\2\2\u0109\u010a\t\b\2\2\u010a\u010c"+
- "\7m\2\2\u010b\u0106\3\2\2\2\u010b\u0108\3\2\2\2\u010c\r\3\2\2\2\u010d"+
- "\u0113\5\22\n\2\u010e\u010f\7\3\2\2\u010f\u0110\5\n\6\2\u0110\u0111\7"+
- "\4\2\2\u0111\u0113\3\2\2\2\u0112\u010d\3\2\2\2\u0112\u010e\3\2\2\2\u0113"+
- "\17\3\2\2\2\u0114\u0116\5,\27\2\u0115\u0117\t\t\2\2\u0116\u0115\3\2\2"+
- "\2\u0116\u0117\3\2\2\2\u0117\u011a\3\2\2\2\u0118\u0119\7E\2\2\u0119\u011b"+
- "\t\n\2\2\u011a\u0118\3\2\2\2\u011a\u011b\3\2\2\2\u011b\21\3\2\2\2\u011c"+
- "\u011e\7T\2\2\u011d\u011f\5\36\20\2\u011e\u011d\3\2\2\2\u011e\u011f\3"+
- "\2\2\2\u011f\u0120\3\2\2\2\u0120\u0125\5 \21\2\u0121\u0122\7\5\2\2\u0122"+
- "\u0124\5 \21\2\u0123\u0121\3\2\2\2\u0124\u0127\3\2\2\2\u0125\u0123\3\2"+
- "\2\2\u0125\u0126\3\2\2\2\u0126\u0129\3\2\2\2\u0127\u0125\3\2\2\2\u0128"+
- "\u012a\5\24\13\2\u0129\u0128\3\2\2\2\u0129\u012a\3\2\2\2\u012a\u012d\3"+
- "\2\2\2\u012b\u012c\7b\2\2\u012c\u012e\5.\30\2\u012d\u012b\3\2\2\2\u012d"+
- "\u012e\3\2\2\2\u012e\u0132\3\2\2\2\u012f\u0130\7.\2\2\u0130\u0131\7\17"+
- "\2\2\u0131\u0133\5\26\f\2\u0132\u012f\3\2\2\2\u0132\u0133\3\2\2\2\u0133"+
- "\u0136\3\2\2\2\u0134\u0135\7/\2\2\u0135\u0137\5.\30\2\u0136\u0134\3\2"+
- "\2\2\u0136\u0137\3\2\2\2\u0137\23\3\2\2\2\u0138\u0139\7)\2\2\u0139\u013e"+
- "\5\"\22\2\u013a\u013b\7\5\2\2\u013b\u013d\5\"\22\2\u013c\u013a\3\2\2\2"+
- "\u013d\u0140\3\2\2\2\u013e\u013c\3\2\2\2\u013e\u013f\3\2\2\2\u013f\25"+
- "\3\2\2\2\u0140\u013e\3\2\2\2\u0141\u0143\5\36\20\2\u0142\u0141\3\2\2\2"+
- "\u0142\u0143\3\2\2\2\u0143\u0144\3\2\2\2\u0144\u0149\5\30\r\2\u0145\u0146"+
- "\7\5\2\2\u0146\u0148\5\30\r\2\u0147\u0145\3\2\2\2\u0148\u014b\3\2\2\2"+
- "\u0149\u0147\3\2\2\2\u0149\u014a\3\2\2\2\u014a\27\3\2\2\2\u014b\u0149"+
- "\3\2\2\2\u014c\u014d\5\32\16\2\u014d\31\3\2\2\2\u014e\u0157\7\3\2\2\u014f"+
- "\u0154\5,\27\2\u0150\u0151\7\5\2\2\u0151\u0153\5,\27\2\u0152\u0150\3\2"+
- "\2\2\u0153\u0156\3\2\2\2\u0154\u0152\3\2\2\2\u0154\u0155\3\2\2\2\u0155"+
- "\u0158\3\2\2\2\u0156\u0154\3\2\2\2\u0157\u014f\3\2\2\2\u0157\u0158\3\2"+
- "\2\2\u0158\u0159\3\2\2\2\u0159\u015c\7\4\2\2\u015a\u015c\5,\27\2\u015b"+
- "\u014e\3\2\2\2\u015b\u015a\3\2\2\2\u015c\33\3\2\2\2\u015d\u015e\5`\61"+
- "\2\u015e\u015f\7\f\2\2\u015f\u0160\7\3\2\2\u0160\u0161\5\n\6\2\u0161\u0162"+
- "\7\4\2\2\u0162\35\3\2\2\2\u0163\u0164\t\13\2\2\u0164\37\3\2\2\2\u0165"+
- "\u016a\5,\27\2\u0166\u0168\7\f\2\2\u0167\u0166\3\2\2\2\u0167\u0168\3\2"+
- "\2\2\u0168\u0169\3\2\2\2\u0169\u016b\5`\61\2\u016a\u0167\3\2\2\2\u016a"+
- "\u016b\3\2\2\2\u016b!\3\2\2\2\u016c\u0170\5*\26\2\u016d\u016f\5$\23\2"+
- "\u016e\u016d\3\2\2\2\u016f\u0172\3\2\2\2\u0170\u016e\3\2\2\2\u0170\u0171"+
- "\3\2\2\2\u0171#\3\2\2\2\u0172\u0170\3\2\2\2\u0173\u0174\5&\24\2\u0174"+
- "\u0175\7\67\2\2\u0175\u0177\5*\26\2\u0176\u0178\5(\25\2\u0177\u0176\3"+
- "\2\2\2\u0177\u0178\3\2\2\2\u0178\u017f\3\2\2\2\u0179\u017a\7B\2\2\u017a"+
- "\u017b\5&\24\2\u017b\u017c\7\67\2\2\u017c\u017d\5*\26\2\u017d\u017f\3"+
- "\2\2\2\u017e\u0173\3\2\2\2\u017e\u0179\3\2\2\2\u017f%\3\2\2\2\u0180\u0182"+
- "\7\64\2\2\u0181\u0180\3\2\2\2\u0181\u0182\3\2\2\2\u0182\u0190\3\2\2\2"+
- "\u0183\u0185\79\2\2\u0184\u0186\7J\2\2\u0185\u0184\3\2\2\2\u0185\u0186"+
- "\3\2\2\2\u0186\u0190\3\2\2\2\u0187\u0189\7N\2\2\u0188\u018a\7J\2\2\u0189"+
- "\u0188\3\2\2\2\u0189\u018a\3\2\2\2\u018a\u0190\3\2\2\2\u018b\u018d\7+"+
- "\2\2\u018c\u018e\7J\2\2\u018d\u018c\3\2\2\2\u018d\u018e\3\2\2\2\u018e"+
- "\u0190\3\2\2\2\u018f\u0181\3\2\2\2\u018f\u0183\3\2\2\2\u018f\u0187\3\2"+
- "\2\2\u018f\u018b\3\2\2\2\u0190\'\3\2\2\2\u0191\u0192\7F\2\2\u0192\u01a0"+
- "\5.\30\2\u0193\u0194\7_\2\2\u0194\u0195\7\3\2\2\u0195\u019a\5`\61\2\u0196"+
- "\u0197\7\5\2\2\u0197\u0199\5`\61\2\u0198\u0196\3\2\2\2\u0199\u019c\3\2"+
- "\2\2\u019a\u0198\3\2\2\2\u019a\u019b\3\2\2\2\u019b\u019d\3\2\2\2\u019c"+
- "\u019a\3\2\2\2\u019d\u019e\7\4\2\2\u019e\u01a0\3\2\2\2\u019f\u0191\3\2"+
- "\2\2\u019f\u0193\3\2\2\2\u01a0)\3\2\2\2\u01a1\u01a3\7*\2\2\u01a2\u01a1"+
- "\3\2\2\2\u01a2\u01a3\3\2\2\2\u01a3\u01a4\3\2\2\2\u01a4\u01a9\5b\62\2\u01a5"+
- "\u01a7\7\f\2\2\u01a6\u01a5\3\2\2\2\u01a6\u01a7\3\2\2\2\u01a7\u01a8\3\2"+
- "\2\2\u01a8\u01aa\5^\60\2\u01a9\u01a6\3\2\2\2\u01a9\u01aa\3\2\2\2\u01aa"+
- "\u01be\3\2\2\2\u01ab\u01ac\7\3\2\2\u01ac\u01ad\5\n\6\2\u01ad\u01b2\7\4"+
- "\2\2\u01ae\u01b0\7\f\2\2\u01af\u01ae\3\2\2\2\u01af\u01b0\3\2\2\2\u01b0"+
- "\u01b1\3\2\2\2\u01b1\u01b3\5^\60\2\u01b2\u01af\3\2\2\2\u01b2\u01b3\3\2"+
- "\2\2\u01b3\u01be\3\2\2\2\u01b4\u01b5\7\3\2\2\u01b5\u01b6\5\"\22\2\u01b6"+
- "\u01bb\7\4\2\2\u01b7\u01b9\7\f\2\2\u01b8\u01b7\3\2\2\2\u01b8\u01b9\3\2"+
- "\2\2\u01b9\u01ba\3\2\2\2\u01ba\u01bc\5^\60\2\u01bb\u01b8\3\2\2\2\u01bb"+
- "\u01bc\3\2\2\2\u01bc\u01be\3\2\2\2\u01bd\u01a2\3\2\2\2\u01bd\u01ab\3\2"+
- "\2\2\u01bd\u01b4\3\2\2\2\u01be+\3\2\2\2\u01bf\u01c0\5.\30\2\u01c0-\3\2"+
- "\2\2\u01c1\u01c2\b\30\1\2\u01c2\u01c3\7C\2\2\u01c3\u01e1\5.\30\n\u01c4"+
- "\u01c5\7#\2\2\u01c5\u01c6\7\3\2\2\u01c6\u01c7\5\b\5\2\u01c7\u01c8\7\4"+
- "\2\2\u01c8\u01e1\3\2\2\2\u01c9\u01ca\7P\2\2\u01ca\u01cb\7\3\2\2\u01cb"+
- "\u01cc\5j\66\2\u01cc\u01cd\5\60\31\2\u01cd\u01ce\7\4\2\2\u01ce\u01e1\3"+
- "\2\2\2\u01cf\u01d0\7=\2\2\u01d0\u01d1\7\3\2\2\u01d1\u01d2\5^\60\2\u01d2"+
- "\u01d3\7\5\2\2\u01d3\u01d4\5j\66\2\u01d4\u01d5\5\60\31\2\u01d5\u01d6\7"+
- "\4\2\2\u01d6\u01e1\3\2\2\2\u01d7\u01d8\7=\2\2\u01d8\u01d9\7\3\2\2\u01d9"+
- "\u01da\5j\66\2\u01da\u01db\7\5\2\2\u01db\u01dc\5j\66\2\u01dc\u01dd\5\60"+
- "\31\2\u01dd\u01de\7\4\2\2\u01de\u01e1\3\2\2\2\u01df\u01e1\5\62\32\2\u01e0"+
- "\u01c1\3\2\2\2\u01e0\u01c4\3\2\2\2\u01e0\u01c9\3\2\2\2\u01e0\u01cf\3\2"+
- "\2\2\u01e0\u01d7\3\2\2\2\u01e0\u01df\3\2\2\2\u01e1\u01ea\3\2\2\2\u01e2"+
- "\u01e3\f\4\2\2\u01e3\u01e4\7\n\2\2\u01e4\u01e9\5.\30\5\u01e5\u01e6\f\3"+
- "\2\2\u01e6\u01e7\7H\2\2\u01e7\u01e9\5.\30\4\u01e8\u01e2\3\2\2\2\u01e8"+
- "\u01e5\3\2\2\2\u01e9\u01ec\3\2\2\2\u01ea\u01e8\3\2\2\2\u01ea\u01eb\3\2"+
- "\2\2\u01eb/\3\2\2\2\u01ec\u01ea\3\2\2\2\u01ed\u01ee\7\5\2\2\u01ee\u01f0"+
- "\5j\66\2\u01ef\u01ed\3\2\2\2\u01f0\u01f3\3\2\2\2\u01f1\u01ef\3\2\2\2\u01f1"+
- "\u01f2\3\2\2\2\u01f2\61\3\2\2\2\u01f3\u01f1\3\2\2\2\u01f4\u01f6\5<\37"+
- "\2\u01f5\u01f7\5\64\33\2\u01f6\u01f5\3\2\2\2\u01f6\u01f7\3\2\2\2\u01f7"+
- "\63\3\2\2\2\u01f8\u01fa\7C\2\2\u01f9\u01f8\3\2\2\2\u01f9\u01fa\3\2\2\2"+
- "\u01fa\u01fb\3\2\2\2\u01fb\u01fc\7\16\2\2\u01fc\u01fd\5<\37\2\u01fd\u01fe"+
- "\7\n\2\2\u01fe\u01ff\5<\37\2\u01ff\u0227\3\2\2\2\u0200\u0202\7C\2\2\u0201"+
- "\u0200\3\2\2\2\u0201\u0202\3\2\2\2\u0202\u0203\3\2\2\2\u0203\u0204\7\62"+
- "\2\2\u0204\u0205\7\3\2\2\u0205\u020a\5<\37\2\u0206\u0207\7\5\2\2\u0207"+
- "\u0209\5<\37\2\u0208\u0206\3\2\2\2\u0209\u020c\3\2\2\2\u020a\u0208\3\2"+
- "\2\2\u020a\u020b\3\2\2\2\u020b\u020d\3\2\2\2\u020c\u020a\3\2\2\2\u020d"+
- "\u020e\7\4\2\2\u020e\u0227\3\2\2\2\u020f\u0211\7C\2\2\u0210\u020f\3\2"+
- "\2\2\u0210\u0211\3\2\2\2\u0211\u0212\3\2\2\2\u0212\u0213\7\62\2\2\u0213"+
- "\u0214\7\3\2\2\u0214\u0215\5\b\5\2\u0215\u0216\7\4\2\2\u0216\u0227\3\2"+
- "\2\2\u0217\u0219\7C\2\2\u0218\u0217\3\2\2\2\u0218\u0219\3\2\2\2\u0219"+
- "\u021a\3\2\2\2\u021a\u021b\7:\2\2\u021b\u0227\58\35\2\u021c\u021e\7C\2"+
- "\2\u021d\u021c\3\2\2\2\u021d\u021e\3\2\2\2\u021e\u021f\3\2\2\2\u021f\u0220"+
- "\7O\2\2\u0220\u0227\5j\66\2\u0221\u0223\7\66\2\2\u0222\u0224\7C\2\2\u0223"+
- "\u0222\3\2\2\2\u0223\u0224\3\2\2\2\u0224\u0225\3\2\2\2\u0225\u0227\7D"+
- "\2\2\u0226\u01f9\3\2\2\2\u0226\u0201\3\2\2\2\u0226\u0210\3\2\2\2\u0226"+
- "\u0218\3\2\2\2\u0226\u021d\3\2\2\2\u0226\u0221\3\2\2\2\u0227\65\3\2\2"+
- "\2\u0228\u0229\7:\2\2\u0229\u022a\58\35\2\u022a\67\3\2\2\2\u022b\u022d"+
- "\5j\66\2\u022c\u022e\5:\36\2\u022d\u022c\3\2\2\2\u022d\u022e\3\2\2\2\u022e"+
- "9\3\2\2\2\u022f\u0230\7!\2\2\u0230\u0236\5j\66\2\u0231\u0232\7f\2\2\u0232"+
- "\u0233\5j\66\2\u0233\u0234\7m\2\2\u0234\u0236\3\2\2\2\u0235\u022f\3\2"+
- "\2\2\u0235\u0231\3\2\2\2\u0236;\3\2\2\2\u0237\u0238\b\37\1\2\u0238\u023c"+
- "\5> \2\u0239\u023a\t\7\2\2\u023a\u023c\5<\37\6\u023b\u0237\3\2\2\2\u023b"+
- "\u0239\3\2\2\2\u023c\u0249\3\2\2\2\u023d\u023e\f\5\2\2\u023e\u023f\t\f"+
- "\2\2\u023f\u0248\5<\37\6\u0240\u0241\f\4\2\2\u0241\u0242\t\7\2\2\u0242"+
- "\u0248\5<\37\5\u0243\u0244\f\3\2\2\u0244\u0245\5T+\2\u0245\u0246\5<\37"+
- "\4\u0246\u0248\3\2\2\2\u0247\u023d\3\2\2\2\u0247\u0240\3\2\2\2\u0247\u0243"+
- "\3\2\2\2\u0248\u024b\3\2\2\2\u0249\u0247\3\2\2\2\u0249\u024a\3\2\2\2\u024a"+
- "=\3\2\2\2\u024b\u0249\3\2\2\2\u024c\u024d\b \1\2\u024d\u0271\5B\"\2\u024e"+
- "\u0271\5H%\2\u024f\u0271\5@!\2\u0250\u0271\5R*\2\u0251\u0252\5^\60\2\u0252"+
- "\u0253\7|\2\2\u0253\u0255\3\2\2\2\u0254\u0251\3\2\2\2\u0254\u0255\3\2"+
- "\2\2\u0255\u0256\3\2\2\2\u0256\u0271\7w\2\2\u0257\u0271\5L\'\2\u0258\u0259"+
- "\7\3\2\2\u0259\u025a\5\b\5\2\u025a\u025b\7\4\2\2\u025b\u0271\3\2\2\2\u025c"+
- "\u0271\5^\60\2\u025d\u025e\7\3\2\2\u025e\u025f\5,\27\2\u025f\u0260\7\4"+
- "\2\2\u0260\u0271\3\2\2\2\u0261\u0263\7\20\2\2\u0262\u0264\5.\30\2\u0263"+
- "\u0262\3\2\2\2\u0263\u0264\3\2\2\2\u0264\u0266\3\2\2\2\u0265\u0267\5l"+
- "\67\2\u0266\u0265\3\2\2\2\u0267\u0268\3\2\2\2\u0268\u0266\3\2\2\2\u0268"+
- "\u0269\3\2\2\2\u0269\u026c\3\2\2\2\u026a\u026b\7\37\2\2\u026b\u026d\5"+
- ".\30\2\u026c\u026a\3\2\2\2\u026c\u026d\3\2\2\2\u026d\u026e\3\2\2\2\u026e"+
- "\u026f\7 \2\2\u026f\u0271\3\2\2\2\u0270\u024c\3\2\2\2\u0270\u024e\3\2"+
- "\2\2\u0270\u024f\3\2\2\2\u0270\u0250\3\2\2\2\u0270\u0254\3\2\2\2\u0270"+
- "\u0257\3\2\2\2\u0270\u0258\3\2\2\2\u0270\u025c\3\2\2\2\u0270\u025d\3\2"+
- "\2\2\u0270\u0261\3\2\2\2\u0271\u0277\3\2\2\2\u0272\u0273\f\f\2\2\u0273"+
- "\u0274\7z\2\2\u0274\u0276\5\\/\2\u0275\u0272\3\2\2\2\u0276\u0279\3\2\2"+
- "\2\u0277\u0275\3\2\2\2\u0277\u0278\3\2\2\2\u0278?\3\2\2\2\u0279\u0277"+
- "\3\2\2\2\u027a\u027e\7\30\2\2\u027b\u027e\7\26\2\2\u027c\u027e\7\27\2"+
- "\2\u027d\u027a\3\2\2\2\u027d\u027b\3\2\2\2\u027d\u027c\3\2\2\2\u027eA"+
- "\3\2\2\2\u027f\u028a\5D#\2\u0280\u0281\7g\2\2\u0281\u0282\5D#\2\u0282"+
- "\u0283\7m\2\2\u0283\u028a\3\2\2\2\u0284\u028a\5F$\2\u0285\u0286\7g\2\2"+
- "\u0286\u0287\5F$\2\u0287\u0288\7m\2\2\u0288\u028a\3\2\2\2\u0289\u027f"+
- "\3\2\2\2\u0289\u0280\3\2\2\2\u0289\u0284\3\2\2\2\u0289\u0285\3\2\2\2\u028a"+
- "C\3\2\2\2\u028b\u028c\7\21\2\2\u028c\u028d\7\3\2\2\u028d\u028e\5,\27\2"+
- "\u028e\u028f\7\f\2\2\u028f\u0290\5\\/\2\u0290\u0291\7\4\2\2\u0291E\3\2"+
- "\2\2\u0292\u0293\7\25\2\2\u0293\u0294\7\3\2\2\u0294\u0295\5,\27\2\u0295"+
- "\u0296\7\5\2\2\u0296\u0297\5\\/\2\u0297\u0298\7\4\2\2\u0298G\3\2\2\2\u0299"+
- "\u029f\5J&\2\u029a\u029b\7g\2\2\u029b\u029c\5J&\2\u029c\u029d\7m\2\2\u029d"+
- "\u029f\3\2\2\2\u029e\u0299\3\2\2\2\u029e\u029a\3\2\2\2\u029fI\3\2\2\2"+
- "\u02a0\u02a1\7%\2\2\u02a1\u02a2\7\3\2\2\u02a2\u02a3\5`\61\2\u02a3\u02a4"+
- "\7)\2\2\u02a4\u02a5\5<\37\2\u02a5\u02a6\7\4\2\2\u02a6K\3\2\2\2\u02a7\u02ad"+
- "\5N(\2\u02a8\u02a9\7g\2\2\u02a9\u02aa\5N(\2\u02aa\u02ab\7m\2\2\u02ab\u02ad"+
- "\3\2\2\2\u02ac\u02a7\3\2\2\2\u02ac\u02a8\3\2\2\2\u02adM\3\2\2\2\u02ae"+
- "\u02af\5P)\2\u02af\u02bb\7\3\2\2\u02b0\u02b2\5\36\20\2\u02b1\u02b0\3\2"+
- "\2\2\u02b1\u02b2\3\2\2\2\u02b2\u02b3\3\2\2\2\u02b3\u02b8\5,\27\2\u02b4"+
- "\u02b5\7\5\2\2\u02b5\u02b7\5,\27\2\u02b6\u02b4\3\2\2\2\u02b7\u02ba\3\2"+
- "\2\2\u02b8\u02b6\3\2\2\2\u02b8\u02b9\3\2\2\2\u02b9\u02bc\3\2\2\2\u02ba"+
- "\u02b8\3\2\2\2\u02bb\u02b1\3\2\2\2\u02bb\u02bc\3\2\2\2\u02bc\u02bd\3\2"+
- "\2\2\u02bd\u02be\7\4\2\2\u02beO\3\2\2\2\u02bf\u02c3\79\2\2\u02c0\u02c3"+
- "\7N\2\2\u02c1\u02c3\5`\61\2\u02c2\u02bf\3\2\2\2\u02c2\u02c0\3\2\2\2\u02c2"+
- "\u02c1\3\2\2\2\u02c3Q\3\2\2\2\u02c4\u02df\7D\2\2\u02c5\u02df\5X-\2\u02c6"+
- "\u02df\5h\65\2\u02c7\u02df\5V,\2\u02c8\u02ca\7~\2\2\u02c9\u02c8\3\2\2"+
- "\2\u02ca\u02cb\3\2\2\2\u02cb\u02c9\3\2\2\2\u02cb\u02cc\3\2\2\2\u02cc\u02df"+
- "\3\2\2\2\u02cd\u02df\7}\2\2\u02ce\u02cf\7i\2\2\u02cf\u02d0\5j\66\2\u02d0"+
- "\u02d1\7m\2\2\u02d1\u02df\3\2\2\2\u02d2\u02d3\7j\2\2\u02d3\u02d4\5j\66"+
- "\2\u02d4\u02d5\7m\2\2\u02d5\u02df\3\2\2\2\u02d6\u02d7\7k\2\2\u02d7\u02d8"+
- "\5j\66\2\u02d8\u02d9\7m\2\2\u02d9\u02df\3\2\2\2\u02da\u02db\7l\2\2\u02db"+
- "\u02dc\5j\66\2\u02dc\u02dd\7m\2\2\u02dd\u02df\3\2\2\2\u02de\u02c4\3\2"+
- "\2\2\u02de\u02c5\3\2\2\2\u02de\u02c6\3\2\2\2\u02de\u02c7\3\2\2\2\u02de"+
- "\u02c9\3\2\2\2\u02de\u02cd\3\2\2\2\u02de\u02ce\3\2\2\2\u02de\u02d2\3\2"+
- "\2\2\u02de\u02d6\3\2\2\2\u02de\u02da\3\2\2\2\u02dfS\3\2\2\2\u02e0\u02e1"+
- "\t\r\2\2\u02e1U\3\2\2\2\u02e2\u02e3\t\16\2\2\u02e3W\3\2\2\2\u02e4\u02e6"+
- "\7\65\2\2\u02e5\u02e7\t\7\2\2\u02e6\u02e5\3\2\2\2\u02e6\u02e7\3\2\2\2"+
- "\u02e7\u02ea\3\2\2\2\u02e8\u02eb\5h\65\2\u02e9\u02eb\5j\66\2\u02ea\u02e8"+
- "\3\2\2\2\u02ea\u02e9\3\2\2\2\u02eb\u02ec\3\2\2\2\u02ec\u02ef\5Z.\2\u02ed"+
- "\u02ee\7\\\2\2\u02ee\u02f0\5Z.\2\u02ef\u02ed\3\2\2\2\u02ef\u02f0\3\2\2"+
- "\2\u02f0Y\3\2\2\2\u02f1\u02f2\t\17\2\2\u02f2[\3\2\2\2\u02f3\u02f4\5`\61"+
- "\2\u02f4]\3\2\2\2\u02f5\u02f6\5`\61\2\u02f6\u02f7\7|\2\2\u02f7\u02f9\3"+
- "\2\2\2\u02f8\u02f5\3\2\2\2\u02f9\u02fc\3\2\2\2\u02fa\u02f8\3\2\2\2\u02fa"+
- "\u02fb\3\2\2\2\u02fb\u02fd\3\2\2\2\u02fc\u02fa\3\2\2\2\u02fd\u02fe\5`"+
- "\61\2\u02fe_\3\2\2\2\u02ff\u0302\5d\63\2\u0300\u0302\5f\64\2\u0301\u02ff"+
- "\3\2\2\2\u0301\u0300\3\2\2\2\u0302a\3\2\2\2\u0303\u0304\5`\61\2\u0304"+
- "\u0305\7\6\2\2\u0305\u0307\3\2\2\2\u0306\u0303\3\2\2\2\u0306\u0307\3\2"+
- "\2\2\u0307\u0308\3\2\2\2\u0308\u0310\7\u0083\2\2\u0309\u030a\5`\61\2\u030a"+
- "\u030b\7\6\2\2\u030b\u030d\3\2\2\2\u030c\u0309\3\2\2\2\u030c\u030d\3\2"+
- "\2\2\u030d\u030e\3\2\2\2\u030e\u0310\5`\61\2\u030f\u0306\3\2\2\2\u030f"+
- "\u030c\3\2\2\2\u0310c\3\2\2\2\u0311\u0314\7\u0084\2\2\u0312\u0314\7\u0085"+
- "\2\2\u0313\u0311\3\2\2\2\u0313\u0312\3\2\2\2\u0314e\3\2\2\2\u0315\u0319"+
- "\7\u0081\2\2\u0316\u0319\5n8\2\u0317\u0319\7\u0082\2\2\u0318\u0315\3\2"+
- "\2\2\u0318\u0316\3\2\2\2\u0318\u0317\3\2\2\2\u0319g\3\2\2\2\u031a\u031d"+
- "\7\u0080\2\2\u031b\u031d\7\177\2\2\u031c\u031a\3\2\2\2\u031c\u031b\3\2"+
- "\2\2\u031di\3\2\2\2\u031e\u031f\t\20\2\2\u031fk\3\2\2\2\u0320\u0321\7"+
- "a\2\2\u0321\u0322\5,\27\2\u0322\u0323\7Z\2\2\u0323\u0324\5,\27\2\u0324"+
- "m\3\2\2\2\u0325\u0326\t\21\2\2\u0326o\3\2\2\2o\177\u0081\u0085\u008e\u0090"+
- "\u0094\u009b\u009f\u00a5\u00aa\u00af\u00b3\u00b8\u00c0\u00c4\u00cc\u00cf"+
- "\u00d5\u00da\u00dd\u00e2\u00e5\u00e7\u00ef\u00f2\u00fe\u0101\u0104\u010b"+
- "\u0112\u0116\u011a\u011e\u0125\u0129\u012d\u0132\u0136\u013e\u0142\u0149"+
- "\u0154\u0157\u015b\u0167\u016a\u0170\u0177\u017e\u0181\u0185\u0189\u018d"+
- "\u018f\u019a\u019f\u01a2\u01a6\u01a9\u01af\u01b2\u01b8\u01bb\u01bd\u01e0"+
- "\u01e8\u01ea\u01f1\u01f6\u01f9\u0201\u020a\u0210\u0218\u021d\u0223\u0226"+
- "\u022d\u0235\u023b\u0247\u0249\u0254\u0263\u0268\u026c\u0270\u0277\u027d"+
- "\u0289\u029e\u02ac\u02b1\u02b8\u02bb\u02c2\u02cb\u02de\u02e6\u02ea\u02ef"+
- "\u02fa\u0301\u0306\u030c\u030f\u0313\u0318\u031c";
+ "\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\3\2\3"+
+ "\2\3\2\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4\u0088\n\4\f"+
+ "\4\16\4\u008b\13\4\3\4\5\4\u008e\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4\u0097"+
+ "\n\4\f\4\16\4\u009a\13\4\3\4\5\4\u009d\n\4\3\4\3\4\3\4\3\4\3\4\5\4\u00a4"+
+ "\n\4\3\4\3\4\5\4\u00a8\n\4\3\4\3\4\3\4\3\4\5\4\u00ae\n\4\3\4\3\4\3\4\5"+
+ "\4\u00b3\n\4\3\4\3\4\3\4\5\4\u00b8\n\4\3\4\3\4\5\4\u00bc\n\4\3\4\3\4\3"+
+ "\4\5\4\u00c1\n\4\3\4\3\4\3\4\3\4\3\4\3\4\5\4\u00c9\n\4\3\4\3\4\5\4\u00cd"+
+ "\n\4\3\4\3\4\3\4\3\4\7\4\u00d3\n\4\f\4\16\4\u00d6\13\4\5\4\u00d8\n\4\3"+
+ "\4\3\4\3\4\3\4\5\4\u00de\n\4\3\4\3\4\3\4\5\4\u00e3\n\4\3\4\5\4\u00e6\n"+
+ "\4\3\4\3\4\3\4\5\4\u00eb\n\4\3\4\5\4\u00ee\n\4\5\4\u00f0\n\4\3\5\3\5\3"+
+ "\5\3\5\7\5\u00f6\n\5\f\5\16\5\u00f9\13\5\5\5\u00fb\n\5\3\5\3\5\3\6\3\6"+
+ "\3\6\3\6\3\6\3\6\7\6\u0105\n\6\f\6\16\6\u0108\13\6\5\6\u010a\n\6\3\6\5"+
+ "\6\u010d\n\6\3\7\3\7\3\7\3\7\3\7\5\7\u0114\n\7\3\b\3\b\3\b\3\b\3\b\5\b"+
+ "\u011b\n\b\3\t\3\t\5\t\u011f\n\t\3\t\3\t\5\t\u0123\n\t\3\n\3\n\5\n\u0127"+
+ "\n\n\3\n\3\n\5\n\u012b\n\n\3\n\3\n\5\n\u012f\n\n\3\n\3\n\3\n\5\n\u0134"+
+ "\n\n\3\n\3\n\5\n\u0138\n\n\3\13\3\13\3\13\3\13\7\13\u013e\n\13\f\13\16"+
+ "\13\u0141\13\13\3\13\5\13\u0144\n\13\3\f\5\f\u0147\n\f\3\f\3\f\3\f\7\f"+
+ "\u014c\n\f\f\f\16\f\u014f\13\f\3\r\3\r\3\16\3\16\3\16\3\16\7\16\u0157"+
+ "\n\16\f\16\16\16\u015a\13\16\5\16\u015c\n\16\3\16\3\16\5\16\u0160\n\16"+
+ "\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\21\3\21\3\21\7\21\u016d\n\21"+
+ "\f\21\16\21\u0170\13\21\3\22\3\22\5\22\u0174\n\22\3\22\5\22\u0177\n\22"+
+ "\3\23\3\23\7\23\u017b\n\23\f\23\16\23\u017e\13\23\3\24\3\24\3\24\3\24"+
+ "\5\24\u0184\n\24\3\24\3\24\3\24\3\24\3\24\5\24\u018b\n\24\3\25\5\25\u018e"+
+ "\n\25\3\25\3\25\5\25\u0192\n\25\3\25\3\25\5\25\u0196\n\25\3\25\3\25\5"+
+ "\25\u019a\n\25\5\25\u019c\n\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\7\26"+
+ "\u01a5\n\26\f\26\16\26\u01a8\13\26\3\26\3\26\5\26\u01ac\n\26\3\27\5\27"+
+ "\u01af\n\27\3\27\3\27\5\27\u01b3\n\27\3\27\5\27\u01b6\n\27\3\27\3\27\3"+
+ "\27\3\27\5\27\u01bc\n\27\3\27\5\27\u01bf\n\27\3\27\3\27\3\27\3\27\5\27"+
+ "\u01c5\n\27\3\27\5\27\u01c8\n\27\5\27\u01ca\n\27\3\30\3\30\3\30\3\30\3"+
+ "\30\3\30\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3\31\7\31\u01da\n\31\f\31"+
+ "\16\31\u01dd\13\31\3\32\3\32\5\32\u01e1\n\32\3\32\5\32\u01e4\n\32\3\33"+
+ "\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34"+
+ "\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34"+
+ "\3\34\3\34\3\34\3\34\5\34\u0207\n\34\3\34\3\34\3\34\3\34\3\34\3\34\7\34"+
+ "\u020f\n\34\f\34\16\34\u0212\13\34\3\35\3\35\7\35\u0216\n\35\f\35\16\35"+
+ "\u0219\13\35\3\36\3\36\5\36\u021d\n\36\3\37\5\37\u0220\n\37\3\37\3\37"+
+ "\3\37\3\37\3\37\3\37\5\37\u0228\n\37\3\37\3\37\3\37\3\37\3\37\7\37\u022f"+
+ "\n\37\f\37\16\37\u0232\13\37\3\37\3\37\3\37\5\37\u0237\n\37\3\37\3\37"+
+ "\3\37\3\37\3\37\3\37\5\37\u023f\n\37\3\37\3\37\3\37\5\37\u0244\n\37\3"+
+ "\37\3\37\3\37\3\37\5\37\u024a\n\37\3\37\5\37\u024d\n\37\3 \3 \3 \3!\3"+
+ "!\5!\u0254\n!\3\"\3\"\3\"\3\"\3\"\3\"\5\"\u025c\n\"\3#\3#\3#\3#\5#\u0262"+
+ "\n#\3#\3#\3#\3#\3#\3#\3#\3#\3#\3#\7#\u026e\n#\f#\16#\u0271\13#\3$\3$\3"+
+ "$\3$\3$\3$\3$\3$\5$\u027b\n$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\5"+
+ "$\u028a\n$\3$\6$\u028d\n$\r$\16$\u028e\3$\3$\5$\u0293\n$\3$\3$\5$\u0297"+
+ "\n$\3$\3$\3$\7$\u029c\n$\f$\16$\u029f\13$\3%\3%\3%\5%\u02a4\n%\3&\3&\3"+
+ "&\3&\3&\3&\3&\3&\3&\3&\5&\u02b0\n&\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3(\3(\3"+
+ "(\3(\3(\3(\3(\3)\3)\3)\3)\3)\5)\u02c5\n)\3*\3*\3*\3*\3*\3*\3*\3+\3+\3"+
+ "+\3+\3+\5+\u02d3\n+\3,\3,\3,\5,\u02d8\n,\3,\3,\3,\7,\u02dd\n,\f,\16,\u02e0"+
+ "\13,\5,\u02e2\n,\3,\3,\3-\3-\3-\5-\u02e9\n-\3.\3.\3.\3.\3.\6.\u02f0\n"+
+ ".\r.\16.\u02f1\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\5.\u0305"+
+ "\n.\3/\3/\3\60\3\60\3\61\3\61\5\61\u030d\n\61\3\61\3\61\5\61\u0311\n\61"+
+ "\3\61\3\61\3\61\5\61\u0316\n\61\3\62\3\62\3\63\3\63\3\64\3\64\3\64\7\64"+
+ "\u031f\n\64\f\64\16\64\u0322\13\64\3\64\3\64\3\65\3\65\5\65\u0328\n\65"+
+ "\3\66\3\66\3\66\5\66\u032d\n\66\3\66\3\66\3\66\3\66\5\66\u0333\n\66\3"+
+ "\66\5\66\u0336\n\66\3\67\3\67\5\67\u033a\n\67\38\38\38\58\u033f\n8\39"+
+ "\39\59\u0343\n9\3:\3:\3;\3;\3;\3;\3;\3<\3<\3<\2\5\66DF=\2\4\6\b\n\f\16"+
+ "\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJLNPRTVXZ\\^`bd"+
+ "fhjlnprtv\2\22\b\2\7\7\t\t\"\"==HHLL\4\2..[[\4\2\t\tHH\4\2**\63\63\3\2"+
+ "\34\35\3\2wx\4\2\7\7\u0081\u0081\4\2\r\r\34\34\4\2\'\'99\4\2\7\7\36\36"+
+ "\3\2y{\3\2pv\4\2&&]]\7\2\31\32\61\62?BTUfg\3\2\177\u0080\31\2\b\t\23\24"+
+ "\26\31\33\33\"\"$$\'\')),.\61\61\66\6699<=??AAHHLOQTWXZ[_`bbff\u03b1\2"+
+ "x\3\2\2\2\4{\3\2\2\2\6\u00ef\3\2\2\2\b\u00fa\3\2\2\2\n\u00fe\3\2\2\2\f"+
+ "\u0113\3\2\2\2\16\u011a\3\2\2\2\20\u011c\3\2\2\2\22\u0124\3\2\2\2\24\u0139"+
+ "\3\2\2\2\26\u0146\3\2\2\2\30\u0150\3\2\2\2\32\u015f\3\2\2\2\34\u0161\3"+
+ "\2\2\2\36\u0167\3\2\2\2 \u0169\3\2\2\2\"\u0171\3\2\2\2$\u0178\3\2\2\2"+
+ "&\u018a\3\2\2\2(\u019b\3\2\2\2*\u01ab\3\2\2\2,\u01c9\3\2\2\2.\u01cb\3"+
+ "\2\2\2\60\u01d6\3\2\2\2\62\u01de\3\2\2\2\64\u01e5\3\2\2\2\66\u0206\3\2"+
+ "\2\28\u0217\3\2\2\2:\u021a\3\2\2\2<\u024c\3\2\2\2>\u024e\3\2\2\2@\u0251"+
+ "\3\2\2\2B\u025b\3\2\2\2D\u0261\3\2\2\2F\u0296\3\2\2\2H\u02a3\3\2\2\2J"+
+ "\u02af\3\2\2\2L\u02b1\3\2\2\2N\u02b8\3\2\2\2P\u02c4\3\2\2\2R\u02c6\3\2"+
+ "\2\2T\u02d2\3\2\2\2V\u02d4\3\2\2\2X\u02e8\3\2\2\2Z\u0304\3\2\2\2\\\u0306"+
+ "\3\2\2\2^\u0308\3\2\2\2`\u030a\3\2\2\2b\u0317\3\2\2\2d\u0319\3\2\2\2f"+
+ "\u0320\3\2\2\2h\u0327\3\2\2\2j\u0335\3\2\2\2l\u0339\3\2\2\2n\u033e\3\2"+
+ "\2\2p\u0342\3\2\2\2r\u0344\3\2\2\2t\u0346\3\2\2\2v\u034b\3\2\2\2xy\5\6"+
+ "\4\2yz\7\2\2\3z\3\3\2\2\2{|\5\64\33\2|}\7\2\2\3}\5\3\2\2\2~\u00f0\5\b"+
+ "\5\2\177\u008d\7$\2\2\u0080\u0089\7\3\2\2\u0081\u0082\7O\2\2\u0082\u0088"+
+ "\t\2\2\2\u0083\u0084\7)\2\2\u0084\u0088\t\3\2\2\u0085\u0086\7b\2\2\u0086"+
+ "\u0088\5^\60\2\u0087\u0081\3\2\2\2\u0087\u0083\3\2\2\2\u0087\u0085\3\2"+
+ "\2\2\u0088\u008b\3\2\2\2\u0089\u0087\3\2\2\2\u0089\u008a\3\2\2\2\u008a"+
+ "\u008c\3\2\2\2\u008b\u0089\3\2\2\2\u008c\u008e\7\4\2\2\u008d\u0080\3\2"+
+ "\2\2\u008d\u008e\3\2\2\2\u008e\u008f\3\2\2\2\u008f\u00f0\5\6\4\2\u0090"+
+ "\u009c\7\33\2\2\u0091\u0098\7\3\2\2\u0092\u0093\7O\2\2\u0093\u0097\t\4"+
+ "\2\2\u0094\u0095\7)\2\2\u0095\u0097\t\3\2\2\u0096\u0092\3\2\2\2\u0096"+
+ "\u0094\3\2\2\2\u0097\u009a\3\2\2\2\u0098\u0096\3\2\2\2\u0098\u0099\3\2"+
+ "\2\2\u0099\u009b\3\2\2\2\u009a\u0098\3\2\2\2\u009b\u009d\7\4\2\2\u009c"+
+ "\u0091\3\2\2\2\u009c\u009d\3\2\2\2\u009d\u009e\3\2\2\2\u009e\u00f0\5\6"+
+ "\4\2\u009f\u00a0\7W\2\2\u00a0\u00a3\7Z\2\2\u00a1\u00a2\7\64\2\2\u00a2"+
+ "\u00a4\7+\2\2\u00a3\u00a1\3\2\2\2\u00a3\u00a4\3\2\2\2\u00a4\u00a7\3\2"+
+ "\2\2\u00a5\u00a8\5> \2\u00a6\u00a8\5j\66\2\u00a7\u00a5\3\2\2\2\u00a7\u00a6"+
+ "\3\2\2\2\u00a7\u00a8\3\2\2\2\u00a8\u00f0\3\2\2\2\u00a9\u00aa\7W\2\2\u00aa"+
+ "\u00ad\7\24\2\2\u00ab\u00ac\7\64\2\2\u00ac\u00ae\7+\2\2\u00ad\u00ab\3"+
+ "\2\2\2\u00ad\u00ae\3\2\2\2\u00ae\u00af\3\2\2\2\u00af\u00b2\t\5\2\2\u00b0"+
+ "\u00b3\5> \2\u00b1\u00b3\5j\66\2\u00b2\u00b0\3\2\2\2\u00b2\u00b1\3\2\2"+
+ "\2\u00b3\u00f0\3\2\2\2\u00b4\u00b7\t\6\2\2\u00b5\u00b6\7\64\2\2\u00b6"+
+ "\u00b8\7+\2\2\u00b7\u00b5\3\2\2\2\u00b7\u00b8\3\2\2\2\u00b8\u00bb\3\2"+
+ "\2\2\u00b9\u00bc\5> \2\u00ba\u00bc\5j\66\2\u00bb\u00b9\3\2\2\2\u00bb\u00ba"+
+ "\3\2\2\2\u00bc\u00f0\3\2\2\2\u00bd\u00be\7W\2\2\u00be\u00c0\7-\2\2\u00bf"+
+ "\u00c1\5> \2\u00c0\u00bf\3\2\2\2\u00c0\u00c1\3\2\2\2\u00c1\u00f0\3\2\2"+
+ "\2\u00c2\u00c3\7W\2\2\u00c3\u00f0\7S\2\2\u00c4\u00c5\7X\2\2\u00c5\u00c8"+
+ "\7Z\2\2\u00c6\u00c7\7\22\2\2\u00c7\u00c9\5> \2\u00c8\u00c6\3\2\2\2\u00c8"+
+ "\u00c9\3\2\2\2\u00c9\u00cc\3\2\2\2\u00ca\u00cd\5> \2\u00cb\u00cd\5j\66"+
+ "\2\u00cc\u00ca\3\2\2\2\u00cc\u00cb\3\2\2\2\u00cc\u00cd\3\2\2\2\u00cd\u00d7"+
+ "\3\2\2\2\u00ce\u00cf\7_\2\2\u00cf\u00d4\5r:\2\u00d0\u00d1\7\5\2\2\u00d1"+
+ "\u00d3\5r:\2\u00d2\u00d0\3\2\2\2\u00d3\u00d6\3\2\2\2\u00d4\u00d2\3\2\2"+
+ "\2\u00d4\u00d5\3\2\2\2\u00d5\u00d8\3\2\2\2\u00d6\u00d4\3\2\2\2\u00d7\u00ce"+
+ "\3\2\2\2\u00d7\u00d8\3\2\2\2\u00d8\u00f0\3\2\2\2\u00d9\u00da\7X\2\2\u00da"+
+ "\u00dd\7\24\2\2\u00db\u00dc\7\22\2\2\u00dc\u00de\5r:\2\u00dd\u00db\3\2"+
+ "\2\2\u00dd\u00de\3\2\2\2\u00de\u00e2\3\2\2\2\u00df\u00e0\7Y\2\2\u00e0"+
+ "\u00e3\5> \2\u00e1\u00e3\5j\66\2\u00e2\u00df\3\2\2\2\u00e2\u00e1\3\2\2"+
+ "\2\u00e2\u00e3\3\2\2\2\u00e3\u00e5\3\2\2\2\u00e4\u00e6\5> \2\u00e5\u00e4"+
+ "\3\2\2\2\u00e5\u00e6\3\2\2\2\u00e6\u00f0\3\2\2\2\u00e7\u00e8\7X\2\2\u00e8"+
+ "\u00ed\7`\2\2\u00e9\u00eb\t\7\2\2\u00ea\u00e9\3\2\2\2\u00ea\u00eb\3\2"+
+ "\2\2\u00eb\u00ec\3\2\2\2\u00ec\u00ee\5p9\2\u00ed\u00ea\3\2\2\2\u00ed\u00ee"+
+ "\3\2\2\2\u00ee\u00f0\3\2\2\2\u00ef~\3\2\2\2\u00ef\177\3\2\2\2\u00ef\u0090"+
+ "\3\2\2\2\u00ef\u009f\3\2\2\2\u00ef\u00a9\3\2\2\2\u00ef\u00b4\3\2\2\2\u00ef"+
+ "\u00bd\3\2\2\2\u00ef\u00c2\3\2\2\2\u00ef\u00c4\3\2\2\2\u00ef\u00d9\3\2"+
+ "\2\2\u00ef\u00e7\3\2\2\2\u00f0\7\3\2\2\2\u00f1\u00f2\7e\2\2\u00f2\u00f7"+
+ "\5\34\17\2\u00f3\u00f4\7\5\2\2\u00f4\u00f6\5\34\17\2\u00f5\u00f3\3\2\2"+
+ "\2\u00f6\u00f9\3\2\2\2\u00f7\u00f5\3\2\2\2\u00f7\u00f8\3\2\2\2\u00f8\u00fb"+
+ "\3\2\2\2\u00f9\u00f7\3\2\2\2\u00fa\u00f1\3\2\2\2\u00fa\u00fb\3\2\2\2\u00fb"+
+ "\u00fc\3\2\2\2\u00fc\u00fd\5\n\6\2\u00fd\t\3\2\2\2\u00fe\u0109\5\16\b"+
+ "\2\u00ff\u0100\7J\2\2\u0100\u0101\7\17\2\2\u0101\u0106\5\20\t\2\u0102"+
+ "\u0103\7\5\2\2\u0103\u0105\5\20\t\2\u0104\u0102\3\2\2\2\u0105\u0108\3"+
+ "\2\2\2\u0106\u0104\3\2\2\2\u0106\u0107\3\2\2\2\u0107\u010a\3\2\2\2\u0108"+
+ "\u0106\3\2\2\2\u0109\u00ff\3\2\2\2\u0109\u010a\3\2\2\2\u010a\u010c\3\2"+
+ "\2\2\u010b\u010d\5\f\7\2\u010c\u010b\3\2\2\2\u010c\u010d\3\2\2\2\u010d"+
+ "\13\3\2\2\2\u010e\u010f\7<\2\2\u010f\u0114\t\b\2\2\u0110\u0111\7j\2\2"+
+ "\u0111\u0112\t\b\2\2\u0112\u0114\7o\2\2\u0113\u010e\3\2\2\2\u0113\u0110"+
+ "\3\2\2\2\u0114\r\3\2\2\2\u0115\u011b\5\22\n\2\u0116\u0117\7\3\2\2\u0117"+
+ "\u0118\5\n\6\2\u0118\u0119\7\4\2\2\u0119\u011b\3\2\2\2\u011a\u0115\3\2"+
+ "\2\2\u011a\u0116\3\2\2\2\u011b\17\3\2\2\2\u011c\u011e\5\64\33\2\u011d"+
+ "\u011f\t\t\2\2\u011e\u011d\3\2\2\2\u011e\u011f\3\2\2\2\u011f\u0122\3\2"+
+ "\2\2\u0120\u0121\7F\2\2\u0121\u0123\t\n\2\2\u0122\u0120\3\2\2\2\u0122"+
+ "\u0123\3\2\2\2\u0123\21\3\2\2\2\u0124\u0126\7V\2\2\u0125\u0127\5\36\20"+
+ "\2\u0126\u0125\3\2\2\2\u0126\u0127\3\2\2\2\u0127\u0128\3\2\2\2\u0128\u012a"+
+ "\5 \21\2\u0129\u012b\5\24\13\2\u012a\u0129\3\2\2\2\u012a\u012b\3\2\2\2"+
+ "\u012b\u012e\3\2\2\2\u012c\u012d\7d\2\2\u012d\u012f\5\66\34\2\u012e\u012c"+
+ "\3\2\2\2\u012e\u012f\3\2\2\2\u012f\u0133\3\2\2\2\u0130\u0131\7/\2\2\u0131"+
+ "\u0132\7\17\2\2\u0132\u0134\5\26\f\2\u0133\u0130\3\2\2\2\u0133\u0134\3"+
+ "\2\2\2\u0134\u0137\3\2\2\2\u0135\u0136\7\60\2\2\u0136\u0138\5\66\34\2"+
+ "\u0137\u0135\3\2\2\2\u0137\u0138\3\2\2\2\u0138\23\3\2\2\2\u0139\u013a"+
+ "\7*\2\2\u013a\u013f\5$\23\2\u013b\u013c\7\5\2\2\u013c\u013e\5$\23\2\u013d"+
+ "\u013b\3\2\2\2\u013e\u0141\3\2\2\2\u013f\u013d\3\2\2\2\u013f\u0140\3\2"+
+ "\2\2\u0140\u0143\3\2\2\2\u0141\u013f\3\2\2\2\u0142\u0144\5.\30\2\u0143"+
+ "\u0142\3\2\2\2\u0143\u0144\3\2\2\2\u0144\25\3\2\2\2\u0145\u0147\5\36\20"+
+ "\2\u0146\u0145\3\2\2\2\u0146\u0147\3\2\2\2\u0147\u0148\3\2\2\2\u0148\u014d"+
+ "\5\30\r\2\u0149\u014a\7\5\2\2\u014a\u014c\5\30\r\2\u014b\u0149\3\2\2\2"+
+ "\u014c\u014f\3\2\2\2\u014d\u014b\3\2\2\2\u014d\u014e\3\2\2\2\u014e\27"+
+ "\3\2\2\2\u014f\u014d\3\2\2\2\u0150\u0151\5\32\16\2\u0151\31\3\2\2\2\u0152"+
+ "\u015b\7\3\2\2\u0153\u0158\5\64\33\2\u0154\u0155\7\5\2\2\u0155\u0157\5"+
+ "\64\33\2\u0156\u0154\3\2\2\2\u0157\u015a\3\2\2\2\u0158\u0156\3\2\2\2\u0158"+
+ "\u0159\3\2\2\2\u0159\u015c\3\2\2\2\u015a\u0158\3\2\2\2\u015b\u0153\3\2"+
+ "\2\2\u015b\u015c\3\2\2\2\u015c\u015d\3\2\2\2\u015d\u0160\7\4\2\2\u015e"+
+ "\u0160\5\64\33\2\u015f\u0152\3\2\2\2\u015f\u015e\3\2\2\2\u0160\33\3\2"+
+ "\2\2\u0161\u0162\5h\65\2\u0162\u0163\7\f\2\2\u0163\u0164\7\3\2\2\u0164"+
+ "\u0165\5\n\6\2\u0165\u0166\7\4\2\2\u0166\35\3\2\2\2\u0167\u0168\t\13\2"+
+ "\2\u0168\37\3\2\2\2\u0169\u016e\5\"\22\2\u016a\u016b\7\5\2\2\u016b\u016d"+
+ "\5\"\22\2\u016c\u016a\3\2\2\2\u016d\u0170\3\2\2\2\u016e\u016c\3\2\2\2"+
+ "\u016e\u016f\3\2\2\2\u016f!\3\2\2\2\u0170\u016e\3\2\2\2\u0171\u0176\5"+
+ "\64\33\2\u0172\u0174\7\f\2\2\u0173\u0172\3\2\2\2\u0173\u0174\3\2\2\2\u0174"+
+ "\u0175\3\2\2\2\u0175\u0177\5h\65\2\u0176\u0173\3\2\2\2\u0176\u0177\3\2"+
+ "\2\2\u0177#\3\2\2\2\u0178\u017c\5,\27\2\u0179\u017b\5&\24\2\u017a\u0179"+
+ "\3\2\2\2\u017b\u017e\3\2\2\2\u017c\u017a\3\2\2\2\u017c\u017d\3\2\2\2\u017d"+
+ "%\3\2\2\2\u017e\u017c\3\2\2\2\u017f\u0180\5(\25\2\u0180\u0181\78\2\2\u0181"+
+ "\u0183\5,\27\2\u0182\u0184\5*\26\2\u0183\u0182\3\2\2\2\u0183\u0184\3\2"+
+ "\2\2\u0184\u018b\3\2\2\2\u0185\u0186\7C\2\2\u0186\u0187\5(\25\2\u0187"+
+ "\u0188\78\2\2\u0188\u0189\5,\27\2\u0189\u018b\3\2\2\2\u018a\u017f\3\2"+
+ "\2\2\u018a\u0185\3\2\2\2\u018b\'\3\2\2\2\u018c\u018e\7\65\2\2\u018d\u018c"+
+ "\3\2\2\2\u018d\u018e\3\2\2\2\u018e\u019c\3\2\2\2\u018f\u0191\7:\2\2\u0190"+
+ "\u0192\7K\2\2\u0191\u0190\3\2\2\2\u0191\u0192\3\2\2\2\u0192\u019c\3\2"+
+ "\2\2\u0193\u0195\7P\2\2\u0194\u0196\7K\2\2\u0195\u0194\3\2\2\2\u0195\u0196"+
+ "\3\2\2\2\u0196\u019c\3\2\2\2\u0197\u0199\7,\2\2\u0198\u019a\7K\2\2\u0199"+
+ "\u0198\3\2\2\2\u0199\u019a\3\2\2\2\u019a\u019c\3\2\2\2\u019b\u018d\3\2"+
+ "\2\2\u019b\u018f\3\2\2\2\u019b\u0193\3\2\2\2\u019b\u0197\3\2\2\2\u019c"+
+ ")\3\2\2\2\u019d\u019e\7G\2\2\u019e\u01ac\5\66\34\2\u019f\u01a0\7a\2\2"+
+ "\u01a0\u01a1\7\3\2\2\u01a1\u01a6\5h\65\2\u01a2\u01a3\7\5\2\2\u01a3\u01a5"+
+ "\5h\65\2\u01a4\u01a2\3\2\2\2\u01a5\u01a8\3\2\2\2\u01a6\u01a4\3\2\2\2\u01a6"+
+ "\u01a7\3\2\2\2\u01a7\u01a9\3\2\2\2\u01a8\u01a6\3\2\2\2\u01a9\u01aa\7\4"+
+ "\2\2\u01aa\u01ac\3\2\2\2\u01ab\u019d\3\2\2\2\u01ab\u019f\3\2\2\2\u01ac"+
+ "+\3\2\2\2\u01ad\u01af\7+\2\2\u01ae\u01ad\3\2\2\2\u01ae\u01af\3\2\2\2\u01af"+
+ "\u01b0\3\2\2\2\u01b0\u01b5\5j\66\2\u01b1\u01b3\7\f\2\2\u01b2\u01b1\3\2"+
+ "\2\2\u01b2\u01b3\3\2\2\2\u01b3\u01b4\3\2\2\2\u01b4\u01b6\5f\64\2\u01b5"+
+ "\u01b2\3\2\2\2\u01b5\u01b6\3\2\2\2\u01b6\u01ca\3\2\2\2\u01b7\u01b8\7\3"+
+ "\2\2\u01b8\u01b9\5\n\6\2\u01b9\u01be\7\4\2\2\u01ba\u01bc\7\f\2\2\u01bb"+
+ "\u01ba\3\2\2\2\u01bb\u01bc\3\2\2\2\u01bc\u01bd\3\2\2\2\u01bd\u01bf\5f"+
+ "\64\2\u01be\u01bb\3\2\2\2\u01be\u01bf\3\2\2\2\u01bf\u01ca\3\2\2\2\u01c0"+
+ "\u01c1\7\3\2\2\u01c1\u01c2\5$\23\2\u01c2\u01c7\7\4\2\2\u01c3\u01c5\7\f"+
+ "\2\2\u01c4\u01c3\3\2\2\2\u01c4\u01c5\3\2\2\2\u01c5\u01c6\3\2\2\2\u01c6"+
+ "\u01c8\5f\64\2\u01c7\u01c4\3\2\2\2\u01c7\u01c8\3\2\2\2\u01c8\u01ca\3\2"+
+ "\2\2\u01c9\u01ae\3\2\2\2\u01c9\u01b7\3\2\2\2\u01c9\u01c0\3\2\2\2\u01ca"+
+ "-\3\2\2\2\u01cb\u01cc\7N\2\2\u01cc\u01cd\7\3\2\2\u01cd\u01ce\5\60\31\2"+
+ "\u01ce\u01cf\7(\2\2\u01cf\u01d0\5f\64\2\u01d0\u01d1\7\63\2\2\u01d1\u01d2"+
+ "\7\3\2\2\u01d2\u01d3\5\60\31\2\u01d3\u01d4\7\4\2\2\u01d4\u01d5\7\4\2\2"+
+ "\u01d5/\3\2\2\2\u01d6\u01db\5\62\32\2\u01d7\u01d8\7\5\2\2\u01d8\u01da"+
+ "\5\62\32\2\u01d9\u01d7\3\2\2\2\u01da\u01dd\3\2\2\2\u01db\u01d9\3\2\2\2"+
+ "\u01db\u01dc\3\2\2\2\u01dc\61\3\2\2\2\u01dd\u01db\3\2\2\2\u01de\u01e3"+
+ "\5D#\2\u01df\u01e1\7\f\2\2\u01e0\u01df\3\2\2\2\u01e0\u01e1\3\2\2\2\u01e1"+
+ "\u01e2\3\2\2\2\u01e2\u01e4\5h\65\2\u01e3\u01e0\3\2\2\2\u01e3\u01e4\3\2"+
+ "\2\2\u01e4\63\3\2\2\2\u01e5\u01e6\5\66\34\2\u01e6\65\3\2\2\2\u01e7\u01e8"+
+ "\b\34\1\2\u01e8\u01e9\7D\2\2\u01e9\u0207\5\66\34\n\u01ea\u01eb\7#\2\2"+
+ "\u01eb\u01ec\7\3\2\2\u01ec\u01ed\5\b\5\2\u01ed\u01ee\7\4\2\2\u01ee\u0207"+
+ "\3\2\2\2\u01ef\u01f0\7R\2\2\u01f0\u01f1\7\3\2\2\u01f1\u01f2\5r:\2\u01f2"+
+ "\u01f3\58\35\2\u01f3\u01f4\7\4\2\2\u01f4\u0207\3\2\2\2\u01f5\u01f6\7>"+
+ "\2\2\u01f6\u01f7\7\3\2\2\u01f7\u01f8\5f\64\2\u01f8\u01f9\7\5\2\2\u01f9"+
+ "\u01fa\5r:\2\u01fa\u01fb\58\35\2\u01fb\u01fc\7\4\2\2\u01fc\u0207\3\2\2"+
+ "\2\u01fd\u01fe\7>\2\2\u01fe\u01ff\7\3\2\2\u01ff\u0200\5r:\2\u0200\u0201"+
+ "\7\5\2\2\u0201\u0202\5r:\2\u0202\u0203\58\35\2\u0203\u0204\7\4\2\2\u0204"+
+ "\u0207\3\2\2\2\u0205\u0207\5:\36\2\u0206\u01e7\3\2\2\2\u0206\u01ea\3\2"+
+ "\2\2\u0206\u01ef\3\2\2\2\u0206\u01f5\3\2\2\2\u0206\u01fd\3\2\2\2\u0206"+
+ "\u0205\3\2\2\2\u0207\u0210\3\2\2\2\u0208\u0209\f\4\2\2\u0209\u020a\7\n"+
+ "\2\2\u020a\u020f\5\66\34\5\u020b\u020c\f\3\2\2\u020c\u020d\7I\2\2\u020d"+
+ "\u020f\5\66\34\4\u020e\u0208\3\2\2\2\u020e\u020b\3\2\2\2\u020f\u0212\3"+
+ "\2\2\2\u0210\u020e\3\2\2\2\u0210\u0211\3\2\2\2\u0211\67\3\2\2\2\u0212"+
+ "\u0210\3\2\2\2\u0213\u0214\7\5\2\2\u0214\u0216\5r:\2\u0215\u0213\3\2\2"+
+ "\2\u0216\u0219\3\2\2\2\u0217\u0215\3\2\2\2\u0217\u0218\3\2\2\2\u02189"+
+ "\3\2\2\2\u0219\u0217\3\2\2\2\u021a\u021c\5D#\2\u021b\u021d\5<\37\2\u021c"+
+ "\u021b\3\2\2\2\u021c\u021d\3\2\2\2\u021d;\3\2\2\2\u021e\u0220\7D\2\2\u021f"+
+ "\u021e\3\2\2\2\u021f\u0220\3\2\2\2\u0220\u0221\3\2\2\2\u0221\u0222\7\16"+
+ "\2\2\u0222\u0223\5D#\2\u0223\u0224\7\n\2\2\u0224\u0225\5D#\2\u0225\u024d"+
+ "\3\2\2\2\u0226\u0228\7D\2\2\u0227\u0226\3\2\2\2\u0227\u0228\3\2\2\2\u0228"+
+ "\u0229\3\2\2\2\u0229\u022a\7\63\2\2\u022a\u022b\7\3\2\2\u022b\u0230\5"+
+ "D#\2\u022c\u022d\7\5\2\2\u022d\u022f\5D#\2\u022e\u022c\3\2\2\2\u022f\u0232"+
+ "\3\2\2\2\u0230\u022e\3\2\2\2\u0230\u0231\3\2\2\2\u0231\u0233\3\2\2\2\u0232"+
+ "\u0230\3\2\2\2\u0233\u0234\7\4\2\2\u0234\u024d\3\2\2\2\u0235\u0237\7D"+
+ "\2\2\u0236\u0235\3\2\2\2\u0236\u0237\3\2\2\2\u0237\u0238\3\2\2\2\u0238"+
+ "\u0239\7\63\2\2\u0239\u023a\7\3\2\2\u023a\u023b\5\b\5\2\u023b\u023c\7"+
+ "\4\2\2\u023c\u024d\3\2\2\2\u023d\u023f\7D\2\2\u023e\u023d\3\2\2\2\u023e"+
+ "\u023f\3\2\2\2\u023f\u0240\3\2\2\2\u0240\u0241\7;\2\2\u0241\u024d\5@!"+
+ "\2\u0242\u0244\7D\2\2\u0243\u0242\3\2\2\2\u0243\u0244\3\2\2\2\u0244\u0245"+
+ "\3\2\2\2\u0245\u0246\7Q\2\2\u0246\u024d\5r:\2\u0247\u0249\7\67\2\2\u0248"+
+ "\u024a\7D\2\2\u0249\u0248\3\2\2\2\u0249\u024a\3\2\2\2\u024a\u024b\3\2"+
+ "\2\2\u024b\u024d\7E\2\2\u024c\u021f\3\2\2\2\u024c\u0227\3\2\2\2\u024c"+
+ "\u0236\3\2\2\2\u024c\u023e\3\2\2\2\u024c\u0243\3\2\2\2\u024c\u0247\3\2"+
+ "\2\2\u024d=\3\2\2\2\u024e\u024f\7;\2\2\u024f\u0250\5@!\2\u0250?\3\2\2"+
+ "\2\u0251\u0253\5r:\2\u0252\u0254\5B\"\2\u0253\u0252\3\2\2\2\u0253\u0254"+
+ "\3\2\2\2\u0254A\3\2\2\2\u0255\u0256\7!\2\2\u0256\u025c\5r:\2\u0257\u0258"+
+ "\7h\2\2\u0258\u0259\5r:\2\u0259\u025a\7o\2\2\u025a\u025c\3\2\2\2\u025b"+
+ "\u0255\3\2\2\2\u025b\u0257\3\2\2\2\u025cC\3\2\2\2\u025d\u025e\b#\1\2\u025e"+
+ "\u0262\5F$\2\u025f\u0260\t\7\2\2\u0260\u0262\5D#\6\u0261\u025d\3\2\2\2"+
+ "\u0261\u025f\3\2\2\2\u0262\u026f\3\2\2\2\u0263\u0264\f\5\2\2\u0264\u0265"+
+ "\t\f\2\2\u0265\u026e\5D#\6\u0266\u0267\f\4\2\2\u0267\u0268\t\7\2\2\u0268"+
+ "\u026e\5D#\5\u0269\u026a\f\3\2\2\u026a\u026b\5\\/\2\u026b\u026c\5D#\4"+
+ "\u026c\u026e\3\2\2\2\u026d\u0263\3\2\2\2\u026d\u0266\3\2\2\2\u026d\u0269"+
+ "\3\2\2\2\u026e\u0271\3\2\2\2\u026f\u026d\3\2\2\2\u026f\u0270\3\2\2\2\u0270"+
+ "E\3\2\2\2\u0271\u026f\3\2\2\2\u0272\u0273\b$\1\2\u0273\u0297\5J&\2\u0274"+
+ "\u0297\5P)\2\u0275\u0297\5H%\2\u0276\u0297\5Z.\2\u0277\u0278\5f\64\2\u0278"+
+ "\u0279\7~\2\2\u0279\u027b\3\2\2\2\u027a\u0277\3\2\2\2\u027a\u027b\3\2"+
+ "\2\2\u027b\u027c\3\2\2\2\u027c\u0297\7y\2\2\u027d\u0297\5T+\2\u027e\u027f"+
+ "\7\3\2\2\u027f\u0280\5\b\5\2\u0280\u0281\7\4\2\2\u0281\u0297\3\2\2\2\u0282"+
+ "\u0297\5f\64\2\u0283\u0284\7\3\2\2\u0284\u0285\5\64\33\2\u0285\u0286\7"+
+ "\4\2\2\u0286\u0297\3\2\2\2\u0287\u0289\7\20\2\2\u0288\u028a\5\66\34\2"+
+ "\u0289\u0288\3\2\2\2\u0289\u028a\3\2\2\2\u028a\u028c\3\2\2\2\u028b\u028d"+
+ "\5t;\2\u028c\u028b\3\2\2\2\u028d\u028e\3\2\2\2\u028e\u028c\3\2\2\2\u028e"+
+ "\u028f\3\2\2\2\u028f\u0292\3\2\2\2\u0290\u0291\7\37\2\2\u0291\u0293\5"+
+ "\66\34\2\u0292\u0290\3\2\2\2\u0292\u0293\3\2\2\2\u0293\u0294\3\2\2\2\u0294"+
+ "\u0295\7 \2\2\u0295\u0297\3\2\2\2\u0296\u0272\3\2\2\2\u0296\u0274\3\2"+
+ "\2\2\u0296\u0275\3\2\2\2\u0296\u0276\3\2\2\2\u0296\u027a\3\2\2\2\u0296"+
+ "\u027d\3\2\2\2\u0296\u027e\3\2\2\2\u0296\u0282\3\2\2\2\u0296\u0283\3\2"+
+ "\2\2\u0296\u0287\3\2\2\2\u0297\u029d\3\2\2\2\u0298\u0299\f\f\2\2\u0299"+
+ "\u029a\7|\2\2\u029a\u029c\5d\63\2\u029b\u0298\3\2\2\2\u029c\u029f\3\2"+
+ "\2\2\u029d\u029b\3\2\2\2\u029d\u029e\3\2\2\2\u029eG\3\2\2\2\u029f\u029d"+
+ "\3\2\2\2\u02a0\u02a4\7\30\2\2\u02a1\u02a4\7\26\2\2\u02a2\u02a4\7\27\2"+
+ "\2\u02a3\u02a0\3\2\2\2\u02a3\u02a1\3\2\2\2\u02a3\u02a2\3\2\2\2\u02a4I"+
+ "\3\2\2\2\u02a5\u02b0\5L\'\2\u02a6\u02a7\7i\2\2\u02a7\u02a8\5L\'\2\u02a8"+
+ "\u02a9\7o\2\2\u02a9\u02b0\3\2\2\2\u02aa\u02b0\5N(\2\u02ab\u02ac\7i\2\2"+
+ "\u02ac\u02ad\5N(\2\u02ad\u02ae\7o\2\2\u02ae\u02b0\3\2\2\2\u02af\u02a5"+
+ "\3\2\2\2\u02af\u02a6\3\2\2\2\u02af\u02aa\3\2\2\2\u02af\u02ab\3\2\2\2\u02b0"+
+ "K\3\2\2\2\u02b1\u02b2\7\21\2\2\u02b2\u02b3\7\3\2\2\u02b3\u02b4\5\64\33"+
+ "\2\u02b4\u02b5\7\f\2\2\u02b5\u02b6\5d\63\2\u02b6\u02b7\7\4\2\2\u02b7M"+
+ "\3\2\2\2\u02b8\u02b9\7\25\2\2\u02b9\u02ba\7\3\2\2\u02ba\u02bb\5\64\33"+
+ "\2\u02bb\u02bc\7\5\2\2\u02bc\u02bd\5d\63\2\u02bd\u02be\7\4\2\2\u02beO"+
+ "\3\2\2\2\u02bf\u02c5\5R*\2\u02c0\u02c1\7i\2\2\u02c1\u02c2\5R*\2\u02c2"+
+ "\u02c3\7o\2\2\u02c3\u02c5\3\2\2\2\u02c4\u02bf\3\2\2\2\u02c4\u02c0\3\2"+
+ "\2\2\u02c5Q\3\2\2\2\u02c6\u02c7\7%\2\2\u02c7\u02c8\7\3\2\2\u02c8\u02c9"+
+ "\5h\65\2\u02c9\u02ca\7*\2\2\u02ca\u02cb\5D#\2\u02cb\u02cc\7\4\2\2\u02cc"+
+ "S\3\2\2\2\u02cd\u02d3\5V,\2\u02ce\u02cf\7i\2\2\u02cf\u02d0\5V,\2\u02d0"+
+ "\u02d1\7o\2\2\u02d1\u02d3\3\2\2\2\u02d2\u02cd\3\2\2\2\u02d2\u02ce\3\2"+
+ "\2\2\u02d3U\3\2\2\2\u02d4\u02d5\5X-\2\u02d5\u02e1\7\3\2\2\u02d6\u02d8"+
+ "\5\36\20\2\u02d7\u02d6\3\2\2\2\u02d7\u02d8\3\2\2\2\u02d8\u02d9\3\2\2\2"+
+ "\u02d9\u02de\5\64\33\2\u02da\u02db\7\5\2\2\u02db\u02dd\5\64\33\2\u02dc"+
+ "\u02da\3\2\2\2\u02dd\u02e0\3\2\2\2\u02de\u02dc\3\2\2\2\u02de\u02df\3\2"+
+ "\2\2\u02df\u02e2\3\2\2\2\u02e0\u02de\3\2\2\2\u02e1\u02d7\3\2\2\2\u02e1"+
+ "\u02e2\3\2\2\2\u02e2\u02e3\3\2\2\2\u02e3\u02e4\7\4\2\2\u02e4W\3\2\2\2"+
+ "\u02e5\u02e9\7:\2\2\u02e6\u02e9\7P\2\2\u02e7\u02e9\5h\65\2\u02e8\u02e5"+
+ "\3\2\2\2\u02e8\u02e6\3\2\2\2\u02e8\u02e7\3\2\2\2\u02e9Y\3\2\2\2\u02ea"+
+ "\u0305\7E\2\2\u02eb\u0305\5`\61\2\u02ec\u0305\5p9\2\u02ed\u0305\5^\60"+
+ "\2\u02ee\u02f0\7\u0080\2\2\u02ef\u02ee\3\2\2\2\u02f0\u02f1\3\2\2\2\u02f1"+
+ "\u02ef\3\2\2\2\u02f1\u02f2\3\2\2\2\u02f2\u0305\3\2\2\2\u02f3\u0305\7\177"+
+ "\2\2\u02f4\u02f5\7k\2\2\u02f5\u02f6\5r:\2\u02f6\u02f7\7o\2\2\u02f7\u0305"+
+ "\3\2\2\2\u02f8\u02f9\7l\2\2\u02f9\u02fa\5r:\2\u02fa\u02fb\7o\2\2\u02fb"+
+ "\u0305\3\2\2\2\u02fc\u02fd\7m\2\2\u02fd\u02fe\5r:\2\u02fe\u02ff\7o\2\2"+
+ "\u02ff\u0305\3\2\2\2\u0300\u0301\7n\2\2\u0301\u0302\5r:\2\u0302\u0303"+
+ "\7o\2\2\u0303\u0305\3\2\2\2\u0304\u02ea\3\2\2\2\u0304\u02eb\3\2\2\2\u0304"+
+ "\u02ec\3\2\2\2\u0304\u02ed\3\2\2\2\u0304\u02ef\3\2\2\2\u0304\u02f3\3\2"+
+ "\2\2\u0304\u02f4\3\2\2\2\u0304\u02f8\3\2\2\2\u0304\u02fc\3\2\2\2\u0304"+
+ "\u0300\3\2\2\2\u0305[\3\2\2\2\u0306\u0307\t\r\2\2\u0307]\3\2\2\2\u0308"+
+ "\u0309\t\16\2\2\u0309_\3\2\2\2\u030a\u030c\7\66\2\2\u030b\u030d\t\7\2"+
+ "\2\u030c\u030b\3\2\2\2\u030c\u030d\3\2\2\2\u030d\u0310\3\2\2\2\u030e\u0311"+
+ "\5p9\2\u030f\u0311\5r:\2\u0310\u030e\3\2\2\2\u0310\u030f\3\2\2\2\u0311"+
+ "\u0312\3\2\2\2\u0312\u0315\5b\62\2\u0313\u0314\7^\2\2\u0314\u0316\5b\62"+
+ "\2\u0315\u0313\3\2\2\2\u0315\u0316\3\2\2\2\u0316a\3\2\2\2\u0317\u0318"+
+ "\t\17\2\2\u0318c\3\2\2\2\u0319\u031a\5h\65\2\u031ae\3\2\2\2\u031b\u031c"+
+ "\5h\65\2\u031c\u031d\7~\2\2\u031d\u031f\3\2\2\2\u031e\u031b\3\2\2\2\u031f"+
+ "\u0322\3\2\2\2\u0320\u031e\3\2\2\2\u0320\u0321\3\2\2\2\u0321\u0323\3\2"+
+ "\2\2\u0322\u0320\3\2\2\2\u0323\u0324\5h\65\2\u0324g\3\2\2\2\u0325\u0328"+
+ "\5l\67\2\u0326\u0328\5n8\2\u0327\u0325\3\2\2\2\u0327\u0326\3\2\2\2\u0328"+
+ "i\3\2\2\2\u0329\u032a\5h\65\2\u032a\u032b\7\6\2\2\u032b\u032d\3\2\2\2"+
+ "\u032c\u0329\3\2\2\2\u032c\u032d\3\2\2\2\u032d\u032e\3\2\2\2\u032e\u0336"+
+ "\7\u0085\2\2\u032f\u0330\5h\65\2\u0330\u0331\7\6\2\2\u0331\u0333\3\2\2"+
+ "\2\u0332\u032f\3\2\2\2\u0332\u0333\3\2\2\2\u0333\u0334\3\2\2\2\u0334\u0336"+
+ "\5h\65\2\u0335\u032c\3\2\2\2\u0335\u0332\3\2\2\2\u0336k\3\2\2\2\u0337"+
+ "\u033a\7\u0086\2\2\u0338\u033a\7\u0087\2\2\u0339\u0337\3\2\2\2\u0339\u0338"+
+ "\3\2\2\2\u033am\3\2\2\2\u033b\u033f\7\u0083\2\2\u033c\u033f\5v<\2\u033d"+
+ "\u033f\7\u0084\2\2\u033e\u033b\3\2\2\2\u033e\u033c\3\2\2\2\u033e\u033d"+
+ "\3\2\2\2\u033fo\3\2\2\2\u0340\u0343\7\u0082\2\2\u0341\u0343\7\u0081\2"+
+ "\2\u0342\u0340\3\2\2\2\u0342\u0341\3\2\2\2\u0343q\3\2\2\2\u0344\u0345"+
+ "\t\20\2\2\u0345s\3\2\2\2\u0346\u0347\7c\2\2\u0347\u0348\5\64\33\2\u0348"+
+ "\u0349\7\\\2\2\u0349\u034a\5\64\33\2\u034au\3\2\2\2\u034b\u034c\t\21\2"+
+ "\2\u034cw\3\2\2\2s\u0087\u0089\u008d\u0096\u0098\u009c\u00a3\u00a7\u00ad"+
+ "\u00b2\u00b7\u00bb\u00c0\u00c8\u00cc\u00d4\u00d7\u00dd\u00e2\u00e5\u00ea"+
+ "\u00ed\u00ef\u00f7\u00fa\u0106\u0109\u010c\u0113\u011a\u011e\u0122\u0126"+
+ "\u012a\u012e\u0133\u0137\u013f\u0143\u0146\u014d\u0158\u015b\u015f\u016e"+
+ "\u0173\u0176\u017c\u0183\u018a\u018d\u0191\u0195\u0199\u019b\u01a6\u01ab"+
+ "\u01ae\u01b2\u01b5\u01bb\u01be\u01c4\u01c7\u01c9\u01db\u01e0\u01e3\u0206"+
+ "\u020e\u0210\u0217\u021c\u021f\u0227\u0230\u0236\u023e\u0243\u0249\u024c"+
+ "\u0253\u025b\u0261\u026d\u026f\u027a\u0289\u028e\u0292\u0296\u029d\u02a3"+
+ "\u02af\u02c4\u02d2\u02d7\u02de\u02e1\u02e8\u02f1\u0304\u030c\u0310\u0315"+
+ "\u0320\u0327\u032c\u0332\u0335\u0339\u033e\u0342";
public static final ATN _ATN =
new ATNDeserializer().deserialize(_serializedATN.toCharArray());
static {
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java
index 7f44a1593c2..bc8d06c1dcc 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java
@@ -173,6 +173,12 @@ interface SqlBaseVisitor extends ParseTreeVisitor {
* @return the visitor result
*/
T visitSetQuantifier(SqlBaseParser.SetQuantifierContext ctx);
+ /**
+ * Visit a parse tree produced by {@link SqlBaseParser#selectItems}.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ T visitSelectItems(SqlBaseParser.SelectItemsContext ctx);
/**
* Visit a parse tree produced by the {@code selectExpression}
* labeled alternative in {@link SqlBaseParser#selectItem}.
@@ -225,6 +231,24 @@ interface SqlBaseVisitor<T> extends ParseTreeVisitor<T> {
* @return the visitor result
*/
T visitAliasedRelation(SqlBaseParser.AliasedRelationContext ctx);
+ /**
+ * Visit a parse tree produced by {@link SqlBaseParser#pivotClause}.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ T visitPivotClause(SqlBaseParser.PivotClauseContext ctx);
+ /**
+ * Visit a parse tree produced by {@link SqlBaseParser#pivotArgs}.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ T visitPivotArgs(SqlBaseParser.PivotArgsContext ctx);
+ /**
+ * Visit a parse tree produced by {@link SqlBaseParser#namedValueExpression}.
+ * @param ctx the parse tree
+ * @return the visitor result
+ */
+ T visitNamedValueExpression(SqlBaseParser.NamedValueExpressionContext ctx);
/**
* Visit a parse tree produced by {@link SqlBaseParser#expression}.
* @param ctx the parse tree
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Aggregate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Aggregate.java
index 35d93e3a68c..39fef8188b2 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Aggregate.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Aggregate.java
@@ -10,8 +10,8 @@ import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.Expressions;
import org.elasticsearch.xpack.sql.expression.NamedExpression;
-import org.elasticsearch.xpack.sql.tree.Source;
import org.elasticsearch.xpack.sql.tree.NodeInfo;
+import org.elasticsearch.xpack.sql.tree.Source;
import java.util.List;
import java.util.Objects;
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Pivot.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Pivot.java
new file mode 100644
index 00000000000..4a0639d8b78
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Pivot.java
@@ -0,0 +1,142 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.sql.plan.logical;
+
+import org.elasticsearch.xpack.sql.capabilities.Resolvables;
+import org.elasticsearch.xpack.sql.expression.Attribute;
+import org.elasticsearch.xpack.sql.expression.AttributeSet;
+import org.elasticsearch.xpack.sql.expression.Expression;
+import org.elasticsearch.xpack.sql.expression.ExpressionId;
+import org.elasticsearch.xpack.sql.expression.Expressions;
+import org.elasticsearch.xpack.sql.expression.NamedExpression;
+import org.elasticsearch.xpack.sql.expression.function.Function;
+import org.elasticsearch.xpack.sql.tree.NodeInfo;
+import org.elasticsearch.xpack.sql.tree.Source;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+import static java.util.Collections.singletonList;
+
+public class Pivot extends UnaryPlan {
+
+ private final Expression column;
+ private final List<NamedExpression> values;
+ private final List<NamedExpression> aggregates;
+ // derived properties
+ private AttributeSet groupingSet;
+ private AttributeSet valueOutput;
+ private List<Attribute> output;
+
+ public Pivot(Source source, LogicalPlan child, Expression column, List<NamedExpression> values, List<NamedExpression> aggregates) {
+ super(source, child);
+ this.column = column;
+ this.values = values;
+ this.aggregates = aggregates;
+ }
+
+ @Override
+ protected NodeInfo<Pivot> info() {
+ return NodeInfo.create(this, Pivot::new, child(), column, values, aggregates);
+ }
+
+ @Override
+ protected Pivot replaceChild(LogicalPlan newChild) {
+ return new Pivot(source(), newChild, column, values, aggregates);
+ }
+
+ public Expression column() {
+ return column;
+ }
+
+ public List<NamedExpression> values() {
+ return values;
+ }
+
+ public List<NamedExpression> aggregates() {
+ return aggregates;
+ }
+
+ public AttributeSet groupingSet() {
+ if (groupingSet == null) {
+ AttributeSet columnSet = Expressions.references(singletonList(column));
+ // grouping can happen only on "primitive" fields, thus exclude multi-fields or nested docs
+ // the verifier enforces this rule so it does not catch folks by surprise
+ groupingSet = new AttributeSet(Expressions.onlyPrimitiveFieldAttributes(child().output()))
+ // make sure to have the column as the last entry (helps with translation)
+ .subtract(columnSet)
+ .subtract(Expressions.references(aggregates))
+ .combine(columnSet);
+ }
+ return groupingSet;
+ }
+
+ public AttributeSet valuesOutput() {
+ // TODO: the generated id is a hack since it can clash with other potentially generated ids
+ if (valueOutput == null) {
+ List<Attribute> out = new ArrayList<>(aggregates.size() * values.size());
+ if (aggregates.size() == 1) {
+ NamedExpression agg = aggregates.get(0);
+ for (NamedExpression value : values) {
+ ExpressionId id = new ExpressionId(agg.id().hashCode() + value.id().hashCode());
+ out.add(value.toAttribute().withDataType(agg.dataType()).withId(id));
+ }
+ }
+ // for multiple args, concat the function and the value
+ else {
+ for (NamedExpression agg : aggregates) {
+ String name = agg instanceof Function ? ((Function) agg).functionName() : agg.name();
+ for (NamedExpression value : values) {
+ ExpressionId id = new ExpressionId(agg.id().hashCode() + value.id().hashCode());
+ out.add(value.toAttribute().withName(value.name() + "_" + name).withDataType(agg.dataType()).withId(id));
+ }
+ }
+ }
+ valueOutput = new AttributeSet(out);
+ }
+ return valueOutput;
+ }
+
+ @Override
+ public List<Attribute> output() {
+ if (output == null) {
+ output = new ArrayList<>(groupingSet()
+ .subtract(Expressions.references(singletonList(column)))
+ .combine(valuesOutput()));
+ }
+
+ return output;
+ }
+
+ @Override
+ public boolean expressionsResolved() {
+ return column.resolved() && Resolvables.resolved(values) && Resolvables.resolved(aggregates);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(column, values, aggregates, child());
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+
+ Pivot other = (Pivot) obj;
+ return Objects.equals(column, other.column)
+ && Objects.equals(values, other.values)
+ && Objects.equals(aggregates, other.aggregates)
+ && Objects.equals(child(), other.child());
+ }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/PivotExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/PivotExec.java
new file mode 100644
index 00000000000..579a53696ee
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/PivotExec.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.plan.physical;
+
+import org.elasticsearch.xpack.sql.expression.Attribute;
+import org.elasticsearch.xpack.sql.plan.logical.Pivot;
+import org.elasticsearch.xpack.sql.tree.NodeInfo;
+import org.elasticsearch.xpack.sql.tree.Source;
+
+import java.util.List;
+import java.util.Objects;
+
+public class PivotExec extends UnaryExec implements Unexecutable {
+
+ private final Pivot pivot;
+
+ public PivotExec(Source source, PhysicalPlan child, Pivot pivot) {
+ super(source, child);
+ this.pivot = pivot;
+ }
+
+ @Override
+ protected NodeInfo<PivotExec> info() {
+ return NodeInfo.create(this, PivotExec::new, child(), pivot);
+ }
+
+ @Override
+ protected PivotExec replaceChild(PhysicalPlan newChild) {
+ return new PivotExec(source(), newChild, pivot);
+ }
+
+ @Override
+ public List<Attribute> output() {
+ return pivot.output();
+ }
+
+ public Pivot pivot() {
+ return pivot;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(pivot, child());
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+
+ PivotExec other = (PivotExec) obj;
+
+ return Objects.equals(pivot, other.pivot)
+ && Objects.equals(child(), other.child());
+ }
+}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java
index b32ad961ae9..522d5a944dc 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java
@@ -14,6 +14,7 @@ import org.elasticsearch.xpack.sql.plan.logical.Limit;
import org.elasticsearch.xpack.sql.plan.logical.LocalRelation;
import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.plan.logical.OrderBy;
+import org.elasticsearch.xpack.sql.plan.logical.Pivot;
import org.elasticsearch.xpack.sql.plan.logical.Project;
import org.elasticsearch.xpack.sql.plan.logical.With;
import org.elasticsearch.xpack.sql.plan.logical.command.Command;
@@ -25,6 +26,7 @@ import org.elasticsearch.xpack.sql.plan.physical.LimitExec;
import org.elasticsearch.xpack.sql.plan.physical.LocalExec;
import org.elasticsearch.xpack.sql.plan.physical.OrderExec;
import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan;
+import org.elasticsearch.xpack.sql.plan.physical.PivotExec;
import org.elasticsearch.xpack.sql.plan.physical.ProjectExec;
import org.elasticsearch.xpack.sql.plan.physical.UnplannedExec;
import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer;
@@ -88,6 +90,11 @@ class Mapper extends RuleExecutor<PhysicalPlan> {
return new AggregateExec(p.source(), map(a.child()), a.groupings(), a.aggregates());
}
+ if (p instanceof Pivot) {
+ Pivot pv = (Pivot) p;
+ return new PivotExec(pv.source(), map(pv.child()), pv);
+ }
+
if (p instanceof EsRelation) {
EsRelation c = (EsRelation) p;
List<Attribute> output = c.output();
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java
index ae875d6fc6e..3931ada3836 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java
@@ -8,10 +8,13 @@ package org.elasticsearch.xpack.sql.planner;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.execution.search.AggRef;
+import org.elasticsearch.xpack.sql.execution.search.FieldExtraction;
import org.elasticsearch.xpack.sql.expression.Alias;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.AttributeMap;
+import org.elasticsearch.xpack.sql.expression.AttributeSet;
import org.elasticsearch.xpack.sql.expression.Expression;
+import org.elasticsearch.xpack.sql.expression.ExpressionId;
import org.elasticsearch.xpack.sql.expression.Expressions;
import org.elasticsearch.xpack.sql.expression.Foldables;
import org.elasticsearch.xpack.sql.expression.NamedExpression;
@@ -32,6 +35,7 @@ import org.elasticsearch.xpack.sql.expression.gen.pipeline.AggPathInput;
import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
import org.elasticsearch.xpack.sql.expression.gen.pipeline.UnaryPipe;
import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
+import org.elasticsearch.xpack.sql.plan.logical.Pivot;
import org.elasticsearch.xpack.sql.plan.physical.AggregateExec;
import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec;
import org.elasticsearch.xpack.sql.plan.physical.FilterExec;
@@ -39,6 +43,7 @@ import org.elasticsearch.xpack.sql.plan.physical.LimitExec;
import org.elasticsearch.xpack.sql.plan.physical.LocalExec;
import org.elasticsearch.xpack.sql.plan.physical.OrderExec;
import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan;
+import org.elasticsearch.xpack.sql.plan.physical.PivotExec;
import org.elasticsearch.xpack.sql.plan.physical.ProjectExec;
import org.elasticsearch.xpack.sql.planner.QueryTranslator.GroupingContext;
import org.elasticsearch.xpack.sql.planner.QueryTranslator.QueryTranslation;
@@ -52,6 +57,7 @@ import org.elasticsearch.xpack.sql.querydsl.container.GlobalCountRef;
import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef;
import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef.Property;
import org.elasticsearch.xpack.sql.querydsl.container.MetricAggRef;
+import org.elasticsearch.xpack.sql.querydsl.container.PivotColumnRef;
import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer;
import org.elasticsearch.xpack.sql.querydsl.container.ScoreSort;
import org.elasticsearch.xpack.sql.querydsl.container.ScriptSort;
@@ -64,14 +70,17 @@ import org.elasticsearch.xpack.sql.rule.RuleExecutor;
import org.elasticsearch.xpack.sql.session.EmptyExecutable;
import org.elasticsearch.xpack.sql.util.Check;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
+import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.xpack.sql.planner.QueryTranslator.and;
import static org.elasticsearch.xpack.sql.planner.QueryTranslator.toAgg;
import static org.elasticsearch.xpack.sql.planner.QueryTranslator.toQuery;
+import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine;
/**
* Folds the PhysicalPlan into a {@link Query}.
@@ -85,6 +94,7 @@ class QueryFolder extends RuleExecutor<PhysicalPlan> {
@Override
protected Iterable<RuleExecutor<PhysicalPlan>.Batch> batches() {
Batch rollup = new Batch("Fold queries",
+ new FoldPivot(),
new FoldAggregate(),
new FoldProject(),
new FoldFilter(),
@@ -149,7 +159,8 @@ class QueryFolder extends RuleExecutor {
queryC.sort(),
queryC.limit(),
queryC.shouldTrackHits(),
- queryC.shouldIncludeFrozen());
+ queryC.shouldIncludeFrozen(),
+ queryC.minPageSize());
return new EsQueryExec(exec.source(), exec.index(), project.output(), clone);
}
return project;
@@ -179,7 +190,8 @@ class QueryFolder extends RuleExecutor {
qContainer.sort(),
qContainer.limit(),
qContainer.shouldTrackHits(),
- qContainer.shouldIncludeFrozen());
+ qContainer.shouldIncludeFrozen(),
+ qContainer.minPageSize());
return exec.with(qContainer);
}
@@ -204,190 +216,190 @@ class QueryFolder extends RuleExecutor<PhysicalPlan> {
private static class FoldAggregate extends FoldingRule<AggregateExec> {
@Override
protected PhysicalPlan rule(AggregateExec a) {
-
if (a.child() instanceof EsQueryExec) {
EsQueryExec exec = (EsQueryExec) a.child();
+ return fold(a, exec);
+ }
+ return a;
+ }
+
+ static EsQueryExec fold(AggregateExec a, EsQueryExec exec) {
+ // build the group aggregation
+ // and also collect info about it (since the group columns might be used inside the select)
- // build the group aggregation
- // and also collect info about it (since the group columns might be used inside the select)
+ GroupingContext groupingContext = QueryTranslator.groupBy(a.groupings());
- GroupingContext groupingContext = QueryTranslator.groupBy(a.groupings());
+ QueryContainer queryC = exec.queryContainer();
+ if (groupingContext != null) {
+ queryC = queryC.addGroups(groupingContext.groupMap.values());
+ }
- QueryContainer queryC = exec.queryContainer();
- if (groupingContext != null) {
- queryC = queryC.addGroups(groupingContext.groupMap.values());
- }
+ Map<Attribute, Attribute> aliases = new LinkedHashMap<>();
+ // tracker for compound aggs seen in a group
+ Map<CompoundNumericAggregate, String> compoundAggMap = new LinkedHashMap<>();
- Map<Attribute, Attribute> aliases = new LinkedHashMap<>();
- // tracker for compound aggs seen in a group
- Map<CompoundNumericAggregate, String> compoundAggMap = new LinkedHashMap<>();
+ // followed by actual aggregates
+ for (NamedExpression ne : a.aggregates()) {
- // followed by actual aggregates
- for (NamedExpression ne : a.aggregates()) {
+ // unwrap alias - it can be
+ // - an attribute (since we support aliases inside group-by)
+ // SELECT emp_no ... GROUP BY emp_no
+ // SELECT YEAR(hire_date) ... GROUP BY YEAR(hire_date)
- // unwrap alias - it can be
- // - an attribute (since we support aliases inside group-by)
- // SELECT emp_no ... GROUP BY emp_no
- // SELECT YEAR(hire_date) ... GROUP BY YEAR(hire_date)
+ // - an agg function (typically)
+ // SELECT COUNT(*), AVG(salary) ... GROUP BY salary;
- // - an agg function (typically)
- // SELECT COUNT(*), AVG(salary) ... GROUP BY salary;
+ // - a scalar function, which can be applied on an attribute or aggregate and can require one or multiple inputs
- // - a scalar function, which can be applied on an attribute or aggregate and can require one or multiple inputs
+ // SELECT SIN(emp_no) ... GROUP BY emp_no
+ // SELECT CAST(YEAR(hire_date)) ... GROUP BY YEAR(hire_date)
+ // SELECT CAST(AVG(salary)) ... GROUP BY salary
+ // SELECT AVG(salary) + SIN(MIN(salary)) ... GROUP BY salary
- // SELECT SIN(emp_no) ... GROUP BY emp_no
- // SELECT CAST(YEAR(hire_date)) ... GROUP BY YEAR(hire_date)
- // SELECT CAST(AVG(salary)) ... GROUP BY salary
- // SELECT AVG(salary) + SIN(MIN(salary)) ... GROUP BY salary
+ if (ne instanceof Alias || ne instanceof Function) {
+ Alias as = ne instanceof Alias ? (Alias) ne : null;
+ Expression child = as != null ? as.child() : ne;
- if (ne instanceof Alias || ne instanceof Function) {
- Alias as = ne instanceof Alias ? (Alias) ne : null;
- Expression child = as != null ? as.child() : ne;
+ // record aliases in case they are later referred in the tree
+ if (as != null && as.child() instanceof NamedExpression) {
+ aliases.put(as.toAttribute(), ((NamedExpression) as.child()).toAttribute());
+ }
- // record aliases in case they are later referred in the tree
- if (as != null && as.child() instanceof NamedExpression) {
- aliases.put(as.toAttribute(), ((NamedExpression) as.child()).toAttribute());
- }
+ //
+ // look first for scalar functions which might wrap the actual grouped target
+ // (e.g.
+ // CAST(field) GROUP BY field or
+ // ABS(YEAR(field)) GROUP BY YEAR(field) or
+ // ABS(AVG(salary)) ... GROUP BY salary
+ // )
+ if (child instanceof ScalarFunction) {
+ ScalarFunction f = (ScalarFunction) child;
+ Pipe proc = f.asPipe();
- //
- // look first for scalar functions which might wrap the actual grouped target
- // (e.g.
- // CAST(field) GROUP BY field or
- // ABS(YEAR(field)) GROUP BY YEAR(field) or
- // ABS(AVG(salary)) ... GROUP BY salary
- // )
- if (child instanceof ScalarFunction) {
- ScalarFunction f = (ScalarFunction) child;
- Pipe proc = f.asPipe();
+ final AtomicReference<QueryContainer> qC = new AtomicReference<>(queryC);
- final AtomicReference<QueryContainer> qC = new AtomicReference<>(queryC);
-
- proc = proc.transformUp(p -> {
- // bail out if the def is resolved
- if (p.resolved()) {
- return p;
- }
-
- // get the backing expression and check if it belongs to a agg group or whether it's
- // an expression in the first place
- Expression exp = p.expression();
- GroupByKey matchingGroup = null;
- if (groupingContext != null) {
- // is there a group (aggregation) for this expression ?
- matchingGroup = groupingContext.groupFor(exp);
- }
- else {
- // a scalar function can be used only if has already been mentioned for grouping
- // (otherwise it is the opposite of grouping)
- if (exp instanceof ScalarFunction) {
- throw new FoldingException(exp, "Scalar function " +exp.toString()
- + " can be used only if included already in grouping");
- }
- }
-
- // found match for expression; if it's an attribute or scalar, end the processing chain with
- // the reference to the backing agg
- if (matchingGroup != null) {
- if (exp instanceof Attribute || exp instanceof ScalarFunction || exp instanceof GroupingFunction) {
- Processor action = null;
- boolean isDateBased = exp.dataType().isDateBased();
- /*
- * special handling of dates since aggs return the typed Date object which needs
- * extraction instead of handling this in the scroller, the folder handles this
- * as it already got access to the extraction action
- */
- if (exp instanceof DateTimeHistogramFunction) {
- action = ((UnaryPipe) p).action();
- isDateBased = true;
- }
- return new AggPathInput(exp.source(), exp,
- new GroupByRef(matchingGroup.id(), null, isDateBased), action);
- }
- }
- // or found an aggregate expression (which has to work on an attribute used for grouping)
- // (can happen when dealing with a root group)
- if (Functions.isAggregate(exp)) {
- Tuple<QueryContainer, AggPathInput> withFunction = addAggFunction(matchingGroup,
- (AggregateFunction) exp, compoundAggMap, qC.get());
- qC.set(withFunction.v1());
- return withFunction.v2();
- }
- // not an aggregate and no matching - go to a higher node (likely a function YEAR(birth_date))
+ proc = proc.transformUp(p -> {
+ // bail out if the def is resolved
+ if (p.resolved()) {
return p;
- });
-
- if (!proc.resolved()) {
- throw new FoldingException(child, "Cannot find grouping for '{}'", Expressions.name(child));
}
- // add the computed column
- queryC = qC.get().addColumn(new ComputedRef(proc), f.toAttribute());
-
- // TODO: is this needed?
- // redirect the alias to the scalar group id (changing the id altogether doesn't work it is
- // already used in the aggpath)
- //aliases.put(as.toAttribute(), sf.toAttribute());
- }
- // apply the same logic above (for function inputs) to non-scalar functions with small variations:
- // instead of adding things as input, add them as full blown column
- else {
+ // get the backing expression and check if it belongs to a agg group or whether it's
+ // an expression in the first place
+ Expression exp = p.expression();
GroupByKey matchingGroup = null;
if (groupingContext != null) {
// is there a group (aggregation) for this expression ?
- matchingGroup = groupingContext.groupFor(child);
+ matchingGroup = groupingContext.groupFor(exp);
+ } else {
+ // a scalar function can be used only if has already been mentioned for grouping
+ // (otherwise it is the opposite of grouping)
+ if (exp instanceof ScalarFunction) {
+ throw new FoldingException(exp,
+ "Scalar function " + exp.toString() + " can be used only if included already in grouping");
+ }
}
- // attributes can only refer to declared groups
- if (child instanceof Attribute) {
- Check.notNull(matchingGroup, "Cannot find group [{}]", Expressions.name(child));
- queryC = queryC.addColumn(
- new GroupByRef(matchingGroup.id(), null, child.dataType().isDateBased()), ((Attribute) child));
+
+ // found match for expression; if it's an attribute or scalar, end the processing chain with
+ // the reference to the backing agg
+ if (matchingGroup != null) {
+ if (exp instanceof Attribute || exp instanceof ScalarFunction || exp instanceof GroupingFunction) {
+ Processor action = null;
+ boolean isDateBased = exp.dataType().isDateBased();
+ /*
+ * special handling of dates since aggs return the typed Date object which needs
+ * extraction instead of handling this in the scroller, the folder handles this
+ * as it already got access to the extraction action
+ */
+ if (exp instanceof DateTimeHistogramFunction) {
+ action = ((UnaryPipe) p).action();
+ isDateBased = true;
+ }
+ return new AggPathInput(exp.source(), exp, new GroupByRef(matchingGroup.id(), null, isDateBased),
+ action);
+ }
}
- // handle histogram
- else if (child instanceof GroupingFunction) {
- queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, child.dataType().isDateBased()),
- ((GroupingFunction) child).toAttribute());
+ // or found an aggregate expression (which has to work on an attribute used for grouping)
+ // (can happen when dealing with a root group)
+ if (Functions.isAggregate(exp)) {
+ Tuple<QueryContainer, AggPathInput> withFunction = addAggFunction(matchingGroup, (AggregateFunction) exp,
+ compoundAggMap, qC.get());
+ qC.set(withFunction.v1());
+ return withFunction.v2();
}
+ // not an aggregate and no matching - go to a higher node (likely a function YEAR(birth_date))
+ return p;
+ });
+
+ if (!proc.resolved()) {
+ throw new FoldingException(child, "Cannot find grouping for '{}'", Expressions.name(child));
+ }
+
+ // add the computed column
+ queryC = qC.get().addColumn(new ComputedRef(proc), f.toAttribute());
+
+ // TODO: is this needed?
+ // redirect the alias to the scalar group id (changing the id altogether doesn't work it is
+ // already used in the aggpath)
+ //aliases.put(as.toAttribute(), sf.toAttribute());
+ }
+ // apply the same logic above (for function inputs) to non-scalar functions with small variations:
+ // instead of adding things as input, add them as full blown column
+ else {
+ GroupByKey matchingGroup = null;
+ if (groupingContext != null) {
+ // is there a group (aggregation) for this expression ?
+ matchingGroup = groupingContext.groupFor(child);
+ }
+ // attributes can only refer to declared groups
+ if (child instanceof Attribute) {
+ Check.notNull(matchingGroup, "Cannot find group [{}]", Expressions.name(child));
+ queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, child.dataType().isDateBased()),
+ ((Attribute) child));
+ }
+ // handle histogram
+ else if (child instanceof GroupingFunction) {
+ queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, child.dataType().isDateBased()),
+ ((GroupingFunction) child).toAttribute());
+ }
else if (child.foldable()) {
queryC = queryC.addColumn(ne.toAttribute());
}
- // fallback to regular agg functions
- else {
- // the only thing left is agg function
- Check.isTrue(Functions.isAggregate(child),
- "Expected aggregate function inside alias; got [{}]", child.nodeString());
- AggregateFunction af = (AggregateFunction) child;
- Tuple<QueryContainer, AggPathInput> withAgg = addAggFunction(matchingGroup, af, compoundAggMap, queryC);
- // make sure to add the inner id (to handle compound aggs)
- queryC = withAgg.v1().addColumn(withAgg.v2().context(), af.toAttribute());
- }
+ // fallback to regular agg functions
+ else {
+ // the only thing left is agg function
+ Check.isTrue(Functions.isAggregate(child), "Expected aggregate function inside alias; got [{}]",
+ child.nodeString());
+ AggregateFunction af = (AggregateFunction) child;
+ Tuple<QueryContainer, AggPathInput> withAgg = addAggFunction(matchingGroup, af, compoundAggMap, queryC);
+ // make sure to add the inner id (to handle compound aggs)
+ queryC = withAgg.v1().addColumn(withAgg.v2().context(), af.toAttribute());
}
+ }
// not an Alias or Function means it's an Attribute so apply the same logic as above
- } else {
- GroupByKey matchingGroup = null;
- if (groupingContext != null) {
- matchingGroup = groupingContext.groupFor(ne);
- Check.notNull(matchingGroup, "Cannot find group [{}]", Expressions.name(ne));
+ } else {
+ GroupByKey matchingGroup = null;
+ if (groupingContext != null) {
+ matchingGroup = groupingContext.groupFor(ne);
+ Check.notNull(matchingGroup, "Cannot find group [{}]", Expressions.name(ne));
- queryC = queryC.addColumn(
- new GroupByRef(matchingGroup.id(), null, ne.dataType().isDateBased()), ne.toAttribute());
- }
+ queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, ne.dataType().isDateBased()), ne.toAttribute());
+ }
else if (ne.foldable()) {
queryC = queryC.addColumn(ne.toAttribute());
}
}
}
- if (!aliases.isEmpty()) {
- Map<Attribute, Attribute> newAliases = new LinkedHashMap<>(queryC.aliases());
- newAliases.putAll(aliases);
- queryC = queryC.withAliases(new AttributeMap<>(newAliases));
- }
- return new EsQueryExec(exec.source(), exec.index(), a.output(), queryC);
+ if (!aliases.isEmpty()) {
+ Map<Attribute, Attribute> newAliases = new LinkedHashMap<>(queryC.aliases());
+ newAliases.putAll(aliases);
+ queryC = queryC.withAliases(new AttributeMap<>(newAliases));
}
- return a;
+ return new EsQueryExec(exec.source(), exec.index(), a.output(), queryC);
}
- private Tuple<QueryContainer, AggPathInput> addAggFunction(GroupByKey groupingAgg, AggregateFunction f,
+ private static Tuple<QueryContainer, AggPathInput> addAggFunction(GroupByKey groupingAgg, AggregateFunction f,
Map<CompoundNumericAggregate, String> compoundAggMap, QueryContainer queryC) {
String functionId = f.functionId();
// handle count as a special case agg
@@ -551,6 +563,52 @@ class QueryFolder extends RuleExecutor {
}
}
+
+ private static class FoldPivot extends FoldingRule<PivotExec> {
+
+ @Override
+ protected PhysicalPlan rule(PivotExec plan) {
+ if (plan.child() instanceof EsQueryExec) {
+ EsQueryExec exec = (EsQueryExec) plan.child();
+ Pivot p = plan.pivot();
+ EsQueryExec fold = FoldAggregate
+ .fold(new AggregateExec(plan.source(), exec,
+ new ArrayList<>(p.groupingSet()), combine(p.groupingSet(), p.aggregates())), exec);
+
+ // replace the aggregate extractors with pivot specific extractors
+ // these require a reference to the pivoting column in order to compare the value
+ // due to the Pivot structure - the column is the last entry in the grouping set
+ QueryContainer query = fold.queryContainer();
+
+ List<Tuple<FieldExtraction, ExpressionId>> fields = new ArrayList<>(query.fields());
+ int startingIndex = fields.size() - p.aggregates().size() - 1;
+ // pivot grouping
+ Tuple<FieldExtraction, ExpressionId> groupTuple = fields.remove(startingIndex);
+ AttributeSet valuesOutput = plan.pivot().valuesOutput();
+
+ for (int i = startingIndex; i < fields.size(); i++) {
+ Tuple<FieldExtraction, ExpressionId> tuple = fields.remove(i);
+ for (Attribute attribute : valuesOutput) {
+ fields.add(new Tuple<>(new PivotColumnRef(groupTuple.v1(), tuple.v1(), attribute.fold()), attribute.id()));
+ }
+ i += valuesOutput.size();
+ }
+
+ return fold.with(new QueryContainer(query.query(), query.aggs(),
+ fields,
+ query.aliases(),
+ query.pseudoFunctions(),
+ query.scalarFunctions(),
+ query.sort(),
+ query.limit(),
+ query.shouldTrackHits(),
+ query.shouldIncludeFrozen(),
+ valuesOutput.size()));
+ }
+ return plan;
+ }
+ }
+
//
// local
//
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Verifier.java
index 1e527657ae0..fe4ec05ab33 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Verifier.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Verifier.java
@@ -5,7 +5,9 @@
*/
package org.elasticsearch.xpack.sql.planner;
+import org.elasticsearch.xpack.sql.expression.function.aggregate.InnerAggregate;
import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan;
+import org.elasticsearch.xpack.sql.plan.physical.PivotExec;
import org.elasticsearch.xpack.sql.plan.physical.Unexecutable;
import org.elasticsearch.xpack.sql.plan.physical.UnplannedExec;
import org.elasticsearch.xpack.sql.tree.Node;
@@ -14,6 +16,8 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
+import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
+
abstract class Verifier {
static class Failure {
@@ -53,8 +57,8 @@ abstract class Verifier {
}
}
- private static Failure fail(Node<?> source, String message) {
- return new Failure(source, message);
+ private static Failure fail(Node<?> source, String message, Object... args) {
+ return new Failure(source, format(null, message, args));
}
static List<Failure> verifyMappingPlan(PhysicalPlan plan) {
@@ -70,10 +74,22 @@ abstract class Verifier {
}
});
});
+ // verify Pivot
+ checkInnerAggsPivot(plan, failures);
return failures;
}
+ private static void checkInnerAggsPivot(PhysicalPlan plan, List<Failure> failures) {
+ plan.forEachDown(p -> {
+ p.pivot().aggregates().forEach(agg -> agg.forEachDown(e -> {
+ if (e instanceof InnerAggregate) {
+ failures.add(fail(e, "Aggregation [{}] not supported (yet) by PIVOT", e.sourceText()));
+ }
+ }));
+ }, PivotExec.class);
+ }
+
static List<Failure> verifyExecutingPlan(PhysicalPlan plan) {
List<Failure> failures = new ArrayList<>();
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/PivotColumnRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/PivotColumnRef.java
new file mode 100644
index 00000000000..60ee3b7409c
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/PivotColumnRef.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.sql.querydsl.container;
+
+import org.elasticsearch.xpack.sql.execution.search.AggRef;
+import org.elasticsearch.xpack.sql.execution.search.FieldExtraction;
+
+public class PivotColumnRef extends AggRef {
+
+ private final FieldExtraction agg;
+ private final FieldExtraction pivot;
+ private final Object value;
+
+ public PivotColumnRef(FieldExtraction pivot, FieldExtraction agg, Object value) {
+ this.pivot = pivot;
+ this.agg = agg;
+ // due to the way Elasticsearch aggs work
+ // promote the object to expect types so that the comparison works
+ this.value = esAggType(value);
+ }
+
+ private static Object esAggType(Object value) {
+ if (value instanceof Number) {
+ Number n = (Number) value;
+ if (value instanceof Double) {
+ return value;
+ }
+ if (value instanceof Float) {
+ return Double.valueOf(n.doubleValue());
+ }
+ return Long.valueOf(n.longValue());
+ }
+ return value;
+ }
+
+ public FieldExtraction pivot() {
+ return pivot;
+ }
+
+ public FieldExtraction agg() {
+ return agg;
+ }
+
+ public Object value() {
+ return value;
+ }
+}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java
index 5ff560f4baa..c75a2008202 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java
@@ -83,13 +83,15 @@ public class QueryContainer {
private final int limit;
private final boolean trackHits;
private final boolean includeFrozen;
+ // used when pivoting for retrieving at least one pivot row
+ private final int minPageSize;
// computed
private Boolean aggsOnly;
private Boolean customSort;
public QueryContainer() {
- this(null, null, null, null, null, null, null, -1, false, false);
+ this(null, null, null, null, null, null, null, -1, false, false, -1);
}
public QueryContainer(Query query,
@@ -102,7 +104,8 @@ public class QueryContainer {
Set<Sort> sort,
int limit,
boolean trackHits,
- boolean includeFrozen) {
+ boolean includeFrozen,
+ int minPageSize) {
this.query = query;
this.aggs = aggs == null ? Aggs.EMPTY : aggs;
this.fields = fields == null || fields.isEmpty() ? emptyList() : fields;
@@ -113,6 +116,7 @@ public class QueryContainer {
this.limit = limit;
this.trackHits = trackHits;
this.includeFrozen = includeFrozen;
+ this.minPageSize = minPageSize;
}
/**
@@ -247,49 +251,62 @@ public class QueryContainer {
return includeFrozen;
}
+ public int minPageSize() {
+ return minPageSize;
+ }
+
//
// copy methods
//
public QueryContainer with(Query q) {
- return new QueryContainer(q, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen);
+ return new QueryContainer(q, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen,
+ minPageSize);
+ }
+
+ public QueryContainer withFields(List<Tuple<FieldExtraction, ExpressionId>> f) {
+ return new QueryContainer(query, aggs, f, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen,
+ minPageSize);
}
public QueryContainer withAliases(AttributeMap<Attribute> a) {
- return new QueryContainer(query, aggs, fields, a, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen);
+ return new QueryContainer(query, aggs, fields, a, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen,
+ minPageSize);
}
public QueryContainer withPseudoFunctions(Map<String, GroupByKey> p) {
- return new QueryContainer(query, aggs, fields, aliases, p, scalarFunctions, sort, limit, trackHits, includeFrozen);
+ return new QueryContainer(query, aggs, fields, aliases, p, scalarFunctions, sort, limit, trackHits, includeFrozen, minPageSize);
}
public QueryContainer with(Aggs a) {
- return new QueryContainer(query, a, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen);
+ return new QueryContainer(query, a, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen,
+ minPageSize);
}
public QueryContainer withLimit(int l) {
return l == limit ? this : new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, l, trackHits,
- includeFrozen);
+ includeFrozen, minPageSize);
}
public QueryContainer withTrackHits() {
return trackHits ? this : new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, true,
- includeFrozen);
+ includeFrozen, minPageSize);
}
public QueryContainer withFrozen() {
return includeFrozen ? this : new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit,
- trackHits, true);
+ trackHits, true, minPageSize);
}
public QueryContainer withScalarProcessors(AttributeMap<Pipe> procs) {
- return new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, procs, sort, limit, trackHits, includeFrozen);
+ return new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, procs, sort, limit, trackHits, includeFrozen, minPageSize);
}
public QueryContainer addSort(Sort sortable) {
Set<Sort> sort = new LinkedHashSet<>(this.sort);
sort.add(sortable);
- return new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen);
+ return new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen,
+ minPageSize);
}
private String aliasName(Attribute attr) {
@@ -344,7 +361,8 @@ public class QueryContainer {
false, attr.parent().name());
return new Tuple<>(
- new QueryContainer(q, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen),
+ new QueryContainer(q, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen,
+ minPageSize),
nestedFieldRef);
}
@@ -447,7 +465,7 @@ public class QueryContainer {
ExpressionId id = attr instanceof AggregateFunctionAttribute ? ((AggregateFunctionAttribute) attr).innerId() : attr.id();
return new QueryContainer(query, aggs, combine(fields, new Tuple<>(ref, id)), aliases, pseudoFunctions,
scalarFunctions,
- sort, limit, trackHits, includeFrozen);
+ sort, limit, trackHits, includeFrozen, minPageSize);
}
public AttributeMap<Pipe> scalarFunctions() {
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java
index 8f2c3735602..6f1ee47f4da 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java
@@ -12,7 +12,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.common.io.SqlStreamInput;
import org.elasticsearch.xpack.sql.common.io.SqlStreamOutput;
-import org.elasticsearch.xpack.sql.execution.search.CompositeAggregationCursor;
+import org.elasticsearch.xpack.sql.execution.search.CompositeAggCursor;
+import org.elasticsearch.xpack.sql.execution.search.PivotCursor;
import org.elasticsearch.xpack.sql.execution.search.ScrollCursor;
import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractors;
import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractors;
@@ -45,7 +46,8 @@ public final class Cursors {
// cursors
entries.add(new NamedWriteableRegistry.Entry(Cursor.class, EmptyCursor.NAME, in -> Cursor.EMPTY));
entries.add(new NamedWriteableRegistry.Entry(Cursor.class, ScrollCursor.NAME, ScrollCursor::new));
- entries.add(new NamedWriteableRegistry.Entry(Cursor.class, CompositeAggregationCursor.NAME, CompositeAggregationCursor::new));
+ entries.add(new NamedWriteableRegistry.Entry(Cursor.class, CompositeAggCursor.NAME, CompositeAggCursor::new));
+ entries.add(new NamedWriteableRegistry.Entry(Cursor.class, PivotCursor.NAME, PivotCursor::new));
entries.add(new NamedWriteableRegistry.Entry(Cursor.class, TextFormatterCursor.NAME, TextFormatterCursor::new));
entries.add(new NamedWriteableRegistry.Entry(Cursor.class, ListCursor.NAME, ListCursor::new));
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListCursor.java
index 7e20abc31de..a07b7adfe37 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListCursor.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListCursor.java
@@ -21,7 +21,7 @@ import static java.util.Collections.emptyList;
public class ListCursor implements Cursor {
- public static final String NAME = "p";
+ public static final String NAME = "l";
private final List<List<?>> data;
private final int columnCount;
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java
index 8844301006f..b4068932bf0 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java
@@ -844,4 +844,57 @@ public class VerifierErrorMessagesTests extends ESTestCase {
accept("SELECT ST_X(shape) FROM test");
}
-}
+ //
+ // Pivot verifications
+ //
+ public void testPivotNonExactColumn() {
+ assertEquals("1:72: Field [text] of data type [text] cannot be used for grouping;"
+ + " No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead",
+ error("SELECT * FROM (SELECT int, text, keyword FROM test) " + "PIVOT(AVG(int) FOR text IN ('bla'))"));
+ }
+
+ public void testPivotColumnUsedInsteadOfAgg() {
+ assertEquals("1:59: No aggregate function found in PIVOT at [int]",
+ error("SELECT * FROM (SELECT int, keyword, bool FROM test) " + "PIVOT(int FOR keyword IN ('bla'))"));
+ }
+
+ public void testPivotScalarUsedInsteadOfAgg() {
+ assertEquals("1:59: No aggregate function found in PIVOT at [ROUND(int)]",
+ error("SELECT * FROM (SELECT int, keyword, bool FROM test) " + "PIVOT(ROUND(int) FOR keyword IN ('bla'))"));
+ }
+
+ public void testPivotScalarUsedAlongSideAgg() {
+ assertEquals("1:59: Non-aggregate function found in PIVOT at [AVG(int) + ROUND(int)]",
+ error("SELECT * FROM (SELECT int, keyword, bool FROM test) " + "PIVOT(AVG(int) + ROUND(int) FOR keyword IN ('bla'))"));
+ }
+
+ public void testPivotValueNotFoldable() {
+ assertEquals("1:91: Non-literal [bool] found inside PIVOT values",
+ error("SELECT * FROM (SELECT int, keyword, bool FROM test) " + "PIVOT(AVG(int) FOR keyword IN ('bla', bool))"));
+ }
+
+ public void testPivotWithFunctionInput() {
+ assertEquals("1:37: No functions allowed (yet); encountered [YEAR(date)]",
+ error("SELECT * FROM (SELECT int, keyword, YEAR(date) FROM test) " + "PIVOT(AVG(int) FOR keyword IN ('bla'))"));
+ }
+
+ public void testPivotWithFoldableFunctionInValues() {
+ assertEquals("1:85: Non-literal [UCASE('bla')] found inside PIVOT values",
+ error("SELECT * FROM (SELECT int, keyword, bool FROM test) " + "PIVOT(AVG(int) FOR keyword IN ( UCASE('bla') ))"));
+ }
+
+ public void testPivotWithNull() {
+ assertEquals("1:85: Null not allowed as a PIVOT value",
+ error("SELECT * FROM (SELECT int, keyword, bool FROM test) " + "PIVOT(AVG(int) FOR keyword IN ( null ))"));
+ }
+
+ public void testPivotValuesHaveDifferentTypeThanColumn() {
+ assertEquals("1:81: Literal ['bla'] of type [keyword] does not match type [boolean] of PIVOT column [bool]",
+ error("SELECT * FROM (SELECT int, keyword, bool FROM test) " + "PIVOT(AVG(int) FOR bool IN ('bla'))"));
+ }
+
+ public void testPivotValuesWithMultipleDifferencesThanColumn() {
+ assertEquals("1:81: Literal ['bla'] of type [keyword] does not match type [boolean] of PIVOT column [bool]",
+ error("SELECT * FROM (SELECT int, keyword, bool FROM test) " + "PIVOT(AVG(int) FOR bool IN ('bla', true))"));
+ }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java
index 4216db7cb70..195d11be434 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java
@@ -19,8 +19,8 @@ import java.util.BitSet;
import java.util.List;
import java.util.function.Supplier;
-public class CompositeAggregationCursorTests extends AbstractSqlWireSerializingTestCase<CompositeAggregationCursor> {
- public static CompositeAggregationCursor randomCompositeCursor() {
+public class CompositeAggregationCursorTests extends AbstractSqlWireSerializingTestCase<CompositeAggCursor> {
+ public static CompositeAggCursor randomCompositeCursor() {
int extractorsSize = between(1, 20);
ZoneId id = randomSafeZone();
List<BucketExtractor> extractors = new ArrayList<>(extractorsSize);
@@ -28,7 +28,7 @@ public class CompositeAggregationCursorTests extends AbstractSqlWireSerializingT
extractors.add(randomBucketExtractor(id));
}
- return new CompositeAggregationCursor(new byte[randomInt(256)], extractors, randomBitSet(extractorsSize),
+ return new CompositeAggCursor(new byte[randomInt(256)], extractors, randomBitSet(extractorsSize),
randomIntBetween(10, 1024), randomBoolean(), randomAlphaOfLength(5));
}
@@ -41,8 +41,8 @@ public class CompositeAggregationCursorTests extends AbstractSqlWireSerializingT
}
@Override
- protected CompositeAggregationCursor mutateInstance(CompositeAggregationCursor instance) throws IOException {
- return new CompositeAggregationCursor(instance.next(), instance.extractors(),
+ protected CompositeAggCursor mutateInstance(CompositeAggCursor instance) throws IOException {
+ return new CompositeAggCursor(instance.next(), instance.extractors(),
randomValueOtherThan(instance.mask(), () -> randomBitSet(instance.extractors().size())),
randomValueOtherThan(instance.limit(), () -> randomIntBetween(1, 512)),
!instance.includeFrozen(),
@@ -50,17 +50,17 @@ public class CompositeAggregationCursorTests extends AbstractSqlWireSerializingT
}
@Override
- protected CompositeAggregationCursor createTestInstance() {
+ protected CompositeAggCursor createTestInstance() {
return randomCompositeCursor();
}
@Override
- protected Reader<CompositeAggregationCursor> instanceReader() {
- return CompositeAggregationCursor::new;
+ protected Reader<CompositeAggCursor> instanceReader() {
+ return CompositeAggCursor::new;
}
@Override
- protected ZoneId instanceZoneId(CompositeAggregationCursor instance) {
+ protected ZoneId instanceZoneId(CompositeAggCursor instance) {
List<BucketExtractor> extractors = instance.extractors();
for (BucketExtractor bucketExtractor : extractors) {
ZoneId zoneId = MetricAggExtractorTests.extractZoneId(bucketExtractor);
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java
index 2d3b6cdee52..0238cfe8591 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java
@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.optimizer;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer.PruneSubqueryAliases;
+import org.elasticsearch.xpack.sql.analysis.index.EsIndex;
import org.elasticsearch.xpack.sql.expression.Alias;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.Expression.TypeResolution;
@@ -20,6 +21,7 @@ import org.elasticsearch.xpack.sql.expression.Order;
import org.elasticsearch.xpack.sql.expression.Order.OrderDirection;
import org.elasticsearch.xpack.sql.expression.function.Function;
import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction;
+import org.elasticsearch.xpack.sql.expression.function.aggregate.Avg;
import org.elasticsearch.xpack.sql.expression.function.aggregate.Count;
import org.elasticsearch.xpack.sql.expression.function.aggregate.First;
import org.elasticsearch.xpack.sql.expression.function.aggregate.Last;
@@ -87,14 +89,17 @@ import org.elasticsearch.xpack.sql.optimizer.Optimizer.PropagateEquals;
import org.elasticsearch.xpack.sql.optimizer.Optimizer.PruneDuplicateFunctions;
import org.elasticsearch.xpack.sql.optimizer.Optimizer.ReplaceFoldableAttributes;
import org.elasticsearch.xpack.sql.optimizer.Optimizer.ReplaceMinMaxWithTopHits;
+import org.elasticsearch.xpack.sql.optimizer.Optimizer.RewritePivot;
import org.elasticsearch.xpack.sql.optimizer.Optimizer.SimplifyCase;
import org.elasticsearch.xpack.sql.optimizer.Optimizer.SimplifyConditional;
import org.elasticsearch.xpack.sql.optimizer.Optimizer.SortAggregateOnOrderBy;
import org.elasticsearch.xpack.sql.plan.logical.Aggregate;
+import org.elasticsearch.xpack.sql.plan.logical.EsRelation;
import org.elasticsearch.xpack.sql.plan.logical.Filter;
import org.elasticsearch.xpack.sql.plan.logical.LocalRelation;
import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.plan.logical.OrderBy;
+import org.elasticsearch.xpack.sql.plan.logical.Pivot;
import org.elasticsearch.xpack.sql.plan.logical.Project;
import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias;
import org.elasticsearch.xpack.sql.plan.logical.command.ShowTables;
@@ -1498,4 +1503,23 @@ public class OptimizerTests extends ESTestCase {
assertEquals(firstAlias, groupings.get(0));
assertEquals(secondAlias, groupings.get(1));
}
-}
+
+ public void testPivotRewrite() {
+ FieldAttribute column = getFieldAttribute("pivot");
+ FieldAttribute number = getFieldAttribute("number");
+ List<NamedExpression> values = Arrays.asList(new Alias(EMPTY, "ONE", L(1)), new Alias(EMPTY, "TWO", L(2)));
+ List<NamedExpression> aggs = Arrays.asList(new Avg(EMPTY, number));
+ Pivot pivot = new Pivot(EMPTY, new EsRelation(EMPTY, new EsIndex("table", emptyMap()), false), column, values, aggs);
+
+ LogicalPlan result = new RewritePivot().apply(pivot);
+ assertEquals(Pivot.class, result.getClass());
+ Pivot pv = (Pivot) result;
+ assertEquals(pv.aggregates(), aggs);
+ assertEquals(Filter.class, pv.child().getClass());
+ Filter f = (Filter) pv.child();
+ assertEquals(In.class, f.condition().getClass());
+ In in = (In) f.condition();
+ assertEquals(column, in.value());
+ assertEquals(Arrays.asList(L(1), L(2)), in.list());
+ }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/PostOptimizerVerifierTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/PostOptimizerVerifierTests.java
new file mode 100644
index 00000000000..4e89fdb2154
--- /dev/null
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/PostOptimizerVerifierTests.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.sql.planner;
+
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.sql.TestUtils;
+import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer;
+import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier;
+import org.elasticsearch.xpack.sql.analysis.index.EsIndex;
+import org.elasticsearch.xpack.sql.analysis.index.IndexResolution;
+import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry;
+import org.elasticsearch.xpack.sql.optimizer.Optimizer;
+import org.elasticsearch.xpack.sql.parser.SqlParser;
+import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan;
+import org.elasticsearch.xpack.sql.stats.Metrics;
+import org.elasticsearch.xpack.sql.type.EsField;
+import org.elasticsearch.xpack.sql.type.TypesTests;
+import org.junit.After;
+import org.junit.Before;
+
+import java.util.Map;
+
+public class PostOptimizerVerifierTests extends ESTestCase {
+
+ private SqlParser parser;
+ private Analyzer analyzer;
+ private Optimizer optimizer;
+ private Planner planner;
+ private IndexResolution indexResolution;
+
+ @Before
+ public void init() {
+ parser = new SqlParser();
+
+ Map<String, EsField> mapping = TypesTests.loadMapping("mapping-multi-field-variation.json");
+ EsIndex test = new EsIndex("test", mapping);
+ indexResolution = IndexResolution.valid(test);
+ analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), indexResolution, new Verifier(new Metrics()));
+ optimizer = new Optimizer();
+ planner = new Planner();
+ }
+
+ @After
+ public void destroy() {
+ parser = null;
+ analyzer = null;
+ }
+
+ private PhysicalPlan plan(String sql) {
+ return planner.plan(optimizer.optimize(analyzer.analyze(parser.createStatement(sql), true)), true);
+ }
+
+ private String error(String sql) {
+ return error(indexResolution, sql);
+ }
+
+ private String error(IndexResolution getIndexResult, String sql) {
+ PlanningException e = expectThrows(PlanningException.class, () -> plan(sql));
+ assertTrue(e.getMessage().startsWith("Found "));
+ String header = "Found 1 problem(s)\nline ";
+ return e.getMessage().substring(header.length());
+ }
+
+ public void testPivotInnerAgg() {
+ assertEquals("1:59: Aggregation [SUM_OF_SQUARES(int)] not supported (yet) by PIVOT",
+ error("SELECT * FROM (SELECT int, keyword, bool FROM test) " + "PIVOT(SUM_OF_SQUARES(int) FOR keyword IN ('bla'))"));
+ }
+
+ public void testPivotNestedInnerAgg() {
+ assertEquals("1:65: Aggregation [SUM_OF_SQUARES(int)] not supported (yet) by PIVOT",
+ error("SELECT * FROM (SELECT int, keyword, bool FROM test) " + "PIVOT(ROUND(SUM_OF_SQUARES(int)) FOR keyword IN ('bla'))"));
+ }
+}
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java
index c94da662151..11f6cc949de 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java
@@ -11,6 +11,7 @@ import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer;
import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier;
import org.elasticsearch.xpack.sql.analysis.index.EsIndex;
import org.elasticsearch.xpack.sql.analysis.index.IndexResolution;
+import org.elasticsearch.xpack.sql.expression.Expressions;
import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry;
import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute;
import org.elasticsearch.xpack.sql.optimizer.Optimizer;
@@ -26,8 +27,10 @@ import org.elasticsearch.xpack.sql.type.TypesTests;
import org.junit.AfterClass;
import org.junit.BeforeClass;
+import java.util.Arrays;
import java.util.Map;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.startsWith;
@@ -397,4 +400,18 @@ public class QueryFolderTests extends ESTestCase {
AggregateFunctionAttribute afa = (AggregateFunctionAttribute) ee.output().get(0);
assertThat(afa.propertyPath(), endsWith("[3.0]"));
}
+
+ public void testFoldingOfPivot() {
+ PhysicalPlan p = plan("SELECT * FROM (SELECT int, keyword, bool FROM test) PIVOT(AVG(int) FOR keyword IN ('A', 'B'))");
+ assertEquals(EsQueryExec.class, p.getClass());
+ EsQueryExec ee = (EsQueryExec) p;
+ assertEquals(3, ee.output().size());
+ assertEquals(Arrays.asList("bool", "'A'", "'B'"), Expressions.names(ee.output()));
+ String q = ee.toString().replaceAll("\\s+", "");
+ assertThat(q, containsString("\"query\":{\"terms\":{\"keyword\":[\"A\",\"B\"]"));
+ String a = ee.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", "");
+ assertThat(a, containsString("\"terms\":{\"field\":\"bool\""));
+ assertThat(a, containsString("\"terms\":{\"field\":\"keyword\""));
+ assertThat(a, containsString("{\"avg\":{\"field\":\"int\"}"));
+ }
}
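
For orientation, the nested aggregation tree that testFoldingOfPivot asserts on corresponds roughly to the sketch below, written with the stock AggregationBuilders helpers; the builder names (groupByBool, groupByKeyword, avgInt) are invented here, since the planner generates its own.

    import org.elasticsearch.search.aggregations.AggregationBuilders;
    import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

    class PivotAggSketch {
        // terms on "bool" (the remaining grouping key) wrapping terms on
        // "keyword" (the pivot column) wrapping avg on "int" (the pivoted agg).
        static TermsAggregationBuilder tree() {
            return AggregationBuilders.terms("groupByBool").field("bool")
                .subAggregation(AggregationBuilders.terms("groupByKeyword").field("keyword")
                    .subAggregation(AggregationBuilders.avg("avgInt").field("int")));
        }
    }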
From 08f28e642b9aa24a2c9be973782807f8ec47f4ce Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Mon, 23 Sep 2019 19:37:15 +0200
Subject: [PATCH 11/94] Replace SearchContext with QueryShardContext in query
builder tests (#46978)
This commit replaces the SearchContext used in AbstractQueryTestCase with
a QueryShardContext in order to reduce the visibility of search contexts.
Relates #46523
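
For test authors the migration is mechanical: subclasses stop overriding the
SearchContext-based assertion hook and override the QueryShardContext-based one
instead. A minimal sketch, with MyQueryBuilder as a placeholder builder type:

    // Before: protected void doAssertLuceneQuery(MyQueryBuilder queryBuilder,
    //     Query query, SearchContext context) throws IOException { ... }
    // and assertions reached the shard context via context.getQueryShardContext().

    @Override
    protected void doAssertLuceneQuery(MyQueryBuilder queryBuilder, Query query,
                                       QueryShardContext context) throws IOException {
        // The shard context is now handed in directly.
        assertThat(query, instanceOf(MatchAllDocsQuery.class));
    }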
---
.../query/RankFeatureQueryBuilderTests.java | 3 +--
.../join/query/HasChildQueryBuilderTests.java | 19 +++++--------------
.../query/HasParentQueryBuilderTests.java | 19 +++++--------------
.../join/query/ParentIdQueryBuilderTests.java | 4 ++--
.../PercolateQueryBuilderTests.java | 3 +--
.../index/query/InnerHitContextBuilder.java | 4 ++++
...angeFieldQueryStringQueryBuilderTests.java | 3 +--
.../index/query/BoolQueryBuilderTests.java | 4 +---
.../query/BoostingQueryBuilderTests.java | 7 +++----
.../query/CommonTermsQueryBuilderTests.java | 3 +--
.../query/ConstantScoreQueryBuilderTests.java | 5 ++---
.../index/query/DisMaxQueryBuilderTests.java | 5 ++---
.../DistanceFeatureQueryBuilderTests.java | 9 +++++----
.../index/query/ExistsQueryBuilderTests.java | 19 +++++++++----------
.../FieldMaskingSpanQueryBuilderTests.java | 7 ++++---
.../index/query/FuzzyQueryBuilderTests.java | 3 +--
.../GeoBoundingBoxQueryBuilderTests.java | 4 +---
.../query/GeoDistanceQueryBuilderTests.java | 3 +--
.../query/GeoPolygonQueryBuilderTests.java | 3 +--
.../query/GeoShapeQueryBuilderTests.java | 3 +--
.../index/query/IdsQueryBuilderTests.java | 7 +++----
.../query/IntervalQueryBuilderTests.java | 3 +--
.../query/MatchAllQueryBuilderTests.java | 3 +--
.../MatchBoolPrefixQueryBuilderTests.java | 5 +++--
.../query/MatchNoneQueryBuilderTests.java | 3 +--
.../MatchPhrasePrefixQueryBuilderTests.java | 3 +--
.../query/MatchPhraseQueryBuilderTests.java | 4 +---
.../index/query/MatchQueryBuilderTests.java | 4 +---
.../query/MoreLikeThisQueryBuilderTests.java | 3 +--
.../query/MultiMatchQueryBuilderTests.java | 3 +--
.../index/query/NestedQueryBuilderTests.java | 19 ++++++-------------
.../index/query/PrefixQueryBuilderTests.java | 3 +--
.../query/QueryStringQueryBuilderTests.java | 3 +--
.../index/query/RangeQueryBuilderTests.java | 17 ++++++++---------
.../index/query/RegexpQueryBuilderTests.java | 3 +--
.../index/query/ScriptQueryBuilderTests.java | 3 +--
.../query/ScriptScoreQueryBuilderTests.java | 3 +--
.../query/SimpleQueryStringBuilderTests.java | 3 +--
.../SpanContainingQueryBuilderTests.java | 3 +--
.../query/SpanFirstQueryBuilderTests.java | 3 +--
.../index/query/SpanGapQueryBuilderTests.java | 7 +++----
.../query/SpanMultiTermQueryBuilderTests.java | 5 ++---
.../query/SpanNearQueryBuilderTests.java | 7 +++----
.../index/query/SpanNotQueryBuilderTests.java | 7 +++----
.../index/query/SpanOrQueryBuilderTests.java | 5 ++---
.../query/SpanTermQueryBuilderTests.java | 5 ++---
.../query/SpanWithinQueryBuilderTests.java | 3 +--
.../index/query/TermQueryBuilderTests.java | 5 ++---
.../index/query/TermsQueryBuilderTests.java | 3 +--
.../query/TermsSetQueryBuilderTests.java | 3 +--
.../index/query/TypeQueryBuilderTests.java | 3 +--
.../query/WildcardQueryBuilderTests.java | 3 +--
.../index/query/WrapperQueryBuilderTests.java | 5 ++---
.../FunctionScoreQueryBuilderTests.java | 3 +--
.../test/AbstractBuilderTestCase.java | 18 ------------------
.../test/AbstractQueryTestCase.java | 16 +++++++---------
.../PinnedQueryBuilderTests.java | 4 ++--
.../index/query/ShapeQueryBuilderTests.java | 4 ++--
58 files changed, 123 insertions(+), 211 deletions(-)
diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/RankFeatureQueryBuilderTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/RankFeatureQueryBuilderTests.java
index aea37e2a8ee..0cd048184b9 100644
--- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/RankFeatureQueryBuilderTests.java
+++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/RankFeatureQueryBuilderTests.java
@@ -29,7 +29,6 @@ import org.elasticsearch.index.mapper.MapperExtrasPlugin;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.RankFeatureQueryBuilder.ScoreFunction;
import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;
@@ -91,7 +90,7 @@ public class RankFeatureQueryBuilderTests extends AbstractQueryTestCase<RankFeatureQueryBuilder> {
 @Override
- protected void doAssertLuceneQuery(RankFeatureQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException {
+ protected void doAssertLuceneQuery(RankFeatureQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {
 Class<?> expectedClass = FeatureField.newSaturationQuery("", "", 1, 1).getClass();
assertThat(query, either(instanceOf(MatchNoDocsQuery.class)).or(instanceOf(expectedClass)));
}
diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java
index 2e682eda733..f84fbb214df 100644
--- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java
+++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java
@@ -52,8 +52,6 @@ import org.elasticsearch.index.query.WrapperQueryBuilder;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.join.ParentJoinPlugin;
import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.search.fetch.subphase.InnerHitsContext;
-import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.AbstractQueryTestCase;
@@ -171,7 +169,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase innerHitBuilders = new HashMap<>();
InnerHitContextBuilder.extractInnerHits(queryBuilder, innerHitBuilders);
- final InnerHitsContext innerHitsContext = new InnerHitsContext();
- for (InnerHitContextBuilder builder : innerHitBuilders.values()) {
- builder.build(searchContext, innerHitsContext);
- }
- assertEquals(1, innerHitsContext.getInnerHits().size());
- assertTrue(innerHitsContext.getInnerHits().containsKey(queryBuilder.innerHit().getName()));
- InnerHitsContext.InnerHitSubContext innerHits = innerHitsContext.getInnerHits().get(queryBuilder.innerHit().getName());
- assertEquals(innerHits.size(), queryBuilder.innerHit().getSize());
- assertEquals(innerHits.sort().sort.getSort().length, 1);
- assertEquals(innerHits.sort().sort.getSort()[0].getField(), STRING_FIELD_NAME_2);
+ assertTrue(innerHitBuilders.containsKey(queryBuilder.innerHit().getName()));
+ InnerHitContextBuilder innerHits = innerHitBuilders.get(queryBuilder.innerHit().getName());
+ assertEquals(innerHits.innerHitBuilder(), queryBuilder.innerHit());
}
}
diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java
index 73d29314130..1d26467853c 100644
--- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java
+++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java
@@ -40,8 +40,6 @@ import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.index.query.WrapperQueryBuilder;
import org.elasticsearch.join.ParentJoinPlugin;
import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.search.fetch.subphase.InnerHitsContext;
-import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.AbstractQueryTestCase;
@@ -138,7 +136,7 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase innerHitBuilders = new HashMap<>();
InnerHitContextBuilder.extractInnerHits(queryBuilder, innerHitBuilders);
- final InnerHitsContext innerHitsContext = new InnerHitsContext();
- for (InnerHitContextBuilder builder : innerHitBuilders.values()) {
- builder.build(searchContext, innerHitsContext);
- }
- assertEquals(1, innerHitsContext.getInnerHits().size());
- assertTrue(innerHitsContext.getInnerHits().containsKey(queryBuilder.innerHit().getName()));
- InnerHitsContext.InnerHitSubContext innerHits = innerHitsContext.getInnerHits().get(queryBuilder.innerHit().getName());
- assertEquals(innerHits.size(), queryBuilder.innerHit().getSize());
- assertEquals(innerHits.sort().sort.getSort().length, 1);
- assertEquals(innerHits.sort().sort.getSort()[0].getField(), STRING_FIELD_NAME_2);
+ assertTrue(innerHitBuilders.containsKey(queryBuilder.innerHit().getName()));
+ InnerHitContextBuilder innerHits = innerHitBuilders.get(queryBuilder.innerHit().getName());
+ assertEquals(innerHits.innerHitBuilder(), queryBuilder.innerHit());
}
}
diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java
index 83441ef92d2..f43214515be 100644
--- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java
+++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java
@@ -32,10 +32,10 @@ import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.join.ParentJoinPlugin;
import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.hamcrest.Matchers;
@@ -111,7 +111,7 @@ public class ParentIdQueryBuilderTests extends AbstractQueryTestCase clauses = new ArrayList<>();
clauses.addAll(getBooleanClauses(queryBuilder.must(), BooleanClause.Occur.MUST, context));
clauses.addAll(getBooleanClauses(queryBuilder.mustNot(), BooleanClause.Occur.MUST_NOT, context));
diff --git a/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java
index 0e0f767d5a5..534126ee5f3 100644
--- a/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java
@@ -21,7 +21,6 @@ package org.elasticsearch.index.query;
import org.apache.lucene.queries.function.FunctionScoreQuery;
import org.apache.lucene.search.Query;
-import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;
@@ -40,9 +39,9 @@ public class BoostingQueryBuilderTests extends AbstractQueryTestCase queries = AbstractQueryBuilder.toQueries(queryBuilder.innerQueries(), context.getQueryShardContext());
+ protected void doAssertLuceneQuery(DisMaxQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {
+ Collection<Query> queries = AbstractQueryBuilder.toQueries(queryBuilder.innerQueries(), context);
assertThat(query, instanceOf(DisjunctionMaxQuery.class));
DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) query;
assertThat(disjunctionMaxQuery.getTieBreakerMultiplier(), equalTo(queryBuilder.tieBreaker()));
diff --git a/server/src/test/java/org/elasticsearch/index/query/DistanceFeatureQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/DistanceFeatureQueryBuilderTests.java
index c2fcfdd7140..c1622057b6b 100644
--- a/server/src/test/java/org/elasticsearch/index/query/DistanceFeatureQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/DistanceFeatureQueryBuilderTests.java
@@ -29,7 +29,6 @@ import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.joda.time.DateTime;
import org.elasticsearch.index.query.DistanceFeatureQueryBuilder.Origin;
@@ -74,7 +73,9 @@ public class DistanceFeatureQueryBuilderTests extends AbstractQueryTestCase fields = context.getQueryShardContext().simpleMatchToIndexNames(fieldPattern);
- Collection<String> mappedFields = fields.stream().filter((field) -> context.getQueryShardContext().getObjectMapper(field) != null
- || context.getQueryShardContext().getMapperService().fullName(field) != null).collect(Collectors.toList());
- if (context.mapperService().getIndexSettings().getIndexVersionCreated().before(Version.V_6_1_0)) {
+ Collection<String> fields = context.simpleMatchToIndexNames(fieldPattern);
+ Collection<String> mappedFields = fields.stream().filter((field) -> context.getObjectMapper(field) != null
+ || context.getMapperService().fullName(field) != null).collect(Collectors.toList());
+ if (context.getIndexSettings().getIndexVersionCreated().before(Version.V_6_1_0)) {
if (fields.size() == 1) {
assertThat(query, instanceOf(ConstantScoreQuery.class));
ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) query;
@@ -93,21 +92,21 @@ public class ExistsQueryBuilderTests extends AbstractQueryTestCase childFields = new ArrayList<>();
- context.getQueryShardContext().getObjectMapper(field).forEach(mapper -> childFields.add(mapper.name()));
+ context.getObjectMapper(field).forEach(mapper -> childFields.add(mapper.name()));
assertThat(booleanQuery.clauses().size(), equalTo(childFields.size()));
for (int i = 0; i < childFields.size(); i++) {
BooleanClause booleanClause = booleanQuery.clauses().get(i);
assertThat(booleanClause.getOccur(), equalTo(BooleanClause.Occur.SHOULD));
}
- } else if (context.getQueryShardContext().getMapperService().fullName(field).hasDocValues()) {
+ } else if (context.getMapperService().fullName(field).hasDocValues()) {
assertThat(constantScoreQuery.getQuery(), instanceOf(DocValuesFieldExistsQuery.class));
DocValuesFieldExistsQuery dvExistsQuery = (DocValuesFieldExistsQuery) constantScoreQuery.getQuery();
assertEquals(field, dvExistsQuery.getField());
- } else if (context.getQueryShardContext().getMapperService().fullName(field).omitNorms() == false) {
+ } else if (context.getMapperService().fullName(field).omitNorms() == false) {
assertThat(constantScoreQuery.getQuery(), instanceOf(NormsFieldExistsQuery.class));
NormsFieldExistsQuery normsExistsQuery = (NormsFieldExistsQuery) constantScoreQuery.getQuery();
assertEquals(field, normsExistsQuery.getField());
diff --git a/server/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java
index 9d98a12358f..f564972b2a4 100644
--- a/server/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java
@@ -21,7 +21,6 @@ package org.elasticsearch.index.query;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.spans.FieldMaskingSpanQuery;
-import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;
@@ -43,12 +42,14 @@ public class FieldMaskingSpanQueryBuilderTests extends AbstractQueryTestCase
}
@Override
- protected void doAssertLuceneQuery(IdsQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException {
+ protected void doAssertLuceneQuery(IdsQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {
boolean allTypes = queryBuilder.types().length == 0 ||
queryBuilder.types().length == 1 && "_all".equals(queryBuilder.types()[0]);
if (queryBuilder.ids().size() == 0
// no types
- || context.getQueryShardContext().fieldMapper(IdFieldMapper.NAME) == null
+ || context.fieldMapper(IdFieldMapper.NAME) == null
// there are types, but disjoint from the query
|| (allTypes == false &&
- Arrays.asList(queryBuilder.types()).indexOf(context.mapperService().documentMapper().type()) == -1)) {
+ Arrays.asList(queryBuilder.types()).indexOf(context.getMapperService().documentMapper().type()) == -1)) {
assertThat(query, instanceOf(MatchNoDocsQuery.class));
} else {
assertThat(query, instanceOf(TermInSetQuery.class));
diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java
index 15f9b52d23b..379719f3616 100644
--- a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java
@@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
-import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;
@@ -134,7 +133,7 @@ public class IntervalQueryBuilderTests extends AbstractQueryTestCase 0) {
assertThat(query, instanceOf(BooleanQuery.class));
BooleanQuery booleanQuery = (BooleanQuery) query;
diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
index 13309fa6edf..68506b443c0 100644
--- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
@@ -41,7 +41,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.query.MultiMatchQueryBuilder.Type;
import org.elasticsearch.index.search.MatchQuery;
-import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;
@@ -156,7 +155,7 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase innerHitInternals = new HashMap<>();
InnerHitContextBuilder.extractInnerHits(queryBuilder, innerHitInternals);
- InnerHitsContext innerHitsContext = new InnerHitsContext();
- for (InnerHitContextBuilder builder : innerHitInternals.values()) {
- builder.build(searchContext, innerHitsContext);
- }
- assertEquals(1, innerHitsContext.getInnerHits().size());
- assertTrue(innerHitsContext.getInnerHits().containsKey(queryBuilder.innerHit().getName()));
- InnerHitsContext.InnerHitSubContext innerHits = innerHitsContext.getInnerHits().get(queryBuilder.innerHit().getName());
- assertEquals(innerHits.size(), queryBuilder.innerHit().getSize());
- assertEquals(innerHits.sort().sort.getSort().length, 1);
- assertEquals(innerHits.sort().sort.getSort()[0].getField(), INT_FIELD_NAME);
+ assertTrue(innerHitInternals.containsKey(queryBuilder.innerHit().getName()));
+ InnerHitContextBuilder innerHits = innerHitInternals.get(queryBuilder.innerHit().getName());
+ assertEquals(innerHits.innerHitBuilder(), queryBuilder.innerHit());
}
}
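
The parent-join and nested test changes above all converge on the same
lighter-weight assertion: rather than building a full InnerHitsContext against a
SearchContext, they extract the InnerHitContextBuilder map and compare it with
the query's own inner hit definition. The shared pattern, in isolation:

    Map<String, InnerHitContextBuilder> innerHitBuilders = new HashMap<>();
    InnerHitContextBuilder.extractInnerHits(queryBuilder, innerHitBuilders);
    assertTrue(innerHitBuilders.containsKey(queryBuilder.innerHit().getName()));
    InnerHitContextBuilder innerHits = innerHitBuilders.get(queryBuilder.innerHit().getName());
    assertEquals(innerHits.innerHitBuilder(), queryBuilder.innerHit());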
diff --git a/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java
index 2f868d02921..ee56a67092d 100644
--- a/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java
@@ -24,7 +24,6 @@ import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.ParsingException;
-import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;
@@ -68,7 +67,7 @@ public class PrefixQueryBuilderTests extends AbstractQueryTestCase spanQueryBuilderIterator = queryBuilder.clauses().iterator();
for (SpanQuery spanQuery : spanNearQuery.getClauses()) {
- assertThat(spanQuery, equalTo(spanQueryBuilderIterator.next().toQuery(context.getQueryShardContext())));
+ assertThat(spanQuery, equalTo(spanQueryBuilderIterator.next().toQuery(context)));
}
} else if (query instanceof SpanTermQuery || query instanceof SpanBoostQuery) {
assertThat(queryBuilder.clauses().size(), equalTo(1));
- assertThat(query, equalTo(queryBuilder.clauses().get(0).toQuery(context.getQueryShardContext())));
+ assertThat(query, equalTo(queryBuilder.clauses().get(0).toQuery(context)));
}
}
diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java
index 7df58553e27..ed6deb68448 100644
--- a/server/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java
@@ -25,7 +25,6 @@ import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;
@@ -56,11 +55,11 @@ public class SpanNotQueryBuilderTests extends AbstractQueryTestCase spanQueryBuilderIterator = queryBuilder.clauses().iterator();
for (SpanQuery spanQuery : spanOrQuery.getClauses()) {
- assertThat(spanQuery, equalTo(spanQueryBuilderIterator.next().toQuery(context.getQueryShardContext())));
+ assertThat(spanQuery, equalTo(spanQueryBuilderIterator.next().toQuery(context)));
}
}
diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java
index a5ef596e025..27f20f2295a 100644
--- a/server/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java
@@ -27,7 +27,6 @@ import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
@@ -59,14 +58,14 @@ public class SpanTermQueryBuilderTests extends AbstractTermQueryTestCase> IFD getForField(MappedFieldType fieldType) {
- return serviceHolder.indexFieldDataService.getForField(fieldType); // need to build / parse inner hits sort fields
- }
-
- };
- return testSearchContext;
- }
-
@After
public void afterTest() {
serviceHolder.clientInvocationHandler.delegate = null;
diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
index 089423770d5..c405ed619c8 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
@@ -54,7 +54,6 @@ import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.index.query.support.QueryParsers;
-import org.elasticsearch.search.internal.SearchContext;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
@@ -422,14 +421,13 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
context.setAllowUnmappedFields(true);
QB firstQuery = createTestQueryBuilder();
QB controlQuery = copyQuery(firstQuery);
- SearchContext searchContext = getSearchContext(context);
/* we use a private rewrite context here since we want the most realistic way of asserting that we are cacheable or not.
* We do it this way in SearchService where
* we first rewrite the query with a private context, then reset the context and then build the actual lucene query*/
QueryBuilder rewritten = rewriteQuery(firstQuery, new QueryShardContext(context));
Query firstLuceneQuery = rewritten.toQuery(context);
assertNotNull("toQuery should not return null", firstLuceneQuery);
- assertLuceneQuery(firstQuery, firstLuceneQuery, searchContext);
+ assertLuceneQuery(firstQuery, firstLuceneQuery, context);
//remove after assertLuceneQuery since the assertLuceneQuery impl might access the context as well
assertTrue(
"query is not equal to its copy after calling toQuery, firstQuery: " + firstQuery + ", secondQuery: " + controlQuery,
@@ -445,10 +443,10 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
secondQuery.queryName(secondQuery.queryName() == null ? randomAlphaOfLengthBetween(1, 30) : secondQuery.queryName()
+ randomAlphaOfLengthBetween(1, 10));
}
- searchContext = getSearchContext(context);
+ context = new QueryShardContext(context);
Query secondLuceneQuery = rewriteQuery(secondQuery, context).toQuery(context);
assertNotNull("toQuery should not return null", secondLuceneQuery);
- assertLuceneQuery(secondQuery, secondLuceneQuery, searchContext);
+ assertLuceneQuery(secondQuery, secondLuceneQuery, context);
if (builderGeneratesCacheableQueries()) {
assertEquals("two equivalent query builders lead to different lucene queries",
@@ -494,11 +492,11 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
/**
* Checks the result of {@link QueryBuilder#toQuery(QueryShardContext)} given the original {@link QueryBuilder}
* and {@link QueryShardContext}. Verifies that named queries and boost are properly handled and delegates to
- * {@link #doAssertLuceneQuery(AbstractQueryBuilder, Query, SearchContext)} for query specific checks.
+ * {@link #doAssertLuceneQuery(AbstractQueryBuilder, Query, QueryShardContext)} for query specific checks.
*/
- private void assertLuceneQuery(QB queryBuilder, Query query, SearchContext context) throws IOException {
+ private void assertLuceneQuery(QB queryBuilder, Query query, QueryShardContext context) throws IOException {
if (queryBuilder.queryName() != null) {
- Query namedQuery = context.getQueryShardContext().copyNamedQueries().get(queryBuilder.queryName());
+ Query namedQuery = context.copyNamedQueries().get(queryBuilder.queryName());
assertThat(namedQuery, equalTo(query));
}
if (query != null) {
@@ -522,7 +520,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
* Checks the result of {@link QueryBuilder#toQuery(QueryShardContext)} given the original {@link QueryBuilder}
* and {@link QueryShardContext}. Contains the query specific checks to be implemented by subclasses.
*/
- protected abstract void doAssertLuceneQuery(QB queryBuilder, Query query, SearchContext context) throws IOException;
+ protected abstract void doAssertLuceneQuery(QB queryBuilder, Query query, QueryShardContext context) throws IOException;
protected void assertTermOrBoostQuery(Query query, String field, String value, float fieldBoost) {
if (fieldBoost != AbstractQueryBuilder.DEFAULT_BOOST) {
diff --git a/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderTests.java b/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderTests.java
index 57db7972655..db3d46fc1a7 100644
--- a/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderTests.java
+++ b/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderTests.java
@@ -18,9 +18,9 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;
@@ -89,7 +89,7 @@ public class PinnedQueryBuilderTests extends AbstractQueryTestCase
Date: Mon, 23 Sep 2019 13:21:37 -0700
Subject: [PATCH 12/94] Add support for aliases in queries on _index. (#46944)
Previously, queries on the _index field were not able to specify index aliases.
This was a regression in functionality compared to the 'indices' query that was
deprecated and removed in 6.0.
Now queries on _index can specify an alias, which is resolved to the concrete
index names when we check whether an index matches. To match a remote shard
target, the pattern must take the form 'cluster:index' so that it matches the
fully-qualified index name. Index aliases can be specified in the following query
types: term, terms, prefix, and wildcard.
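
As a client-side illustration (the alias "logs" and the cluster alias
"my_remote" are invented for this sketch), such queries can now be phrased as:

    import org.elasticsearch.index.query.QueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    class IndexNameQuerySketch {
        // Matches shards of any concrete index behind the alias "logs"; the
        // alias is resolved against cluster state when the shard is checked.
        static QueryBuilder byAlias() {
            return QueryBuilders.termQuery("_index", "logs");
        }

        // A remote shard target only matches the fully-qualified
        // 'cluster:index' form, wildcards included.
        static QueryBuilder byRemotePattern() {
            return QueryBuilders.wildcardQuery("_index", "my_remote*:logs-*");
        }
    }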
---
docs/reference/migration/migrate_7_5.asciidoc | 30 +++++++
.../multi_cluster/90_index_name_query.yml | 58 ++++++++++++
.../org/elasticsearch/index/IndexModule.java | 4 +-
.../org/elasticsearch/index/IndexService.java | 9 +-
.../index/mapper/IndexFieldMapper.java | 42 ++++-----
.../index/query/QueryShardContext.java | 73 +++++++++------
.../index/query/SearchIndexNameMatcher.java | 84 +++++++++++++++++
.../elasticsearch/indices/IndicesService.java | 6 +-
.../java/org/elasticsearch/node/Node.java | 8 +-
.../elasticsearch/index/IndexModuleTests.java | 2 +-
.../index/mapper/DateFieldTypeTests.java | 4 +-
.../mapper/FieldNamesFieldTypeTests.java | 2 +-
.../index/mapper/IndexFieldTypeTests.java | 24 +++--
.../index/mapper/RangeFieldTypeTests.java | 2 +-
.../query/IntervalQueryBuilderTests.java | 2 +-
.../index/query/QueryShardContextTests.java | 22 +----
.../index/query/RangeQueryRewriteTests.java | 9 +-
.../query/SearchIndexNameMatcherTests.java | 90 +++++++++++++++++++
.../query/SimpleQueryStringBuilderTests.java | 10 ---
.../query/WildcardQueryBuilderTests.java | 15 ----
.../bucket/histogram/ExtendedBoundsTests.java | 2 +-
.../ScriptedMetricAggregatorTests.java | 2 +-
.../highlight/HighlightBuilderTests.java | 2 +-
.../rescore/QueryRescorerBuilderTests.java | 4 +-
.../search/sort/AbstractSortTestCase.java | 2 +-
.../AbstractSuggestionBuilderTestCase.java | 2 +-
.../snapshots/SnapshotResiliencyTests.java | 1 +
.../aggregations/AggregatorTestCase.java | 8 +-
.../test/AbstractBuilderTestCase.java | 2 +-
.../search/MockSearchServiceTests.java | 2 +-
.../DocumentSubsetBitsetCacheTests.java | 2 +-
...ityIndexReaderWrapperIntegrationTests.java | 4 +-
.../job/RollupIndexerIndexingTests.java | 2 +-
33 files changed, 392 insertions(+), 139 deletions(-)
create mode 100644 docs/reference/migration/migrate_7_5.asciidoc
create mode 100644 qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml
create mode 100644 server/src/main/java/org/elasticsearch/index/query/SearchIndexNameMatcher.java
create mode 100644 server/src/test/java/org/elasticsearch/index/query/SearchIndexNameMatcherTests.java
diff --git a/docs/reference/migration/migrate_7_5.asciidoc b/docs/reference/migration/migrate_7_5.asciidoc
new file mode 100644
index 00000000000..2334ce8aa5a
--- /dev/null
+++ b/docs/reference/migration/migrate_7_5.asciidoc
@@ -0,0 +1,30 @@
+[[breaking-changes-7.5]]
+== Breaking changes in 7.5
+++++
+7.5
+++++
+
+This section discusses the changes that you need to be aware of when migrating
+your application to Elasticsearch 7.5.
+
+See also <<release-highlights>> and <<es-release-notes>>.
+
+coming[7.5.0]
+
+//NOTE: The notable-breaking-changes tagged regions are re-used in the
+//Installation and Upgrade Guide
+
+//tag::notable-breaking-changes[]
+
+//end::notable-breaking-changes[]
+
+[discrete]
+[[breaking_75_search_changes]]
+=== Search Changes
+
+[discrete]
+==== Stricter checking for wildcard queries on _index
+Previously, a wildcard query on the `_index` field matched directly against the
+fully-qualified index name. Now, in order to match against remote indices like
+`cluster:index`, the query must contain a colon, as in `cl*ster:inde*`. This
+behavior aligns with the way indices are matched in the search endpoint.
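
To make the stricter rule concrete, here is a sketch of how a pattern such as
'cl*ster:inde*' is evaluated, using the same Regex.simpleMatch utility the
matcher relies on (the names "cluster" and "index" are invented): the pattern
splits on the colon and both halves must match.

    import org.elasticsearch.common.regex.Regex;

    class WildcardIndexMatchSketch {
        static boolean matches() {
            boolean clusterOk = Regex.simpleMatch("cl*ster", "cluster"); // true
            boolean indexOk = Regex.simpleMatch("inde*", "index");       // true
            // A pattern without ':' is only tested against local indices,
            // so it can no longer match a remote target.
            return clusterOk && indexOk;
        }
    }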
diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml
new file mode 100644
index 00000000000..030dad662df
--- /dev/null
+++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml
@@ -0,0 +1,58 @@
+---
+setup:
+ - do:
+ indices.create:
+ index: single_doc_index
+ body:
+ settings:
+ index:
+ number_of_shards: 1
+ number_of_replicas: 0
+---
+teardown:
+ - do:
+ indices.delete:
+ index: single_doc_index
+ ignore_unavailable: true
+
+---
+"Test that queries on _index match against the correct indices.":
+
+ - do:
+ bulk:
+ refresh: true
+ body:
+ - '{"index": {"_index": "single_doc_index"}}'
+ - '{"f1": "local_cluster", "sort_field": 0}'
+
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: "single_doc_index,my_remote_cluster:single_doc_index"
+ body:
+ query:
+ term:
+ "_index": "single_doc_index"
+
+ - match: { hits.total: 1 }
+ - match: { hits.hits.0._index: "single_doc_index"}
+ - match: { _shards.total: 2 }
+ - match: { _shards.successful: 2 }
+ - match: { _shards.skipped : 0}
+ - match: { _shards.failed: 0 }
+
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: "single_doc_index,my_remote_cluster:single_doc_index"
+ body:
+ query:
+ term:
+ "_index": "my_remote_cluster:single_doc_index"
+
+ - match: { hits.total: 1 }
+ - match: { hits.hits.0._index: "my_remote_cluster:single_doc_index"}
+ - match: { _shards.total: 2 }
+ - match: { _shards.successful: 2 }
+ - match: { _shards.skipped : 0}
+ - match: { _shards.failed: 0 }
diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java
index 6ef335144eb..b10d84ef1c6 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexModule.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java
@@ -30,6 +30,7 @@ import org.apache.lucene.util.Constants;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.Version;
import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.TriFunction;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -386,6 +387,7 @@ public final class IndexModule {
BigArrays bigArrays,
ThreadPool threadPool,
ScriptService scriptService,
+ ClusterService clusterService,
Client client,
IndicesQueryCache indicesQueryCache,
MapperRegistry mapperRegistry,
@@ -411,7 +413,7 @@ public final class IndexModule {
return new IndexService(indexSettings, indexCreationContext, environment, xContentRegistry,
new SimilarityService(indexSettings, scriptService, similarities),
shardStoreDeleter, analysisRegistry, engineFactory, circuitBreakerService, bigArrays, threadPool, scriptService,
- client, queryCache, directoryFactory, eventListener, readerWrapperFactory, mapperRegistry,
+ clusterService, client, queryCache, directoryFactory, eventListener, readerWrapperFactory, mapperRegistry,
indicesFieldDataCache, searchOperationListeners, indexOperationListeners, namedWriteableRegistry);
}
diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java
index da470a04afa..5e2ac0dbac6 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexService.java
@@ -32,6 +32,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -57,6 +58,7 @@ import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.SearchIndexNameMatcher;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.seqno.RetentionLeaseSyncer;
import org.elasticsearch.index.shard.IndexEventListener;
@@ -134,6 +136,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
private final ThreadPool threadPool;
private final BigArrays bigArrays;
private final ScriptService scriptService;
+ private final ClusterService clusterService;
private final Client client;
private final CircuitBreakerService circuitBreakerService;
private Supplier<Sort> indexSortSupplier;
@@ -151,6 +154,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
BigArrays bigArrays,
ThreadPool threadPool,
ScriptService scriptService,
+ ClusterService clusterService,
Client client,
QueryCache queryCache,
IndexStorePlugin.DirectoryFactory directoryFactory,
@@ -201,6 +205,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
this.bigArrays = bigArrays;
this.threadPool = threadPool;
this.scriptService = scriptService;
+ this.clusterService = clusterService;
this.client = client;
this.eventListener = eventListener;
this.nodeEnv = nodeEnv;
@@ -530,9 +535,11 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
* {@link IndexReader}-specific optimizations, such as rewriting containing range queries.
*/
public QueryShardContext newQueryShardContext(int shardId, IndexSearcher searcher, LongSupplier nowInMillis, String clusterAlias) {
+ SearchIndexNameMatcher indexNameMatcher = new SearchIndexNameMatcher(index().getName(), clusterAlias, clusterService);
return new QueryShardContext(
shardId, indexSettings, bigArrays, indexCache.bitsetFilterCache(), indexFieldData::getForField, mapperService(),
- similarityService(), scriptService, xContentRegistry, namedWriteableRegistry, client, searcher, nowInMillis, clusterAlias);
+ similarityService(), scriptService, xContentRegistry, namedWriteableRegistry, client, searcher, nowInMillis, clusterAlias,
+ indexNameMatcher);
}
/**
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java
index 276a8e7583c..4e690640135 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java
@@ -129,11 +129,16 @@ public class IndexFieldMapper extends MetadataFieldMapper {
*/
@Override
public Query termQuery(Object value, @Nullable QueryShardContext context) {
- if (isSameIndex(value, context.getFullyQualifiedIndex().getName())) {
+ String pattern = value instanceof BytesRef
+ ? ((BytesRef) value).utf8ToString()
+ : value.toString();
+ if (context.indexMatches(pattern)) {
+ // No need to OR these clauses - we can only logically be
+ // running in the context of just one of these index names.
return Queries.newMatchAllQuery();
} else {
- return Queries.newMatchNoDocsQuery("Index didn't match. Index queried: " + context.index().getName()
- + " vs. " + value);
+ return Queries.newMatchNoDocsQuery("The index [" + context.getFullyQualifiedIndex().getName() +
+ "] doesn't match the provided value [" + value + "].");
}
}
@@ -143,26 +148,29 @@ public class IndexFieldMapper extends MetadataFieldMapper {
return super.termsQuery(values, context);
}
for (Object value : values) {
- if (isSameIndex(value, context.getFullyQualifiedIndex().getName())) {
+ String pattern = value instanceof BytesRef
+ ? ((BytesRef) value).utf8ToString()
+ : value.toString();
+ if (context.indexMatches(pattern)) {
// No need to OR these clauses - we can only logically be
// running in the context of just one of these index names.
return Queries.newMatchAllQuery();
}
}
// None of the listed index names are this one
- return Queries.newMatchNoDocsQuery("Index didn't match. Index queried: " + context.getFullyQualifiedIndex().getName()
- + " vs. " + values);
+ return Queries.newMatchNoDocsQuery("The index [" + context.getFullyQualifiedIndex().getName() +
+ "] doesn't match the provided values [" + values + "].");
}
@Override
public Query prefixQuery(String value,
@Nullable MultiTermQuery.RewriteMethod method,
QueryShardContext context) {
- String indexName = context.getFullyQualifiedIndex().getName();
- if (indexName.startsWith(value)) {
+ String pattern = value + "*";
+ if (context.indexMatches(pattern)) {
return Queries.newMatchAllQuery();
} else {
- return Queries.newMatchNoDocsQuery("The index [" + indexName +
+ return Queries.newMatchNoDocsQuery("The index [" + context.getFullyQualifiedIndex().getName() +
"] doesn't match the provided prefix [" + value + "].");
}
}
@@ -176,8 +184,8 @@ public class IndexFieldMapper extends MetadataFieldMapper {
if (pattern.matcher(indexName).matches()) {
return Queries.newMatchAllQuery();
} else {
- return Queries.newMatchNoDocsQuery("The index [" + indexName +
- "] doesn't match the provided pattern [" + value + "].");
+ return Queries.newMatchNoDocsQuery("The index [" + context.getFullyQualifiedIndex().getName()
+ + "] doesn't match the provided pattern [" + value + "].");
}
}
@@ -185,20 +193,14 @@ public class IndexFieldMapper extends MetadataFieldMapper {
public Query wildcardQuery(String value,
@Nullable MultiTermQuery.RewriteMethod method,
QueryShardContext context) {
- String indexName = context.getFullyQualifiedIndex().getName();
- if (isSameIndex(value, indexName)) {
+ if (context.indexMatches(value)) {
return Queries.newMatchAllQuery();
} else {
- return Queries.newMatchNoDocsQuery("The index [" + indexName +
- "] doesn't match the provided pattern [" + value + "].");
+ return Queries.newMatchNoDocsQuery("The index [" + context.getFullyQualifiedIndex().getName()
+ + "] doesn't match the provided pattern [" + value + "].");
}
}
- private boolean isSameIndex(Object value, String indexName) {
- String pattern = value instanceof BytesRef ? ((BytesRef) value).utf8ToString() : value.toString();
- return Regex.simpleMatch(pattern, indexName);
- }
-
@Override
public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) {
return new ConstantIndexFieldData.Builder(mapperService -> fullyQualifiedIndexName);
diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java
index a631ea319b4..b6eea750748 100644
--- a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java
+++ b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java
@@ -69,6 +69,7 @@ import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.LongSupplier;
+import java.util.function.Predicate;
import static java.util.Collections.unmodifiableMap;
@@ -93,7 +94,9 @@ public class QueryShardContext extends QueryRewriteContext {
private String[] types = Strings.EMPTY_ARRAY;
private boolean cacheable = true;
private final SetOnce<Boolean> frozen = new SetOnce<>();
+
private final Index fullyQualifiedIndex;
+ private final Predicate<String> indexNameMatcher;
public void setTypes(String... types) {
this.types = types;
@@ -109,45 +112,48 @@ public class QueryShardContext extends QueryRewriteContext {
private NestedScope nestedScope;
public QueryShardContext(int shardId,
- IndexSettings indexSettings,
- BigArrays bigArrays,
- BitsetFilterCache bitsetFilterCache,
- BiFunction<MappedFieldType, String, IndexFieldData<?>> indexFieldDataLookup,
- MapperService mapperService,
- SimilarityService similarityService,
- ScriptService scriptService,
- NamedXContentRegistry xContentRegistry,
- NamedWriteableRegistry namedWriteableRegistry,
- Client client,
- IndexSearcher searcher,
- LongSupplier nowInMillis,
- String clusterAlias) {
+ IndexSettings indexSettings,
+ BigArrays bigArrays,
+ BitsetFilterCache bitsetFilterCache,
+ BiFunction<MappedFieldType, String, IndexFieldData<?>> indexFieldDataLookup,
+ MapperService mapperService,
+ SimilarityService similarityService,
+ ScriptService scriptService,
+ NamedXContentRegistry xContentRegistry,
+ NamedWriteableRegistry namedWriteableRegistry,
+ Client client,
+ IndexSearcher searcher,
+ LongSupplier nowInMillis,
+ String clusterAlias,
+ Predicate<String> indexNameMatcher) {
this(shardId, indexSettings, bigArrays, bitsetFilterCache, indexFieldDataLookup, mapperService, similarityService,
- scriptService, xContentRegistry, namedWriteableRegistry, client, searcher, nowInMillis,
+ scriptService, xContentRegistry, namedWriteableRegistry, client, searcher, nowInMillis, indexNameMatcher,
new Index(RemoteClusterAware.buildRemoteIndexName(clusterAlias, indexSettings.getIndex().getName()),
indexSettings.getIndex().getUUID()));
}
public QueryShardContext(QueryShardContext source) {
this(source.shardId, source.indexSettings, source.bigArrays, source.bitsetFilterCache, source.indexFieldDataService,
- source.mapperService, source.similarityService, source.scriptService, source.getXContentRegistry(),
- source.getWriteableRegistry(), source.client, source.searcher, source.nowInMillis, source.fullyQualifiedIndex);
+ source.mapperService, source.similarityService, source.scriptService, source.getXContentRegistry(),
+ source.getWriteableRegistry(), source.client, source.searcher, source.nowInMillis, source.indexNameMatcher,
+ source.fullyQualifiedIndex);
}
private QueryShardContext(int shardId,
- IndexSettings indexSettings,
- BigArrays bigArrays,
- BitsetFilterCache bitsetFilterCache,
- BiFunction<MappedFieldType, String, IndexFieldData<?>> indexFieldDataLookup,
- MapperService mapperService,
- SimilarityService similarityService,
- ScriptService scriptService,
- NamedXContentRegistry xContentRegistry,
- NamedWriteableRegistry namedWriteableRegistry,
- Client client,
- IndexSearcher searcher,
- LongSupplier nowInMillis,
- Index fullyQualifiedIndex) {
+ IndexSettings indexSettings,
+ BigArrays bigArrays,
+ BitsetFilterCache bitsetFilterCache,
+ BiFunction<MappedFieldType, String, IndexFieldData<?>> indexFieldDataLookup,
+ MapperService mapperService,
+ SimilarityService similarityService,
+ ScriptService scriptService,
+ NamedXContentRegistry xContentRegistry,
+ NamedWriteableRegistry namedWriteableRegistry,
+ Client client,
+ IndexSearcher searcher,
+ LongSupplier nowInMillis,
+ Predicate<String> indexNameMatcher,
+ Index fullyQualifiedIndex) {
super(xContentRegistry, namedWriteableRegistry, client, nowInMillis);
this.shardId = shardId;
this.similarityService = similarityService;
@@ -160,6 +166,7 @@ public class QueryShardContext extends QueryRewriteContext {
this.scriptService = scriptService;
this.indexSettings = indexSettings;
this.searcher = searcher;
+ this.indexNameMatcher = indexNameMatcher;
this.fullyQualifiedIndex = fullyQualifiedIndex;
}
@@ -311,6 +318,14 @@ public class QueryShardContext extends QueryRewriteContext {
return indexSettings.getIndexVersionCreated();
}
+ /**
+ * Given an index pattern, checks whether it matches against the current shard. The pattern
+ * may represent a fully qualified index name if the search targets remote shards.
+ */
+ public boolean indexMatches(String pattern) {
+ return indexNameMatcher.test(pattern);
+ }
+
public ParsedQuery toQuery(QueryBuilder queryBuilder) {
return toQuery(queryBuilder, q -> {
Query query = q.toQuery(this);
diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchIndexNameMatcher.java b/server/src/main/java/org/elasticsearch/index/query/SearchIndexNameMatcher.java
new file mode 100644
index 00000000000..b2329d1d54c
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/query/SearchIndexNameMatcher.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.transport.RemoteClusterAware;
+
+import java.util.function.Predicate;
+
+/**
+ * A predicate that checks whether an index pattern matches the current search shard target.
+ */
+public class SearchIndexNameMatcher implements Predicate<String> {
+ private final String indexName;
+ private final String clusterAlias;
+ private final ClusterService clusterService;
+ private final IndexNameExpressionResolver expressionResolver;
+
+ /**
+ * Creates a new index name matcher.
+ *
+ * @param indexName the name of the local index.
+ * @param clusterAlias the cluster alias of this search shard target. If it is a local target, the alias
+ * should be null or equal to {@link RemoteClusterAware#LOCAL_CLUSTER_GROUP_KEY}.
+ * @param clusterService the cluster service.
+ */
+ public SearchIndexNameMatcher(String indexName,
+ String clusterAlias,
+ ClusterService clusterService) {
+ this.indexName = indexName;
+ this.clusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias) ? null : clusterAlias;
+ this.clusterService = clusterService;
+ this.expressionResolver = new IndexNameExpressionResolver();
+ }
+
+ /**
+ * Given an index pattern, checks whether it matches against the current shard.
+ *
+ * If this shard represents a remote shard target, then in order to match, the pattern
+ * must contain the separator ':' and must match on both the cluster alias and index name.
+ */
+ public boolean test(String pattern) {
+ int separatorIndex = pattern.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR);
+ if (separatorIndex < 0) {
+ return clusterAlias == null && matchesIndex(pattern);
+ } else {
+ String clusterPattern = pattern.substring(0, separatorIndex);
+ String indexPattern = pattern.substring(separatorIndex + 1);
+
+ return Regex.simpleMatch(clusterPattern, clusterAlias) && matchesIndex(indexPattern);
+ }
+ }
+
+ private boolean matchesIndex(String pattern) {
+ String[] concreteIndices = expressionResolver.concreteIndexNames(
+ clusterService.state(), IndicesOptions.lenientExpandOpen(), pattern);
+ for (String index : concreteIndices) {
+ if (Regex.simpleMatch(index, indexName)) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
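
Because QueryShardContext depends only on a Predicate<String>, unit tests can
inject a trivial stand-in instead of standing up a ClusterService;
IndexFieldTypeTests below does exactly this:

    import java.util.function.Predicate;
    import org.elasticsearch.common.regex.Regex;

    // Stand-in matcher: test patterns against a fixed local index name,
    // with no cluster state involved.
    Predicate<String> indexNameMatcher = pattern -> Regex.simpleMatch(pattern, "index");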
diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java
index 38ab7149521..b6c87e576bd 100644
--- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java
+++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -43,6 +43,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.breaker.CircuitBreaker;
@@ -186,6 +187,7 @@ public class IndicesService extends AbstractLifecycleComponent
private final CircuitBreakerService circuitBreakerService;
private final BigArrays bigArrays;
private final ScriptService scriptService;
+ private final ClusterService clusterService;
private final Client client;
private volatile Map<String, IndexService> indices = emptyMap();
private final Map<Index, List<PendingDelete>> pendingDeletes = new HashMap<>();
@@ -213,7 +215,7 @@ public class IndicesService extends AbstractLifecycleComponent
AnalysisRegistry analysisRegistry, IndexNameExpressionResolver indexNameExpressionResolver,
MapperRegistry mapperRegistry, NamedWriteableRegistry namedWriteableRegistry, ThreadPool threadPool,
IndexScopedSettings indexScopedSettings, CircuitBreakerService circuitBreakerService, BigArrays bigArrays,
- ScriptService scriptService, Client client, MetaStateService metaStateService,
+ ScriptService scriptService, ClusterService clusterService, Client client, MetaStateService metaStateService,
Collection<Function<IndexSettings, Optional<EngineFactory>>> engineFactoryProviders,
Map<String, IndexStorePlugin.DirectoryFactory> directoryFactories) {
this.settings = settings;
@@ -235,6 +237,7 @@ public class IndicesService extends AbstractLifecycleComponent
this.circuitBreakerService = circuitBreakerService;
this.bigArrays = bigArrays;
this.scriptService = scriptService;
+ this.clusterService = clusterService;
this.client = client;
this.indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() {
@Override
@@ -556,6 +559,7 @@ public class IndicesService extends AbstractLifecycleComponent
bigArrays,
threadPool,
scriptService,
+ clusterService,
client,
indicesQueryCache,
mapperRegistry,
diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java
index 86a55ceb47a..efa7ddcd657 100644
--- a/server/src/main/java/org/elasticsearch/node/Node.java
+++ b/server/src/main/java/org/elasticsearch/node/Node.java
@@ -427,10 +427,10 @@ public class Node implements Closeable {
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
final IndicesService indicesService =
- new IndicesService(settings, pluginsService, nodeEnvironment, xContentRegistry, analysisModule.getAnalysisRegistry(),
- clusterModule.getIndexNameExpressionResolver(), indicesModule.getMapperRegistry(), namedWriteableRegistry,
- threadPool, settingsModule.getIndexScopedSettings(), circuitBreakerService, bigArrays,
- scriptModule.getScriptService(), client, metaStateService, engineFactoryProviders, indexStoreFactories);
+ new IndicesService(settings, pluginsService, nodeEnvironment, xContentRegistry, analysisModule.getAnalysisRegistry(),
+ clusterModule.getIndexNameExpressionResolver(), indicesModule.getMapperRegistry(), namedWriteableRegistry,
+ threadPool, settingsModule.getIndexScopedSettings(), circuitBreakerService, bigArrays, scriptModule.getScriptService(),
+ clusterService, client, metaStateService, engineFactoryProviders, indexStoreFactories);
final AliasValidator aliasValidator = new AliasValidator();
diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java
index d052fa365be..7c8d7b902fb 100644
--- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java
+++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java
@@ -150,7 +150,7 @@ public class IndexModuleTests extends ESTestCase {
private IndexService newIndexService(IndexModule module) throws IOException {
return module.newIndexService(CREATE_INDEX, nodeEnvironment, xContentRegistry(), deleter, circuitBreakerService, bigArrays,
- threadPool, scriptService, null, indicesQueryCache, mapperRegistry,
+ threadPool, scriptService, clusterService, null, indicesQueryCache, mapperRegistry,
new IndicesFieldDataCache(settings, listener), writableRegistry());
}
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java
index 479f4d7fc55..6ac59169ad9 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java
@@ -179,7 +179,7 @@ public class DateFieldTypeTests extends FieldTypeTestCase {
QueryShardContext context = new QueryShardContext(0,
new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings),
BigArrays.NON_RECYCLING_INSTANCE, null, null, null, null, null,
- xContentRegistry(), writableRegistry(), null, null, () -> nowInMillis, null);
+ xContentRegistry(), writableRegistry(), null, null, () -> nowInMillis, null, null);
MappedFieldType ft = createDefaultFieldType();
ft.setName("field");
String date = "2015-10-12T14:10:55";
@@ -202,7 +202,7 @@ public class DateFieldTypeTests extends FieldTypeTestCase {
QueryShardContext context = new QueryShardContext(0,
new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings),
BigArrays.NON_RECYCLING_INSTANCE, null, null, null, null, null, xContentRegistry(), writableRegistry(),
- null, null, () -> nowInMillis, null);
+ null, null, () -> nowInMillis, null, null);
MappedFieldType ft = createDefaultFieldType();
ft.setName("field");
String date1 = "2015-10-12T14:10:55";
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java
index 9bbeecdfc8f..1a9460115f0 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java
@@ -68,7 +68,7 @@ public class FieldNamesFieldTypeTests extends FieldTypeTestCase {
QueryShardContext queryShardContext = new QueryShardContext(0,
indexSettings, BigArrays.NON_RECYCLING_INSTANCE, null, null, mapperService,
- null, null, null, null, null, null, () -> 0L, null);
+ null, null, null, null, null, null, () -> 0L, null, null);
fieldNamesFieldType.setEnabled(true);
Query termQuery = fieldNamesFieldType.termQuery("field_name", queryShardContext);
assertEquals(new TermQuery(new Term(FieldNamesFieldMapper.CONTENT_TYPE, "field_name")), termQuery);
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IndexFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IndexFieldTypeTests.java
index 82f0edf24f4..11b365ff16e 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/IndexFieldTypeTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/IndexFieldTypeTests.java
@@ -21,11 +21,14 @@ package org.elasticsearch.index.mapper;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
-import org.elasticsearch.index.Index;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.query.QueryShardContext;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+import java.util.function.Predicate;
public class IndexFieldTypeTests extends FieldTypeTestCase {
@@ -62,12 +65,15 @@ public class IndexFieldTypeTests extends FieldTypeTestCase {
}
private QueryShardContext createContext() {
- QueryShardContext context = mock(QueryShardContext.class);
+ IndexMetaData indexMetaData = IndexMetaData.builder("index")
+ .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
+ .numberOfShards(1)
+ .numberOfReplicas(0)
+ .build();
+ IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY);
- Index index = new Index("index", "123");
- when(context.getFullyQualifiedIndex()).thenReturn(index);
- when(context.index()).thenReturn(index);
-
- return context;
+ Predicate<String> indexNameMatcher = pattern -> Regex.simpleMatch(pattern, "index");
+ return new QueryShardContext(0, indexSettings, null, null, null, null, null, null, xContentRegistry(), writableRegistry(),
+ null, null, System::currentTimeMillis, null, indexNameMatcher);
}
}
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java
index 16fe2ceee8f..79ab18afbd5 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java
@@ -229,7 +229,7 @@ public class RangeFieldTypeTests extends FieldTypeTestCase {
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(randomAlphaOfLengthBetween(1, 10), indexSettings);
return new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE, null, null, null, null, null,
- xContentRegistry(), writableRegistry(), null, null, () -> nowInMillis, null);
+ xContentRegistry(), writableRegistry(), null, null, () -> nowInMillis, null, null);
}
public void testDateRangeQueryUsingMappingFormat() {
diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java
index 379719f3616..4f2d9d217f9 100644
--- a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java
@@ -371,7 +371,7 @@ public class IntervalQueryBuilderTests extends AbstractQueryTestCase<IntervalQueryBuilder> {
mappedFieldType.fielddataBuilder(idxName).build(indexSettings, mappedFieldType, null, null, null),
mapperService, null, null, NamedXContentRegistry.EMPTY, new NamedWriteableRegistry(Collections.emptyList()),
- null, null, () -> nowInMillis, clusterAlias);
+ null, null, () -> nowInMillis, clusterAlias, null);
}
}
diff --git a/server/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java b/server/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java
index f4d7c90488f..83ab9c8e62b 100644
--- a/server/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java
@@ -41,7 +41,7 @@ public class RangeQueryRewriteTests extends ESSingleNodeTestCase {
IndexReader reader = new MultiReader();
QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), BigArrays.NON_RECYCLING_INSTANCE,
null, null, indexService.mapperService(), null, null, xContentRegistry(), writableRegistry(),
- null, new IndexSearcher(reader), null, null);
+ null, new IndexSearcher(reader), null, null, null);
RangeQueryBuilder range = new RangeQueryBuilder("foo");
assertEquals(Relation.DISJOINT, range.getRelation(context));
}
@@ -57,9 +57,8 @@ public class RangeQueryRewriteTests extends ESSingleNodeTestCase {
.endObject().endObject());
indexService.mapperService().merge("type",
new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
- QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), BigArrays.NON_RECYCLING_INSTANCE,
- null, null, indexService.mapperService(), null, null,
- xContentRegistry(), writableRegistry(), null, null, null, null);
+ QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), null, null, null,
+ indexService.mapperService(), null, null, xContentRegistry(), writableRegistry(), null, null, null, null, null);
RangeQueryBuilder range = new RangeQueryBuilder("foo");
// can't make assumptions on a missing reader, so it must return INTERSECT
assertEquals(Relation.INTERSECTS, range.getRelation(context));
@@ -79,7 +78,7 @@ public class RangeQueryRewriteTests extends ESSingleNodeTestCase {
IndexReader reader = new MultiReader();
QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), BigArrays.NON_RECYCLING_INSTANCE,
null, null, indexService.mapperService(), null, null, xContentRegistry(), writableRegistry(),
- null, new IndexSearcher(reader), null, null);
+ null, new IndexSearcher(reader), null, null, null);
RangeQueryBuilder range = new RangeQueryBuilder("foo");
// no values -> DISJOINT
assertEquals(Relation.DISJOINT, range.getRelation(context));
diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchIndexNameMatcherTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchIndexNameMatcherTests.java
new file mode 100644
index 00000000000..a796586bcf5
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/index/query/SearchIndexNameMatcherTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.Before;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class SearchIndexNameMatcherTests extends ESTestCase {
+ private SearchIndexNameMatcher matcher;
+ private SearchIndexNameMatcher remoteMatcher;
+
+ @Before
+ public void setUpMatchers() {
+ MetaData.Builder metaDataBuilder = MetaData.builder()
+ .put(indexBuilder("index1").putAlias(AliasMetaData.builder("alias")))
+ .put(indexBuilder("index2").putAlias(AliasMetaData.builder("alias")))
+ .put(indexBuilder("index3"));
+ ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(metaDataBuilder).build();
+
+ ClusterService clusterService = mock(ClusterService.class);
+ when(clusterService.state()).thenReturn(state);
+
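+ // "matcher" treats index1 as an index on the local cluster, while
+ // "remoteMatcher" treats it as index1 on the remote cluster "cluster"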
+ matcher = new SearchIndexNameMatcher("index1", "", clusterService);
+ remoteMatcher = new SearchIndexNameMatcher("index1", "cluster", clusterService);
+ }
+
+ private static IndexMetaData.Builder indexBuilder(String index) {
+ Settings.Builder settings = settings(Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0);
+ return IndexMetaData.builder(index).settings(settings);
+ }
+
+ public void testLocalIndex() {
+ assertTrue(matcher.test("index1"));
+ assertTrue(matcher.test("ind*x1"));
+ assertFalse(matcher.test("index2"));
+
+ assertTrue(matcher.test("alias"));
+ assertTrue(matcher.test("*lias"));
+
+ assertFalse(matcher.test("cluster:index1"));
+ }
+
+ public void testRemoteIndex() {
+ assertTrue(remoteMatcher.test("cluster:index1"));
+ assertTrue(remoteMatcher.test("cluster:ind*x1"));
+ assertTrue(remoteMatcher.test("*luster:ind*x1"));
+ assertFalse(remoteMatcher.test("cluster:index2"));
+
+ assertTrue(remoteMatcher.test("cluster:alias"));
+ assertTrue(remoteMatcher.test("cluster:*lias"));
+
+ assertFalse(remoteMatcher.test("index1"));
+ assertFalse(remoteMatcher.test("alias"));
+
+ assertFalse(remoteMatcher.test("*index1"));
+ assertFalse(remoteMatcher.test("*alias"));
+ assertFalse(remoteMatcher.test("cluster*"));
+ assertFalse(remoteMatcher.test("cluster*index1"));
+ }
+}
diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java
index 78a7ca35eae..bad1a6c7045 100644
--- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java
@@ -27,7 +27,6 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
-import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
@@ -416,15 +415,6 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQueryStringBuilder> {
- null, null, () -> now, null);
+ null, null, () -> now, null, null);
DateFormatter formatter = DateFormatter.forPattern("dateOptionalTime");
DocValueFormat format = new DocValueFormat.DateTime(formatter, ZoneOffset.UTC, DateFieldMapper.Resolution.MILLISECONDS);
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java
index 7203b5dd443..9d0d1d69f02 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java
@@ -426,6 +426,6 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
Map<String, ScriptEngine> engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine);
ScriptService scriptService = new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS);
return new QueryShardContext(0, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, null, null, mapperService, null, scriptService,
- xContentRegistry(), writableRegistry(), null, null, System::currentTimeMillis, null);
+ xContentRegistry(), writableRegistry(), null, null, System::currentTimeMillis, null, null);
}
}
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
index 65d2e92555c..a1f669558a5 100644
--- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
@@ -280,7 +280,7 @@ public class HighlightBuilderTests extends ESTestCase {
// shard context will only need indicesQueriesRegistry for building Query objects nested in highlighter
QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE,
null, null, null, null, null, xContentRegistry(), namedWriteableRegistry,
- null, null, System::currentTimeMillis, null) {
+ null, null, System::currentTimeMillis, null, null) {
@Override
public MappedFieldType fieldMapper(String name) {
TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name);
diff --git a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java
index 995cfa3b1c9..accf23a9644 100644
--- a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java
@@ -144,7 +144,7 @@ public class QueryRescorerBuilderTests extends ESTestCase {
// shard context will only need indicesQueriesRegistry for building Query objects nested in query rescorer
QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE,
null, null, null, null, null,
- xContentRegistry(), namedWriteableRegistry, null, null, () -> nowInMillis, null) {
+ xContentRegistry(), namedWriteableRegistry, null, null, () -> nowInMillis, null, null) {
@Override
public MappedFieldType fieldMapper(String name) {
TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name);
@@ -188,7 +188,7 @@ public class QueryRescorerBuilderTests extends ESTestCase {
// shard context will only need indicesQueriesRegistry for building Query objects nested in query rescorer
QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE,
null, null, null, null, null,
- xContentRegistry(), namedWriteableRegistry, null, null, () -> nowInMillis, null) {
+ xContentRegistry(), namedWriteableRegistry, null, null, () -> nowInMillis, null, null) {
@Override
public MappedFieldType fieldMapper(String name) {
TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name);
diff --git a/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java
index a09cb4b0dfa..28ca23df124 100644
--- a/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java
+++ b/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java
@@ -192,7 +192,7 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends ESTestCase {
return builder.build(idxSettings, fieldType, new IndexFieldDataCache.None(), null, null);
};
return new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE, bitsetFilterCache, indexFieldDataLookup,
- null, null, scriptService, xContentRegistry(), namedWriteableRegistry, null, null, () -> randomNonNegativeLong(), null) {
+ null, null, scriptService, xContentRegistry(), namedWriteableRegistry, null, null, () -> randomNonNegativeLong(), null, null) {
@Override
public MappedFieldType fieldMapper(String name) {
diff --git a/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java b/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java
index d0289c7fa97..f60c3f07740 100644
--- a/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java
+++ b/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java
@@ -181,7 +181,7 @@ public abstract class AbstractSuggestionBuilderTestCase<SB extends SuggestionBuilder<SB>>
- namedWriteableRegistry, this.client, searcher, () -> nowInMillis, null);
+ namedWriteableRegistry, this.client, searcher, () -> nowInMillis, null, null);
}
ScriptModule createScriptModule(List<ScriptPlugin> scriptPlugins) {
diff --git a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java
index 668495f6f70..8a8842487f1 100644
--- a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java
@@ -43,7 +43,7 @@ public class MockSearchServiceTests extends ESTestCase {
final long nowInMillis = randomNonNegativeLong();
SearchContext s = new TestSearchContext(new QueryShardContext(0,
new IndexSettings(EMPTY_INDEX_METADATA, Settings.EMPTY), BigArrays.NON_RECYCLING_INSTANCE, null, null, null, null, null,
- xContentRegistry(), writableRegistry(), null, null, () -> nowInMillis, null)) {
+ xContentRegistry(), writableRegistry(), null, null, () -> nowInMillis, null, null)) {
@Override
public SearchShardTarget shardTarget() {
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java
index f78b9c2aa6f..a50c39d4e6a 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java
@@ -241,7 +241,7 @@ public class DocumentSubsetBitsetCacheTests extends ESTestCase {
final QueryShardContext context = new QueryShardContext(shardId.id(), indexSettings, BigArrays.NON_RECYCLING_INSTANCE,
null, null, mapperService, null, null, xContentRegistry(), writableRegistry(),
- client, new IndexSearcher(directoryReader), () -> nowInMillis, null);
+ client, new IndexSearcher(directoryReader), () -> nowInMillis, null, null);
body.accept(context, leaf);
}
}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java
index 8214d327491..ca49e4ae4a3 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java
@@ -85,7 +85,7 @@ public class SecurityIndexReaderWrapperIntegrationTests extends AbstractBuilderT
final long nowInMillis = randomNonNegativeLong();
QueryShardContext realQueryShardContext = new QueryShardContext(shardId.id(), indexSettings, BigArrays.NON_RECYCLING_INSTANCE,
null, null, mapperService, null, null, xContentRegistry(), writableRegistry(),
- client, null, () -> nowInMillis, null);
+ client, null, () -> nowInMillis, null, null);
QueryShardContext queryShardContext = spy(realQueryShardContext);
DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY);
XPackLicenseState licenseState = mock(XPackLicenseState.class);
@@ -200,7 +200,7 @@ public class SecurityIndexReaderWrapperIntegrationTests extends AbstractBuilderT
final long nowInMillis = randomNonNegativeLong();
QueryShardContext realQueryShardContext = new QueryShardContext(shardId.id(), indexSettings, BigArrays.NON_RECYCLING_INSTANCE,
null, null, mapperService, null, null, xContentRegistry(), writableRegistry(),
- client, null, () -> nowInMillis, null);
+ client, null, () -> nowInMillis, null, null);
QueryShardContext queryShardContext = spy(realQueryShardContext);
DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY);
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java
index 7f5a8232a6d..492d24b88f0 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java
@@ -93,7 +93,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase {
settings = createIndexSettings();
queryShardContext = new QueryShardContext(0, settings,
BigArrays.NON_RECYCLING_INSTANCE, null, null, null, null, null,
- null, null, null, null, () -> 0L, null);
+ null, null, null, null, () -> 0L, null, null);
}
public void testSimpleDateHisto() throws Exception {
From 5ca37db60cc98916b07b0bef98b3c5a8c5420cc7 Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Mon, 23 Sep 2019 17:06:27 -0600
Subject: [PATCH 13/94] Mute
SLMSnapshotBlockingIntegTests.testRetentionWhileSnapshotInProgress
Relates to #46508
---
.../elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java | 1 +
1 file changed, 1 insertion(+)
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java
index b42a1f98074..05ef3fa792f 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java
@@ -143,6 +143,7 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase {
}
}
+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/46508")
public void testRetentionWhileSnapshotInProgress() throws Exception {
final String indexName = "test";
final String policyId = "slm-policy";
From 6986d7f9680cd20de60ae615c77281a9f955611f Mon Sep 17 00:00:00 2001
From: Tanguy Leroux
Date: Tue, 24 Sep 2019 08:57:39 +0200
Subject: [PATCH 14/94] Add blob container retries tests for Google Cloud
Storage (#46968)
Similar to what was done for S3 in #45383, this commit adds unit
tests that verify the behavior of the SDK client and of the blob
container implementation for Google Cloud Storage when the remote
service returns errors.
The main purpose is to exercise the retry logic for 410 Gone errors
added in #45963.
Relates #45963
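
For context, the resumable-upload handlers in these tests key off the
Content-Range request header, which the new TestUtils helpers parse.
The header comes in two forms; a minimal sketch of how the helpers
treat them (byte offsets illustrative only):

    // "bytes 0-1023/*"    -> total size not yet known: reply 308 Resume Incomplete
    // "bytes 0-1023/1024" -> total size known: this is the final chunk, reply 200
    Integer limit = TestUtils.getContentRangeLimit("bytes 0-1023/*");  // null
    int start = TestUtils.getContentRangeStart("bytes 0-1023/1024");   // 0
    int end = TestUtils.getContentRangeEnd("bytes 0-1023/1024");       // 1023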
---
...CloudStorageBlobContainerRetriesTests.java | 433 ++++++++++++++++++
...eCloudStorageBlobStoreRepositoryTests.java | 158 ++-----
.../repositories/gcs/TestUtils.java | 158 +++++++
3 files changed, 626 insertions(+), 123 deletions(-)
create mode 100644 plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java
create mode 100644 plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/TestUtils.java
diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java
new file mode 100644
index 00000000000..714ea968ff0
--- /dev/null
+++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java
@@ -0,0 +1,433 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.gcs;
+
+import com.google.api.gax.retrying.RetrySettings;
+import com.google.cloud.http.HttpTransportOptions;
+import com.google.cloud.storage.StorageException;
+import com.google.cloud.storage.StorageOptions;
+import com.sun.net.httpserver.HttpContext;
+import com.sun.net.httpserver.HttpServer;
+import org.apache.http.HttpStatus;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
+import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
+import org.elasticsearch.common.network.InetAddresses;
+import org.elasticsearch.common.settings.MockSecureSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.CountDown;
+import org.elasticsearch.mocksocket.MockHttpServer;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.RestUtils;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.threeten.bp.Duration;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketTimeoutException;
+import java.nio.file.NoSuchFileException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes;
+import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.CREDENTIALS_FILE_SETTING;
+import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.ENDPOINT_SETTING;
+import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING;
+import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.TOKEN_URI_SETTING;
+import static org.elasticsearch.repositories.gcs.TestUtils.createServiceAccount;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+@SuppressForbidden(reason = "use a http server")
+public class GoogleCloudStorageBlobContainerRetriesTests extends ESTestCase {
+
+ private HttpServer httpServer;
+
+ private String httpServerUrl() {
+ assertThat(httpServer, notNullValue());
+ InetSocketAddress address = httpServer.getAddress();
+ return "http://" + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort();
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
+ httpServer.start();
+ super.setUp();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ httpServer.stop(0);
+ super.tearDown();
+ }
+
+ private BlobContainer createBlobContainer(final int maxRetries, final @Nullable TimeValue readTimeout) {
+ final Settings.Builder clientSettings = Settings.builder();
+ final String client = randomAlphaOfLength(5).toLowerCase(Locale.ROOT);
+ clientSettings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace(client).getKey(), httpServerUrl());
+ clientSettings.put(TOKEN_URI_SETTING.getConcreteSettingForNamespace(client).getKey(), httpServerUrl() + "/token");
+ if (readTimeout != null) {
+ clientSettings.put(READ_TIMEOUT_SETTING.getConcreteSettingForNamespace(client).getKey(), readTimeout);
+ }
+
+ final MockSecureSettings secureSettings = new MockSecureSettings();
+ secureSettings.setFile(CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace(client).getKey(), createServiceAccount(random()));
+ clientSettings.setSecureSettings(secureSettings);
+
+ final GoogleCloudStorageService service = new GoogleCloudStorageService() {
+ @Override
+ StorageOptions createStorageOptions(final GoogleCloudStorageClientSettings clientSettings,
+ final HttpTransportOptions httpTransportOptions) {
+ StorageOptions options = super.createStorageOptions(clientSettings, httpTransportOptions);
+ return options.toBuilder()
+ .setRetrySettings(RetrySettings.newBuilder()
+ .setTotalTimeout(options.getRetrySettings().getTotalTimeout())
+ .setInitialRetryDelay(Duration.ofMillis(10L))
+ .setRetryDelayMultiplier(options.getRetrySettings().getRetryDelayMultiplier())
+ .setMaxRetryDelay(Duration.ofSeconds(1L))
+ .setMaxAttempts(maxRetries)
+ .setJittered(false)
+ .setInitialRpcTimeout(options.getRetrySettings().getInitialRpcTimeout())
+ .setRpcTimeoutMultiplier(options.getRetrySettings().getRpcTimeoutMultiplier())
+ .setMaxRpcTimeout(options.getRetrySettings().getMaxRpcTimeout())
+ .build())
+ .build();
+ }
+ };
+ service.refreshAndClearCache(GoogleCloudStorageClientSettings.load(clientSettings.build()));
+
+ final List<HttpContext> httpContexts = Arrays.asList(
+ // Auth
+ httpServer.createContext("/token", exchange -> {
+ byte[] response = ("{\"access_token\":\"foo\",\"token_type\":\"Bearer\",\"expires_in\":3600}").getBytes(UTF_8);
+ exchange.getResponseHeaders().add("Content-Type", "application/json");
+ exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length);
+ exchange.getResponseBody().write(response);
+ exchange.close();
+ }),
+ // Does the bucket exist?
+ httpServer.createContext("/storage/v1/b/bucket", exchange -> {
+ byte[] response = ("{\"kind\":\"storage#bucket\",\"name\":\"bucket\",\"id\":\"0\"}").getBytes(UTF_8);
+ exchange.getResponseHeaders().add("Content-Type", "application/json; charset=utf-8");
+ exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length);
+ exchange.getResponseBody().write(response);
+ exchange.close();
+ })
+ );
+
+ final GoogleCloudStorageBlobStore blobStore = new GoogleCloudStorageBlobStore("bucket", client, service);
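+ // the contexts above only serve the blob store's construction-time auth and
+ // bucket-existence checks; drop them so each test can register its own handlers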
+ httpContexts.forEach(httpContext -> httpServer.removeContext(httpContext));
+
+ return new GoogleCloudStorageBlobContainer(BlobPath.cleanPath(), blobStore);
+ }
+
+ public void testReadNonexistentBlobThrowsNoSuchFileException() {
+ final BlobContainer blobContainer = createBlobContainer(between(1, 5), null);
+ final Exception exception = expectThrows(NoSuchFileException.class,
+ () -> Streams.readFully(blobContainer.readBlob("read_nonexistent_blob")));
+ assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("blob [read_nonexistent_blob] does not exist"));
+ }
+
+ public void testReadBlobWithRetries() throws Exception {
+ final int maxRetries = randomIntBetween(2, 10);
+ final CountDown countDown = new CountDown(maxRetries);
+
+ final byte[] bytes = randomBlobContent();
+ httpServer.createContext("/download/storage/v1/b/bucket/o/read_blob_max_retries", exchange -> {
+ Streams.readFully(exchange.getRequestBody());
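+ // CountDown#countDown() only returns true once the count reaches zero, so the
+ // first (maxRetries - 1) requests fall through to the 500 response below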
+ if (countDown.countDown()) {
+ exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
+ exchange.sendResponseHeaders(RestStatus.OK.getStatus(), bytes.length);
+ exchange.getResponseBody().write(bytes);
+ exchange.close();
+ return;
+ }
+ exchange.sendResponseHeaders(HttpStatus.SC_INTERNAL_SERVER_ERROR, -1);
+ if (randomBoolean()) {
+ exchange.close();
+ }
+ });
+
+ final BlobContainer blobContainer = createBlobContainer(maxRetries, TimeValue.timeValueMillis(between(100, 500)));
+ try (InputStream inputStream = blobContainer.readBlob("read_blob_max_retries")) {
+ assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream)));
+ assertThat(countDown.isCountedDown(), is(true));
+ }
+ }
+
+ public void testReadBlobWithReadTimeouts() {
+ final int maxRetries = randomIntBetween(1, 3);
+ final BlobContainer blobContainer = createBlobContainer(maxRetries, TimeValue.timeValueMillis(between(100, 200)));
+
+ // HTTP server does not send a response
+ httpServer.createContext("/download/storage/v1/b/bucket/o/read_blob_unresponsive", exchange -> {});
+
+ StorageException storageException = expectThrows(StorageException.class,
+ () -> Streams.readFully(blobContainer.readBlob("read_blob_unresponsive")));
+ assertThat(storageException.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
+ assertThat(storageException.getCause(), instanceOf(SocketTimeoutException.class));
+
+ // HTTP server sends a partial response
+ final byte[] bytes = randomBlobContent();
+ httpServer.createContext("/download/storage/v1/b/bucket/o/read_blob_incomplete", exchange -> {
+ exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8");
+ exchange.sendResponseHeaders(HttpStatus.SC_OK, bytes.length);
+ final int bytesToSend = randomIntBetween(0, bytes.length - 1);
+ if (bytesToSend > 0) {
+ exchange.getResponseBody().write(bytes, 0, bytesToSend);
+ }
+ if (randomBoolean()) {
+ exchange.getResponseBody().flush();
+ }
+ });
+
+ storageException = expectThrows(StorageException.class, () -> {
+ try (InputStream stream = blobContainer.readBlob("read_blob_incomplete")) {
+ Streams.readFully(stream);
+ }
+ });
+ assertThat(storageException.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
+ assertThat(storageException.getCause(), instanceOf(SocketTimeoutException.class));
+ }
+
+ public void testWriteBlobWithRetries() throws Exception {
+ final int maxRetries = randomIntBetween(2, 10);
+ final CountDown countDown = new CountDown(maxRetries);
+
+ final byte[] bytes = randomBlobContent();
+ httpServer.createContext("/upload/storage/v1/b/bucket/o", exchange -> {
+ assertThat(exchange.getRequestURI().getQuery(), containsString("uploadType=multipart"));
+ if (countDown.countDown()) {
+ Optional<Tuple<String, BytesArray>> content = TestUtils.parseMultipartRequestBody(exchange.getRequestBody());
+ assertThat(content.isPresent(), is(true));
+ assertThat(content.get().v1(), equalTo("write_blob_max_retries"));
+ if (Objects.deepEquals(bytes, content.get().v2().array())) {
+ byte[] response = ("{\"bucket\":\"bucket\",\"name\":\"" + content.get().v1() + "\"}").getBytes(UTF_8);
+ exchange.getResponseHeaders().add("Content-Type", "application/json");
+ exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length);
+ exchange.getResponseBody().write(response);
+ } else {
+ exchange.sendResponseHeaders(HttpStatus.SC_BAD_REQUEST, -1);
+ }
+ exchange.close();
+ return;
+ }
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, Math.max(1, bytes.length - 1))]);
+ } else {
+ Streams.readFully(exchange.getRequestBody());
+ exchange.sendResponseHeaders(HttpStatus.SC_INTERNAL_SERVER_ERROR, -1);
+ }
+ }
+ exchange.close();
+ });
+
+ final BlobContainer blobContainer = createBlobContainer(maxRetries, null);
+ try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) {
+ blobContainer.writeBlob("write_blob_max_retries", stream, bytes.length, false);
+ }
+ assertThat(countDown.isCountedDown(), is(true));
+ }
+
+ public void testWriteBlobWithReadTimeouts() {
+ final byte[] bytes = randomByteArrayOfLength(randomIntBetween(10, 128));
+ final TimeValue readTimeout = TimeValue.timeValueMillis(randomIntBetween(100, 500));
+ final BlobContainer blobContainer = createBlobContainer(1, readTimeout);
+
+ // HTTP server does not send a response
+ httpServer.createContext("/upload/storage/v1/b/bucket/o", exchange -> {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, bytes.length - 1)]);
+ } else {
+ Streams.readFully(exchange.getRequestBody());
+ }
+ }
+ });
+
+ Exception exception = expectThrows(StorageException.class, () -> {
+ try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) {
+ blobContainer.writeBlob("write_blob_timeout", stream, bytes.length, false);
+ }
+ });
+ assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
+
+ assertThat(exception.getCause(), instanceOf(SocketTimeoutException.class));
+ assertThat(exception.getCause().getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
+ }
+
+ public void testWriteLargeBlob() throws IOException {
+ // See {@link BaseWriteChannel#DEFAULT_CHUNK_SIZE}
+ final int defaultChunkSize = 8 * 256 * 1024;
+ final int nbChunks = randomIntBetween(3, 5);
+ final int lastChunkSize = randomIntBetween(1, defaultChunkSize - 1);
+ final int totalChunks = nbChunks + 1;
+ final byte[] data = randomBytes(defaultChunkSize * nbChunks + lastChunkSize);
+ assertThat(data.length, greaterThan(GoogleCloudStorageBlobStore.LARGE_BLOB_THRESHOLD_BYTE_SIZE));
+
+ logger.debug("resumable upload is composed of [{}] total chunks ([{}] chunks of length [{}] and last chunk of length [{}]",
+ totalChunks, nbChunks, defaultChunkSize, lastChunkSize);
+
+ final int nbErrors = 2; // we want all requests to fail at least once
+ final AtomicInteger countInits = new AtomicInteger(nbErrors);
+ final AtomicInteger countUploads = new AtomicInteger(nbErrors * totalChunks);
+ final AtomicBoolean allow410Gone = new AtomicBoolean(randomBoolean());
+ final AtomicBoolean allowReadTimeout = new AtomicBoolean(rarely());
+ final int wrongChunk = randomIntBetween(1, totalChunks);
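+ // chunk on which the simulated failure (read timeout or 410 Gone) is injected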
+
+ final AtomicReference sessionUploadId = new AtomicReference<>(UUIDs.randomBase64UUID());
+ logger.debug("starting with resumable upload id [{}]", sessionUploadId.get());
+
+ httpServer.createContext("/upload/storage/v1/b/bucket/o", exchange -> {
+ final Map<String, String> params = new HashMap<>();
+ RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params);
+ assertThat(params.get("uploadType"), equalTo("resumable"));
+
+ if ("POST".equals(exchange.getRequestMethod())) {
+ assertThat(params.get("name"), equalTo("write_large_blob"));
+ if (countInits.decrementAndGet() <= 0) {
+ byte[] response = Streams.readFully(exchange.getRequestBody()).utf8ToString().getBytes(UTF_8);
+ exchange.getResponseHeaders().add("Content-Type", "application/json");
+ exchange.getResponseHeaders().add("Location", httpServerUrl() +
+ "/upload/storage/v1/b/bucket/o?uploadType=resumable&upload_id=" + sessionUploadId.get());
+ exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length);
+ exchange.getResponseBody().write(response);
+ exchange.close();
+ return;
+ }
+ if (allowReadTimeout.get()) {
+ assertThat(wrongChunk, greaterThan(0));
+ return;
+ }
+
+ } else if ("PUT".equals(exchange.getRequestMethod())) {
+ final String uploadId = params.get("upload_id");
+ if (uploadId.equals(sessionUploadId.get()) == false) {
+ logger.debug("session id [{}] is gone", uploadId);
+ assertThat(wrongChunk, greaterThan(0));
+ Streams.readFully(exchange.getRequestBody());
+ exchange.sendResponseHeaders(HttpStatus.SC_GONE, -1);
+ exchange.close();
+ return;
+ }
+
+ if (countUploads.get() == (wrongChunk * nbErrors)) {
+ if (allowReadTimeout.compareAndSet(true, false)) {
+ assertThat(wrongChunk, greaterThan(0));
+ return;
+ }
+ if (allow410Gone.compareAndSet(true, false)) {
+ final String newUploadId = UUIDs.randomBase64UUID(random());
+ logger.debug("chunk [{}] gone, updating session ids [{} -> {}]", wrongChunk, sessionUploadId.get(), newUploadId);
+ sessionUploadId.set(newUploadId);
+
+ // we must reset the counters because the whole object upload will be retried
+ countInits.set(nbErrors);
+ countUploads.set(nbErrors * totalChunks);
+
+ Streams.readFully(exchange.getRequestBody());
+ exchange.sendResponseHeaders(HttpStatus.SC_GONE, -1);
+ exchange.close();
+ return;
+ }
+ }
+
+ final String range = exchange.getRequestHeaders().getFirst("Content-Range");
+ assertTrue(Strings.hasLength(range));
+
+ if (countUploads.decrementAndGet() % 2 == 0) {
+ final ByteArrayOutputStream requestBody = new ByteArrayOutputStream();
+ final long bytesRead = Streams.copy(exchange.getRequestBody(), requestBody);
+ assertThat(Math.toIntExact(bytesRead), anyOf(equalTo(defaultChunkSize), equalTo(lastChunkSize)));
+
+ final int rangeStart = TestUtils.getContentRangeStart(range);
+ final int rangeEnd = TestUtils.getContentRangeEnd(range);
+ assertThat(rangeEnd + 1 - rangeStart, equalTo(Math.toIntExact(bytesRead)));
+ assertArrayEquals(Arrays.copyOfRange(data, rangeStart, rangeEnd + 1), requestBody.toByteArray());
+
+ final Integer limit = TestUtils.getContentRangeLimit(range);
+ if (limit != null) {
+ exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1);
+ exchange.close();
+ return;
+ } else {
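+ // total size not announced yet: acknowledge the received range with a
+ // 308 Resume Incomplete so the client sends the next chunk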
+ exchange.getResponseHeaders().add("Range", String.format(Locale.ROOT, "bytes=%d/%d", rangeStart, rangeEnd));
+ exchange.getResponseHeaders().add("Content-Length", "0");
+ exchange.sendResponseHeaders(308 /* Resume Incomplete */, -1);
+ exchange.close();
+ return;
+ }
+ }
+ }
+
+ // drain the whole request body, otherwise the SDK client throws a non-retryable StorageException
+ Streams.readFully(exchange.getRequestBody());
+ if (randomBoolean()) {
+ exchange.sendResponseHeaders(HttpStatus.SC_INTERNAL_SERVER_ERROR, -1);
+ }
+ exchange.close();
+ });
+
+ final TimeValue readTimeout = allowReadTimeout.get() ? TimeValue.timeValueSeconds(3) : null;
+
+ final BlobContainer blobContainer = createBlobContainer(nbErrors + 1, readTimeout);
+ try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", data), data.length)) {
+ blobContainer.writeBlob("write_large_blob", stream, data.length, false);
+ }
+
+ assertThat(countInits.get(), equalTo(0));
+ assertThat(countUploads.get(), equalTo(0));
+ assertThat(allow410Gone.get(), is(false));
+ }
+
+ private static byte[] randomBlobContent() {
+ return randomByteArrayOfLength(randomIntBetween(1, frequently() ? 512 : 1 << 20)); // rarely up to 1mb
+ }
+}
diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java
index 914746f7830..0fa9dfe9102 100644
--- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java
+++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java
@@ -31,6 +31,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.MockSecureSettings;
@@ -38,8 +39,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.repositories.Repository;
@@ -54,9 +53,6 @@ import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
-import java.security.KeyPairGenerator;
-import java.util.Arrays;
-import java.util.Base64;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -64,13 +60,12 @@ import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
-import java.util.UUID;
+import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
-import java.util.zip.GZIPInputStream;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.CREDENTIALS_FILE_SETTING;
@@ -78,6 +73,7 @@ import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSetting
import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.TOKEN_URI_SETTING;
import static org.elasticsearch.repositories.gcs.GoogleCloudStorageRepository.BUCKET;
import static org.elasticsearch.repositories.gcs.GoogleCloudStorageRepository.CLIENT_NAME;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
@SuppressForbidden(reason = "this test uses a HttpServer to emulate a Google Cloud Storage endpoint")
public class GoogleCloudStorageBlobStoreRepositoryTests extends ESMockAPIBasedRepositoryIntegTestCase {
@@ -119,7 +115,7 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESMockAPIBasedRe
@Override
protected Settings nodeSettings(int nodeOrdinal) {
if (serviceAccount == null) {
- serviceAccount = createServiceAccount();
+ serviceAccount = TestUtils.createServiceAccount(random());
}
final Settings.Builder settings = Settings.builder();
@@ -218,31 +214,6 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESMockAPIBasedRe
}
}
- private static byte[] createServiceAccount() {
- try {
- final KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA");
- keyPairGenerator.initialize(1024);
- final String privateKey = Base64.getEncoder().encodeToString(keyPairGenerator.generateKeyPair().getPrivate().getEncoded());
-
- final ByteArrayOutputStream out = new ByteArrayOutputStream();
- try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), out)) {
- builder.startObject();
- {
- builder.field("type", "service_account");
- builder.field("project_id", getTestClass().getName().toLowerCase(Locale.ROOT));
- builder.field("private_key_id", UUID.randomUUID().toString());
- builder.field("private_key", "-----BEGIN PRIVATE KEY-----\n" + privateKey + "\n-----END PRIVATE KEY-----\n");
- builder.field("client_email", "elastic@appspot.gserviceaccount.com");
- builder.field("client_id", String.valueOf(randomNonNegativeLong()));
- }
- builder.endObject();
- }
- return out.toByteArray();
- } catch (Exception e) {
- throw new AssertionError("Unable to create service account file", e);
- }
- }
-
/**
* Minimal HTTP handler that acts as a Google Cloud Storage compliant server
*/
@@ -345,65 +316,16 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESMockAPIBasedRe
exchange.getResponseBody().write(response);
} else if (Regex.simpleMatch("POST /upload/storage/v1/b/bucket/*uploadType=multipart*", request)) {
- try (BufferedInputStream in = new BufferedInputStream(new GZIPInputStream(exchange.getRequestBody()))) {
- byte[] response = new byte[0];
- String blob = null;
- int read;
- while ((read = in.read()) != -1) {
- boolean markAndContinue = false;
- try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
- do { // search next consecutive {carriage return, new line} chars and stop
- if ((char) read == '\r') {
- int next = in.read();
- if (next != -1) {
- if (next == '\n') {
- break;
- }
- out.write(read);
- out.write(next);
- continue;
- }
- }
- out.write(read);
- } while ((read = in.read()) != -1);
+ Optional<Tuple<String, BytesArray>> content = TestUtils.parseMultipartRequestBody(exchange.getRequestBody());
+ if (content.isPresent()) {
+ blobs.put(content.get().v1(), content.get().v2());
- final String line = new String(out.toByteArray(), UTF_8);
- if (line.length() == 0 || line.equals("\r\n") || line.startsWith("--")
- || line.toLowerCase(Locale.ROOT).startsWith("content")) {
- markAndContinue = true;
- } else if (line.startsWith("{\"bucket\":\"bucket\"")) {
- markAndContinue = true;
- Matcher matcher = Pattern.compile("\"name\":\"([^\"]*)\"").matcher(line);
- if (matcher.find()) {
- blob = matcher.group(1);
- response = line.getBytes(UTF_8);
- }
- }
- if (markAndContinue) {
- in.mark(Integer.MAX_VALUE);
- continue;
- }
- }
- if (blob != null) {
- in.reset();
- try (ByteArrayOutputStream binary = new ByteArrayOutputStream()) {
- while ((read = in.read()) != -1) {
- binary.write(read);
- }
- binary.flush();
- byte[] tmp = binary.toByteArray();
- // removes the trailing end "\r\n--__END_OF_PART__--\r\n" which is 23 bytes long
- blobs.put(blob, new BytesArray(Arrays.copyOf(tmp, tmp.length - 23)));
-
- exchange.getResponseHeaders().add("Content-Type", "application/json");
- exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length);
- exchange.getResponseBody().write(response);
-
- } finally {
- blob = null;
- }
- }
- }
+ byte[] response = ("{\"bucket\":\"bucket\",\"name\":\"" + content.get().v1() + "\"}").getBytes(UTF_8);
+ exchange.getResponseHeaders().add("Content-Type", "application/json");
+ exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length);
+ exchange.getResponseBody().write(response);
+ } else {
+ exchange.sendResponseHeaders(RestStatus.BAD_REQUEST.getStatus(), -1);
}
} else if (Regex.simpleMatch("POST /upload/storage/v1/b/bucket/*uploadType=resumable*", request)) {
@@ -426,41 +348,31 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESMockAPIBasedRe
RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params);
final String blobName = params.get("test_blob_name");
+ assertNotNull(blobs.get(blobName));
+ byte[] blob = blobs.get(blobName).array();
+
final String range = exchange.getRequestHeaders().getFirst("Content-Range");
- assert Strings.hasLength(range);
+ final Integer limit = TestUtils.getContentRangeLimit(range);
+ final int start = TestUtils.getContentRangeStart(range);
+ final int end = TestUtils.getContentRangeEnd(range);
- Matcher matcher = Pattern.compile("bytes ([^/]*)/([0-9\\*]*)").matcher(range);
- if (matcher.find()) {
- String bytes = matcher.group(1);
- String limit = matcher.group(2);
- byte[] blob = blobs.get(blobName).array();
- assert blob != null;
- // client is uploading a chunk
- matcher = Pattern.compile("([0-9]*)-([0-9]*)").matcher(bytes);
- assert matcher.find();
+ final ByteArrayOutputStream out = new ByteArrayOutputStream();
+ long bytesRead = Streams.copy(exchange.getRequestBody(), out);
+ int length = Math.max(end + 1, limit != null ? limit : 0);
+ assertThat((int) bytesRead, lessThanOrEqualTo(length));
+ if (length > blob.length) {
+ blob = ArrayUtil.growExact(blob, length);
+ }
+ System.arraycopy(out.toByteArray(), 0, blob, start, Math.toIntExact(bytesRead));
+ blobs.put(blobName, new BytesArray(blob));
- int end = Integer.parseInt(matcher.group(2));
- int start = Integer.parseInt(matcher.group(1));
-
- final ByteArrayOutputStream out = new ByteArrayOutputStream();
- long count = Streams.copy(exchange.getRequestBody(), out);
- int length = Math.max(end + 1, "*".equals(limit) ? 0 : Integer.parseInt(limit));
- assert count <= length;
- if (length > blob.length) {
- blob = ArrayUtil.growExact(blob, length);
- }
- assert blob.length >= end;
- System.arraycopy(out.toByteArray(), 0, blob, start, Math.toIntExact(count));
- blobs.put(blobName, new BytesArray(blob));
-
- if ("*".equals(limit)) {
- exchange.getResponseHeaders().add("Range", String.format(Locale.ROOT, "bytes=%d/%d", start, end));
- exchange.getResponseHeaders().add("Content-Length", "0");
- exchange.sendResponseHeaders(308 /* Resume Incomplete */, -1);
- } else {
- assert blob.length == Integer.parseInt(limit);
- exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1);
- }
+ if (limit == null) {
+ exchange.getResponseHeaders().add("Range", String.format(Locale.ROOT, "bytes=%d/%d", start, end));
+ exchange.getResponseHeaders().add("Content-Length", "0");
+ exchange.sendResponseHeaders(308 /* Resume Incomplete */, -1);
+ } else {
+ assertThat(limit, lessThanOrEqualTo(blob.length));
+ exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1);
}
} else {
exchange.sendResponseHeaders(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), -1);
diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/TestUtils.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/TestUtils.java
new file mode 100644
index 00000000000..a6ae0578fbd
--- /dev/null
+++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/TestUtils.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.gcs;
+
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.BufferedInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.KeyPairGenerator;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.Locale;
+import java.util.Optional;
+import java.util.Random;
+import java.util.UUID;
+import java.util.function.BiFunction;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.zip.GZIPInputStream;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+final class TestUtils {
+
+ private TestUtils() {}
+
+ /**
+ * Creates a random Service Account file for testing purpose
+ */
+ static byte[] createServiceAccount(final Random random) {
+ try {
+ final KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA");
+ keyPairGenerator.initialize(1024);
+ final String privateKey = Base64.getEncoder().encodeToString(keyPairGenerator.generateKeyPair().getPrivate().getEncoded());
+
+ final ByteArrayOutputStream out = new ByteArrayOutputStream();
+ try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), out)) {
+ builder.startObject();
+ {
+ builder.field("type", "service_account");
+ builder.field("project_id", "test");
+ builder.field("private_key_id", UUID.randomUUID().toString());
+ builder.field("private_key", "-----BEGIN PRIVATE KEY-----\n" + privateKey + "\n-----END PRIVATE KEY-----\n");
+ builder.field("client_email", "elastic@appspot.gserviceaccount.com");
+ builder.field("client_id", String.valueOf(Math.abs(random.nextLong())));
+ }
+ builder.endObject();
+ }
+ return out.toByteArray();
+ } catch (Exception e) {
+ throw new AssertionError("Unable to create service account file", e);
+ }
+ }
+
+ static Optional<Tuple<String, BytesArray>> parseMultipartRequestBody(final InputStream requestBody) throws IOException {
+ Tuple<String, BytesArray> content = null;
+ try (BufferedInputStream in = new BufferedInputStream(new GZIPInputStream(requestBody))) {
+ String name = null;
+ int read;
+ while ((read = in.read()) != -1) {
+ boolean markAndContinue = false;
+ try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
+ do { // search next consecutive {carriage return, new line} chars and stop
+ if ((char) read == '\r') {
+ int next = in.read();
+ if (next != -1) {
+ if (next == '\n') {
+ break;
+ }
+ out.write(read);
+ out.write(next);
+ continue;
+ }
+ }
+ out.write(read);
+ } while ((read = in.read()) != -1);
+
+ final String line = new String(out.toByteArray(), UTF_8);
+ if (line.length() == 0 || line.equals("\r\n") || line.startsWith("--")
+ || line.toLowerCase(Locale.ROOT).startsWith("content")) {
+ markAndContinue = true;
+ } else if (line.startsWith("{\"bucket\":\"bucket\"")) {
+ markAndContinue = true;
+ Matcher matcher = Pattern.compile("\"name\":\"([^\"]*)\"").matcher(line);
+ if (matcher.find()) {
+ name = matcher.group(1);
+ }
+ }
+ if (markAndContinue) {
+ in.mark(Integer.MAX_VALUE);
+ continue;
+ }
+ }
+ if (name != null) {
+ in.reset();
+ try (ByteArrayOutputStream binary = new ByteArrayOutputStream()) {
+ while ((read = in.read()) != -1) {
+ binary.write(read);
+ }
+ binary.flush();
+ byte[] tmp = binary.toByteArray();
+ // removes the trailing end "\r\n--__END_OF_PART__--\r\n" which is 23 bytes long
+ content = Tuple.tuple(name, new BytesArray(Arrays.copyOf(tmp, tmp.length - 23)));
+ }
+ }
+ }
+ }
+ return Optional.ofNullable(content);
+ }
+
+ private static final Pattern PATTERN_CONTENT_RANGE = Pattern.compile("bytes ([^/]*)/([0-9\\*]*)");
+ private static final Pattern PATTERN_CONTENT_RANGE_BYTES = Pattern.compile("([0-9]*)-([0-9]*)");
+
+ private static Integer parse(final Pattern pattern, final String contentRange, final BiFunction<String, String, Integer> fn) {
+ final Matcher matcher = pattern.matcher(contentRange);
+ if (matcher.matches() == false || matcher.groupCount() != 2) {
+ throw new IllegalArgumentException("Unable to parse content range header");
+ }
+ return fn.apply(matcher.group(1), matcher.group(2));
+ }
+
+ static Integer getContentRangeLimit(final String contentRange) {
+ return parse(PATTERN_CONTENT_RANGE, contentRange, (bytes, limit) -> "*".equals(limit) ? null : Integer.parseInt(limit));
+ }
+
+ static int getContentRangeStart(final String contentRange) {
+ return parse(PATTERN_CONTENT_RANGE, contentRange,
+ (bytes, limit) -> parse(PATTERN_CONTENT_RANGE_BYTES, bytes,
+ (start, end) -> Integer.parseInt(start)));
+ }
+
+ static int getContentRangeEnd(final String contentRange) {
+ return parse(PATTERN_CONTENT_RANGE, contentRange,
+ (bytes, limit) -> parse(PATTERN_CONTENT_RANGE_BYTES, bytes,
+ (start, end) -> Integer.parseInt(end)));
+ }
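+
+ // For example, for the header value "bytes 0-99/*" (a resumable upload whose total size is not
+ // yet known) these helpers return start 0, end 99 and a null limit; for "bytes 0-99/100" the
+ // limit is 100.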
+}
From 618efcfcf94a5803b4974113a9e572b7bb23011f Mon Sep 17 00:00:00 2001
From: maidoo
Date: Tue, 24 Sep 2019 16:01:07 +0800
Subject: [PATCH 15/94] Add submitDeleteByQueryTask method to
RestHighLevelClient (#46833)
The HLRC has methods for reindex that allow triggering it either synchronously
(RestHighLevelClient.reindex) or as an async task (RestHighLevelClient.submitReindexTask).
Delete by query, however, only has the RestHighLevelClient.deleteByQuery method (and its
async counterpart) and no RestHighLevelClient.submitDeleteByQueryTask. This commit adds
RestHighLevelClient.submitDeleteByQueryTask.
Closes #46395
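A rough sketch of the intended usage, mirroring the integration test added below (the index
name and client variable are illustrative):

    DeleteByQueryRequest request = new DeleteByQueryRequest();
    request.indices("my-index");
    request.setQuery(new IdsQueryBuilder().addIds("1"));
    TaskSubmissionResponse submission = client.submitDeleteByQueryTask(request, RequestOptions.DEFAULT);
    String taskId = submission.getTask(); // poll the tasks API to track completion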
---
.../client/RequestConverters.java | 60 +++++++++++--------
.../client/RestHighLevelClient.java | 15 +++++
.../org/elasticsearch/client/ReindexIT.java | 41 +++++++++++++
.../client/RequestConvertersTests.java | 1 +
4 files changed, 92 insertions(+), 25 deletions(-)
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
index 2fbfeb21a2e..169fe405e87 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
@@ -553,6 +553,10 @@ final class RequestConverters {
return prepareReindexRequest(reindexRequest, false);
}
+ static Request submitDeleteByQuery(DeleteByQueryRequest deleteByQueryRequest) throws IOException {
+ return prepareDeleteByQueryRequest(deleteByQueryRequest, false);
+ }
+
private static Request prepareReindexRequest(ReindexRequest reindexRequest, boolean waitForCompletion) throws IOException {
String endpoint = new EndpointBuilder().addPathPart("_reindex").build();
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
@@ -572,6 +576,36 @@ final class RequestConverters {
return request;
}
+ private static Request prepareDeleteByQueryRequest(DeleteByQueryRequest deleteByQueryRequest,
+ boolean waitForCompletion) throws IOException {
+ String endpoint =
+ endpoint(deleteByQueryRequest.indices(), deleteByQueryRequest.getDocTypes(), "_delete_by_query");
+ Request request = new Request(HttpPost.METHOD_NAME, endpoint);
+ Params params = new Params()
+ .withRouting(deleteByQueryRequest.getRouting())
+ .withRefresh(deleteByQueryRequest.isRefresh())
+ .withTimeout(deleteByQueryRequest.getTimeout())
+ .withWaitForActiveShards(deleteByQueryRequest.getWaitForActiveShards())
+ .withRequestsPerSecond(deleteByQueryRequest.getRequestsPerSecond())
+ .withIndicesOptions(deleteByQueryRequest.indicesOptions())
+ .withWaitForCompletion(waitForCompletion);
+ if (deleteByQueryRequest.isAbortOnVersionConflict() == false) {
+ params.putParam("conflicts", "proceed");
+ }
+ if (deleteByQueryRequest.getBatchSize() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE) {
+ params.putParam("scroll_size", Integer.toString(deleteByQueryRequest.getBatchSize()));
+ }
+ if (deleteByQueryRequest.getScrollTime() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT) {
+ params.putParam("scroll", deleteByQueryRequest.getScrollTime());
+ }
+ if (deleteByQueryRequest.getMaxDocs() > 0) {
+ params.putParam("max_docs", Integer.toString(deleteByQueryRequest.getMaxDocs()));
+ }
+ request.addParameters(params.asMap());
+ request.setEntity(createEntity(deleteByQueryRequest, REQUEST_BODY_CONTENT_TYPE));
+ return request;
+ }
+
static Request updateByQuery(UpdateByQueryRequest updateByQueryRequest) throws IOException {
String endpoint =
endpoint(updateByQueryRequest.indices(), updateByQueryRequest.getDocTypes(), "_update_by_query");
@@ -602,31 +636,7 @@ final class RequestConverters {
}
static Request deleteByQuery(DeleteByQueryRequest deleteByQueryRequest) throws IOException {
- String endpoint =
- endpoint(deleteByQueryRequest.indices(), deleteByQueryRequest.getDocTypes(), "_delete_by_query");
- Request request = new Request(HttpPost.METHOD_NAME, endpoint);
- Params params = new Params()
- .withRouting(deleteByQueryRequest.getRouting())
- .withRefresh(deleteByQueryRequest.isRefresh())
- .withTimeout(deleteByQueryRequest.getTimeout())
- .withWaitForActiveShards(deleteByQueryRequest.getWaitForActiveShards())
- .withRequestsPerSecond(deleteByQueryRequest.getRequestsPerSecond())
- .withIndicesOptions(deleteByQueryRequest.indicesOptions());
- if (deleteByQueryRequest.isAbortOnVersionConflict() == false) {
- params.putParam("conflicts", "proceed");
- }
- if (deleteByQueryRequest.getBatchSize() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE) {
- params.putParam("scroll_size", Integer.toString(deleteByQueryRequest.getBatchSize()));
- }
- if (deleteByQueryRequest.getScrollTime() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT) {
- params.putParam("scroll", deleteByQueryRequest.getScrollTime());
- }
- if (deleteByQueryRequest.getMaxDocs() > 0) {
- params.putParam("max_docs", Integer.toString(deleteByQueryRequest.getMaxDocs()));
- }
- request.addParameters(params.asMap());
- request.setEntity(createEntity(deleteByQueryRequest, REQUEST_BODY_CONTENT_TYPE));
- return request;
+ return prepareDeleteByQueryRequest(deleteByQueryRequest, true);
}
static Request rethrottleReindex(RethrottleRequest rethrottleRequest) {
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
index 0992edd936c..65d517231f2 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
@@ -590,6 +590,21 @@ public class RestHighLevelClient implements Closeable {
);
}
+ /**
+ * Submits a delete by query task.
+ * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html">
+ * Delete By Query API on elastic.co</a>
+ * @param deleteByQueryRequest the request
+ * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+ * @return the submission response
+ */
+ public final TaskSubmissionResponse submitDeleteByQueryTask(DeleteByQueryRequest deleteByQueryRequest,
+ RequestOptions options) throws IOException {
+ return performRequestAndParseEntity(
+ deleteByQueryRequest, RequestConverters::submitDeleteByQuery, options, TaskSubmissionResponse::fromXContent, emptySet()
+ );
+ }
+
/**
* Asynchronously executes a delete by query request.
* See
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java
index 90cfa3a9388..256e38da858 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java
@@ -436,6 +436,47 @@ public class ReindexIT extends ESRestHighLevelClientTestCase {
}
}
+ public void testDeleteByQueryTask() throws Exception {
+ final String sourceIndex = "source456";
+ {
+ // Prepare
+ Settings settings = Settings.builder()
+ .put("number_of_shards", 1)
+ .put("number_of_replicas", 0)
+ .build();
+ createIndex(sourceIndex, settings);
+ assertEquals(
+ RestStatus.OK,
+ highLevelClient().bulk(
+ new BulkRequest()
+ .add(new IndexRequest(sourceIndex).id("1")
+ .source(Collections.singletonMap("foo", 1), XContentType.JSON))
+ .add(new IndexRequest(sourceIndex).id("2")
+ .source(Collections.singletonMap("foo", 2), XContentType.JSON))
+ .add(new IndexRequest(sourceIndex).id("3")
+ .source(Collections.singletonMap("foo", 3), XContentType.JSON))
+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE),
+ RequestOptions.DEFAULT
+ ).status()
+ );
+ }
+ {
+ // tag::submit-delete_by_query-task
+ DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest();
+ deleteByQueryRequest.indices(sourceIndex);
+ deleteByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1"));
+ deleteByQueryRequest.setRefresh(true);
+
+ TaskSubmissionResponse deleteByQuerySubmission = highLevelClient()
+ .submitDeleteByQueryTask(deleteByQueryRequest, RequestOptions.DEFAULT);
+
+ String taskId = deleteByQuerySubmission.getTask();
+ // end::submit-delete_by_query-task
+
+ assertBusy(checkCompletionStatus(client(), taskId));
+ }
+ }
+
private static TaskId findTaskToRethrottle(String actionName) throws IOException {
long start = System.nanoTime();
ListTasksRequest request = new ListTasksRequest();
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java
index 57f6a579c70..106a58edebf 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java
@@ -582,6 +582,7 @@ public class RequestConvertersTests extends ESTestCase {
}
setRandomIndicesOptions(deleteByQueryRequest::setIndicesOptions, deleteByQueryRequest::indicesOptions, expectedParams);
setRandomTimeout(deleteByQueryRequest::setTimeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams);
+ expectedParams.put("wait_for_completion", Boolean.TRUE.toString());
Request request = RequestConverters.deleteByQuery(deleteByQueryRequest);
StringJoiner joiner = new StringJoiner("/", "/", "");
joiner.add(String.join(",", deleteByQueryRequest.indices()));
From 56224068d46f87654b434431c59cc6454f8e7531 Mon Sep 17 00:00:00 2001
From: Colin Goodheart-Smithe
Date: Tue, 24 Sep 2019 09:23:25 +0100
Subject: [PATCH 16/94] Release highlights for 7.4.0 (#46963)
Co-Authored-By: James Rodewig
---
.../release-notes/highlights-7.4.0.asciidoc | 156 ++++++++++++++++++
.../release-notes/highlights.asciidoc | 6 +-
2 files changed, 160 insertions(+), 2 deletions(-)
create mode 100644 docs/reference/release-notes/highlights-7.4.0.asciidoc
diff --git a/docs/reference/release-notes/highlights-7.4.0.asciidoc b/docs/reference/release-notes/highlights-7.4.0.asciidoc
new file mode 100644
index 00000000000..03a1b6dcf4f
--- /dev/null
+++ b/docs/reference/release-notes/highlights-7.4.0.asciidoc
@@ -0,0 +1,156 @@
+[[release-highlights-7.4.0]]
+== 7.4.0 release highlights
+++++
+7.4.0
+++++
+
+//NOTE: The notable-highlights tagged regions are re-used in the
+//Installation and Upgrade Guide
+
+// tag::notable-highlights[]
+[float]
+==== Results pinning
+
+You can use the new <<query-dsl-pinned-query,pinned query>>
+to define the first records
+(and the order in which they are returned)
+in a result set directly within {es}.
+
+// end::notable-highlights[]
+
+// tag::notable-highlights[]
+[float]
+==== New `shape` field type
+
+A new <<shape,`shape`>> field type has been added,
+which allows you to position and query shapes
+in a geometry of your choosing.
+
+// end::notable-highlights[]
+
+// tag::notable-highlights[]
+[float]
+==== Circle ingest processor
+
+A new <<ingest-circle-processor,circle ingest processor>> has been added,
+which translates circles into regular polygons (bounded by the circles).
+This makes ingesting, indexing, searching, and aggregating circles both easy and efficient.
+
+// end::notable-highlights[]
+
+// tag::notable-highlights[]
+[float]
+==== Aggregations on range fields
+
+The <<search-aggregations-bucket-histogram-aggregation,histogram>>
+and <<search-aggregations-bucket-datehistogram-aggregation,date histogram>>
+aggregations now support the <<range,`range`>> field type.
+
+Range aggregations are useful
+when counting ranges that overlap with specific buckets
+(e.g. the number of phone calls that took place during a specific minute).
+
+// end::notable-highlights[]
+
+// tag::notable-highlights[]
+[float]
+==== Cumulative cardinality aggregation
+
+A new <<search-aggregations-pipeline-cumulative-cardinality-aggregation,cumulative cardinality aggregation>>
+has been added
+as part of our ongoing effort to provide advanced aggregations.
+
+You can use this new pipeline aggregation
+to calculate a net-new total of document occurrences
+within a given time range.
+
+// end::notable-highlights[]
+
+// tag::notable-highlights[]
+[float]
+==== Snapshot lifecycle management
+
+We’re introducing <<getting-started-snapshot-lifecycle-management,snapshot lifecycle management>> (SLM),
+which allows an administrator to define policies,
+via API or {kibana-ref}/index-lifecycle-policies.html[{kib} UI],
+that manage when and how often snapshots are taken.
+You can use SLM
+to ensure that appropriate, recent backups are ready
+if disaster strikes
+or you need to restore {es} data.
+
+// end::notable-highlights[]
+
+// tag::notable-highlights[]
+[float]
+==== API key management
+
+New {stack-ov}/security-privileges.html[cluster privileges] to manage API keys have been added,
+allowing cluster administrators to manage everything,
+and regular users to manage their own keys.
+Users can create API keys
+and use them to provide long-term credentials
+while interacting with {es}.
+
+// end::notable-highlights[]
+
+// tag::notable-highlights[]
+[float]
+==== TLS settings for email notifications
+
+Notifications may contain sensitive information that must be protected over the wire. This requires that communication with the mail server is encrypted and authenticated properly.
+{es} now supports custom <<ssl-notification-settings,TLS settings>> for email notifications,
+allowing secure connections to servers with custom security configuration.
+
+// end::notable-highlights[]
+
+// tag::notable-highlights[]
+[float]
+==== Automatic query cancellation
+
+{es} now automatically terminates queries
+sent through the `_search` endpoint
+when the initiating connection is closed.
+
+// end::notable-highlights[]
+
+// tag::notable-highlights[]
+[float]
+==== Support for AdoptOpenJDK
+
+AdoptOpenJDK 13 is now supported and shipped with {es} as the pre-bundled JDK.
+
+If you want to use your own JDK,
+you can still do so by setting `JAVA_HOME` before starting Elasticsearch.
+
+The availability of a notarized AdoptOpenJDK package
+(per the new requirements for software running on macOS Catalina)
+facilitates notarization of {es} for continued support on macOS.
+
+// end::notable-highlights[]
+
+// tag::notable-highlights[]
+[float]
+==== Regression analysis - Experimental
+
+{stack-ov}/dfa-regression.html[Regression analysis] is an experimental machine learning process
+for estimating the relationships among a number of feature variables and a dependent variable,
+then making further predictions based on the described relationship.
+
+// end::notable-highlights[]
+
+// tag::notable-highlights[]
+[float]
+==== New vector distance functions for document script scoring - Experimental
+
+Two experimental similarity measurements,
+Manhattan distance (L1 norm)
+and Euclidean distance (L2 norm),
+have been added.
+Like the dot product and cosine similarity,
+the Euclidean and Manhattan distances are provided as <<vector-functions,predefined functions>>
+so that they may be incorporated with other query elements
+as part of a <<query-dsl-script-score-query,`script_score`>> query.
+
+// end::notable-highlights[]
+
diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc
index 1b48c8118c6..dbf45641958 100644
--- a/docs/reference/release-notes/highlights.asciidoc
+++ b/docs/reference/release-notes/highlights.asciidoc
@@ -3,9 +3,10 @@
[partintro]
--
-This section summarizes the most important changes in each release. For the
-full list, see <<es-release-notes>> and <<breaking-changes>>.
+This section summarizes the most important changes in each release. For the
+full list, see <<es-release-notes>> and <<breaking-changes>>.
+* <<release-highlights-7.4.0>>
* <<release-highlights-7.3.0>>
* <<release-highlights-7.2.0>>
* <<release-highlights-7.1.0>>
@@ -13,6 +14,7 @@ full list, see <<es-release-notes>> and <<breaking-changes>>.
--
+include::highlights-7.4.0.asciidoc[]
include::highlights-7.3.0.asciidoc[]
include::highlights-7.2.0.asciidoc[]
include::highlights-7.1.0.asciidoc[]
From 98e6bb4d01de21788e36a88e0dc3d67ed5a25a37 Mon Sep 17 00:00:00 2001
From: Ioannis Kakavas
Date: Tue, 24 Sep 2019 12:47:56 +0300
Subject: [PATCH 17/94] Workaround JDK-8213202 in SSLClientAuthTests (#46995)
This change works around JDK-8213202, which is a bug related to TLSv1.3
session resumption before JDK 11.0.3 that occurs when there are
multiple concurrent sessions being established. Nodes connecting to
each other will trigger this bug when client authentication is
disabled, which is the case for SSLClientAuthTests.
Backport of #46680
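For reference, a minimal sketch of the kind of settings override this adds (the setting name
matches the diff below; the protocol list shown here is illustrative):

    Settings settings = Settings.builder()
        .putList("xpack.security.transport.ssl.supported_protocols", "TLSv1.2", "TLSv1.1")
        .build();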
---
.../java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java
index e5fb9c71831..37cf17792a7 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java
@@ -96,6 +96,10 @@ public class SSLClientAuthTests extends SecurityIntegTestCase {
return builder
// invert the require auth settings
.put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.NONE)
+ // Due to the TLSv1.3 bug with session resumption when client authentication is not
+ // used, we need to set the protocols since we disabled client auth for transport
+ // to avoid failures on pre 11.0.3 JDKs. See #getProtocols
+ .putList("xpack.security.transport.ssl.supported_protocols", getProtocols())
.put("xpack.security.http.ssl.enabled", true)
.put("xpack.security.http.ssl.client_authentication", SSLClientAuth.REQUIRED)
.build();
From 6943a3101fea1a4a438323c6ec98cf9650e2245c Mon Sep 17 00:00:00 2001
From: David Turner
Date: Tue, 24 Sep 2019 12:31:13 +0100
Subject: [PATCH 18/94] Cut PersistedState interface from GatewayMetaState
(#46655)
Today `GatewayMetaState` implements `PersistedState` but it's an error to use
it as a `PersistedState` before it's been started, or if the node is
master-ineligible. It also holds some fields that are meaningless on nodes that
do not persist their states. Finally, it takes responsibility for both loading
the original cluster state and some of the high-level logic for writing the
cluster state back to disk.
This commit addresses these concerns by introducing a more specific
`PersistedState` implementation for use on master-eligible nodes which is only
instantiated if and when it's appropriate. It also moves the fields and
high-level persistence logic into a new `IncrementalClusterStateWriter` with a
more appropriate lifecycle.
Follow-up to #46326 and #46532
Relates #47001
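In outline, a condensed sketch (not the literal code in the diff) of how start() now picks a
PersistedState implementation per node role:

    if (DiscoveryNode.isMasterNode(settings)) {
        // durable: persists the cluster state on accept via the new IncrementalClusterStateWriter
        persistedState.set(new GatewayPersistedState(incrementalClusterStateWriter));
    } else {
        // in-memory: master-ineligible data nodes still write index metadata lazily on the cluster applier thread
        persistedState.set(new InMemoryPersistedState(currentTerm, lastAcceptedState));
    }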
---
.../gateway/GatewayMetaState.java | 603 +++++-------------
.../IncrementalClusterStateWriter.java | 384 +++++++++++
.../java/org/elasticsearch/node/Node.java | 4 +-
.../GatewayMetaStatePersistedStateTests.java | 48 +-
.../gateway/GatewayMetaStateTests.java | 397 +-----------
.../IncrementalClusterStateWriterTests.java | 429 +++++++++++++
.../AbstractCoordinatorTestCase.java | 10 +-
.../gateway/MockGatewayMetaState.java | 16 +-
8 files changed, 1003 insertions(+), 888 deletions(-)
create mode 100644 server/src/main/java/org/elasticsearch/gateway/IncrementalClusterStateWriter.java
create mode 100644 server/src/test/java/org/elasticsearch/gateway/IncrementalClusterStateWriterTests.java
diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
index c6e9182fd8f..f9433ee6059 100644
--- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
+++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
@@ -29,6 +29,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateApplier;
import org.elasticsearch.cluster.coordination.CoordinationState.PersistedState;
import org.elasticsearch.cluster.coordination.InMemoryPersistedState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -36,8 +37,6 @@ import org.elasticsearch.cluster.metadata.Manifest;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.routing.RoutingNode;
-import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Tuple;
@@ -49,124 +48,104 @@ import org.elasticsearch.plugins.MetaDataUpgrader;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
import java.util.Map;
-import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.UnaryOperator;
/**
- * This class is responsible for storing/retrieving metadata to/from disk.
- * When instance of this class is created, constructor ensures that this version is compatible with state stored on disk and performs
- * state upgrade if necessary. Also it checks that atomic move is supported on the filesystem level, because it's a must for metadata
- * store algorithm.
- * Please note that the state being loaded when constructing the instance of this class is NOT the state that will be used as a
- * {@link ClusterState#metaData()}. Instead when node is starting up, it calls {@link #getMetaData()} method and if this node is
- * elected as master, it requests metaData from other master eligible nodes. After that, master node performs re-conciliation on the
- * gathered results, re-creates {@link ClusterState} and broadcasts this state to other nodes in the cluster.
+ * Loads (and maybe upgrades) cluster metadata at startup, and persistently stores cluster metadata for future restarts.
+ *
+ * When started, ensures that this version is compatible with the state stored on disk, and performs a state upgrade if necessary. Note that
+ * the state being loaded when constructing the instance of this class is not necessarily the state that will be used as {@link
+ * ClusterState#metaData()} because it might be stale or incomplete. Master-eligible nodes must perform an election to find a complete and
+ * non-stale state, and master-ineligible nodes receive the real cluster state from the elected master after joining the cluster.
*/
-public class GatewayMetaState implements PersistedState {
- protected static final Logger logger = LogManager.getLogger(GatewayMetaState.class);
+public class GatewayMetaState {
+ private static final Logger logger = LogManager.getLogger(GatewayMetaState.class);
- private final MetaStateService metaStateService;
- private final Settings settings;
-
- // On master-eligible Zen2 nodes, we use this very object for the PersistedState (so that the state is actually persisted); on other
- // nodes we use an InMemoryPersistedState instead and persist using a cluster applier if needed. In all cases it's an error to try and
- // use this object as a PersistedState before calling start(). TODO stop implementing PersistedState at the top level.
+ // Set by calling start()
private final SetOnce persistedState = new SetOnce<>();
- // on master-eligible nodes we call updateClusterState under the Coordinator's mutex; on master-ineligible data nodes we call
- // updateClusterState on the (unique) cluster applier thread; on other nodes we never call updateClusterState. In all cases there's no
- // need to synchronize access to these variables.
- protected Manifest previousManifest;
- protected ClusterState previousClusterState;
- protected boolean incrementalWrite;
-
- public GatewayMetaState(Settings settings, MetaStateService metaStateService) {
- this.settings = settings;
- this.metaStateService = metaStateService;
+ public PersistedState getPersistedState() {
+ final PersistedState persistedState = this.persistedState.get();
+ assert persistedState != null : "not started";
+ return persistedState;
}
- public void start(TransportService transportService, ClusterService clusterService,
- MetaDataIndexUpgradeService metaDataIndexUpgradeService, MetaDataUpgrader metaDataUpgrader) {
- assert previousClusterState == null : "should only start once, but already have " + previousClusterState;
+ public MetaData getMetaData() {
+ return getPersistedState().getLastAcceptedState().metaData();
+ }
+
+ public void start(Settings settings, TransportService transportService, ClusterService clusterService,
+ MetaStateService metaStateService, MetaDataIndexUpgradeService metaDataIndexUpgradeService,
+ MetaDataUpgrader metaDataUpgrader) {
+ assert persistedState.get() == null : "should only start once, but already have " + persistedState.get();
+
+ final Tuple manifestClusterStateTuple;
try {
- upgradeMetaData(metaDataIndexUpgradeService, metaDataUpgrader);
- initializeClusterState(ClusterName.CLUSTER_NAME_SETTING.get(settings));
+ upgradeMetaData(settings, metaStateService, metaDataIndexUpgradeService, metaDataUpgrader);
+ manifestClusterStateTuple = loadStateAndManifest(ClusterName.CLUSTER_NAME_SETTING.get(settings), metaStateService);
} catch (IOException e) {
throw new ElasticsearchException("failed to load metadata", e);
}
- incrementalWrite = false;
-
- applyClusterStateUpdaters(transportService, clusterService);
+ final IncrementalClusterStateWriter incrementalClusterStateWriter
+ = new IncrementalClusterStateWriter(metaStateService, manifestClusterStateTuple.v1(),
+ prepareInitialClusterState(transportService, clusterService, manifestClusterStateTuple.v2()));
if (DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings).equals(DiscoveryModule.ZEN_DISCOVERY_TYPE)) {
- // only for tests that simulate a mixed Zen1/Zen2 clusters, see Zen1IT
- if (isMasterOrDataNode()) {
- clusterService.addLowPriorityApplier(this::applyClusterState);
+ // only for tests that simulate mixed Zen1/Zen2 clusters, see Zen1IT
+ if (isMasterOrDataNode(settings)) {
+ clusterService.addLowPriorityApplier(new GatewayClusterApplier(incrementalClusterStateWriter));
}
- persistedState.set(new InMemoryPersistedState(getCurrentTerm(), getLastAcceptedState()));
+ persistedState.set(new InMemoryPersistedState(manifestClusterStateTuple.v1().getCurrentTerm(), manifestClusterStateTuple.v2()));
+ } else if (DiscoveryNode.isMasterNode(settings) == false) {
+ if (DiscoveryNode.isDataNode(settings)) {
+ // Master-eligible nodes persist index metadata for all indices regardless of whether they hold any shards or not. It's
+ // vitally important to the safety of the cluster coordination system that master-eligible nodes persist this metadata when
+ // _accepting_ the cluster state (i.e. before it is committed). This persistence happens on the generic threadpool.
+ //
+ // In contrast, master-ineligible data nodes only persist the index metadata for shards that they hold. When all shards of
+ // an index are moved off such a node the IndicesStore is responsible for removing the corresponding index directory,
+ // including the metadata, and does so on the cluster applier thread.
+ //
+ // This presents a problem: if a shard is unassigned from a node and then reassigned back to it again then there is a race
+ // between the IndicesStore deleting the index folder and the CoordinationState concurrently trying to write the updated
+ // metadata into it. We could probably solve this with careful synchronization, but in fact there is no need. The persisted
+ // state on master-ineligible data nodes is mostly ignored - it's only there to support dangling index imports, which is
+ // inherently unsafe anyway. Thus we can safely delay metadata writes on master-ineligible data nodes until applying the
+ // cluster state, which is what this does:
+ clusterService.addLowPriorityApplier(new GatewayClusterApplier(incrementalClusterStateWriter));
+ }
+
+ // Master-ineligible nodes do not need to persist the cluster state when accepting it because they are not in the voting
+ // configuration, so it's ok if they have a stale or incomplete cluster state when restarted. We track the latest cluster state
+ // in memory instead.
+ persistedState.set(new InMemoryPersistedState(manifestClusterStateTuple.v1().getCurrentTerm(), manifestClusterStateTuple.v2()));
} else {
- if (DiscoveryNode.isMasterNode(settings) == false) {
- if (DiscoveryNode.isDataNode(settings)) {
- // Master-eligible nodes persist index metadata for all indices regardless of whether they hold any shards or not. It's
- // vitally important to the safety of the cluster coordination system that master-eligible nodes persist this metadata
- // when _accepting_ the cluster state (i.e. before it is committed). This persistence happens on the generic threadpool.
- //
- // In contrast, master-ineligible data nodes only persist the index metadata for shards that they hold. When all shards
- // of an index are moved off such a node the IndicesStore is responsible for removing the corresponding index directory,
- // including the metadata, and does so on the cluster applier thread.
- //
- // This presents a problem: if a shard is unassigned from a node and then reassigned back to it again then there is a
- // race between the IndicesStore deleting the index folder and the CoordinationState concurrently trying to write the
- // updated metadata into it. We could probably solve this with careful synchronization, but in fact there is no need.
- // The persisted state on master-ineligible data nodes is mostly ignored - it's only there to support dangling index
- // imports, which is inherently unsafe anyway. Thus we can safely delay metadata writes on master-ineligible data nodes
- // until applying the cluster state, which is what this does:
- clusterService.addLowPriorityApplier(this::applyClusterState);
- }
- persistedState.set(new InMemoryPersistedState(getCurrentTerm(), getLastAcceptedState()));
- } else {
- persistedState.set(this);
- }
+ // Master-eligible nodes must persist the cluster state when accepting it because they must reload the (complete, fresh)
+ // last-accepted cluster state when restarted.
+ persistedState.set(new GatewayPersistedState(incrementalClusterStateWriter));
}
}
- private void initializeClusterState(ClusterName clusterName) throws IOException {
- long startNS = System.nanoTime();
- Tuple manifestAndMetaData = metaStateService.loadFullState();
- previousManifest = manifestAndMetaData.v1();
-
- final MetaData metaData = manifestAndMetaData.v2();
-
- previousClusterState = ClusterState.builder(clusterName)
- .version(previousManifest.getClusterStateVersion())
- .metaData(metaData).build();
-
- logger.debug("took {} to load state", TimeValue.timeValueMillis(TimeValue.nsecToMSec(System.nanoTime() - startNS)));
- }
-
- protected void applyClusterStateUpdaters(TransportService transportService, ClusterService clusterService) {
- assert previousClusterState.nodes().getLocalNode() == null : "applyClusterStateUpdaters must only be called once";
+ // exposed so it can be overridden by tests
+ ClusterState prepareInitialClusterState(TransportService transportService, ClusterService clusterService, ClusterState clusterState) {
+ assert clusterState.nodes().getLocalNode() == null : "prepareInitialClusterState must only be called once";
assert transportService.getLocalNode() != null : "transport service is not yet started";
-
- previousClusterState = Function.identity()
+ return Function.identity()
.andThen(ClusterStateUpdaters::addStateNotRecoveredBlock)
.andThen(state -> ClusterStateUpdaters.setLocalNode(state, transportService.getLocalNode()))
.andThen(state -> ClusterStateUpdaters.upgradeAndArchiveUnknownOrInvalidSettings(state, clusterService.getClusterSettings()))
.andThen(ClusterStateUpdaters::recoverClusterBlocks)
- .apply(previousClusterState);
+ .apply(clusterState);
}
- protected void upgradeMetaData(MetaDataIndexUpgradeService metaDataIndexUpgradeService, MetaDataUpgrader metaDataUpgrader)
- throws IOException {
- if (isMasterOrDataNode()) {
+ // exposed so it can be overridden by tests
+ void upgradeMetaData(Settings settings, MetaStateService metaStateService, MetaDataIndexUpgradeService metaDataIndexUpgradeService,
+ MetaDataUpgrader metaDataUpgrader) throws IOException {
+ if (isMasterOrDataNode(settings)) {
try {
final Tuple metaStateAndData = metaStateService.loadFullState();
final Manifest manifest = metaStateAndData.v1();
@@ -179,7 +158,8 @@ public class GatewayMetaState implements PersistedState {
// if there is a manifest file, it means metadata is properly persisted to all data paths
// if there is no manifest file (upgrade from 6.x to 7.x) metadata might be missing on some data paths,
// but in any case we will re-write it as soon as we receive the first ClusterState
- final AtomicClusterStateWriter writer = new AtomicClusterStateWriter(metaStateService, manifest);
+ final IncrementalClusterStateWriter.AtomicClusterStateWriter writer
+ = new IncrementalClusterStateWriter.AtomicClusterStateWriter(metaStateService, manifest);
final MetaData upgradedMetaData = upgradeMetaData(metaData, metaDataIndexUpgradeService, metaDataUpgrader);
final long globalStateGeneration;
@@ -207,233 +187,25 @@ public class GatewayMetaState implements PersistedState {
}
}
- private boolean isMasterOrDataNode() {
+ private static Tuple loadStateAndManifest(ClusterName clusterName,
+ MetaStateService metaStateService) throws IOException {
+ final long startNS = System.nanoTime();
+ final Tuple manifestAndMetaData = metaStateService.loadFullState();
+ final Manifest manifest = manifestAndMetaData.v1();
+
+ final ClusterState clusterState = ClusterState.builder(clusterName)
+ .version(manifest.getClusterStateVersion())
+ .metaData(manifestAndMetaData.v2()).build();
+
+ logger.debug("took {} to load state", TimeValue.timeValueMillis(TimeValue.nsecToMSec(System.nanoTime() - startNS)));
+
+ return Tuple.tuple(manifest, clusterState);
+ }
+
+ private static boolean isMasterOrDataNode(Settings settings) {
return DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings);
}
- public PersistedState getPersistedState() {
- final PersistedState persistedState = this.persistedState.get();
- assert persistedState != null : "not started";
- return persistedState;
- }
-
- public MetaData getMetaData() {
- return previousClusterState.metaData();
- }
-
- private void applyClusterState(ClusterChangedEvent event) {
- assert isMasterOrDataNode();
-
- if (event.state().blocks().disableStatePersistence()) {
- incrementalWrite = false;
- return;
- }
-
- try {
- // Hack: This is to ensure that non-master-eligible Zen2 nodes always store a current term
- // that's higher than the last accepted term.
- // TODO: can we get rid of this hack?
- if (event.state().term() > getCurrentTerm()) {
- innerSetCurrentTerm(event.state().term());
- }
-
- updateClusterState(event.state(), event.previousState());
- incrementalWrite = true;
- } catch (WriteStateException e) {
- logger.warn("Exception occurred when storing new meta data", e);
- }
- }
-
- @Override
- public long getCurrentTerm() {
- return previousManifest.getCurrentTerm();
- }
-
- @Override
- public ClusterState getLastAcceptedState() {
- assert previousClusterState.nodes().getLocalNode() != null : "Cluster state is not fully built yet";
- return previousClusterState;
- }
-
- @Override
- public void setCurrentTerm(long currentTerm) {
- try {
- innerSetCurrentTerm(currentTerm);
- } catch (WriteStateException e) {
- logger.error(new ParameterizedMessage("Failed to set current term to {}", currentTerm), e);
- e.rethrowAsErrorOrUncheckedException();
- }
- }
-
- private void innerSetCurrentTerm(long currentTerm) throws WriteStateException {
- Manifest manifest = new Manifest(currentTerm, previousManifest.getClusterStateVersion(), previousManifest.getGlobalGeneration(),
- new HashMap<>(previousManifest.getIndexGenerations()));
- metaStateService.writeManifestAndCleanup("current term changed", manifest);
- previousManifest = manifest;
- }
-
- @Override
- public void setLastAcceptedState(ClusterState clusterState) {
- try {
- incrementalWrite = previousClusterState.term() == clusterState.term();
- updateClusterState(clusterState, previousClusterState);
- } catch (WriteStateException e) {
- logger.error(new ParameterizedMessage("Failed to set last accepted state with version {}", clusterState.version()), e);
- e.rethrowAsErrorOrUncheckedException();
- }
- }
-
- /**
- * This class is used to write changed global {@link MetaData}, {@link IndexMetaData} and {@link Manifest} to disk.
- * This class delegates <code>write*</code> calls to corresponding write calls in {@link MetaStateService} and
- * additionally it keeps track of cleanup actions to be performed if transaction succeeds or fails.
- */
- static class AtomicClusterStateWriter {
- private static final String FINISHED_MSG = "AtomicClusterStateWriter is finished";
- private final List commitCleanupActions;
- private final List rollbackCleanupActions;
- private final Manifest previousManifest;
- private final MetaStateService metaStateService;
- private boolean finished;
-
- AtomicClusterStateWriter(MetaStateService metaStateService, Manifest previousManifest) {
- this.metaStateService = metaStateService;
- assert previousManifest != null;
- this.previousManifest = previousManifest;
- this.commitCleanupActions = new ArrayList<>();
- this.rollbackCleanupActions = new ArrayList<>();
- this.finished = false;
- }
-
- long writeGlobalState(String reason, MetaData metaData) throws WriteStateException {
- assert finished == false : FINISHED_MSG;
- try {
- rollbackCleanupActions.add(() -> metaStateService.cleanupGlobalState(previousManifest.getGlobalGeneration()));
- long generation = metaStateService.writeGlobalState(reason, metaData);
- commitCleanupActions.add(() -> metaStateService.cleanupGlobalState(generation));
- return generation;
- } catch (WriteStateException e) {
- rollback();
- throw e;
- }
- }
-
- long writeIndex(String reason, IndexMetaData metaData) throws WriteStateException {
- assert finished == false : FINISHED_MSG;
- try {
- Index index = metaData.getIndex();
- Long previousGeneration = previousManifest.getIndexGenerations().get(index);
- if (previousGeneration != null) {
- // we prefer not to clean-up index metadata in case of rollback,
- // if it's not referenced by previous manifest file
- // not to break dangling indices functionality
- rollbackCleanupActions.add(() -> metaStateService.cleanupIndex(index, previousGeneration));
- }
- long generation = metaStateService.writeIndex(reason, metaData);
- commitCleanupActions.add(() -> metaStateService.cleanupIndex(index, generation));
- return generation;
- } catch (WriteStateException e) {
- rollback();
- throw e;
- }
- }
-
- void writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException {
- assert finished == false : FINISHED_MSG;
- try {
- metaStateService.writeManifestAndCleanup(reason, manifest);
- commitCleanupActions.forEach(Runnable::run);
- finished = true;
- } catch (WriteStateException e) {
- // if Manifest write results in dirty WriteStateException it's not safe to remove
- // new metadata files, because if Manifest was actually written to disk and its deletion
- // fails it will reference these new metadata files.
- // In the future, we might decide to add more fine grained check to understand if after
- // WriteStateException Manifest deletion has actually failed.
- if (e.isDirty() == false) {
- rollback();
- }
- throw e;
- }
- }
-
- void rollback() {
- rollbackCleanupActions.forEach(Runnable::run);
- finished = true;
- }
- }
-
- /**
- * Updates manifest and meta data on disk.
- *
- * @param newState new {@link ClusterState}
- * @param previousState previous {@link ClusterState}
- *
- * @throws WriteStateException if exception occurs. See also {@link WriteStateException#isDirty()}.
- */
- private void updateClusterState(ClusterState newState, ClusterState previousState)
- throws WriteStateException {
- MetaData newMetaData = newState.metaData();
-
- final AtomicClusterStateWriter writer = new AtomicClusterStateWriter(metaStateService, previousManifest);
- long globalStateGeneration = writeGlobalState(writer, newMetaData);
- Map indexGenerations = writeIndicesMetadata(writer, newState, previousState);
- Manifest manifest = new Manifest(previousManifest.getCurrentTerm(), newState.version(), globalStateGeneration, indexGenerations);
- writeManifest(writer, manifest);
-
- previousManifest = manifest;
- previousClusterState = newState;
- }
-
- private void writeManifest(AtomicClusterStateWriter writer, Manifest manifest) throws WriteStateException {
- if (manifest.equals(previousManifest) == false) {
- writer.writeManifestAndCleanup("changed", manifest);
- }
- }
-
- private Map writeIndicesMetadata(AtomicClusterStateWriter writer, ClusterState newState, ClusterState previousState)
- throws WriteStateException {
- Map previouslyWrittenIndices = previousManifest.getIndexGenerations();
- Set relevantIndices = getRelevantIndices(newState, previousState, previouslyWrittenIndices.keySet());
-
- Map newIndices = new HashMap<>();
-
- MetaData previousMetaData = incrementalWrite ? previousState.metaData() : null;
- Iterable actions = resolveIndexMetaDataActions(previouslyWrittenIndices, relevantIndices, previousMetaData,
- newState.metaData());
-
- for (IndexMetaDataAction action : actions) {
- long generation = action.execute(writer);
- newIndices.put(action.getIndex(), generation);
- }
-
- return newIndices;
- }
-
- private long writeGlobalState(AtomicClusterStateWriter writer, MetaData newMetaData)
- throws WriteStateException {
- if (incrementalWrite == false || MetaData.isGlobalStateEquals(previousClusterState.metaData(), newMetaData) == false) {
- return writer.writeGlobalState("changed", newMetaData);
- }
- return previousManifest.getGlobalGeneration();
- }
-
- public static Set getRelevantIndices(ClusterState state, ClusterState previousState, Set previouslyWrittenIndices) {
- Set relevantIndices;
- if (isDataOnlyNode(state)) {
- relevantIndices = getRelevantIndicesOnDataOnlyNode(state, previousState, previouslyWrittenIndices);
- } else if (state.nodes().getLocalNode().isMasterNode()) {
- relevantIndices = getRelevantIndicesForMasterEligibleNode(state);
- } else {
- relevantIndices = Collections.emptySet();
- }
- return relevantIndices;
- }
-
- private static boolean isDataOnlyNode(ClusterState state) {
- return state.nodes().getLocalNode().isMasterNode() == false && state.nodes().getLocalNode().isDataNode();
- }
-
/**
* Elasticsearch 2.0 removed several deprecated features as well as support for Lucene 3.x. This method calls
* {@link MetaDataIndexUpgradeService} to make sure that indices are compatible with the current version. The
@@ -489,160 +261,81 @@ public class GatewayMetaState implements PersistedState {
return false;
}
- /**
- * Returns list of {@link IndexMetaDataAction} for each relevant index.
- * For each relevant index there are 3 options:
- * <ol>
- * <li>
- * {@link KeepPreviousGeneration} - index metadata is already stored to disk and index metadata version is not changed, no
- * action is required.
- * </li>
- * <li>
- * {@link WriteNewIndexMetaData} - there is no index metadata on disk and index metadata for this index should be written.
- * </li>
- * <li>
- * {@link WriteChangedIndexMetaData} - index metadata is already on disk, but index metadata version has changed. Updated
- * index metadata should be written to disk.
- * </li>
- * </ol>
- *
- * @param previouslyWrittenIndices A list of indices for which the state was already written before
- * @param relevantIndices The list of indices for which state should potentially be written
- * @param previousMetaData The last meta data we know of
- * @param newMetaData The new metadata
- * @return list of {@link IndexMetaDataAction} for each relevant index.
- */
- public static List resolveIndexMetaDataActions(Map previouslyWrittenIndices,
- Set relevantIndices,
- MetaData previousMetaData,
- MetaData newMetaData) {
- List actions = new ArrayList<>();
- for (Index index : relevantIndices) {
- IndexMetaData newIndexMetaData = newMetaData.getIndexSafe(index);
- IndexMetaData previousIndexMetaData = previousMetaData == null ? null : previousMetaData.index(index);
- if (previouslyWrittenIndices.containsKey(index) == false || previousIndexMetaData == null) {
- actions.add(new WriteNewIndexMetaData(newIndexMetaData));
- } else if (previousIndexMetaData.getVersion() != newIndexMetaData.getVersion()) {
- actions.add(new WriteChangedIndexMetaData(previousIndexMetaData, newIndexMetaData));
- } else {
- actions.add(new KeepPreviousGeneration(index, previouslyWrittenIndices.get(index)));
+ private static class GatewayClusterApplier implements ClusterStateApplier {
+
+ private final IncrementalClusterStateWriter incrementalClusterStateWriter;
+
+ private GatewayClusterApplier(IncrementalClusterStateWriter incrementalClusterStateWriter) {
+ this.incrementalClusterStateWriter = incrementalClusterStateWriter;
+ }
+
+ @Override
+ public void applyClusterState(ClusterChangedEvent event) {
+ if (event.state().blocks().disableStatePersistence()) {
+ incrementalClusterStateWriter.setIncrementalWrite(false);
+ return;
+ }
+
+ try {
+ // Hack: This is to ensure that non-master-eligible Zen2 nodes always store a current term
+ // that's higher than the last accepted term.
+ // TODO: can we get rid of this hack?
+ if (event.state().term() > incrementalClusterStateWriter.getPreviousManifest().getCurrentTerm()) {
+ incrementalClusterStateWriter.setCurrentTerm(event.state().term());
+ }
+
+ incrementalClusterStateWriter.updateClusterState(event.state(), event.previousState());
+ incrementalClusterStateWriter.setIncrementalWrite(true);
+ } catch (WriteStateException e) {
+ logger.warn("Exception occurred when storing new meta data", e);
}
}
- return actions;
+
}
- private static Set getRelevantIndicesOnDataOnlyNode(ClusterState state, ClusterState previousState, Set
- previouslyWrittenIndices) {
- RoutingNode newRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
- if (newRoutingNode == null) {
- throw new IllegalStateException("cluster state does not contain this node - cannot write index meta state");
+ private static class GatewayPersistedState implements PersistedState {
+
+ private final IncrementalClusterStateWriter incrementalClusterStateWriter;
+
+ GatewayPersistedState(IncrementalClusterStateWriter incrementalClusterStateWriter) {
+ this.incrementalClusterStateWriter = incrementalClusterStateWriter;
}
- Set indices = new HashSet<>();
- for (ShardRouting routing : newRoutingNode) {
- indices.add(routing.index());
+
+ @Override
+ public long getCurrentTerm() {
+ return incrementalClusterStateWriter.getPreviousManifest().getCurrentTerm();
}
- // we have to check the meta data also: closed indices will not appear in the routing table, but we must still write the state if
- // we have it written on disk previously
- for (IndexMetaData indexMetaData : state.metaData()) {
- boolean isOrWasClosed = indexMetaData.getState().equals(IndexMetaData.State.CLOSE);
- // if the index is open we might still have to write the state if it just transitioned from closed to open
- // so we have to check for that as well.
- IndexMetaData previousMetaData = previousState.metaData().index(indexMetaData.getIndex());
- if (previousMetaData != null) {
- isOrWasClosed = isOrWasClosed || previousMetaData.getState().equals(IndexMetaData.State.CLOSE);
- }
- if (previouslyWrittenIndices.contains(indexMetaData.getIndex()) && isOrWasClosed) {
- indices.add(indexMetaData.getIndex());
+
+ @Override
+ public ClusterState getLastAcceptedState() {
+ final ClusterState previousClusterState = incrementalClusterStateWriter.getPreviousClusterState();
+ assert previousClusterState.nodes().getLocalNode() != null : "Cluster state is not fully built yet";
+ return previousClusterState;
+ }
+
+ @Override
+ public void setCurrentTerm(long currentTerm) {
+ try {
+ incrementalClusterStateWriter.setCurrentTerm(currentTerm);
+ } catch (WriteStateException e) {
+ logger.error(new ParameterizedMessage("Failed to set current term to {}", currentTerm), e);
+ e.rethrowAsErrorOrUncheckedException();
}
}
- return indices;
- }
-
- private static Set getRelevantIndicesForMasterEligibleNode(ClusterState state) {
- Set relevantIndices = new HashSet<>();
- // we have to iterate over the metadata to make sure we also capture closed indices
- for (IndexMetaData indexMetaData : state.metaData()) {
- relevantIndices.add(indexMetaData.getIndex());
- }
- return relevantIndices;
- }
-
- /**
- * Action to perform with index metadata.
- */
- public interface IndexMetaDataAction {
- /**
- * @return index for index metadata.
- */
- Index getIndex();
-
- /**
- * Executes this action using provided {@link AtomicClusterStateWriter}.
- *
- * @return new index metadata state generation, to be used in manifest file.
- * @throws WriteStateException if exception occurs.
- */
- long execute(AtomicClusterStateWriter writer) throws WriteStateException;
- }
-
- public static class KeepPreviousGeneration implements IndexMetaDataAction {
- private final Index index;
- private final long generation;
-
- KeepPreviousGeneration(Index index, long generation) {
- this.index = index;
- this.generation = generation;
- }
@Override
- public Index getIndex() {
- return index;
+ public void setLastAcceptedState(ClusterState clusterState) {
+ try {
+ final ClusterState previousClusterState = incrementalClusterStateWriter.getPreviousClusterState();
+ incrementalClusterStateWriter.setIncrementalWrite(previousClusterState.term() == clusterState.term());
+ incrementalClusterStateWriter.updateClusterState(clusterState, previousClusterState);
+ } catch (WriteStateException e) {
+ logger.error(new ParameterizedMessage("Failed to set last accepted state with version {}", clusterState.version()), e);
+ e.rethrowAsErrorOrUncheckedException();
+ }
}
- @Override
- public long execute(AtomicClusterStateWriter writer) {
- return generation;
- }
}
- public static class WriteNewIndexMetaData implements IndexMetaDataAction {
- private final IndexMetaData indexMetaData;
-
- WriteNewIndexMetaData(IndexMetaData indexMetaData) {
- this.indexMetaData = indexMetaData;
- }
-
- @Override
- public Index getIndex() {
- return indexMetaData.getIndex();
- }
-
- @Override
- public long execute(AtomicClusterStateWriter writer) throws WriteStateException {
- return writer.writeIndex("freshly created", indexMetaData);
- }
- }
-
- public static class WriteChangedIndexMetaData implements IndexMetaDataAction {
- private final IndexMetaData newIndexMetaData;
- private final IndexMetaData oldIndexMetaData;
-
- WriteChangedIndexMetaData(IndexMetaData oldIndexMetaData, IndexMetaData newIndexMetaData) {
- this.oldIndexMetaData = oldIndexMetaData;
- this.newIndexMetaData = newIndexMetaData;
- }
-
- @Override
- public Index getIndex() {
- return newIndexMetaData.getIndex();
- }
-
- @Override
- public long execute(AtomicClusterStateWriter writer) throws WriteStateException {
- return writer.writeIndex(
- "version changed from [" + oldIndexMetaData.getVersion() + "] to [" + newIndexMetaData.getVersion() + "]",
- newIndexMetaData);
- }
- }
}
diff --git a/server/src/main/java/org/elasticsearch/gateway/IncrementalClusterStateWriter.java b/server/src/main/java/org/elasticsearch/gateway/IncrementalClusterStateWriter.java
new file mode 100644
index 00000000000..5facb826a24
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/gateway/IncrementalClusterStateWriter.java
@@ -0,0 +1,384 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gateway;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.Manifest;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.index.Index;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Tracks the metadata written to disk, allowing updated metadata to be written incrementally (i.e. only writing out the changed metadata).
+ */
+class IncrementalClusterStateWriter {
+
+ private final MetaStateService metaStateService;
+
+ // On master-eligible nodes we call updateClusterState under the Coordinator's mutex; on master-ineligible data nodes we call
+ // updateClusterState on the (unique) cluster applier thread; on other nodes we never call updateClusterState. In all cases there's
+ // no need to synchronize access to these fields.
+ private Manifest previousManifest;
+ private ClusterState previousClusterState;
+ private boolean incrementalWrite;
+
+ IncrementalClusterStateWriter(MetaStateService metaStateService, Manifest manifest, ClusterState clusterState) {
+ this.metaStateService = metaStateService;
+ this.previousManifest = manifest;
+ this.previousClusterState = clusterState;
+ this.incrementalWrite = false;
+ }
+
+ void setCurrentTerm(long currentTerm) throws WriteStateException {
+ Manifest manifest = new Manifest(currentTerm, previousManifest.getClusterStateVersion(), previousManifest.getGlobalGeneration(),
+ new HashMap<>(previousManifest.getIndexGenerations()));
+ metaStateService.writeManifestAndCleanup("current term changed", manifest);
+ previousManifest = manifest;
+ }
+
+ Manifest getPreviousManifest() {
+ return previousManifest;
+ }
+
+ ClusterState getPreviousClusterState() {
+ return previousClusterState;
+ }
+
+ void setIncrementalWrite(boolean incrementalWrite) {
+ this.incrementalWrite = incrementalWrite;
+ }
+
+ /**
+ * Updates manifest and meta data on disk.
+ *
+ * @param newState new {@link ClusterState}
+ * @param previousState previous {@link ClusterState}
+ *
+ * @throws WriteStateException if an exception occurs while writing. See also {@link WriteStateException#isDirty()}.
+ */
+ void updateClusterState(ClusterState newState, ClusterState previousState) throws WriteStateException {
+ MetaData newMetaData = newState.metaData();
+
+ final AtomicClusterStateWriter writer = new AtomicClusterStateWriter(metaStateService, previousManifest);
+ long globalStateGeneration = writeGlobalState(writer, newMetaData);
+ Map<Index, Long> indexGenerations = writeIndicesMetadata(writer, newState, previousState);
+ Manifest manifest = new Manifest(previousManifest.getCurrentTerm(), newState.version(), globalStateGeneration, indexGenerations);
+ writeManifest(writer, manifest);
+
+ previousManifest = manifest;
+ previousClusterState = newState;
+ }
+
+ private void writeManifest(AtomicClusterStateWriter writer, Manifest manifest) throws WriteStateException {
+ if (manifest.equals(previousManifest) == false) {
+ writer.writeManifestAndCleanup("changed", manifest);
+ }
+ }
+
+ private Map<Index, Long> writeIndicesMetadata(AtomicClusterStateWriter writer, ClusterState newState, ClusterState previousState)
+ throws WriteStateException {
+ Map<Index, Long> previouslyWrittenIndices = previousManifest.getIndexGenerations();
+ Set<Index> relevantIndices = getRelevantIndices(newState, previousState, previouslyWrittenIndices.keySet());
+
+ Map<Index, Long> newIndices = new HashMap<>();
+
+ MetaData previousMetaData = incrementalWrite ? previousState.metaData() : null;
+ Iterable<IndexMetaDataAction> actions = resolveIndexMetaDataActions(previouslyWrittenIndices, relevantIndices, previousMetaData,
+ newState.metaData());
+
+ for (IndexMetaDataAction action : actions) {
+ long generation = action.execute(writer);
+ newIndices.put(action.getIndex(), generation);
+ }
+
+ return newIndices;
+ }
+
+ private long writeGlobalState(AtomicClusterStateWriter writer, MetaData newMetaData) throws WriteStateException {
+ if (incrementalWrite == false || MetaData.isGlobalStateEquals(previousClusterState.metaData(), newMetaData) == false) {
+ return writer.writeGlobalState("changed", newMetaData);
+ }
+ return previousManifest.getGlobalGeneration();
+ }
+
+
+ /**
+ * Returns a list of {@link IndexMetaDataAction} for each relevant index.
+ * For each relevant index there are 3 options:
+ * <ol>
+ * <li>
+ * {@link KeepPreviousGeneration} - index metadata is already stored to disk and index metadata version is not changed, no
+ * action is required.
+ * </li>
+ * <li>
+ * {@link WriteNewIndexMetaData} - there is no index metadata on disk and index metadata for this index should be written.
+ * </li>
+ * <li>
+ * {@link WriteChangedIndexMetaData} - index metadata is already on disk, but index metadata version has changed. Updated
+ * index metadata should be written to disk.
+ * </li>
+ * </ol>
+ *
+ * @param previouslyWrittenIndices A list of indices for which the state was already written before
+ * @param relevantIndices The list of indices for which state should potentially be written
+ * @param previousMetaData The last meta data we know of
+ * @param newMetaData The new metadata
+ * @return a list of {@link IndexMetaDataAction} for each relevant index.
+ */
+ // exposed for tests
+ static List<IndexMetaDataAction> resolveIndexMetaDataActions(Map<Index, Long> previouslyWrittenIndices,
+ Set<Index> relevantIndices,
+ MetaData previousMetaData,
+ MetaData newMetaData) {
+ List<IndexMetaDataAction> actions = new ArrayList<>();
+ for (Index index : relevantIndices) {
+ IndexMetaData newIndexMetaData = newMetaData.getIndexSafe(index);
+ IndexMetaData previousIndexMetaData = previousMetaData == null ? null : previousMetaData.index(index);
+
+ if (previouslyWrittenIndices.containsKey(index) == false || previousIndexMetaData == null) {
+ actions.add(new WriteNewIndexMetaData(newIndexMetaData));
+ } else if (previousIndexMetaData.getVersion() != newIndexMetaData.getVersion()) {
+ actions.add(new WriteChangedIndexMetaData(previousIndexMetaData, newIndexMetaData));
+ } else {
+ actions.add(new KeepPreviousGeneration(index, previouslyWrittenIndices.get(index)));
+ }
+ }
+ return actions;
+ }
+
+ private static Set<Index> getRelevantIndicesOnDataOnlyNode(ClusterState state, ClusterState previousState, Set<Index>
+ previouslyWrittenIndices) {
+ RoutingNode newRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
+ if (newRoutingNode == null) {
+ throw new IllegalStateException("cluster state does not contain this node - cannot write index meta state");
+ }
+ Set<Index> indices = new HashSet<>();
+ for (ShardRouting routing : newRoutingNode) {
+ indices.add(routing.index());
+ }
+ // we have to check the meta data also: closed indices will not appear in the routing table, but we must still write the state if
+ // we have it written on disk previously
+ for (IndexMetaData indexMetaData : state.metaData()) {
+ boolean isOrWasClosed = indexMetaData.getState().equals(IndexMetaData.State.CLOSE);
+ // if the index is open we might still have to write the state if it just transitioned from closed to open
+ // so we have to check for that as well.
+ IndexMetaData previousMetaData = previousState.metaData().index(indexMetaData.getIndex());
+ if (previousMetaData != null) {
+ isOrWasClosed = isOrWasClosed || previousMetaData.getState().equals(IndexMetaData.State.CLOSE);
+ }
+ if (previouslyWrittenIndices.contains(indexMetaData.getIndex()) && isOrWasClosed) {
+ indices.add(indexMetaData.getIndex());
+ }
+ }
+ return indices;
+ }
+
+ private static Set<Index> getRelevantIndicesForMasterEligibleNode(ClusterState state) {
+ Set<Index> relevantIndices = new HashSet<>();
+ // we have to iterate over the metadata to make sure we also capture closed indices
+ for (IndexMetaData indexMetaData : state.metaData()) {
+ relevantIndices.add(indexMetaData.getIndex());
+ }
+ return relevantIndices;
+ }
+
+ // exposed for tests
+ static Set<Index> getRelevantIndices(ClusterState state, ClusterState previousState, Set<Index> previouslyWrittenIndices) {
+ Set<Index> relevantIndices;
+ if (isDataOnlyNode(state)) {
+ relevantIndices = getRelevantIndicesOnDataOnlyNode(state, previousState, previouslyWrittenIndices);
+ } else if (state.nodes().getLocalNode().isMasterNode()) {
+ relevantIndices = getRelevantIndicesForMasterEligibleNode(state);
+ } else {
+ relevantIndices = Collections.emptySet();
+ }
+ return relevantIndices;
+ }
+
+ private static boolean isDataOnlyNode(ClusterState state) {
+ return state.nodes().getLocalNode().isMasterNode() == false && state.nodes().getLocalNode().isDataNode();
+ }
+
+ /**
+ * Action to perform with index metadata.
+ */
+ interface IndexMetaDataAction {
+ /**
+ * @return the index for the index metadata.
+ */
+ Index getIndex();
+
+ /**
+ * Executes this action using provided {@link AtomicClusterStateWriter}.
+ *
+ * @return the new index metadata state generation, to be used in the manifest file.
+ * @throws WriteStateException if an exception occurs.
+ */
+ long execute(AtomicClusterStateWriter writer) throws WriteStateException;
+ }
+
+ /**
+ * This class is used to write changed global {@link MetaData}, {@link IndexMetaData} and {@link Manifest} to disk.
+ * This class delegates write* calls to the corresponding write calls in {@link MetaStateService} and
+ * additionally keeps track of cleanup actions to be performed if the transaction succeeds or fails.
+ */
+ static class AtomicClusterStateWriter {
+ private static final String FINISHED_MSG = "AtomicClusterStateWriter is finished";
+ private final List<Runnable> commitCleanupActions;
+ private final List<Runnable> rollbackCleanupActions;
+ private final Manifest previousManifest;
+ private final MetaStateService metaStateService;
+ private boolean finished;
+
+ AtomicClusterStateWriter(MetaStateService metaStateService, Manifest previousManifest) {
+ this.metaStateService = metaStateService;
+ assert previousManifest != null;
+ this.previousManifest = previousManifest;
+ this.commitCleanupActions = new ArrayList<>();
+ this.rollbackCleanupActions = new ArrayList<>();
+ this.finished = false;
+ }
+
+ long writeGlobalState(String reason, MetaData metaData) throws WriteStateException {
+ assert finished == false : FINISHED_MSG;
+ try {
+ rollbackCleanupActions.add(() -> metaStateService.cleanupGlobalState(previousManifest.getGlobalGeneration()));
+ long generation = metaStateService.writeGlobalState(reason, metaData);
+ commitCleanupActions.add(() -> metaStateService.cleanupGlobalState(generation));
+ return generation;
+ } catch (WriteStateException e) {
+ rollback();
+ throw e;
+ }
+ }
+
+ long writeIndex(String reason, IndexMetaData metaData) throws WriteStateException {
+ assert finished == false : FINISHED_MSG;
+ try {
+ Index index = metaData.getIndex();
+ Long previousGeneration = previousManifest.getIndexGenerations().get(index);
+ if (previousGeneration != null) {
+ // If the index is not referenced by the previous manifest file we prefer not to clean up its metadata
+ // on rollback, so as not to break the dangling indices functionality.
+ rollbackCleanupActions.add(() -> metaStateService.cleanupIndex(index, previousGeneration));
+ }
+ long generation = metaStateService.writeIndex(reason, metaData);
+ commitCleanupActions.add(() -> metaStateService.cleanupIndex(index, generation));
+ return generation;
+ } catch (WriteStateException e) {
+ rollback();
+ throw e;
+ }
+ }
+
+ void writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException {
+ assert finished == false : FINISHED_MSG;
+ try {
+ metaStateService.writeManifestAndCleanup(reason, manifest);
+ commitCleanupActions.forEach(Runnable::run);
+ finished = true;
+ } catch (WriteStateException e) {
+ // If the Manifest write results in a dirty WriteStateException it's not safe to roll back, removing the new metadata files,
+ // because if the Manifest was actually written to disk and its deletion fails it will reference these new metadata files.
+ // On master-eligible nodes a dirty WriteStateException here is fatal to the node since we no longer really have any idea
+ // what the state on disk is and the only sensible response is to start again from scratch.
+ if (e.isDirty() == false) {
+ rollback();
+ }
+ throw e;
+ }
+ }
+
+ void rollback() {
+ rollbackCleanupActions.forEach(Runnable::run);
+ finished = true;
+ }
+ }
+
+ static class KeepPreviousGeneration implements IndexMetaDataAction {
+ private final Index index;
+ private final long generation;
+
+ KeepPreviousGeneration(Index index, long generation) {
+ this.index = index;
+ this.generation = generation;
+ }
+
+ @Override
+ public Index getIndex() {
+ return index;
+ }
+
+ @Override
+ public long execute(AtomicClusterStateWriter writer) {
+ return generation;
+ }
+ }
+
+ static class WriteNewIndexMetaData implements IndexMetaDataAction {
+ private final IndexMetaData indexMetaData;
+
+ WriteNewIndexMetaData(IndexMetaData indexMetaData) {
+ this.indexMetaData = indexMetaData;
+ }
+
+ @Override
+ public Index getIndex() {
+ return indexMetaData.getIndex();
+ }
+
+ @Override
+ public long execute(AtomicClusterStateWriter writer) throws WriteStateException {
+ return writer.writeIndex("freshly created", indexMetaData);
+ }
+ }
+
+ static class WriteChangedIndexMetaData implements IndexMetaDataAction {
+ private final IndexMetaData newIndexMetaData;
+ private final IndexMetaData oldIndexMetaData;
+
+ WriteChangedIndexMetaData(IndexMetaData oldIndexMetaData, IndexMetaData newIndexMetaData) {
+ this.oldIndexMetaData = oldIndexMetaData;
+ this.newIndexMetaData = newIndexMetaData;
+ }
+
+ @Override
+ public Index getIndex() {
+ return newIndexMetaData.getIndex();
+ }
+
+ @Override
+ public long execute(AtomicClusterStateWriter writer) throws WriteStateException {
+ return writer.writeIndex(
+ "version changed from [" + oldIndexMetaData.getVersion() + "] to [" + newIndexMetaData.getVersion() + "]",
+ newIndexMetaData);
+ }
+ }
+}
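
The three-way split encoded in resolveIndexMetaDataActions above is the heart of the incremental write: index metadata is rewritten only when it is new or its version has changed, and untouched indices keep their previous on-disk generation. The following is a minimal, self-contained sketch of the same decision rule; it uses simplified stand-in types rather than the real Index and IndexMetaData classes, so it is illustrative only and not part of the patch.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class ResolveActionsSketch {

    // Minimal stand-in for IndexMetaData: just a name and a version.
    record IndexState(String name, long version) {}

    // Mirrors the three-way split: write-new, write-changed, or keep the previous generation.
    static List<String> resolve(Map<String, Long> previouslyWritten,
                                Set<String> relevantIndices,
                                Map<String, IndexState> previous,
                                Map<String, IndexState> current) {
        List<String> actions = new ArrayList<>();
        for (String index : relevantIndices) {
            IndexState newState = current.get(index); // assumed present for every relevant index
            IndexState oldState = previous == null ? null : previous.get(index);
            if (previouslyWritten.containsKey(index) == false || oldState == null) {
                actions.add("write new [" + index + "]"); // nothing on disk yet
            } else if (oldState.version() != newState.version()) {
                actions.add("write changed [" + index + "]"); // version bumped, rewrite
            } else {
                actions.add("keep generation [" + previouslyWritten.get(index) + "]"); // unchanged, skip the write
            }
        }
        return actions;
    }

    public static void main(String[] args) {
        Map<String, IndexState> previous = Map.of("a", new IndexState("a", 1), "b", new IndexState("b", 1));
        Map<String, IndexState> current = Map.of("a", new IndexState("a", 2), "b", new IndexState("b", 1),
            "c", new IndexState("c", 1));
        Map<String, Long> written = new HashMap<>(Map.of("a", 3L, "b", 4L));
        // Prints, in some order: write changed [a], keep generation [4], write new [c]
        System.out.println(resolve(written, current.keySet(), previous, current));
    }
}

Keying the decision on the metadata version rather than on deep equality keeps the common unchanged case cheap, which is what makes the incremental writer worthwhile on large cluster states.
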
diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java
index efa7ddcd657..feb35a91283 100644
--- a/server/src/main/java/org/elasticsearch/node/Node.java
+++ b/server/src/main/java/org/elasticsearch/node/Node.java
@@ -482,7 +482,7 @@ public class Node implements Closeable {
).collect(Collectors.toSet());
final TransportService transportService = newTransportService(settings, transport, threadPool,
networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings(), taskHeaders);
- final GatewayMetaState gatewayMetaState = new GatewayMetaState(settings, metaStateService);
+ final GatewayMetaState gatewayMetaState = new GatewayMetaState();
final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService);
final SearchTransportService searchTransportService = new SearchTransportService(transportService,
SearchExecutionStatsCollector.makeWrapper(responseCollectorService));
@@ -700,7 +700,7 @@ public class Node implements Closeable {
// Load (and maybe upgrade) the metadata stored on disk
final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class);
- gatewayMetaState.start(transportService, clusterService,
+ gatewayMetaState.start(settings(), transportService, clusterService, injector.getInstance(MetaStateService.class),
injector.getInstance(MetaDataIndexUpgradeService.class), injector.getInstance(MetaDataUpgrader.class));
// we load the global state here (the persistent part of the cluster state stored on disk) to
// pass it to the bootstrap checks to allow plugins to enforce certain preconditions based on the recovered state.
diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java
index 107cc7541fe..e723d08d735 100644
--- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java
+++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java
@@ -24,6 +24,8 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.coordination.CoordinationMetaData;
import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfigExclusion;
+import org.elasticsearch.cluster.coordination.CoordinationState;
+import org.elasticsearch.cluster.coordination.InMemoryPersistedState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.Manifest;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -35,10 +37,10 @@ import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.test.ESTestCase;
-import java.io.IOException;
import java.util.Collections;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.not;
public class GatewayMetaStatePersistedStateTests extends ESTestCase {
@@ -63,21 +65,23 @@ public class GatewayMetaStatePersistedStateTests extends ESTestCase {
super.tearDown();
}
- private MockGatewayMetaState newGateway() {
- final MockGatewayMetaState gateway = new MockGatewayMetaState(settings, nodeEnvironment, xContentRegistry(), localNode);
- gateway.start();
- return gateway;
+ private CoordinationState.PersistedState newGatewayPersistedState() {
+ final MockGatewayMetaState gateway = new MockGatewayMetaState(localNode);
+ gateway.start(settings, nodeEnvironment, xContentRegistry());
+ final CoordinationState.PersistedState persistedState = gateway.getPersistedState();
+ assertThat(persistedState, not(instanceOf(InMemoryPersistedState.class)));
+ return persistedState;
}
- private MockGatewayMetaState maybeNew(MockGatewayMetaState gateway) throws IOException {
+ private CoordinationState.PersistedState maybeNew(CoordinationState.PersistedState persistedState) {
if (randomBoolean()) {
- return newGateway();
+ return newGatewayPersistedState();
}
- return gateway;
+ return persistedState;
}
- public void testInitialState() throws IOException {
- MockGatewayMetaState gateway = newGateway();
+ public void testInitialState() {
+ CoordinationState.PersistedState gateway = newGatewayPersistedState();
ClusterState state = gateway.getLastAcceptedState();
assertThat(state.getClusterName(), equalTo(clusterName));
assertTrue(MetaData.isGlobalStateEquals(state.metaData(), MetaData.EMPTY_META_DATA));
@@ -88,8 +92,8 @@ public class GatewayMetaStatePersistedStateTests extends ESTestCase {
assertThat(currentTerm, equalTo(Manifest.empty().getCurrentTerm()));
}
- public void testSetCurrentTerm() throws IOException {
- MockGatewayMetaState gateway = newGateway();
+ public void testSetCurrentTerm() {
+ CoordinationState.PersistedState gateway = newGatewayPersistedState();
for (int i = 0; i < randomIntBetween(1, 5); i++) {
final long currentTerm = randomNonNegativeLong();
@@ -142,8 +146,8 @@ public class GatewayMetaStatePersistedStateTests extends ESTestCase {
}
}
- public void testSetLastAcceptedState() throws IOException {
- MockGatewayMetaState gateway = newGateway();
+ public void testSetLastAcceptedState() {
+ CoordinationState.PersistedState gateway = newGatewayPersistedState();
final long term = randomNonNegativeLong();
for (int i = 0; i < randomIntBetween(1, 5); i++) {
@@ -165,8 +169,8 @@ public class GatewayMetaStatePersistedStateTests extends ESTestCase {
}
}
- public void testSetLastAcceptedStateTermChanged() throws IOException {
- MockGatewayMetaState gateway = newGateway();
+ public void testSetLastAcceptedStateTermChanged() {
+ CoordinationState.PersistedState gateway = newGatewayPersistedState();
final String indexName = randomAlphaOfLength(10);
final int numberOfShards = randomIntBetween(1, 5);
@@ -178,7 +182,7 @@ public class GatewayMetaStatePersistedStateTests extends ESTestCase {
gateway.setLastAcceptedState(state);
gateway = maybeNew(gateway);
- final long newTerm = randomValueOtherThan(term, () -> randomNonNegativeLong());
+ final long newTerm = randomValueOtherThan(term, ESTestCase::randomNonNegativeLong);
final int newNumberOfShards = randomValueOtherThan(numberOfShards, () -> randomIntBetween(1,5));
final IndexMetaData newIndexMetaData = createIndexMetaData(indexName, newNumberOfShards, version);
final ClusterState newClusterState = createClusterState(randomNonNegativeLong(),
@@ -189,11 +193,11 @@ public class GatewayMetaStatePersistedStateTests extends ESTestCase {
assertThat(gateway.getLastAcceptedState().metaData().index(indexName), equalTo(newIndexMetaData));
}
- public void testCurrentTermAndTermAreDifferent() throws IOException {
- MockGatewayMetaState gateway = newGateway();
+ public void testCurrentTermAndTermAreDifferent() {
+ CoordinationState.PersistedState gateway = newGatewayPersistedState();
long currentTerm = randomNonNegativeLong();
- long term = randomValueOtherThan(currentTerm, () -> randomNonNegativeLong());
+ long term = randomValueOtherThan(currentTerm, ESTestCase::randomNonNegativeLong);
gateway.setCurrentTerm(currentTerm);
gateway.setLastAcceptedState(createClusterState(randomNonNegativeLong(),
@@ -204,8 +208,8 @@ public class GatewayMetaStatePersistedStateTests extends ESTestCase {
assertThat(gateway.getLastAcceptedState().coordinationMetaData().term(), equalTo(term));
}
- public void testMarkAcceptedConfigAsCommitted() throws IOException {
- MockGatewayMetaState gateway = newGateway();
+ public void testMarkAcceptedConfigAsCommitted() {
+ CoordinationState.PersistedState gateway = newGatewayPersistedState();
//generate random coordinationMetaData with different lastAcceptedConfiguration and lastCommittedConfiguration
CoordinationMetaData coordinationMetaData;
diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
index c8f274c2f18..d0101f276d8 100644
--- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
+++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
@@ -19,417 +19,24 @@
package org.elasticsearch.gateway;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.MockDirectoryWrapper;
import org.elasticsearch.Version;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
-import org.elasticsearch.cluster.metadata.Manifest;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
-import org.elasticsearch.cluster.node.DiscoveryNodeRole;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.cluster.routing.RoutingTable;
-import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
-import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.NamedXContentRegistry;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.env.NodeEnvironment;
-import org.elasticsearch.index.Index;
import org.elasticsearch.plugins.MetaDataUpgrader;
+import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.TestCustomMetaData;
-import org.mockito.ArgumentCaptor;
-import java.io.IOException;
-import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.hasSize;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyZeroInteractions;
-import static org.mockito.Mockito.when;
-public class GatewayMetaStateTests extends ESAllocationTestCase {
-
- private ClusterState noIndexClusterState(boolean masterEligible) {
- MetaData metaData = MetaData.builder().build();
- RoutingTable routingTable = RoutingTable.builder().build();
-
- return ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
- .metaData(metaData)
- .routingTable(routingTable)
- .nodes(generateDiscoveryNodes(masterEligible))
- .build();
- }
-
- private ClusterState clusterStateWithUnassignedIndex(IndexMetaData indexMetaData, boolean masterEligible) {
- MetaData metaData = MetaData.builder()
- .put(indexMetaData, false)
- .build();
-
- RoutingTable routingTable = RoutingTable.builder()
- .addAsNew(metaData.index("test"))
- .build();
-
- return ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
- .metaData(metaData)
- .routingTable(routingTable)
- .nodes(generateDiscoveryNodes(masterEligible))
- .build();
- }
-
- private ClusterState clusterStateWithAssignedIndex(IndexMetaData indexMetaData, boolean masterEligible) {
- AllocationService strategy = createAllocationService(Settings.builder()
- .put("cluster.routing.allocation.node_concurrent_recoveries", 100)
- .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
- .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100)
- .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100)
- .build());
-
- ClusterState oldClusterState = clusterStateWithUnassignedIndex(indexMetaData, masterEligible);
- RoutingTable routingTable = strategy.reroute(oldClusterState, "reroute").routingTable();
-
- MetaData metaDataNewClusterState = MetaData.builder()
- .put(oldClusterState.metaData().index("test"), false)
- .build();
-
- return ClusterState.builder(oldClusterState).routingTable(routingTable)
- .metaData(metaDataNewClusterState).version(oldClusterState.getVersion() + 1).build();
- }
-
- private ClusterState clusterStateWithClosedIndex(IndexMetaData indexMetaData, boolean masterEligible) {
- ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetaData, masterEligible);
-
- MetaData metaDataNewClusterState = MetaData.builder()
- .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).state(IndexMetaData.State.CLOSE)
- .numberOfShards(5).numberOfReplicas(2))
- .version(oldClusterState.metaData().version() + 1)
- .build();
- RoutingTable routingTable = RoutingTable.builder()
- .addAsNew(metaDataNewClusterState.index("test"))
- .build();
-
- return ClusterState.builder(oldClusterState).routingTable(routingTable)
- .metaData(metaDataNewClusterState).version(oldClusterState.getVersion() + 1).build();
- }
-
- private ClusterState clusterStateWithJustOpenedIndex(IndexMetaData indexMetaData, boolean masterEligible) {
- ClusterState oldClusterState = clusterStateWithClosedIndex(indexMetaData, masterEligible);
-
- MetaData metaDataNewClusterState = MetaData.builder()
- .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).state(IndexMetaData.State.OPEN)
- .numberOfShards(5).numberOfReplicas(2))
- .version(oldClusterState.metaData().version() + 1)
- .build();
-
- return ClusterState.builder(oldClusterState)
- .metaData(metaDataNewClusterState).version(oldClusterState.getVersion() + 1).build();
- }
-
- private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) {
- Set<DiscoveryNodeRole> dataOnlyRoles = Collections.singleton(DiscoveryNodeRole.DATA_ROLE);
- return DiscoveryNodes.builder().add(newNode("node1", masterEligible ? MASTER_DATA_ROLES : dataOnlyRoles))
- .add(newNode("master_node", MASTER_DATA_ROLES)).localNodeId("node1").masterNodeId(masterEligible ? "node1" : "master_node");
- }
-
- private Set<Index> randomPrevWrittenIndices(IndexMetaData indexMetaData) {
- if (randomBoolean()) {
- return Collections.singleton(indexMetaData.getIndex());
- } else {
- return Collections.emptySet();
- }
- }
-
- private IndexMetaData createIndexMetaData(String name) {
- return IndexMetaData.builder(name).
- settings(settings(Version.CURRENT)).
- numberOfShards(5).
- numberOfReplicas(2).
- build();
- }
-
- public void testGetRelevantIndicesWithUnassignedShardsOnMasterEligibleNode() {
- IndexMetaData indexMetaData = createIndexMetaData("test");
- Set<Index> indices = GatewayMetaState.getRelevantIndices(
- clusterStateWithUnassignedIndex(indexMetaData, true),
- noIndexClusterState(true),
- randomPrevWrittenIndices(indexMetaData));
- assertThat(indices.size(), equalTo(1));
- }
-
- public void testGetRelevantIndicesWithUnassignedShardsOnDataOnlyNode() {
- IndexMetaData indexMetaData = createIndexMetaData("test");
- Set<Index> indices = GatewayMetaState.getRelevantIndices(
- clusterStateWithUnassignedIndex(indexMetaData, false),
- noIndexClusterState(false),
- randomPrevWrittenIndices(indexMetaData));
- assertThat(indices.size(), equalTo(0));
- }
-
- public void testGetRelevantIndicesWithAssignedShards() {
- IndexMetaData indexMetaData = createIndexMetaData("test");
- boolean masterEligible = randomBoolean();
- Set<Index> indices = GatewayMetaState.getRelevantIndices(
- clusterStateWithAssignedIndex(indexMetaData, masterEligible),
- clusterStateWithUnassignedIndex(indexMetaData, masterEligible),
- randomPrevWrittenIndices(indexMetaData));
- assertThat(indices.size(), equalTo(1));
- }
-
- public void testGetRelevantIndicesForClosedPrevWrittenIndexOnDataOnlyNode() {
- IndexMetaData indexMetaData = createIndexMetaData("test");
- Set<Index> indices = GatewayMetaState.getRelevantIndices(
- clusterStateWithClosedIndex(indexMetaData, false),
- clusterStateWithAssignedIndex(indexMetaData, false),
- Collections.singleton(indexMetaData.getIndex()));
- assertThat(indices.size(), equalTo(1));
- }
-
- public void testGetRelevantIndicesForClosedPrevNotWrittenIndexOnDataOnlyNode() {
- IndexMetaData indexMetaData = createIndexMetaData("test");
- Set<Index> indices = GatewayMetaState.getRelevantIndices(
- clusterStateWithJustOpenedIndex(indexMetaData, false),
- clusterStateWithClosedIndex(indexMetaData, false),
- Collections.emptySet());
- assertThat(indices.size(), equalTo(0));
- }
-
- public void testGetRelevantIndicesForWasClosedPrevWrittenIndexOnDataOnlyNode() {
- IndexMetaData indexMetaData = createIndexMetaData("test");
- Set<Index> indices = GatewayMetaState.getRelevantIndices(
- clusterStateWithJustOpenedIndex(indexMetaData, false),
- clusterStateWithClosedIndex(indexMetaData, false),
- Collections.singleton(indexMetaData.getIndex()));
- assertThat(indices.size(), equalTo(1));
- }
-
- public void testResolveStatesToBeWritten() throws WriteStateException {
- Map<Index, Long> indices = new HashMap<>();
- Set<Index> relevantIndices = new HashSet<>();
-
- IndexMetaData removedIndex = createIndexMetaData("removed_index");
- indices.put(removedIndex.getIndex(), 1L);
-
- IndexMetaData versionChangedIndex = createIndexMetaData("version_changed_index");
- indices.put(versionChangedIndex.getIndex(), 2L);
- relevantIndices.add(versionChangedIndex.getIndex());
-
- IndexMetaData notChangedIndex = createIndexMetaData("not_changed_index");
- indices.put(notChangedIndex.getIndex(), 3L);
- relevantIndices.add(notChangedIndex.getIndex());
-
- IndexMetaData newIndex = createIndexMetaData("new_index");
- relevantIndices.add(newIndex.getIndex());
-
- MetaData oldMetaData = MetaData.builder()
- .put(removedIndex, false)
- .put(versionChangedIndex, false)
- .put(notChangedIndex, false)
- .build();
-
- MetaData newMetaData = MetaData.builder()
- .put(versionChangedIndex, true)
- .put(notChangedIndex, false)
- .put(newIndex, false)
- .build();
-
- IndexMetaData newVersionChangedIndex = newMetaData.index(versionChangedIndex.getIndex());
-
- List<GatewayMetaState.IndexMetaDataAction> actions =
- GatewayMetaState.resolveIndexMetaDataActions(indices, relevantIndices, oldMetaData, newMetaData);
-
- assertThat(actions, hasSize(3));
-
- for (GatewayMetaState.IndexMetaDataAction action : actions) {
- if (action instanceof GatewayMetaState.KeepPreviousGeneration) {
- assertThat(action.getIndex(), equalTo(notChangedIndex.getIndex()));
- GatewayMetaState.AtomicClusterStateWriter writer = mock(GatewayMetaState.AtomicClusterStateWriter.class);
- assertThat(action.execute(writer), equalTo(3L));
- verifyZeroInteractions(writer);
- }
- if (action instanceof GatewayMetaState.WriteNewIndexMetaData) {
- assertThat(action.getIndex(), equalTo(newIndex.getIndex()));
- GatewayMetaState.AtomicClusterStateWriter writer = mock(GatewayMetaState.AtomicClusterStateWriter.class);
- when(writer.writeIndex("freshly created", newIndex)).thenReturn(0L);
- assertThat(action.execute(writer), equalTo(0L));
- }
- if (action instanceof GatewayMetaState.WriteChangedIndexMetaData) {
- assertThat(action.getIndex(), equalTo(newVersionChangedIndex.getIndex()));
- GatewayMetaState.AtomicClusterStateWriter writer = mock(GatewayMetaState.AtomicClusterStateWriter.class);
- when(writer.writeIndex(anyString(), eq(newVersionChangedIndex))).thenReturn(3L);
- assertThat(action.execute(writer), equalTo(3L));
- ArgumentCaptor<String> reason = ArgumentCaptor.forClass(String.class);
- verify(writer).writeIndex(reason.capture(), eq(newVersionChangedIndex));
- assertThat(reason.getValue(), containsString(Long.toString(versionChangedIndex.getVersion())));
- assertThat(reason.getValue(), containsString(Long.toString(newVersionChangedIndex.getVersion())));
- }
- }
- }
-
- private static class MetaStateServiceWithFailures extends MetaStateService {
- private final int invertedFailRate;
- private boolean failRandomly;
-
- private <T> MetaDataStateFormat<T> wrap(MetaDataStateFormat<T> format) {
- return new MetaDataStateFormat<T>(format.getPrefix()) {
- @Override
- public void toXContent(XContentBuilder builder, T state) throws IOException {
- format.toXContent(builder, state);
- }
-
- @Override
- public T fromXContent(XContentParser parser) throws IOException {
- return format.fromXContent(parser);
- }
-
- @Override
- protected Directory newDirectory(Path dir) {
- MockDirectoryWrapper mock = newMockFSDirectory(dir);
- if (failRandomly) {
- MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() {
- @Override
- public void eval(MockDirectoryWrapper dir) throws IOException {
- int r = randomIntBetween(0, invertedFailRate);
- if (r == 0) {
- throw new MockDirectoryWrapper.FakeIOException();
- }
- }
- };
- mock.failOn(fail);
- }
- closeAfterSuite(mock);
- return mock;
- }
- };
- }
-
- MetaStateServiceWithFailures(int invertedFailRate, NodeEnvironment nodeEnv, NamedXContentRegistry namedXContentRegistry) {
- super(nodeEnv, namedXContentRegistry);
- META_DATA_FORMAT = wrap(MetaData.FORMAT);
- INDEX_META_DATA_FORMAT = wrap(IndexMetaData.FORMAT);
- MANIFEST_FORMAT = wrap(Manifest.FORMAT);
- failRandomly = false;
- this.invertedFailRate = invertedFailRate;
- }
-
- void failRandomly() {
- failRandomly = true;
- }
-
- void noFailures() {
- failRandomly = false;
- }
- }
-
- private boolean metaDataEquals(MetaData md1, MetaData md2) {
- boolean equals = MetaData.isGlobalStateEquals(md1, md2);
-
- for (IndexMetaData imd : md1) {
- IndexMetaData imd2 = md2.index(imd.getIndex());
- equals = equals && imd.equals(imd2);
- }
-
- for (IndexMetaData imd : md2) {
- IndexMetaData imd2 = md1.index(imd.getIndex());
- equals = equals && imd.equals(imd2);
- }
- return equals;
- }
-
- private static MetaData randomMetaDataForTx() {
- int settingNo = randomIntBetween(0, 10);
- MetaData.Builder builder = MetaData.builder()
- .persistentSettings(Settings.builder().put("setting" + settingNo, randomAlphaOfLength(5)).build());
- int numOfIndices = randomIntBetween(0, 3);
-
- for (int i = 0; i < numOfIndices; i++) {
- int indexNo = randomIntBetween(0, 50);
- IndexMetaData indexMetaData = IndexMetaData.builder("index" + indexNo).settings(
- Settings.builder()
- .put(IndexMetaData.SETTING_INDEX_UUID, "index" + indexNo)
- .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
- .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
- .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
- .build()
- ).build();
- builder.put(indexMetaData, false);
- }
- return builder.build();
- }
-
- public void testAtomicityWithFailures() throws IOException {
- try (NodeEnvironment env = newNodeEnvironment()) {
- MetaStateServiceWithFailures metaStateService =
- new MetaStateServiceWithFailures(randomIntBetween(100, 1000), env, xContentRegistry());
-
- // We only guarantee atomicity of writes if there is an initial Manifest file
- Manifest manifest = Manifest.empty();
- MetaData metaData = MetaData.EMPTY_META_DATA;
- metaStateService.writeManifestAndCleanup("startup", Manifest.empty());
- long currentTerm = randomNonNegativeLong();
- long clusterStateVersion = randomNonNegativeLong();
-
- metaStateService.failRandomly();
- Set<MetaData> possibleMetaData = new HashSet<>();
- possibleMetaData.add(metaData);
-
- for (int i = 0; i < randomIntBetween(1, 5); i++) {
- GatewayMetaState.AtomicClusterStateWriter writer =
- new GatewayMetaState.AtomicClusterStateWriter(metaStateService, manifest);
- metaData = randomMetaDataForTx();
- Map<Index, Long> indexGenerations = new HashMap<>();
-
- try {
- long globalGeneration = writer.writeGlobalState("global", metaData);
-
- for (IndexMetaData indexMetaData : metaData) {
- long generation = writer.writeIndex("index", indexMetaData);
- indexGenerations.put(indexMetaData.getIndex(), generation);
- }
-
- Manifest newManifest = new Manifest(currentTerm, clusterStateVersion, globalGeneration, indexGenerations);
- writer.writeManifestAndCleanup("manifest", newManifest);
- possibleMetaData.clear();
- possibleMetaData.add(metaData);
- manifest = newManifest;
- } catch (WriteStateException e) {
- if (e.isDirty()) {
- possibleMetaData.add(metaData);
- /*
- * If a dirty WriteStateException occurred, it's only safe to proceed if there is a subsequent
- * successful write of the metadata and the Manifest. We prefer to break here rather than over-complicate
- * the test logic. See also MetaDataStateFormat#testFailRandomlyAndReadAnyState, which does not break.
- */
- break;
- }
- }
- }
-
- metaStateService.noFailures();
-
- Tuple<Manifest, MetaData> manifestAndMetaData = metaStateService.loadFullState();
- MetaData loadedMetaData = manifestAndMetaData.v2();
-
- assertTrue(possibleMetaData.stream().anyMatch(md -> metaDataEquals(md, loadedMetaData)));
- }
- }
+public class GatewayMetaStateTests extends ESTestCase {
public void testAddCustomMetaDataOnUpgrade() throws Exception {
MetaData metaData = randomMetaData();
diff --git a/server/src/test/java/org/elasticsearch/gateway/IncrementalClusterStateWriterTests.java b/server/src/test/java/org/elasticsearch/gateway/IncrementalClusterStateWriterTests.java
new file mode 100644
index 00000000000..b41a24bb820
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/gateway/IncrementalClusterStateWriterTests.java
@@ -0,0 +1,429 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gateway;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.Manifest;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodeRole;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
+import org.mockito.ArgumentCaptor;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
+
+public class IncrementalClusterStateWriterTests extends ESAllocationTestCase {
+
+ private ClusterState noIndexClusterState(boolean masterEligible) {
+ MetaData metaData = MetaData.builder().build();
+ RoutingTable routingTable = RoutingTable.builder().build();
+
+ return ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+ .metaData(metaData)
+ .routingTable(routingTable)
+ .nodes(generateDiscoveryNodes(masterEligible))
+ .build();
+ }
+
+ private ClusterState clusterStateWithUnassignedIndex(IndexMetaData indexMetaData, boolean masterEligible) {
+ MetaData metaData = MetaData.builder()
+ .put(indexMetaData, false)
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ return ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+ .metaData(metaData)
+ .routingTable(routingTable)
+ .nodes(generateDiscoveryNodes(masterEligible))
+ .build();
+ }
+
+ private ClusterState clusterStateWithAssignedIndex(IndexMetaData indexMetaData, boolean masterEligible) {
+ AllocationService strategy = createAllocationService(Settings.builder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 100)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100)
+ .build());
+
+ ClusterState oldClusterState = clusterStateWithUnassignedIndex(indexMetaData, masterEligible);
+ RoutingTable routingTable = strategy.reroute(oldClusterState, "reroute").routingTable();
+
+ MetaData metaDataNewClusterState = MetaData.builder()
+ .put(oldClusterState.metaData().index("test"), false)
+ .build();
+
+ return ClusterState.builder(oldClusterState).routingTable(routingTable)
+ .metaData(metaDataNewClusterState).version(oldClusterState.getVersion() + 1).build();
+ }
+
+ private ClusterState clusterStateWithClosedIndex(IndexMetaData indexMetaData, boolean masterEligible) {
+ ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetaData, masterEligible);
+
+ MetaData metaDataNewClusterState = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).state(IndexMetaData.State.CLOSE)
+ .numberOfShards(5).numberOfReplicas(2))
+ .version(oldClusterState.metaData().version() + 1)
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaDataNewClusterState.index("test"))
+ .build();
+
+ return ClusterState.builder(oldClusterState).routingTable(routingTable)
+ .metaData(metaDataNewClusterState).version(oldClusterState.getVersion() + 1).build();
+ }
+
+ private ClusterState clusterStateWithJustOpenedIndex(IndexMetaData indexMetaData, boolean masterEligible) {
+ ClusterState oldClusterState = clusterStateWithClosedIndex(indexMetaData, masterEligible);
+
+ MetaData metaDataNewClusterState = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).state(IndexMetaData.State.OPEN)
+ .numberOfShards(5).numberOfReplicas(2))
+ .version(oldClusterState.metaData().version() + 1)
+ .build();
+
+ return ClusterState.builder(oldClusterState)
+ .metaData(metaDataNewClusterState).version(oldClusterState.getVersion() + 1).build();
+ }
+
+ private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) {
+ Set<DiscoveryNodeRole> dataOnlyRoles = Collections.singleton(DiscoveryNodeRole.DATA_ROLE);
+ return DiscoveryNodes.builder().add(newNode("node1", masterEligible ? MASTER_DATA_ROLES : dataOnlyRoles))
+ .add(newNode("master_node", MASTER_DATA_ROLES)).localNodeId("node1").masterNodeId(masterEligible ? "node1" : "master_node");
+ }
+
+ private Set<Index> randomPrevWrittenIndices(IndexMetaData indexMetaData) {
+ if (randomBoolean()) {
+ return Collections.singleton(indexMetaData.getIndex());
+ } else {
+ return Collections.emptySet();
+ }
+ }
+
+ private IndexMetaData createIndexMetaData(String name) {
+ return IndexMetaData.builder(name).
+ settings(settings(Version.CURRENT)).
+ numberOfShards(5).
+ numberOfReplicas(2).
+ build();
+ }
+
+ public void testGetRelevantIndicesWithUnassignedShardsOnMasterEligibleNode() {
+ IndexMetaData indexMetaData = createIndexMetaData("test");
+ Set<Index> indices = IncrementalClusterStateWriter.getRelevantIndices(
+ clusterStateWithUnassignedIndex(indexMetaData, true),
+ noIndexClusterState(true),
+ randomPrevWrittenIndices(indexMetaData));
+ assertThat(indices.size(), equalTo(1));
+ }
+
+ public void testGetRelevantIndicesWithUnassignedShardsOnDataOnlyNode() {
+ IndexMetaData indexMetaData = createIndexMetaData("test");
+ Set<Index> indices = IncrementalClusterStateWriter.getRelevantIndices(
+ clusterStateWithUnassignedIndex(indexMetaData, false),
+ noIndexClusterState(false),
+ randomPrevWrittenIndices(indexMetaData));
+ assertThat(indices.size(), equalTo(0));
+ }
+
+ public void testGetRelevantIndicesWithAssignedShards() {
+ IndexMetaData indexMetaData = createIndexMetaData("test");
+ boolean masterEligible = randomBoolean();
+ Set<Index> indices = IncrementalClusterStateWriter.getRelevantIndices(
+ clusterStateWithAssignedIndex(indexMetaData, masterEligible),
+ clusterStateWithUnassignedIndex(indexMetaData, masterEligible),
+ randomPrevWrittenIndices(indexMetaData));
+ assertThat(indices.size(), equalTo(1));
+ }
+
+ public void testGetRelevantIndicesForClosedPrevWrittenIndexOnDataOnlyNode() {
+ IndexMetaData indexMetaData = createIndexMetaData("test");
+ Set<Index> indices = IncrementalClusterStateWriter.getRelevantIndices(
+ clusterStateWithClosedIndex(indexMetaData, false),
+ clusterStateWithAssignedIndex(indexMetaData, false),
+ Collections.singleton(indexMetaData.getIndex()));
+ assertThat(indices.size(), equalTo(1));
+ }
+
+ public void testGetRelevantIndicesForClosedPrevNotWrittenIndexOnDataOnlyNode() {
+ IndexMetaData indexMetaData = createIndexMetaData("test");
+ Set<Index> indices = IncrementalClusterStateWriter.getRelevantIndices(
+ clusterStateWithJustOpenedIndex(indexMetaData, false),
+ clusterStateWithClosedIndex(indexMetaData, false),
+ Collections.emptySet());
+ assertThat(indices.size(), equalTo(0));
+ }
+
+ public void testGetRelevantIndicesForWasClosedPrevWrittenIndexOnDataOnlyNode() {
+ IndexMetaData indexMetaData = createIndexMetaData("test");
+ Set<Index> indices = IncrementalClusterStateWriter.getRelevantIndices(
+ clusterStateWithJustOpenedIndex(indexMetaData, false),
+ clusterStateWithClosedIndex(indexMetaData, false),
+ Collections.singleton(indexMetaData.getIndex()));
+ assertThat(indices.size(), equalTo(1));
+ }
+
+ public void testResolveStatesToBeWritten() throws WriteStateException {
+ Map<Index, Long> indices = new HashMap<>();
+ Set<Index> relevantIndices = new HashSet<>();
+
+ IndexMetaData removedIndex = createIndexMetaData("removed_index");
+ indices.put(removedIndex.getIndex(), 1L);
+
+ IndexMetaData versionChangedIndex = createIndexMetaData("version_changed_index");
+ indices.put(versionChangedIndex.getIndex(), 2L);
+ relevantIndices.add(versionChangedIndex.getIndex());
+
+ IndexMetaData notChangedIndex = createIndexMetaData("not_changed_index");
+ indices.put(notChangedIndex.getIndex(), 3L);
+ relevantIndices.add(notChangedIndex.getIndex());
+
+ IndexMetaData newIndex = createIndexMetaData("new_index");
+ relevantIndices.add(newIndex.getIndex());
+
+ MetaData oldMetaData = MetaData.builder()
+ .put(removedIndex, false)
+ .put(versionChangedIndex, false)
+ .put(notChangedIndex, false)
+ .build();
+
+ MetaData newMetaData = MetaData.builder()
+ .put(versionChangedIndex, true)
+ .put(notChangedIndex, false)
+ .put(newIndex, false)
+ .build();
+
+ IndexMetaData newVersionChangedIndex = newMetaData.index(versionChangedIndex.getIndex());
+
+ List<IncrementalClusterStateWriter.IndexMetaDataAction> actions =
+ IncrementalClusterStateWriter.resolveIndexMetaDataActions(indices, relevantIndices, oldMetaData, newMetaData);
+
+ assertThat(actions, hasSize(3));
+
+ for (IncrementalClusterStateWriter.IndexMetaDataAction action : actions) {
+ if (action instanceof IncrementalClusterStateWriter.KeepPreviousGeneration) {
+ assertThat(action.getIndex(), equalTo(notChangedIndex.getIndex()));
+ IncrementalClusterStateWriter.AtomicClusterStateWriter writer
+ = mock(IncrementalClusterStateWriter.AtomicClusterStateWriter.class);
+ assertThat(action.execute(writer), equalTo(3L));
+ verifyZeroInteractions(writer);
+ }
+ if (action instanceof IncrementalClusterStateWriter.WriteNewIndexMetaData) {
+ assertThat(action.getIndex(), equalTo(newIndex.getIndex()));
+ IncrementalClusterStateWriter.AtomicClusterStateWriter writer
+ = mock(IncrementalClusterStateWriter.AtomicClusterStateWriter.class);
+ when(writer.writeIndex("freshly created", newIndex)).thenReturn(0L);
+ assertThat(action.execute(writer), equalTo(0L));
+ }
+ if (action instanceof IncrementalClusterStateWriter.WriteChangedIndexMetaData) {
+ assertThat(action.getIndex(), equalTo(newVersionChangedIndex.getIndex()));
+ IncrementalClusterStateWriter.AtomicClusterStateWriter writer
+ = mock(IncrementalClusterStateWriter.AtomicClusterStateWriter.class);
+ when(writer.writeIndex(anyString(), eq(newVersionChangedIndex))).thenReturn(3L);
+ assertThat(action.execute(writer), equalTo(3L));
+ ArgumentCaptor<String> reason = ArgumentCaptor.forClass(String.class);
+ verify(writer).writeIndex(reason.capture(), eq(newVersionChangedIndex));
+ assertThat(reason.getValue(), containsString(Long.toString(versionChangedIndex.getVersion())));
+ assertThat(reason.getValue(), containsString(Long.toString(newVersionChangedIndex.getVersion())));
+ }
+ }
+ }
+
+ private static class MetaStateServiceWithFailures extends MetaStateService {
+ private final int invertedFailRate;
+ private boolean failRandomly;
+
+ private <T> MetaDataStateFormat<T> wrap(MetaDataStateFormat<T> format) {
+ return new MetaDataStateFormat<T>(format.getPrefix()) {
+ @Override
+ public void toXContent(XContentBuilder builder, T state) throws IOException {
+ format.toXContent(builder, state);
+ }
+
+ @Override
+ public T fromXContent(XContentParser parser) throws IOException {
+ return format.fromXContent(parser);
+ }
+
+ @Override
+ protected Directory newDirectory(Path dir) {
+ MockDirectoryWrapper mock = newMockFSDirectory(dir);
+ if (failRandomly) {
+ MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() {
+ @Override
+ public void eval(MockDirectoryWrapper dir) throws IOException {
+ int r = randomIntBetween(0, invertedFailRate);
+ if (r == 0) {
+ throw new MockDirectoryWrapper.FakeIOException();
+ }
+ }
+ };
+ mock.failOn(fail);
+ }
+ closeAfterSuite(mock);
+ return mock;
+ }
+ };
+ }
+
+ MetaStateServiceWithFailures(int invertedFailRate, NodeEnvironment nodeEnv, NamedXContentRegistry namedXContentRegistry) {
+ super(nodeEnv, namedXContentRegistry);
+ META_DATA_FORMAT = wrap(MetaData.FORMAT);
+ INDEX_META_DATA_FORMAT = wrap(IndexMetaData.FORMAT);
+ MANIFEST_FORMAT = wrap(Manifest.FORMAT);
+ failRandomly = false;
+ this.invertedFailRate = invertedFailRate;
+ }
+
+ void failRandomly() {
+ failRandomly = true;
+ }
+
+ void noFailures() {
+ failRandomly = false;
+ }
+ }
+
+ private boolean metaDataEquals(MetaData md1, MetaData md2) {
+ boolean equals = MetaData.isGlobalStateEquals(md1, md2);
+
+ for (IndexMetaData imd : md1) {
+ IndexMetaData imd2 = md2.index(imd.getIndex());
+ equals = equals && imd.equals(imd2);
+ }
+
+ for (IndexMetaData imd : md2) {
+ IndexMetaData imd2 = md1.index(imd.getIndex());
+ equals = equals && imd.equals(imd2);
+ }
+ return equals;
+ }
+
+ private static MetaData randomMetaDataForTx() {
+ int settingNo = randomIntBetween(0, 10);
+ MetaData.Builder builder = MetaData.builder()
+ .persistentSettings(Settings.builder().put("setting" + settingNo, randomAlphaOfLength(5)).build());
+ int numOfIndices = randomIntBetween(0, 3);
+
+ for (int i = 0; i < numOfIndices; i++) {
+ int indexNo = randomIntBetween(0, 50);
+ IndexMetaData indexMetaData = IndexMetaData.builder("index" + indexNo).settings(
+ Settings.builder()
+ .put(IndexMetaData.SETTING_INDEX_UUID, "index" + indexNo)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .build()
+ ).build();
+ builder.put(indexMetaData, false);
+ }
+ return builder.build();
+ }
+
+ public void testAtomicityWithFailures() throws IOException {
+ try (NodeEnvironment env = newNodeEnvironment()) {
+ MetaStateServiceWithFailures metaStateService =
+ new MetaStateServiceWithFailures(randomIntBetween(100, 1000), env, xContentRegistry());
+
+ // We only guarantee atomicity of writes if there is an initial Manifest file
+ Manifest manifest = Manifest.empty();
+ MetaData metaData = MetaData.EMPTY_META_DATA;
+ metaStateService.writeManifestAndCleanup("startup", Manifest.empty());
+ long currentTerm = randomNonNegativeLong();
+ long clusterStateVersion = randomNonNegativeLong();
+
+ metaStateService.failRandomly();
+ Set<MetaData> possibleMetaData = new HashSet<>();
+ possibleMetaData.add(metaData);
+
+ for (int i = 0; i < randomIntBetween(1, 5); i++) {
+ IncrementalClusterStateWriter.AtomicClusterStateWriter writer =
+ new IncrementalClusterStateWriter.AtomicClusterStateWriter(metaStateService, manifest);
+ metaData = randomMetaDataForTx();
+ Map<Index, Long> indexGenerations = new HashMap<>();
+
+ try {
+ long globalGeneration = writer.writeGlobalState("global", metaData);
+
+ for (IndexMetaData indexMetaData : metaData) {
+ long generation = writer.writeIndex("index", indexMetaData);
+ indexGenerations.put(indexMetaData.getIndex(), generation);
+ }
+
+ Manifest newManifest = new Manifest(currentTerm, clusterStateVersion, globalGeneration, indexGenerations);
+ writer.writeManifestAndCleanup("manifest", newManifest);
+ possibleMetaData.clear();
+ possibleMetaData.add(metaData);
+ manifest = newManifest;
+ } catch (WriteStateException e) {
+ if (e.isDirty()) {
+ possibleMetaData.add(metaData);
+ /*
+ * If a dirty WriteStateException occurred, it's only safe to proceed if there is a subsequent
+ * successful write of the metadata and the Manifest. We prefer to break here rather than over-complicate
+ * the test logic. See also MetaDataStateFormat#testFailRandomlyAndReadAnyState, which does not break.
+ */
+ break;
+ }
+ }
+ }
+
+ metaStateService.noFailures();
+
+ Tuple<Manifest, MetaData> manifestAndMetaData = metaStateService.loadFullState();
+ MetaData loadedMetaData = manifestAndMetaData.v2();
+
+ assertTrue(possibleMetaData.stream().anyMatch(md -> metaDataEquals(md, loadedMetaData)));
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java
index fe7b8720981..102de69cc43 100644
--- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java
@@ -703,9 +703,8 @@ public class AbstractCoordinatorTestCase extends ESTestCase {
if (rarely()) {
nodeEnvironment = newNodeEnvironment();
nodeEnvironments.add(nodeEnvironment);
- final MockGatewayMetaState gatewayMetaState
- = new MockGatewayMetaState(Settings.EMPTY, nodeEnvironment, xContentRegistry(), localNode);
- gatewayMetaState.start();
+ final MockGatewayMetaState gatewayMetaState = new MockGatewayMetaState(localNode);
+ gatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry());
delegate = gatewayMetaState.getPersistedState();
} else {
nodeEnvironment = null;
@@ -736,9 +735,8 @@ public class AbstractCoordinatorTestCase extends ESTestCase {
new Manifest(updatedTerm, manifest.getClusterStateVersion(), manifest.getGlobalGeneration(),
manifest.getIndexGenerations()));
}
- final MockGatewayMetaState gatewayMetaState
- = new MockGatewayMetaState(Settings.EMPTY, nodeEnvironment, xContentRegistry(), newLocalNode);
- gatewayMetaState.start();
+ final MockGatewayMetaState gatewayMetaState = new MockGatewayMetaState(newLocalNode);
+ gatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry());
delegate = gatewayMetaState.getPersistedState();
} else {
nodeEnvironment = null;
diff --git a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java
index 006f2948831..b66b5ea3ee2 100644
--- a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java
+++ b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java
@@ -19,6 +19,7 @@
package org.elasticsearch.gateway;
+import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
@@ -37,24 +38,23 @@ import org.elasticsearch.transport.TransportService;
public class MockGatewayMetaState extends GatewayMetaState {
private final DiscoveryNode localNode;
- public MockGatewayMetaState(Settings settings, NodeEnvironment nodeEnvironment,
- NamedXContentRegistry xContentRegistry, DiscoveryNode localNode) {
- super(settings, new MetaStateService(nodeEnvironment, xContentRegistry));
+ public MockGatewayMetaState(DiscoveryNode localNode) {
this.localNode = localNode;
}
@Override
- protected void upgradeMetaData(MetaDataIndexUpgradeService metaDataIndexUpgradeService, MetaDataUpgrader metaDataUpgrader) {
+ void upgradeMetaData(Settings settings, MetaStateService metaStateService, MetaDataIndexUpgradeService metaDataIndexUpgradeService,
+ MetaDataUpgrader metaDataUpgrader) {
// MetaData upgrade is tested in GatewayMetaStateTests; we override this method to be a no-op to make mocking easier
}
@Override
- public void applyClusterStateUpdaters(TransportService transportService, ClusterService clusterService) {
+ ClusterState prepareInitialClusterState(TransportService transportService, ClusterService clusterService, ClusterState clusterState) {
// Just set the localNode here, so as not to mess with the ClusterService and IndicesService mocking
- previousClusterState = ClusterStateUpdaters.setLocalNode(previousClusterState, localNode);
+ return ClusterStateUpdaters.setLocalNode(clusterState, localNode);
}
- public void start() {
- start(null, null, null, null);
+ public void start(Settings settings, NodeEnvironment nodeEnvironment, NamedXContentRegistry xContentRegistry) {
+ start(settings, null, null, new MetaStateService(nodeEnvironment, xContentRegistry), null, null);
}
}
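In short, the dependencies that MockGatewayMetaState previously took at construction time now travel through start(...). Callers change as in the AbstractCoordinatorTestCase hunks above; a minimal before/after sketch, with only the test scaffolding around it assumed:

    // Before: everything at construction time, followed by a no-arg start().
    //   new MockGatewayMetaState(Settings.EMPTY, nodeEnvironment, xContentRegistry(), localNode).start();

    // After: a lightweight constructor; the environment is supplied on start().
    final MockGatewayMetaState gatewayMetaState = new MockGatewayMetaState(localNode);
    gatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry());
    delegate = gatewayMetaState.getPersistedState();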
From 9135e2f9e391dbb94e4c8495c4fa6f3e9a00bfbf Mon Sep 17 00:00:00 2001
From: David Turner
Date: Tue, 24 Sep 2019 13:39:44 +0100
Subject: [PATCH 19/94] Improve LeaderCheck rejection messages (#46998)
Today the `LeaderChecker` rejects checks from nodes that are not in the current
cluster with the exception message `leader check from unknown node`, which
offers no information about why the node is unknown. In fact the node must have
been in the cluster in the recent past, so describing it as a `removed node`
rather than an `unknown node` should guide the user towards a more useful log
message. This commit changes the exception message accordingly, and also tidies
up a few other loose ends in the `LeaderChecker`.
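In condensed form, the leader-check handler now distinguishes the two rejection
cases like this (taken from the LeaderChecker diff below; only the surrounding
request-handling boilerplate is omitted):

    // Condensed view of the handler after this change: two distinct rejections.
    if (discoveryNodes.isLocalNodeElectedMaster() == false) {
        // this node was the master but has since stood down or been deposed
        throw new CoordinationStateRejectedException(
            "rejecting leader check from [" + request.getSender() + "] sent to a node that is no longer the master");
    } else if (discoveryNodes.nodeExists(request.getSender()) == false) {
        // the sender was in the cluster recently but has since been removed
        throw new CoordinationStateRejectedException(
            "rejecting leader check since [" + request.getSender() + "] has been removed from the cluster");
    }
    // otherwise the check is legitimate and succeeds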
---
.../cluster/coordination/LeaderChecker.java | 28 +++++++++----------
.../coordination/LeaderCheckerTests.java | 9 +++---
2 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java
index bb6dabbc2de..d1b58320fe9 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java
@@ -64,7 +64,7 @@ public class LeaderChecker {
private static final Logger logger = LogManager.getLogger(LeaderChecker.class);
- public static final String LEADER_CHECK_ACTION_NAME = "internal:coordination/fault_detection/leader_check";
+ static final String LEADER_CHECK_ACTION_NAME = "internal:coordination/fault_detection/leader_check";
// the time between checks sent to the leader
public static final Setting<TimeValue> LEADER_CHECK_INTERVAL_SETTING =
@@ -92,7 +92,7 @@ public class LeaderChecker {
private volatile DiscoveryNodes discoveryNodes;
- public LeaderChecker(final Settings settings, final TransportService transportService, final Consumer<Exception> onLeaderFailure) {
+ LeaderChecker(final Settings settings, final TransportService transportService, final Consumer<Exception> onLeaderFailure) {
this.settings = settings;
leaderCheckInterval = LEADER_CHECK_INTERVAL_SETTING.get(settings);
leaderCheckTimeout = LEADER_CHECK_TIMEOUT_SETTING.get(settings);
@@ -134,7 +134,7 @@ public class LeaderChecker {
*
* @param leader the node to be checked as leader, or null if checks should be disabled
*/
- public void updateLeader(@Nullable final DiscoveryNode leader) {
+ void updateLeader(@Nullable final DiscoveryNode leader) {
assert transportService.getLocalNode().equals(leader) == false;
final CheckScheduler checkScheduler;
if (leader != null) {
@@ -154,12 +154,8 @@ public class LeaderChecker {
/**
* Update the "known" discovery nodes. Should be called on the leader before a new cluster state is published to reflect the new
* publication targets, and also called if a leader becomes a non-leader.
- * TODO if heartbeats can make nodes become followers then this needs to be called before a heartbeat is sent to a new node too.
- *
- * isLocalNodeElectedMaster() should reflect whether this node is a leader, and nodeExists()
- * should indicate whether nodes are known publication targets or not.
*/
- public void setCurrentNodes(DiscoveryNodes discoveryNodes) {
+ void setCurrentNodes(DiscoveryNodes discoveryNodes) {
logger.trace("setCurrentNodes: {}", discoveryNodes);
this.discoveryNodes = discoveryNodes;
}
@@ -174,11 +170,13 @@ public class LeaderChecker {
assert discoveryNodes != null;
if (discoveryNodes.isLocalNodeElectedMaster() == false) {
- logger.debug("non-master handling {}", request);
- throw new CoordinationStateRejectedException("non-leader rejecting leader check");
+ logger.debug("rejecting leader check on non-master {}", request);
+ throw new CoordinationStateRejectedException(
+ "rejecting leader check from [" + request.getSender() + "] sent to a node that is no longer the master");
} else if (discoveryNodes.nodeExists(request.getSender()) == false) {
- logger.debug("leader check from unknown node: {}", request);
- throw new CoordinationStateRejectedException("leader check from unknown node");
+ logger.debug("rejecting leader check from removed node: {}", request);
+ throw new CoordinationStateRejectedException(
+ "rejecting leader check since [" + request.getSender() + "] has been removed from the cluster");
} else {
logger.trace("handling {}", request);
}
@@ -332,15 +330,15 @@ public class LeaderChecker {
}
}
- public static class LeaderCheckRequest extends TransportRequest {
+ static class LeaderCheckRequest extends TransportRequest {
private final DiscoveryNode sender;
- public LeaderCheckRequest(final DiscoveryNode sender) {
+ LeaderCheckRequest(final DiscoveryNode sender) {
this.sender = sender;
}
- public LeaderCheckRequest(final StreamInput in) throws IOException {
+ LeaderCheckRequest(final StreamInput in) throws IOException {
super(in);
sender = new DiscoveryNode(in);
}
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/LeaderCheckerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/LeaderCheckerTests.java
index ce25d24bce6..496a25e1802 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/LeaderCheckerTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/LeaderCheckerTests.java
@@ -220,7 +220,7 @@ public class LeaderCheckerTests extends ESTestCase {
return;
}
assertThat(action, equalTo(LEADER_CHECK_ACTION_NAME));
- assertTrue(node.equals(leader));
+ assertEquals(node, leader);
final Response response = responseHolder[0];
deterministicTaskQueue.scheduleNow(new Runnable() {
@@ -340,7 +340,7 @@ public class LeaderCheckerTests extends ESTestCase {
assertFalse(handler.successfulResponseReceived);
assertThat(handler.transportException.getRootCause(), instanceOf(CoordinationStateRejectedException.class));
CoordinationStateRejectedException cause = (CoordinationStateRejectedException) handler.transportException.getRootCause();
- assertThat(cause.getMessage(), equalTo("leader check from unknown node"));
+ assertThat(cause.getMessage(), equalTo("rejecting leader check since [" + otherNode + "] has been removed from the cluster"));
}
{
@@ -364,7 +364,8 @@ public class LeaderCheckerTests extends ESTestCase {
assertFalse(handler.successfulResponseReceived);
assertThat(handler.transportException.getRootCause(), instanceOf(CoordinationStateRejectedException.class));
CoordinationStateRejectedException cause = (CoordinationStateRejectedException) handler.transportException.getRootCause();
- assertThat(cause.getMessage(), equalTo("non-leader rejecting leader check"));
+ assertThat(cause.getMessage(),
+ equalTo("rejecting leader check from [" + otherNode + "] sent to a node that is no longer the master"));
}
}
@@ -397,7 +398,7 @@ public class LeaderCheckerTests extends ESTestCase {
public void testLeaderCheckRequestEqualsHashcodeSerialization() {
LeaderCheckRequest request = new LeaderCheckRequest(
new DiscoveryNode(randomAlphaOfLength(10), buildNewFakeTransportAddress(), Version.CURRENT));
- // Note: the explicit cast of the CopyFunction is needed for some IDE (specifically Eclipse 4.8.0) to infer the right type
+ //noinspection RedundantCast since it is needed for some IDEs (specifically Eclipse 4.8.0) to infer the right type
EqualsHashCodeTestUtils.checkEqualsAndHashCode(request,
(CopyFunction<LeaderCheckRequest>) rq -> copyWriteable(rq, writableRegistry(), LeaderCheckRequest::new),
rq -> new LeaderCheckRequest(new DiscoveryNode(randomAlphaOfLength(10), buildNewFakeTransportAddress(), Version.CURRENT)));
From a1af2fe96ae67968f1d1c393cddd3bbfe98809d2 Mon Sep 17 00:00:00 2001
From: Jack Conradson