diff --git a/build.gradle b/build.gradle index bc0018c0d1a..b65a43aeac4 100644 --- a/build.gradle +++ b/build.gradle @@ -49,7 +49,7 @@ subprojects { } tasks.withType(LicenseHeadersTask.class) { - approvedLicenses = ['Elasticsearch Confidential'] + approvedLicenses = ['Elasticsearch Confidential', 'Generated'] additionalLicense 'ESCON', 'Elasticsearch Confidential', 'ELASTICSEARCH CONFIDENTIAL' } ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-api:${version}": ':x-pack-elasticsearch:plugin'] diff --git a/dev-tools/checkstyle_suppressions.xml b/dev-tools/checkstyle_suppressions.xml index ea7537e6a0e..481c6861a4d 100644 --- a/dev-tools/checkstyle_suppressions.xml +++ b/dev-tools/checkstyle_suppressions.xml @@ -5,6 +5,12 @@ + + + + + + diff --git a/dev-tools/ci b/dev-tools/ci index b501cb30216..9f74c613a05 100755 --- a/dev-tools/ci +++ b/dev-tools/ci @@ -57,6 +57,20 @@ case $key in "-Dtests.badapples=true" ) ;; + smokeTestSql) # TODO remove this once we are ready to merge sql down + GRADLE_CLI_ARGS=( + "--info" + "-psql" + "check" + ":x-pack-elasticsearch:plugin:precommit" + ":x-pack-elasticsearch:qa:sql:check" + ":x-pack-elasticsearch:qa:sql:multinode:check" + ":x-pack-elasticsearch:qa:sql:no-security:check" + ":x-pack-elasticsearch:qa:sql:security:check" + ":x-pack-elasticsearch:qa:sql:security:no-ssl:check" + ":x-pack-elasticsearch:qa:sql:security:ssl:check" + ) + ;; releaseTest) GRADLE_CLI_ARGS=( "--info" @@ -135,7 +149,9 @@ if [ -z ${USE_EXISTING_ES:+x} ]; then echo " -> using CI branch $BRANCH from elastic repo" } - pick_clone_target + # pick_clone_target NOCOMMIT just use master for testing our feature branch. Do not merge this..... 
+ GH_USER="elastic" + BRANCH="master" DEPTH=1 if [ -n "$BUILD_METADATA" ]; then diff --git a/docs/build.gradle b/docs/build.gradle index 92da37f963e..73289c8fc41 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -199,6 +199,77 @@ setups['my_inactive_watch'] = ''' ''' setups['my_active_watch'] = setups['my_inactive_watch'].replace( 'active: false', 'active: true') + +// Used by SQL because it looks SQL-ish +setups['library'] = ''' + - do: + indices.create: + index: library + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 + mappings: + book: + properties: + name: + type: keyword + author: + type: keyword + release_date: + type: date + page_count: + type: short + - do: + bulk: + index: library + type: book + refresh: true + body: | + {"index":{"_id": "Leviathan Wakes"}} + {"name": "Leviathan Wakes", "author": "James S.A. Corey", "release_date": "2011-06-02", "page_count": 561} + {"index":{"_id": "Hyperion"}} + {"name": "Hyperion", "author": "Dan Simmons", "release_date": "1989-05-26", "page_count": 482} + {"index":{"_id": "Dune"}} + {"name": "Dune", "author": "Frank Herbert", "release_date": "1965-06-01", "page_count": 604} + {"index":{"_id": "Consider Phlebas"}} + {"name": "Consider Phlebas", "author": "Iain M. Banks", "release_date": "1987-04-23", "page_count": 471} + {"index":{"_id": "Pandora's Star"}} + {"name": "Pandora's Star", "author": "Peter F. 
Hamilton", "release_date": "2004-03-02", "page_count": 768} + {"index":{"_id": "Revelation Space"}} + {"name": "Revelation Space", "author": "Alastair Reynolds", "release_date": "2000-03-15", "page_count": 585} + {"index":{"_id": "A Fire Upon the Deep"}} + {"name": "A Fire Upon the Deep", "author": "Vernor Vinge", "release_date": "1992-06-01", "page_count": 613} + {"index":{"_id": "Ender's Game"}} + {"name": "Ender's Game", "author": "Orson Scott Card", "release_date": "1985-06-01", "page_count": 324} + {"index":{"_id": "1984"}} + {"name": "1984", "author": "George Orwell", "release_date": "1985-06-01", "page_count": 328} + {"index":{"_id": "Fahrenheit 451"}} + {"name": "Fahrenheit 451", "author": "Ray Bradbury", "release_date": "1953-10-15", "page_count": 227} + {"index":{"_id": "Brave New World"}} + {"name": "Brave New World", "author": "Aldous Huxley", "release_date": "1932-06-01", "page_count": 268} + {"index":{"_id": "Foundation"}} + {"name": "Foundation", "author": "Isaac Asimov", "release_date": "1951-06-01", "page_count": 224} + {"index":{"_id": "The Giver"}} + {"name": "The Giver", "author": "Lois Lowry", "release_date": "1993-04-26", "page_count": 208} + {"index":{"_id": "Slaughterhouse-Five"}} + {"name": "Slaughterhouse-Five", "author": "Kurt Vonnegut", "release_date": "1969-06-01", "page_count": 275} + {"index":{"_id": "The Hitchhiker's Guide to the Galaxy"}} + {"name": "The Hitchhiker's Guide to the Galaxy", "author": "Douglas Adams", "release_date": "1979-10-12", "page_count": 180} + {"index":{"_id": "Snow Crash"}} + {"name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470} + {"index":{"_id": "Neuromancer"}} + {"name": "Neuromancer", "author": "William Gibson", "release_date": "1984-07-01", "page_count": 271} + {"index":{"_id": "The Handmaid's Tale"}} + {"name": "The Handmaid's Tale", "author": "Margaret Atwood", "release_date": "1985-06-01", "page_count": 311} + {"index":{"_id": "Starship Troopers"}} + 
{"name": "Starship Troopers", "author": "Robert A. Heinlein", "release_date": "1959-12-01", "page_count": 335} + {"index":{"_id": "The Left Hand of Darkness"}} + {"name": "The Left Hand of Darkness", "author": "Ursula K. Le Guin", "release_date": "1969-06-01", "page_count": 304} + {"index":{"_id": "The Moon is a Harsh Mistress"}} + {"name": "The Moon is a Harsh Mistress", "author": "Robert A. Heinlein", "release_date": "1966-04-01", "page_count": 288} + +''' setups['server_metrics_index'] = ''' - do: indices.create: diff --git a/docs/en/index.asciidoc b/docs/en/index.asciidoc index a8eb69054fc..5b90a23ed3b 100644 --- a/docs/en/index.asciidoc +++ b/docs/en/index.asciidoc @@ -23,6 +23,9 @@ include::release-notes/xpack-breaking.asciidoc[] :edit_url: include::{es-repo-dir}/reference/index-shared3.asciidoc[] +:edit_url!: +include::sql/index.asciidoc[] + :edit_url!: include::monitoring/index.asciidoc[] diff --git a/docs/en/settings/configuring-xes.asciidoc b/docs/en/settings/configuring-xes.asciidoc index b12969b772e..d13efc55190 100644 --- a/docs/en/settings/configuring-xes.asciidoc +++ b/docs/en/settings/configuring-xes.asciidoc @@ -11,3 +11,4 @@ include::ml-settings.asciidoc[] include::monitoring-settings.asciidoc[] include::security-settings.asciidoc[] include::notification-settings.asciidoc[] +include::sql-settings.asciidoc[] diff --git a/docs/en/settings/sql-settings.asciidoc b/docs/en/settings/sql-settings.asciidoc new file mode 100644 index 00000000000..52e11d30989 --- /dev/null +++ b/docs/en/settings/sql-settings.asciidoc @@ -0,0 +1,17 @@ +[role="xpack"] +[[sql-settings]] +=== SQL Access Settings in Elasticsearch +++++ +SQL Access Settings +++++ + +SQL Access is enabled by default when you install {xpack}. You can configure +these SQL Access settings in the `elasticsearch.yml` file. + +[float] +[[general-sql-settings]] +==== General SQL Access Settings +`xpack.sql.enabled`:: +Set to `false` to disable SQL Access on the node. 
+ + diff --git a/docs/en/sql/endpoints/sql-cli.asciidoc b/docs/en/sql/endpoints/sql-cli.asciidoc new file mode 100644 index 00000000000..9701409c90d --- /dev/null +++ b/docs/en/sql/endpoints/sql-cli.asciidoc @@ -0,0 +1,39 @@ +[role="xpack"] +[[sql-cli]] +== SQL CLI + +The SQL CLI is a stand alone Java application for quick interaction +with X-Pack SQL. You can run it like this: + +["source","bash",subs="attributes,callouts"] +-------------------------------------------------- +$ java -jar cli-{version}.jar +-------------------------------------------------- + +You can pass the URL of the Elasticsearch instance to connect to as +the first parameter: + +["source","bash",subs="attributes,callouts"] +-------------------------------------------------- +$ java -jar cli-{version}.jar https://some.server:9200 +-------------------------------------------------- + +The cli jar is entirely stand alone and can be moved wherever it is +needed. + +Once the CLI is running you can use any <> that +Elasticsearch supports: + +[source,sqlcli] +-------------------------------------------------- +sql> SELECT * FROM library WHERE page_count > 500 ORDER BY page_count DESC; + author | name | page_count +----------------------------+-----------------------+--------------- +Victor Hugo |Les Misérables |1463 +Miguel De Cervantes Saavedra|Don Quixote |1072 +Miguel De Cervantes Saavedra|Don Quixote |1072 +Herman Melville |Moby-Dick or, The Whale|720 +Charles Dickens |Oliver Twist |608 +-------------------------------------------------- +// TODO it'd be lovely to be able to assert that this is correct but +// that is probably more work than it is worth right now. 
diff --git a/docs/en/sql/endpoints/sql-endpoints.asciidoc b/docs/en/sql/endpoints/sql-endpoints.asciidoc new file mode 100644 index 00000000000..d9bb59f3896 --- /dev/null +++ b/docs/en/sql/endpoints/sql-endpoints.asciidoc @@ -0,0 +1,4 @@ +include::sql-rest.asciidoc[] +include::sql-translate.asciidoc[] +include::sql-cli.asciidoc[] +include::sql-jdbc.asciidoc[] diff --git a/docs/en/sql/endpoints/sql-jdbc.asciidoc b/docs/en/sql/endpoints/sql-jdbc.asciidoc new file mode 100644 index 00000000000..4e52aae8eea --- /dev/null +++ b/docs/en/sql/endpoints/sql-jdbc.asciidoc @@ -0,0 +1,48 @@ +[role="xpack"] +[[sql-jdbc]] +== SQL JDBC + +Elasticsearch's SQL jdbc driver is a fully featured JDBC driver +for Elasticsearch. You can connect to it using the two APIs offered +by JDBC, namely `java.sql.Driver` and `DriverManager`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{jdbc-tests}/JdbcIntegrationTestCase.java[connect-dm] +-------------------------------------------------- +<1> The server and port on which Elasticsearch is listening for +HTTP traffic. The port is usually 9200. +<2> Properties for connecting to Elasticsearch. An empty `Properties` +instance is fine for unsecured Elasticsearch. + +or `javax.sql.DataSource` through +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{jdbc-tests}/JdbcIntegrationTestCase.java[connect-ds] +-------------------------------------------------- +<1> The server and port on which Elasticsearch is listening for +HTTP traffic. The port is usually 9200. +<2> Properties for connecting to Elasticsearch. An empty `Properties` +instance is fine for unsecured Elasticsearch. + +Which one to use? 
Typically client applications that provide most +configuration parameters in the URL rely on the `DriverManager`-style +while `DataSource` is preferred when being _passed_ around since it can be +configured in one place and the consumer only has to call `getConnection` +without having to worry about any other parameters. + +To connect to a secured Elasticsearch server the `Properties` +should look like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{security-tests}/JdbcSecurityIT.java[admin_properties] +-------------------------------------------------- + +Once you have the connection you can use it like any other JDBC +connection. For example: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{jdbc-tests}/SimpleExampleTestCase.java[simple_example] +-------------------------------------------------- diff --git a/docs/en/sql/endpoints/sql-rest.asciidoc b/docs/en/sql/endpoints/sql-rest.asciidoc new file mode 100644 index 00000000000..f6fcd00c41e --- /dev/null +++ b/docs/en/sql/endpoints/sql-rest.asciidoc @@ -0,0 +1,186 @@ +[role="xpack"] +[[sql-rest]] +== SQL REST API + +The SQL REST API accepts SQL in a JSON document, executes it, +and returns the results. For example: + + +[source,js] +-------------------------------------------------- +POST /_xpack/sql +{ + "query": "SELECT * FROM library ORDER BY page_count DESC LIMIT 5" +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:library] + +Which returns: + +[source,text] +-------------------------------------------------- + author | name | page_count | release_date +-----------------+--------------------+---------------+--------------- +Peter F. 
Hamilton|Pandora's Star |768 |1078185600000 +Vernor Vinge |A Fire Upon the Deep|613 |707356800000 +Frank Herbert |Dune |604 |-144720000000 +Alastair Reynolds|Revelation Space |585 |953078400000 +James S.A. Corey |Leviathan Wakes |561 |1306972800000 +-------------------------------------------------- +// TESTRESPONSE[_cat] + +You can also choose to get results in a structured format by adding the `format` parameter. Currently supported formats: +- text (default) +- json +- smile +- yaml +- cbor + +Alternatively you can set the Accept HTTP header to the appropriate media format. +All formats above are supported, the GET parameter takes precedence over the header. + + +[source,js] +-------------------------------------------------- +POST /_xpack/sql?format=json +{ + "query": "SELECT * FROM library ORDER BY page_count DESC", + "fetch_size": 5 +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:library] + +Which returns: + +[source,js] +-------------------------------------------------- +{ + "columns": [ + {"name": "author", "type": "keyword"}, + {"name": "name", "type": "keyword"}, + {"name": "page_count", "type": "short"}, + {"name": "release_date", "type": "date"} + ], + "size": 5, + "rows": [ + ["Peter F. Hamilton", "Pandora's Star", 768, 1078185600000], + ["Vernor Vinge", "A Fire Upon the Deep", 613, 707356800000], + ["Frank Herbert", "Dune", 604, -144720000000], + ["Alastair Reynolds", "Revelation Space", 585, 953078400000], + ["James S.A. Corey", "Leviathan Wakes", 561, 1306972800000] + ], + "cursor": "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWWWdrRlVfSS1TbDYtcW9lc1FJNmlYdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl+v///w8=" +} +-------------------------------------------------- +// TESTRESPONSE[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWWWdrRlVfSS1TbDYtcW9lc1FJNmlYdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl\+v\/\/\/w8=/$body.cursor/] + +You can continue to the next page by sending back the `cursor` field. 
In +case of text format the cursor is returned as `Cursor` http header. + +[source,js] +-------------------------------------------------- +POST /_xpack/sql?format=json +{ + "cursor": "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] +// TEST[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f\/\/\/w8=/$body.cursor/] + +Which looks like: + +[source,js] +-------------------------------------------------- +{ + "size" : 5, + "rows" : [ + ["Dan Simmons", "Hyperion", 482, 612144000000], + ["Iain M. Banks", "Consider Phlebas", 471, 546134400000], + ["Neal Stephenson", "Snow Crash", 470, 707356800000], + ["Robert A. Heinlein", "Starship Troopers", 335, -318297600000], + ["George Orwell", "1984", 328, 486432000000] + ], + "cursor" : "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWODRMaXBUaVlRN21iTlRyWHZWYUdrdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl9f///w8=" +} +-------------------------------------------------- +// TESTRESPONSE[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWODRMaXBUaVlRN21iTlRyWHZWYUdrdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl9f\/\/\/w8=/$body.cursor/] + +Note that the `column` object is only part of the first page. + +You've reached the last page when there is no `cursor` returned +in the results. Like Elasticsearch's <>, +SQL may keep state in Elasticsearch to support the cursor. Unlike +scroll, receiving the last page is enough to guarantee that the +Elasticsearch state is cleared. 
+ +To clear the state earlier, you can use the clear cursor command: + +[source,js] +-------------------------------------------------- +POST /_xpack/sql/close +{ + "cursor": "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] +// TEST[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f\/\/\/w8=/$body.cursor/] + +Which will likely return the following: + +[source,js] +-------------------------------------------------- +{ + "succeeded" : true +} +-------------------------------------------------- +// TESTRESPONSE + + + +[[sql-rest-filtering]] + +You can filter the results that SQL will run on using a standard +Elasticsearch query DSL by specifying the query in the filter +parameter. + +[source,js] +-------------------------------------------------- +POST /_xpack/sql +{ + "query": "SELECT * FROM library ORDER BY page_count DESC", + "filter": { + "range": { + "page_count": { + "gte" : 100, + "lte" : 200 + } + } + }, + "fetch_size": 5 +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:library] + +Which returns: + +[source,text] +-------------------------------------------------- + author | name | page_count | release_date +---------------+------------------------------------+---------------+--------------- +Douglas Adams |The Hitchhiker's Guide to the Galaxy|180 |308534400000 +-------------------------------------------------- +// TESTRESPONSE[_cat] + +[[sql-rest-fields]] +In addition to the `query` and `cursor` fields, the request can +contain `fetch_size` and `time_zone`. `fetch_size` is a hint for how +many results to return in each page. SQL might choose to return more +or fewer results though. `time_zone` is the time zone to use for date +functions and date parsing. 
`time_zone` defaults to `utc` and can take +any values documented +http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html[here]. diff --git a/docs/en/sql/endpoints/sql-translate.asciidoc b/docs/en/sql/endpoints/sql-translate.asciidoc new file mode 100644 index 00000000000..19c5befb392 --- /dev/null +++ b/docs/en/sql/endpoints/sql-translate.asciidoc @@ -0,0 +1,48 @@ +[role="xpack"] +[[sql-translate]] +== SQL Translate API + +The SQL Translate API accepts SQL in a JSON document and translates it +into native Elasticsearch queries. For example: + +[source,js] +-------------------------------------------------- +POST /_xpack/sql/translate +{ + "query": "SELECT * FROM library ORDER BY page_count DESC", + "fetch_size": 10 +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:library] + +Which returns: + +[source,js] +-------------------------------------------------- +{ + "size" : 10, + "docvalue_fields" : [ + "author", + "name", + "page_count", + "release_date" + ], + "sort" : [ + { + "page_count" : { + "order" : "desc" + } + } + ] +} +-------------------------------------------------- +// TESTRESPONSE + +Which is the request that SQL will run to provide the results. +In this case, SQL will use the <> +API. If the result contained an aggregation then SQL would use +the normal <> API. + +The request body accepts all of the <> that +the <> accepts except `cursor`. 
diff --git a/docs/en/sql/functions/sql-functions.asciidoc b/docs/en/sql/functions/sql-functions.asciidoc new file mode 100644 index 00000000000..2cf768a8b8a --- /dev/null +++ b/docs/en/sql/functions/sql-functions.asciidoc @@ -0,0 +1,12 @@ +[[sql-functions]] +== Functions and Operators + + +// logical operators +// comparison +// conversion +// math +// date time +// aggregate + +// geospatial \ No newline at end of file diff --git a/docs/en/sql/index.asciidoc b/docs/en/sql/index.asciidoc new file mode 100644 index 00000000000..6a5600d6281 --- /dev/null +++ b/docs/en/sql/index.asciidoc @@ -0,0 +1,34 @@ +[role="xpack"] +[[xpack-sql]] += SQL Access + +:sql-tests: {docdir}/../../qa/sql +:sql-specs: {sql-tests}/src/main/resources +:jdbc-tests: {sql-tests}/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc +:security-tests: {sql-tests}/security/src/test/java/org/elasticsearch/xpack/qa/sql/security + +[partintro] +-- +X-Pack includes a SQL feature to execute SQL against Elasticsearch +indices and return tabular results. There are four main components: + +<>:: + Accepts SQL in a JSON document, executes it, and returns the + results. +<>:: + Accepts SQL in a JSON document and translates it into a native + Elasticsearch query and returns that. +<>:: + Command line application that connects to Elasticsearch to execute + SQL and print tabular results. +<>:: + A JDBC driver for Elasticsearch. +-- + +include::sql-overview.asciidoc[] +include::sql-getting-started.asciidoc[] +include::endpoints/sql-endpoints.asciidoc[] +include::functions/sql-functions.asciidoc[] +include::language/sql-language.asciidoc[] + +:jdbc-tests!: diff --git a/docs/en/sql/language/sql-data-types.asciidoc b/docs/en/sql/language/sql-data-types.asciidoc new file mode 100644 index 00000000000..0d7fc6883b8 --- /dev/null +++ b/docs/en/sql/language/sql-data-types.asciidoc @@ -0,0 +1,5 @@ +[[sql-data-types]] +=== Data Type and Mapping + +List of data types in SQL and how they actually map to Elasticsearch. 
+Also mention the corner cases - multi-fields, names with dots, etc... \ No newline at end of file diff --git a/docs/en/sql/language/sql-language.asciidoc b/docs/en/sql/language/sql-language.asciidoc new file mode 100644 index 00000000000..3ad7dc61748 --- /dev/null +++ b/docs/en/sql/language/sql-language.asciidoc @@ -0,0 +1,9 @@ +[[sql-spec]] +== SQL Language + +This chapter describes the SQL syntax and data types supported in X-Pack. +As a general rule, the syntax tries to adhere as much as possible to ANSI SQL to make the transition seamless. + +include::sql-data-types.asciidoc[] +include::sql-syntax.asciidoc[] +include::sql-reserved.asciidoc[] diff --git a/docs/en/sql/language/sql-reserved.asciidoc b/docs/en/sql/language/sql-reserved.asciidoc new file mode 100644 index 00000000000..920371f895b --- /dev/null +++ b/docs/en/sql/language/sql-reserved.asciidoc @@ -0,0 +1,78 @@ +[[sql-spec-reserved]] +=== Reserved Keywords + +Table with reserved keywords that need to be quoted. Also provide an example to make it more obvious. + +The following table lists all of the keywords that are reserved in X-Pack SQL, +along with their status in the SQL standard. 
These reserved keywords must +be quoted (using double quotes) in order to be used as an identifier, for example: + +[source, sql] +---- +SELECT "AS" FROM index +---- + +[cols="^,^,^",options="header"] + +|=== + +|Keyword |SQL:2016 |SQL-92 + + +|`ALL` |reserved |reserved +|`AND` |reserved |reserved +|`ANY` |reserved |reserved +|`AS` |reserved |reserved +|`ASC` |reserved |reserved +|`BETWEEN` |reserved |reserved +|`BY` |reserved |reserved +|`CAST` |reserved |reserved +|`COLUMN` |reserved |reserved +|`CROSS` |reserved |reserved +|`DESC` |reserved |reserved +|`DESCRIBE` |reserved |reserved +|`DISTINCT` |reserved |reserved +|`EXISTS` |reserved |reserved +|`EXPLAIN` |reserved |reserved +|`EXTRACT` |reserved |reserved +|`FALSE` |reserved |reserved +|`FOR` |reserved |reserved +|`FROM` |reserved |reserved +|`FULL` |reserved |reserved +|`GROUP` |reserved |reserved +|`GROUPING` |reserved |reserved +|`HAVING` |reserved |reserved +|`IN` |reserved |reserved +|`INNER` |reserved |reserved +|`INTEGER` |reserved |reserved +|`INTO` |reserved |reserved +|`IS` |reserved |reserved +|`JOIN` |reserved |reserved +|`LAST` |reserved |reserved +|`LEFT` |reserved |reserved +|`LIKE` |reserved |reserved +|`LIMIT` |reserved |reserved +|`MATCH` |reserved |reserved +|`NATURAL` |reserved |reserved +|`NO` |reserved |reserved +|`NOT` |reserved |reserved +|`NULL` |reserved |reserved +|`ON` |reserved |reserved +|`OPTION` |reserved |reserved +|`OR` |reserved |reserved +|`ORDER` |reserved |reserved +|`OUTER` |reserved |reserved +|`RIGHT` |reserved |reserved +|`SELECT` |reserved |reserved +|`SESSION` | |reserved +|`SET` |reserved |reserved +|`TABLE` |reserved |reserved +|`THEN` |reserved |reserved +|`TO` |reserved |reserved +|`TRUE` |reserved |reserved +|`USING` |reserved |reserved +|`WHEN` |reserved |reserved +|`WHERE` |reserved |reserved +|`WITH` |reserved |reserved + +|=== \ No newline at end of file diff --git a/docs/en/sql/language/sql-syntax.asciidoc b/docs/en/sql/language/sql-syntax.asciidoc new 
file mode 100644 index 00000000000..dcee4ca6d55 --- /dev/null +++ b/docs/en/sql/language/sql-syntax.asciidoc @@ -0,0 +1,11 @@ +[[sql-spec-syntax]] +=== SQL Statement Syntax + +Big list of the entire syntax in SQL + +Each entry might get its own file and code snippet + +["source","sql",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{sql-spec}/select.sql-spec[wildcardWithOrder] +-------------------------------------------------- diff --git a/docs/en/sql/sql-getting-started.asciidoc b/docs/en/sql/sql-getting-started.asciidoc new file mode 100644 index 00000000000..0affe4042fa --- /dev/null +++ b/docs/en/sql/sql-getting-started.asciidoc @@ -0,0 +1,5 @@ +[[sql-getting-started]] +== Getting Started with SQL + +Basic chapter on using REST, CLI and JDBC to run some basic queries and return some results. +To keep this chapter should be about adding 3-4 entries of data, then using each technology to get the results out. \ No newline at end of file diff --git a/docs/en/sql/sql-overview.asciidoc b/docs/en/sql/sql-overview.asciidoc new file mode 100644 index 00000000000..cc49d61a535 --- /dev/null +++ b/docs/en/sql/sql-overview.asciidoc @@ -0,0 +1,4 @@ +[[sql-overview]] +== Overview + +Overview of the difference chapters in SQL docs. 
\ No newline at end of file diff --git a/docs/en/sql/standalone.asciidoc b/docs/en/sql/standalone.asciidoc new file mode 100644 index 00000000000..6649ce9eb8d --- /dev/null +++ b/docs/en/sql/standalone.asciidoc @@ -0,0 +1,9 @@ +[[elasticsearch-sql-standalone]] += Elasticsearch SQL Standalone + +:es-repo-dir: {docdir}/../../../../../elasticsearch/docs + +:edit_url: +include::{es-repo-dir}/reference/index-shared3.asciidoc[] +:edit_url!: +include::index.asciidoc[] diff --git a/plugin/build.gradle b/plugin/build.gradle index 47054277708..5e86de92119 100644 --- a/plugin/build.gradle +++ b/plugin/build.gradle @@ -10,6 +10,7 @@ import java.nio.file.StandardCopyOption group 'org.elasticsearch.plugin' apply plugin: 'elasticsearch.esplugin' + esplugin { name 'x-pack' description 'Elasticsearch Expanded Pack Plugin' @@ -30,11 +31,22 @@ dependencyLicenses { mapping from: /owasp-java-html-sanitizer.*/, to: 'owasp-java-html-sanitizer' mapping from: /transport-netty.*/, to: 'elasticsearch' mapping from: /elasticsearch-rest-client.*/, to: 'elasticsearch' + mapping from: /server.*/, to: 'elasticsearch' + mapping from: /jdbc-proto.*/, to: 'elasticsearch' + mapping from: /cli-proto.*/, to: 'elasticsearch' + mapping from: /shared-proto.*/, to: 'elasticsearch' + mapping from: /aggs-matrix-stats.*/, to: 'elasticsearch' //pulled in by sql:server mapping from: /http.*/, to: 'httpclient' // pulled in by rest client mapping from: /commons-.*/, to: 'commons' // pulled in by rest client ignoreSha 'elasticsearch-rest-client' ignoreSha 'transport-netty4' + ignoreSha 'tribe' + ignoreSha 'server' + ignoreSha 'jdbc-proto' + ignoreSha 'cli-proto' + ignoreSha 'shared-proto' ignoreSha 'elasticsearch-rest-client-sniffer' + ignoreSha 'aggs-matrix-stats' } licenseHeaders { @@ -86,6 +98,9 @@ dependencies { nativeBundle "org.elasticsearch.ml:ml-cpp:${project.version}@zip" testCompile 'org.ini4j:ini4j:0.5.2' + // sql's server components and its transitive dependencies + compile 
project(':x-pack-elasticsearch:sql:server') + // common test deps testCompile 'org.elasticsearch:securemock:1.2' testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}" diff --git a/plugin/licenses/antlr4-runtime-4.5.3.jar.sha1 b/plugin/licenses/antlr4-runtime-4.5.3.jar.sha1 new file mode 100644 index 00000000000..535955b7d68 --- /dev/null +++ b/plugin/licenses/antlr4-runtime-4.5.3.jar.sha1 @@ -0,0 +1 @@ +2609e36f18f7e8d593cc1cddfb2ac776dc96b8e0 \ No newline at end of file diff --git a/plugin/licenses/antlr4-runtime-LICENSE.txt b/plugin/licenses/antlr4-runtime-LICENSE.txt new file mode 100644 index 00000000000..95d0a2554f6 --- /dev/null +++ b/plugin/licenses/antlr4-runtime-LICENSE.txt @@ -0,0 +1,26 @@ +[The "BSD license"] +Copyright (c) 2015 Terence Parr, Sam Harwell +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/plugin/licenses/antlr4-runtime-NOTICE.txt b/plugin/licenses/antlr4-runtime-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/plugin/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/plugin/src/main/java/org/elasticsearch/license/XPackLicenseState.java index e5c33c782eb..56a7613c38a 100644 --- a/plugin/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/plugin/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -57,6 +57,9 @@ public class XPackLicenseState { messages.put(XPackPlugin.UPGRADE, new String[] { "Upgrade API is disabled" }); + messages.put(XPackPlugin.SQL, new String[] { + "SQL support is disabled" + }); EXPIRATION_MESSAGES = Collections.unmodifiableMap(messages); } @@ -73,6 +76,7 @@ public class XPackLicenseState { messages.put(XPackPlugin.GRAPH, XPackLicenseState::graphAcknowledgementMessages); messages.put(XPackPlugin.MACHINE_LEARNING, XPackLicenseState::machineLearningAcknowledgementMessages); messages.put(XPackPlugin.LOGSTASH, XPackLicenseState::logstashAcknowledgementMessages); + messages.put(XPackPlugin.SQL, XPackLicenseState::sqlAcknowledgementMessages); ACKNOWLEDGMENT_MESSAGES = Collections.unmodifiableMap(messages); } @@ -209,6 +213,21 @@ public class XPackLicenseState { return Strings.EMPTY_ARRAY; } + private static String[] sqlAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) { + switch (newMode) { + case BASIC: + case STANDARD: + case GOLD: + switch 
(currentMode) { + case TRIAL: + case PLATINUM: + return new String[] { "JDBC support will be disabled, but you can continue to use SQL CLI and REST endpoint" }; + } + break; + } + return Strings.EMPTY_ARRAY; + } + /** A wrapper for the license mode and state, to allow atomically swapping. */ private static class Status { @@ -491,4 +510,29 @@ public class XPackLicenseState { // Should work on all active licenses return localStatus.active; } + + /** + * Determine if SQL support should be enabled. + *

+ * SQL is available for all license types except {@link OperationMode#MISSING} + */ + public boolean isSqlAllowed() { + return status.active; + } + + /** + * Determine if JDBC support should be enabled. + *

+ * JDBC is available only in for {@link OperationMode#PLATINUM} and {@link OperationMode#TRIAL} licences + */ + public boolean isJdbcAllowed() { + // status is volatile + Status localStatus = status; + OperationMode operationMode = localStatus.mode; + + boolean licensed = operationMode == OperationMode.TRIAL || operationMode == OperationMode.PLATINUM; + + return licensed && localStatus.active; + } + } diff --git a/plugin/src/main/java/org/elasticsearch/xpack/XPackPlugin.java b/plugin/src/main/java/org/elasticsearch/xpack/XPackPlugin.java index abd2f7e0251..c8a1ad03f06 100644 --- a/plugin/src/main/java/org/elasticsearch/xpack/XPackPlugin.java +++ b/plugin/src/main/java/org/elasticsearch/xpack/XPackPlugin.java @@ -41,6 +41,7 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.ingest.Processor; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.Licensing; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.ActionPlugin; @@ -85,6 +86,8 @@ import org.elasticsearch.xpack.rest.action.RestXPackUsageAction; import org.elasticsearch.xpack.security.Security; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.sql.plugin.SqlLicenseChecker; +import org.elasticsearch.xpack.sql.plugin.SqlPlugin; import org.elasticsearch.xpack.ssl.SSLConfigurationReloader; import org.elasticsearch.xpack.ssl.SSLService; import org.elasticsearch.xpack.upgrade.Upgrade; @@ -141,6 +144,9 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I /** Name constant for the upgrade feature. */ public static final String UPGRADE = "upgrade"; + /** Name constant for the sql feature. 
*/ + public static final String SQL = "sql"; + // inside of YAML settings we still use xpack do not having handle issues with dashes private static final String SETTINGS_NAME = "xpack"; @@ -191,6 +197,7 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I protected Deprecation deprecation; protected Upgrade upgrade; + protected SqlPlugin sql; public XPackPlugin( final Settings settings, @@ -210,6 +217,19 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I this.logstash = new Logstash(settings); this.deprecation = new Deprecation(); this.upgrade = new Upgrade(settings); + // sql projects don't depend on x-pack and as a result we cannot pass XPackLicenseState object to SqlPlugin directly here + this.sql = new SqlPlugin(XPackSettings.SQL_ENABLED.get(settings), new SqlLicenseChecker( + () -> { + if (!licenseState.isSqlAllowed()) { + throw LicenseUtils.newComplianceException(XPackPlugin.SQL); + } + }, + () -> { + if (!licenseState.isJdbcAllowed()) { + throw LicenseUtils.newComplianceException("jdbc"); + } + }) + ); // Check if the node is a transport client. if (transportClientMode == false) { this.extensionsService = new XPackExtensionsService(settings, resolveXPackExtensionsFile(env), getExtensions()); @@ -279,6 +299,11 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I components.addAll(upgrade.createComponents(client, clusterService, threadPool, resourceWatcherService, scriptService, xContentRegistry)); + /* Note that we need *client*, not *internalClient* because client preserves the + * authenticated user while internalClient throws that user away and acts as the + * x-pack user. 
*/ + components.addAll(sql.createComponents(client)); + PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(settings, tasksExecutors); PersistentTasksClusterService persistentTasksClusterService = new PersistentTasksClusterService(settings, registry, clusterService); components.add(persistentTasksClusterService); @@ -377,6 +402,7 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I actions.addAll(machineLearning.getActions()); actions.addAll(deprecation.getActions()); actions.addAll(upgrade.getActions()); + actions.addAll(sql.getActions()); return actions; } @@ -415,6 +441,8 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I indexNameExpressionResolver, nodesInCluster)); handlers.addAll(upgrade.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, settingsFilter, indexNameExpressionResolver, nodesInCluster)); + handlers.addAll(sql.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, settingsFilter, + indexNameExpressionResolver, nodesInCluster)); return handlers; } @@ -431,6 +459,7 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I entries.addAll(machineLearning.getNamedWriteables()); entries.addAll(licensing.getNamedWriteables()); entries.addAll(Security.getNamedWriteables()); + entries.addAll(SqlPlugin.getNamedWriteables()); entries.addAll(Monitoring.getNamedWriteables()); entries.addAll(Graph.getNamedWriteables()); return entries; diff --git a/plugin/src/main/java/org/elasticsearch/xpack/XPackSettings.java b/plugin/src/main/java/org/elasticsearch/xpack/XPackSettings.java index d29e1cf3f46..5dd6308d51e 100644 --- a/plugin/src/main/java/org/elasticsearch/xpack/XPackSettings.java +++ b/plugin/src/main/java/org/elasticsearch/xpack/XPackSettings.java @@ -76,6 +76,9 @@ public class XPackSettings { } }, Setting.Property.NodeScope); + /** Setting for enabling or disabling sql. Defaults to true. 
*/ + public static final Setting SQL_ENABLED = Setting.boolSetting("xpack.sql.enabled", true, Setting.Property.NodeScope); + /* * SSL settings. These are the settings that are specifically registered for SSL. Many are private as we do not explicitly use them * but instead parse based on a prefix (eg *.ssl.*) @@ -138,6 +141,7 @@ public class XPackSettings { settings.add(HTTP_SSL_ENABLED); settings.add(RESERVED_REALM_ENABLED_SETTING); settings.add(TOKEN_SERVICE_ENABLED_SETTING); + settings.add(SQL_ENABLED); return Collections.unmodifiableList(settings); } } diff --git a/plugin/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/plugin/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index ab91bbe8762..6c88dfc318a 100644 --- a/plugin/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/plugin/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -5,15 +5,6 @@ */ package org.elasticsearch.xpack.security.authz; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.Predicate; - import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.CompositeIndicesRequest; @@ -73,12 +64,23 @@ import org.elasticsearch.xpack.security.user.SystemUser; import org.elasticsearch.xpack.security.user.User; import org.elasticsearch.xpack.security.user.XPackSecurityUser; import org.elasticsearch.xpack.security.user.XPackUser; +import org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction; +import org.elasticsearch.xpack.sql.plugin.SqlTranslateAction; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; 
+import java.util.Map; +import java.util.Set; +import java.util.function.Predicate; import static org.elasticsearch.xpack.security.Security.setting; import static org.elasticsearch.xpack.security.support.Exceptions.authorizationError; public class AuthorizationService extends AbstractComponent { - public static final Setting ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING = Setting.boolSetting(setting("authc.anonymous.authz_exception"), true, Property.NodeScope); public static final String INDICES_PERMISSIONS_KEY = "_indices_permissions"; @@ -226,7 +228,7 @@ public class AuthorizationService extends AbstractComponent { grant(authentication, action, request, permission.names()); return; } else { - // we do this here in addition to the denial below since we might run into an assertion on scroll requrest below if we + // we do this here in addition to the denial below since we might run into an assertion on scroll request below if we // don't have permission to read cross cluster but wrap a scroll request. 
throw denial(authentication, action, request, permission.names()); } @@ -273,7 +275,8 @@ public class AuthorizationService extends AbstractComponent { final MetaData metaData = clusterService.state().metaData(); final AuthorizedIndices authorizedIndices = new AuthorizedIndices(authentication.getUser(), permission, action, metaData); - final ResolvedIndices resolvedIndices = resolveIndexNames(authentication, action, request, metaData, authorizedIndices, permission); + final ResolvedIndices resolvedIndices = resolveIndexNames(authentication, action, request, + metaData, authorizedIndices, permission); assert !resolvedIndices.isEmpty() : "every indices request needs to have its indices set thus the resolved indices must not be empty"; @@ -416,8 +419,8 @@ public class AuthorizationService extends AbstractComponent { throw new IllegalArgumentException("No equivalent action for opType [" + docWriteRequest.opType() + "]"); } - private ResolvedIndices resolveIndexNames(Authentication authentication, String action, TransportRequest request, MetaData metaData, - AuthorizedIndices authorizedIndices, Role permission) { + private ResolvedIndices resolveIndexNames(Authentication authentication, String action, TransportRequest request, + MetaData metaData, AuthorizedIndices authorizedIndices, Role permission) { try { return indicesAndAliasesResolver.resolve(request, metaData, authorizedIndices); } catch (Exception e) { @@ -478,7 +481,9 @@ public class AuthorizationService extends AbstractComponent { action.equals("indices:data/read/mpercolate") || action.equals("indices:data/read/msearch/template") || action.equals("indices:data/read/search/template") || - action.equals("indices:data/write/reindex"); + action.equals("indices:data/write/reindex") || + action.equals(SqlAction.NAME) || + action.equals(SqlTranslateAction.NAME); } private static boolean isTranslatedToBulkAction(String action) { @@ -497,6 +502,7 @@ public class AuthorizationService extends AbstractComponent { 
action.equals(SearchTransportService.QUERY_SCROLL_ACTION_NAME) || action.equals(SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME) || action.equals(ClearScrollAction.NAME) || + action.equals(SqlClearCursorAction.NAME) || action.equals(SearchTransportService.CLEAR_SCROLL_CONTEXTS_ACTION_NAME); } diff --git a/plugin/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/plugin/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index 394768ea516..2a2c60f667f 100644 --- a/plugin/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/plugin/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -53,14 +53,22 @@ public class XPackLicenseStateTests extends ESTestCase { return randomFrom(OperationMode.values()); } - static OperationMode randomTrialStandardGoldOrPlatinumMode() { + public static OperationMode randomTrialStandardGoldOrPlatinumMode() { return randomFrom(TRIAL, STANDARD, GOLD, PLATINUM); } - static OperationMode randomTrialOrPlatinumMode() { + public static OperationMode randomTrialOrPlatinumMode() { return randomFrom(TRIAL, PLATINUM); } + public static OperationMode randomTrialBasicStandardGoldOrPlatinumMode() { + return randomFrom(TRIAL, BASIC, STANDARD, GOLD, PLATINUM); + } + + public static OperationMode randomBasicStandardOrGold() { + return randomFrom(BASIC, STANDARD, GOLD); + } + public void testSecurityDefaults() { XPackLicenseState licenseState = new XPackLicenseState(); assertThat(licenseState.isAuthAllowed(), is(true)); @@ -310,4 +318,83 @@ public class XPackLicenseStateTests extends ESTestCase { assertAllowed(PLATINUM, false, XPackLicenseState::isLogstashAllowed, false); assertAllowed(STANDARD, false, XPackLicenseState::isLogstashAllowed, false); } + + public void testSqlDefaults() { + XPackLicenseState licenseState = new XPackLicenseState(); + assertThat(licenseState.isSqlAllowed(), is(true)); + assertThat(licenseState.isJdbcAllowed(), is(true)); + } + + public void 
testSqlBasic() { + XPackLicenseState licenseState = new XPackLicenseState(); + licenseState.update(BASIC, true); + + assertThat(licenseState.isSqlAllowed(), is(true)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlBasicExpired() { + XPackLicenseState licenseState = new XPackLicenseState(); + licenseState.update(BASIC, false); + + assertThat(licenseState.isSqlAllowed(), is(false)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlStandard() { + XPackLicenseState licenseState = new XPackLicenseState(); + licenseState.update(STANDARD, true); + + assertThat(licenseState.isSqlAllowed(), is(true)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlStandardExpired() { + XPackLicenseState licenseState = new XPackLicenseState(); + licenseState.update(STANDARD, false); + + assertThat(licenseState.isSqlAllowed(), is(false)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlGold() { + XPackLicenseState licenseState = new XPackLicenseState(); + licenseState.update(GOLD, true); + + assertThat(licenseState.isSqlAllowed(), is(true)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlGoldExpired() { + XPackLicenseState licenseState = new XPackLicenseState(); + licenseState.update(GOLD, false); + + assertThat(licenseState.isSqlAllowed(), is(false)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlPlatinum() { + XPackLicenseState licenseState = new XPackLicenseState(); + licenseState.update(PLATINUM, true); + + assertThat(licenseState.isSqlAllowed(), is(true)); + assertThat(licenseState.isJdbcAllowed(), is(true)); + } + + public void testSqlPlatinumExpired() { + XPackLicenseState licenseState = new XPackLicenseState(); + licenseState.update(PLATINUM, false); + + assertThat(licenseState.isSqlAllowed(), is(false)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void 
testSqlAckAnyToTrialOrPlatinum() { + assertAckMesssages(XPackPlugin.SQL, randomMode(), randomTrialOrPlatinumMode(), 0); + } + + public void testSqlAckTrialOrPlatinumToNotTrialOrPlatinum() { + assertAckMesssages(XPackPlugin.SQL, randomTrialOrPlatinumMode(), randomBasicStandardOrGold(), 1); + } + } diff --git a/plugin/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/plugin/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index ae1108f57fa..0c362dc2a56 100644 --- a/plugin/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/plugin/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -5,15 +5,6 @@ */ package org.elasticsearch.xpack.security.authz; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -129,9 +120,20 @@ import org.elasticsearch.xpack.security.user.ElasticUser; import org.elasticsearch.xpack.security.user.SystemUser; import org.elasticsearch.xpack.security.user.User; import org.elasticsearch.xpack.security.user.XPackUser; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlRequest; import org.junit.Before; import org.mockito.Mockito; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + import static org.elasticsearch.test.SecurityTestsUtils.assertAuthenticationException; import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationException; import static 
org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationExceptionRunAs; @@ -306,6 +308,17 @@ public class AuthorizationServiceTests extends ESTestCase { verifyNoMoreInteractions(auditTrail); } + public void testUserWithNoRolesCannotSql() { + TransportRequest request = new SqlRequest(); + User user = new User("test user"); + mockEmptyMetaData(); + assertThrowsAuthorizationException( + () -> authorize(createAuthentication(user), SqlAction.NAME, request), + SqlAction.NAME, "test user"); + verify(auditTrail).accessDenied(user, SqlAction.NAME, request, Role.EMPTY.names()); + verifyNoMoreInteractions(auditTrail); + } + /** * Verifies that the behaviour tested in {@link #testUserWithNoRolesCanPerformRemoteSearch} * does not work for requests that are not remote-index-capable. @@ -323,13 +336,19 @@ public class AuthorizationServiceTests extends ESTestCase { } public void testUnknownRoleCausesDenial() { - TransportRequest request = new SearchRequest(); + @SuppressWarnings("unchecked") + Tuple tuple = randomFrom( + new Tuple<>(SearchAction.NAME, new SearchRequest()), + new Tuple<>(IndicesExistsAction.NAME, new IndicesExistsRequest()), + new Tuple<>(SqlAction.NAME, new SqlRequest())); + String action = tuple.v1(); + TransportRequest request = tuple.v2(); User user = new User("test user", "non-existent-role"); mockEmptyMetaData(); assertThrowsAuthorizationException( - () -> authorize(createAuthentication(user), "indices:a", request), - "indices:a", "test user"); - verify(auditTrail).accessDenied(user, "indices:a", request, Role.EMPTY.names()); + () -> authorize(createAuthentication(user), action, request), + action, "test user"); + verify(auditTrail).accessDenied(user, action, request, Role.EMPTY.names()); verifyNoMoreInteractions(auditTrail); } @@ -348,16 +367,22 @@ public class AuthorizationServiceTests extends ESTestCase { } public void testThatRoleWithNoIndicesIsDenied() { - TransportRequest request = new IndicesExistsRequest("a"); + 
@SuppressWarnings("unchecked") + Tuple tuple = randomFrom( + new Tuple<>(SearchAction.NAME, new SearchRequest()), + new Tuple<>(IndicesExistsAction.NAME, new IndicesExistsRequest()), + new Tuple<>(SqlAction.NAME, new SqlRequest())); + String action = tuple.v1(); + TransportRequest request = tuple.v2(); User user = new User("test user", "no_indices"); RoleDescriptor role = new RoleDescriptor("a_role", null, null, null); roleMap.put("no_indices", role); mockEmptyMetaData(); assertThrowsAuthorizationException( - () -> authorize(createAuthentication(user), "indices:a", request), - "indices:a", "test user"); - verify(auditTrail).accessDenied(user, "indices:a", request, new String[] { role.getName() }); + () -> authorize(createAuthentication(user), action, request), + action, "test user"); + verify(auditTrail).accessDenied(user, action, request, new String[] { role.getName() }); verifyNoMoreInteractions(auditTrail); } @@ -432,7 +457,8 @@ public class AuthorizationServiceTests extends ESTestCase { new String[] { role.getName() }); authorize(createAuthentication(user), SearchTransportService.QUERY_SCROLL_ACTION_NAME, request); - verify(auditTrail).accessGranted(user, SearchTransportService.QUERY_SCROLL_ACTION_NAME, request, new String[] { role.getName() }); + verify(auditTrail).accessGranted(user, SearchTransportService.QUERY_SCROLL_ACTION_NAME, request, + new String[] { role.getName() }); authorize(createAuthentication(user), SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME, request); verify(auditTrail).accessGranted(user, SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME, request, @@ -507,6 +533,7 @@ public class AuthorizationServiceTests extends ESTestCase { assertThrowsAuthorizationException( () -> authorize(createAuthentication(anonymousUser), "indices:a", request), "indices:a", anonymousUser.principal()); + verify(auditTrail).accessDenied(anonymousUser, "indices:a", request, new String[] { role.getName() }); verifyNoMoreInteractions(auditTrail); 
verify(clusterService, times(1)).state(); @@ -551,7 +578,7 @@ public class AuthorizationServiceTests extends ESTestCase { () -> authorize(createAuthentication(user), GetIndexAction.NAME, request)); assertThat(nfe.getIndex(), is(notNullValue())); assertThat(nfe.getIndex().getName(), is("not-an-index-*")); - verify(auditTrail).accessDenied(user, GetIndexAction.NAME, request, new String[] { role.getName() }); + verify(auditTrail).accessDenied(user, GetIndexAction.NAME, request, new String[]{ role.getName() }); verifyNoMoreInteractions(auditTrail); verify(clusterService).state(); verify(state, times(1)).metaData(); @@ -701,7 +728,7 @@ public class AuthorizationServiceTests extends ESTestCase { } // we should allow waiting for the health of the index or any index if the user has this permission - ClusterHealthRequest request = new ClusterHealthRequest(SecurityLifecycleService.SECURITY_INDEX_NAME); + TransportRequest request = new ClusterHealthRequest(SecurityLifecycleService.SECURITY_INDEX_NAME); authorize(createAuthentication(user), ClusterHealthAction.NAME, request); verify(auditTrail).accessGranted(user, ClusterHealthAction.NAME, request, new String[] { role.getName() }); @@ -709,6 +736,7 @@ public class AuthorizationServiceTests extends ESTestCase { request = new ClusterHealthRequest(SecurityLifecycleService.SECURITY_INDEX_NAME, "foo", "bar"); authorize(createAuthentication(user), ClusterHealthAction.NAME, request); verify(auditTrail).accessGranted(user, ClusterHealthAction.NAME, request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); SearchRequest searchRequest = new SearchRequest("_all"); authorize(createAuthentication(user), SearchAction.NAME, searchRequest); diff --git a/plugin/src/test/java/org/elasticsearch/xpack/sql/AbstractSqlIntegTestCase.java b/plugin/src/test/java/org/elasticsearch/xpack/sql/AbstractSqlIntegTestCase.java new file mode 100644 index 00000000000..ae25ed7ed38 --- /dev/null +++ 
b/plugin/src/test/java/org/elasticsearch/xpack/sql/AbstractSqlIntegTestCase.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.reindex.ReindexPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.discovery.TestZenDiscovery; +import org.elasticsearch.xpack.XPackPlugin; +import org.elasticsearch.xpack.XPackSettings; +import org.elasticsearch.xpack.ml.MachineLearning; + +import java.util.Arrays; +import java.util.Collection; + +public abstract class AbstractSqlIntegTestCase extends ESIntegTestCase { + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); + settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); + settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); + settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false); + settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false); + settings.put(MachineLearning.AUTODETECT_PROCESS.getKey(), false); + return settings.build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(XPackPlugin.class, CommonAnalysisPlugin.class, ReindexPlugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return nodePlugins(); + } + + @Override + protected Settings transportClientSettings() { + // Plugin should be loaded on the transport client 
as well + return nodeSettings(0); + } + + @Override + protected Collection> getMockPlugins() { + return Arrays.asList(TestZenDiscovery.TestPlugin.class, TestSeedPlugin.class); + } +} + diff --git a/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlActionIT.java b/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlActionIT.java new file mode 100644 index 00000000000..20abdd66df6 --- /dev/null +++ b/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlActionIT.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse.ColumnInfo; + +import java.sql.JDBCType; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class SqlActionIT extends AbstractSqlIntegTestCase { + public void testSqlAction() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").get()); + client().prepareBulk() + .add(new IndexRequest("test", "doc", "1").source("data", "bar", "count", 42)) + .add(new IndexRequest("test", "doc", "2").source("data", "baz", "count", 43)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + ensureYellow("test"); + + boolean dataBeforeCount = randomBoolean(); + String columns = dataBeforeCount ? 
"data, count" : "count, data"; + SqlResponse response = client().prepareExecute(SqlAction.INSTANCE) + .query("SELECT " + columns + " FROM test ORDER BY count").get(); + assertThat(response.size(), equalTo(2L)); + assertThat(response.columns(), hasSize(2)); + int dataIndex = dataBeforeCount ? 0 : 1; + int countIndex = dataBeforeCount ? 1 : 0; + assertEquals(new ColumnInfo("data", "text", JDBCType.VARCHAR, 0), response.columns().get(dataIndex)); + assertEquals(new ColumnInfo("count", "long", JDBCType.BIGINT, 20), response.columns().get(countIndex)); + + assertThat(response.rows(), hasSize(2)); + assertEquals("bar", response.rows().get(0).get(dataIndex)); + assertEquals(42L, response.rows().get(0).get(countIndex)); + assertEquals("baz", response.rows().get(1).get(dataIndex)); + assertEquals(43L, response.rows().get(1).get(countIndex)); + } +} + diff --git a/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlClearCursorActionIT.java b/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlClearCursorActionIT.java new file mode 100644 index 00000000000..3964bb0cc4a --- /dev/null +++ b/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlClearCursorActionIT.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql; + +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction; +import org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction.Response; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse; +import org.elasticsearch.xpack.sql.session.Cursor; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; + +public class SqlClearCursorActionIT extends AbstractSqlIntegTestCase { + + public void testSqlClearCursorAction() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").get()); + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + int indexSize = randomIntBetween(100, 300); + logger.info("Indexing {} records", indexSize); + for (int i = 0; i < indexSize; i++) { + bulkRequestBuilder.add(new IndexRequest("test", "doc", "id" + i).source("data", "bar", "count", i)); + } + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + ensureYellow("test"); + + assertEquals(0, getNumberOfSearchContexts()); + + int fetchSize = randomIntBetween(5, 20); + logger.info("Fetching {} records at a time", fetchSize); + SqlResponse sqlResponse = client().prepareExecute(SqlAction.INSTANCE).query("SELECT * FROM test").fetchSize(fetchSize).get(); + assertEquals(fetchSize, sqlResponse.size()); + + assertThat(getNumberOfSearchContexts(), greaterThan(0L)); + assertThat(sqlResponse.cursor(), notNullValue()); + assertThat(sqlResponse.cursor(), not(equalTo(Cursor.EMPTY))); + + Response cleanCursorResponse = 
client().prepareExecute(SqlClearCursorAction.INSTANCE).cursor(sqlResponse.cursor()).get(); + assertTrue(cleanCursorResponse.isSucceeded()); + + assertEquals(0, getNumberOfSearchContexts()); + } + + public void testAutoCursorCleanup() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").get()); + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + int indexSize = randomIntBetween(100, 300); + logger.info("Indexing {} records", indexSize); + for (int i = 0; i < indexSize; i++) { + bulkRequestBuilder.add(new IndexRequest("test", "doc", "id" + i).source("data", "bar", "count", i)); + } + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + ensureYellow("test"); + + assertEquals(0, getNumberOfSearchContexts()); + + int fetchSize = randomIntBetween(5, 20); + logger.info("Fetching {} records at a time", fetchSize); + SqlResponse sqlResponse = client().prepareExecute(SqlAction.INSTANCE).query("SELECT * FROM test").fetchSize(fetchSize).get(); + assertEquals(fetchSize, sqlResponse.size()); + + assertThat(getNumberOfSearchContexts(), greaterThan(0L)); + assertThat(sqlResponse.cursor(), notNullValue()); + assertThat(sqlResponse.cursor(), not(equalTo(Cursor.EMPTY))); + + long fetched = sqlResponse.size(); + do { + sqlResponse = client().prepareExecute(SqlAction.INSTANCE).cursor(sqlResponse.cursor()).get(); + fetched += sqlResponse.size(); + } while (sqlResponse.cursor().equals(Cursor.EMPTY) == false); + assertEquals(indexSize, fetched); + + Response cleanCursorResponse = client().prepareExecute(SqlClearCursorAction.INSTANCE).cursor(sqlResponse.cursor()).get(); + assertFalse(cleanCursorResponse.isSucceeded()); + + assertEquals(0, getNumberOfSearchContexts()); + } + + private long getNumberOfSearchContexts() { + return client().admin().indices().prepareStats("test").clear().setSearch(true).get() + .getIndex("test").getTotal().getSearch().getOpenContexts(); + } +} diff --git 
a/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlDisabledIT.java b/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlDisabledIT.java new file mode 100644 index 00000000000..6b9b1f346a9 --- /dev/null +++ b/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlDisabledIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.XPackSettings; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction; + +import static org.hamcrest.CoreMatchers.either; +import static org.hamcrest.CoreMatchers.startsWith; + +public class SqlDisabledIT extends AbstractSqlIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(XPackSettings.SQL_ENABLED.getKey(), false) + .build(); + } + + @Override + protected Settings transportClientSettings() { + return Settings.builder() + .put(super.transportClientSettings()) + .put(XPackSettings.SQL_ENABLED.getKey(), randomBoolean()) + .build(); + } + + public void testSqlAction() throws Exception { + Throwable throwable = expectThrows(Throwable.class, + () -> client().prepareExecute(SqlAction.INSTANCE).query("SHOW tables").get()); + assertThat(throwable.getMessage(), + either(startsWith("no proxy found for action")) // disabled on client + .or(startsWith("failed to find action")) // disabled on proxy client + .or(startsWith("No handler for action [indices:data/read/sql]"))); // disabled on server + } +} + diff --git a/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlLicenseIT.java b/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlLicenseIT.java new file mode 100644 index 00000000000..f1822b0cb81 --- 
/dev/null +++ b/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlLicenseIT.java @@ -0,0 +1,206 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +import org.apache.http.HttpEntity; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.license.AbstractLicensesIntegrationTestCase; +import org.elasticsearch.license.License; +import org.elasticsearch.license.License.OperationMode; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.transport.Netty4Plugin; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableRequest; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableResponse; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse; +import org.elasticsearch.xpack.sql.plugin.SqlTranslateAction; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.Response; +import org.hamcrest.Matchers; +import org.junit.Before; + +import java.io.DataInput; 
+import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Locale; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.license.XPackLicenseStateTests.randomBasicStandardOrGold; +import static org.elasticsearch.license.XPackLicenseStateTests.randomTrialBasicStandardGoldOrPlatinumMode; +import static org.elasticsearch.license.XPackLicenseStateTests.randomTrialOrPlatinumMode; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class SqlLicenseIT extends AbstractLicensesIntegrationTestCase { + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Before + public void resetLicensing() throws Exception { + enableJdbcLicensing(); + } + + @Override + protected Collection> nodePlugins() { + // Add Netty so we can test JDBC licensing because only exists on the REST layer. + List> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(Netty4Plugin.class); + return plugins; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + // Enable http so we can test JDBC licensing because only exists on the REST layer. 
+ return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) + .build(); + } + + private static OperationMode randomValidSqlLicenseType() { + return randomTrialBasicStandardGoldOrPlatinumMode(); + } + + private static OperationMode randomInvalidSqlLicenseType() { + return OperationMode.MISSING; + } + + private static OperationMode randomValidJdbcLicenseType() { + return randomTrialOrPlatinumMode(); + } + + private static OperationMode randomInvalidJdbcLicenseType() { + return randomBasicStandardOrGold(); + } + + public void enableSqlLicensing() throws Exception { + updateLicensing(randomValidSqlLicenseType()); + } + + public void disableSqlLicensing() throws Exception { + updateLicensing(randomInvalidSqlLicenseType()); + } + + public void enableJdbcLicensing() throws Exception { + updateLicensing(randomValidJdbcLicenseType()); + } + + public void disableJdbcLicensing() throws Exception { + updateLicensing(randomInvalidJdbcLicenseType()); + } + + public void updateLicensing(OperationMode licenseOperationMode) throws Exception { + String licenseType = licenseOperationMode.name().toLowerCase(Locale.ROOT); + wipeAllLicenses(); + if (licenseType.equals("missing")) { + putLicenseTombstone(); + } else { + License license = org.elasticsearch.license.TestUtils.generateSignedLicense(licenseType, TimeValue.timeValueMinutes(1)); + putLicense(license); + } + } + + public void testSqlActionLicense() throws Exception { + setupTestIndex(); + disableSqlLicensing(); + + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, + () -> client().prepareExecute(SqlAction.INSTANCE).query("SELECT * FROM test").get()); + assertThat(e.getMessage(), equalTo("current license is non-compliant for [sql]")); + enableSqlLicensing(); + + SqlResponse response = client().prepareExecute(SqlAction.INSTANCE).query("SELECT * FROM test").get(); 
+ assertThat(response.size(), Matchers.equalTo(2L)); + } + + public void testSqlTranslateActionLicense() throws Exception { + setupTestIndex(); + disableSqlLicensing(); + + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, + () -> client().prepareExecute(SqlTranslateAction.INSTANCE).query("SELECT * FROM test").get()); + assertThat(e.getMessage(), equalTo("current license is non-compliant for [sql]")); + enableSqlLicensing(); + + SqlTranslateAction.Response response = client().prepareExecute(SqlTranslateAction.INSTANCE).query("SELECT * FROM test").get(); + SearchSourceBuilder source = response.source(); + assertThat(source.docValueFields(), Matchers.contains("count")); + FetchSourceContext fetchSource = source.fetchSource(); + assertThat(fetchSource.includes(), Matchers.arrayContaining("data")); + } + + public void testJdbcActionLicense() throws Exception { + setupTestIndex(); + disableJdbcLicensing(); + + Request request = new MetaTableRequest("test"); + ResponseException responseException = expectThrows(ResponseException.class, () -> jdbc(request)); + assertThat(responseException.getMessage(), containsString("current license is non-compliant for [jdbc]")); + assertThat(responseException.getMessage(), containsString("security_exception")); + + + enableJdbcLicensing(); + Response response = jdbc(request); + assertThat(response, instanceOf(MetaTableResponse.class)); + } + + private Response jdbc(Request request) throws IOException { + // Convert the request to the HTTP entity that JDBC uses + HttpEntity entity; + try (BytesStreamOutput bytes = new BytesStreamOutput()) { + DataOutput out = new DataOutputStream(bytes); + Proto.INSTANCE.writeRequest(request, out); + entity = new ByteArrayEntity(BytesRef.deepCopyOf(bytes.bytes().toBytesRef()).bytes, ContentType.APPLICATION_JSON); + } + + // Execute + InputStream response = getRestClient().performRequest("POST", "/_xpack/sql/jdbc", emptyMap(), entity).getEntity().getContent(); + + // 
Deserialize bytes to response like JDBC does + try { + DataInput in = new DataInputStream(response); + return Proto.INSTANCE.readResponse(request, in); + } finally { + response.close(); + } + } + + // TODO test SqlGetIndicesAction. Skipping for now because of lack of serialization support. + + private void setupTestIndex() { + assertAcked(client().admin().indices().prepareCreate("test").get()); + client().prepareBulk() + .add(new IndexRequest("test", "doc", "1").source("data", "bar", "count", 42)) + .add(new IndexRequest("test", "doc", "2").source("data", "baz", "count", 43)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + } + +} diff --git a/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlTranslateActionIT.java b/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlTranslateActionIT.java new file mode 100644 index 00000000000..9ff5588bceb --- /dev/null +++ b/plugin/src/test/java/org/elasticsearch/xpack/sql/SqlTranslateActionIT.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql; + +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.xpack.sql.plugin.SqlTranslateAction; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +public class SqlTranslateActionIT extends AbstractSqlIntegTestCase { + + public void testSqlTranslateAction() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").get()); + client().prepareBulk() + .add(new IndexRequest("test", "doc", "1").source("data", "bar", "count", 42)) + .add(new IndexRequest("test", "doc", "2").source("data", "baz", "count", 43)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + ensureYellow("test"); + + boolean columnOrder = randomBoolean(); + String columns = columnOrder ? 
"data, count" : "count, data"; + SqlTranslateAction.Response response = client().prepareExecute(SqlTranslateAction.INSTANCE) + .query("SELECT " + columns + " FROM test ORDER BY count").get(); + SearchSourceBuilder source = response.source(); + FetchSourceContext fetch = source.fetchSource(); + assertEquals(true, fetch.fetchSource()); + assertArrayEquals(new String[] { "data" }, fetch.includes()); + assertEquals(singletonList("count"), source.docValueFields()); + assertEquals(singletonList(SortBuilders.fieldSort("count")), source.sorts()); + } +} diff --git a/plugin/src/test/resources/rest-api-spec/api/xpack.sql.clear_cursor.json b/plugin/src/test/resources/rest-api-spec/api/xpack.sql.clear_cursor.json new file mode 100644 index 00000000000..d82e499c701 --- /dev/null +++ b/plugin/src/test/resources/rest-api-spec/api/xpack.sql.clear_cursor.json @@ -0,0 +1,15 @@ +{ + "xpack.sql.clear_cursor": { + "documentation": "Clear SQL cursor", + "methods": [ "POST"], + "url": { + "path": "/_xpack/sql/close", + "paths": [ "/_xpack/sql/close" ], + "parts": {} + }, + "body": { + "description" : "Specify the cursor value in the `cursor` element to clean the cursor.", + "required" : true + } + } +} diff --git a/plugin/src/test/resources/rest-api-spec/api/xpack.sql.query.json b/plugin/src/test/resources/rest-api-spec/api/xpack.sql.query.json new file mode 100644 index 00000000000..60bbcda8cad --- /dev/null +++ b/plugin/src/test/resources/rest-api-spec/api/xpack.sql.query.json @@ -0,0 +1,21 @@ +{ + "xpack.sql.query": { + "documentation": "Execute SQL", + "methods": [ "POST", "GET" ], + "url": { + "path": "/_xpack/sql", + "paths": [ "/_xpack/sql" ], + "parts": {}, + "params": { + "format": { + "type" : "string", + "description" : "a short version of the Accept header, e.g. json, yaml" + } + } + }, + "body": { + "description" : "Use the `query` element to start a query. 
Use the `cursor` element to continue a query.", + "required" : true + } + } +} diff --git a/plugin/src/test/resources/rest-api-spec/api/xpack.sql.translate.json b/plugin/src/test/resources/rest-api-spec/api/xpack.sql.translate.json new file mode 100644 index 00000000000..9b854665a71 --- /dev/null +++ b/plugin/src/test/resources/rest-api-spec/api/xpack.sql.translate.json @@ -0,0 +1,16 @@ +{ + "xpack.sql.translate": { + "documentation": "Translate SQL into Elasticsearch queries", + "methods": [ "POST", "GET" ], + "url": { + "path": "/_xpack/sql/translate", + "paths": [ "/_xpack/sql/translate" ], + "parts": {}, + "params": {} + }, + "body": { + "description" : "Specify the query in the `query` element.", + "required" : true + } + } + } diff --git a/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml b/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml new file mode 100644 index 00000000000..033d3223002 --- /dev/null +++ b/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml @@ -0,0 +1,120 @@ +--- +setup: + - do: + bulk: + refresh: true + body: + - index: + _index: test + _type: doc + _id: 1 + - str: test1 + int: 1 + - index: + _index: test + _type: doc + _id: 2 + - str: test2 + int: 2 + - index: + _index: test + _type: doc + _id: 3 + - str: test3 + int: 3 + +--- +"Execute some SQL": + - do: + xpack.sql.query: + format: json + body: + query: "SELECT * FROM test ORDER BY int asc" + - match: { columns.0.name: int } + - match: { columns.1.name: str } + - match: { rows.0.0: 1 } + - match: { rows.0.1: test1 } + - match: { rows.1.0: 2 } + - match: { rows.1.1: test2 } + - match: { rows.2.0: 3 } + - match: { rows.2.1: test3 } + +--- +"Paging through results": + - do: + xpack.sql.query: + format: json + body: + query: "SELECT * FROM test ORDER BY int asc" + fetch_size: 2 + - match: { columns.0.name: int } + - match: { columns.1.name: str } + - match: { rows.0.0: 1 } + - match: { rows.0.1: test1 } + - match: { rows.1.0: 2 } + - match: { rows.1.1: test2 } + - 
is_true: cursor + - set: { cursor: cursor } + + - do: + xpack.sql.query: + format: json + body: + cursor: "$cursor" + - match: { rows.0.0: 3 } + - match: { rows.0.1: test3 } + - is_false: columns + - is_true: cursor + - set: { cursor: cursor } + + - do: + xpack.sql.query: + format: json + body: + cursor: "$cursor" + - is_false: columns + - is_false: cursor + - length: { rows: 0 } + +--- +"Getting textual representation": + - do: + xpack.sql.query: + format: text + body: + query: "SELECT * FROM test ORDER BY int asc" + - match: + $body: | + /^ \s+ int \s+ \| \s+ str \s+ \n + ---------------\+---------------\n + 1 \s+ \|test1 \s+ \n + 2 \s+ \|test2 \s+ \n + 3 \s+ \|test3 \s+ \n + $/ + +--- +"Clean cursor": + - do: + xpack.sql.query: + format: json + body: + query: "SELECT * FROM test ORDER BY int asc" + fetch_size: 2 + - match: { columns.0.name: int } + - match: { columns.1.name: str } + - match: { rows.0.0: 1 } + - match: { rows.0.1: test1 } + - is_true: cursor + - set: { cursor: cursor} + + - do: + xpack.sql.clear_cursor: + body: + cursor: "$cursor" + - match: { "succeeded": true } + + - do: + indices.stats: { index: 'test' } + + - match: { indices.test.total.search.open_contexts: 0 } + diff --git a/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml b/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml new file mode 100644 index 00000000000..b3d93e52988 --- /dev/null +++ b/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml @@ -0,0 +1,29 @@ +--- +"Translate SQL": + - do: + bulk: + refresh: true + body: + - index: + _index: test + _type: doc + _id: 1 + - str: test1 + int: 1 + + - do: + xpack.sql.translate: + body: + query: "SELECT * FROM test ORDER BY int asc" + - match: + $body: + size: 1000 + _source: + includes: + - str + excludes: [] + docvalue_fields: + - int + sort: + - int: + order: asc diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java 
b/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 37673239508..23c81d13fc5 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -276,6 +276,23 @@ public class FullClusterRestartIT extends ESRestTestCase { } } + public void testSqlFailsOnIndexWithTwoTypes() throws IOException { + // TODO this isn't going to trigger until we backport to 6.1 + assumeTrue("It is only possible to build an index that sql doesn't like before 6.0.0", + oldClusterVersion.before(Version.V_6_0_0_alpha1)); + if (runningAgainstOldCluster) { + client().performRequest("POST", "/testsqlfailsonindexwithtwotypes/type1", emptyMap(), + new StringEntity("{}", ContentType.APPLICATION_JSON)); + client().performRequest("POST", "/testsqlfailsonindexwithtwotypes/type2", emptyMap(), + new StringEntity("{}", ContentType.APPLICATION_JSON)); + return; + } + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest("POST", "/_xpack/sql", emptyMap(), + new StringEntity("{\"query\":\"SELECT * FROM testsqlfailsonindexwithtwotypes\"}", ContentType.APPLICATION_JSON))); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("Invalid index testsqlfailsonindexwithtwotypes; contains more than one type")); + } + private String loadWatch(String watch) throws IOException { return StreamsUtils.copyToStringFromClasspath("/org/elasticsearch/xpack/restart/" + watch); } diff --git a/qa/sql/build.gradle b/qa/sql/build.gradle new file mode 100644 index 00000000000..d1c64477458 --- /dev/null +++ b/qa/sql/build.gradle @@ -0,0 +1,175 @@ +import org.elasticsearch.gradle.precommit.PrecommitTasks +import org.elasticsearch.gradle.test.RunTask + +description = 'Integration tests for SQL' +apply plugin: 'elasticsearch.build' + 
+dependencies { + compile "org.elasticsearch.test:framework:${versions.elasticsearch}" + + // JDBC testing dependencies + if (false == isEclipse && false == isIdea) { + // If we're not doing IDE stuff use the shadowed jar + compile(project(path: ':x-pack-elasticsearch:sql:jdbc', configuration: 'shadow')) + } else { + /* If we're doing IDE stuff then use then use the project + * dependency so the IDEs don't get confused. Transitive + * deps are OK here too because this is the only time we + * pull all of those deps in. We make sure exclude them + * below so they don't cause jar hell with the shadowed + * jar. */ + compile(project(':x-pack-elasticsearch:sql:jdbc')) + } + compile "net.sourceforge.csvjdbc:csvjdbc:1.0.34" + runtime "com.h2database:h2:1.4.194" + // used for running debug tests + runtime 'org.antlr:antlr4-runtime:4.5.3' + + // There are *no* CLI testing dependencies because we + // communicate fork a new CLI process when we need it. + + // Used to support embedded testing mode + compile(project(':x-pack-elasticsearch:sql:server')) { + transitive = false + } + compile(project(':x-pack-elasticsearch:sql:cli-proto')) { + transitive = false + } + compile "org.elasticsearch.client:transport:${version}" +} + +/* disable unit tests because these are all integration tests used + * other qa projects. 
*/ +test.enabled = false + +dependencyLicenses.enabled = false + +// Allow for com.sun.net.httpserver.* usage for embedded mode +eclipse { + classpath.file { + whenMerged { cp -> + def con = entries.find { e -> + e.kind == "con" && e.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") + } + con.accessRules.add(new org.gradle.plugins.ide.eclipse.model.AccessRule( + "accessible", "com/sun/net/httpserver/*")) + } + } +} +forbiddenApisMain { + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +// the main files are actually test files, so use the appropriate forbidden api sigs +forbiddenApisMain { + signaturesURLs = [PrecommitTasks.getResource('/forbidden/es-all-signatures.txt'), + PrecommitTasks.getResource('/forbidden/es-test-signatures.txt')] +} + +thirdPartyAudit.excludes = [ + // H2 dependencies that we don't actually use.... + 'javax.servlet.ServletConfig', + 'javax.servlet.ServletContext', + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + 'javax.servlet.ServletOutputStream', + 'javax.servlet.http.HttpServlet', + 'javax.servlet.http.HttpServletRequest', + 'javax.servlet.http.HttpServletResponse', + 'org.apache.lucene.document.Field$Index', + 'org.apache.lucene.queryParser.QueryParser', + 'org.osgi.framework.BundleActivator', + 'org.osgi.framework.BundleContext', + 'org.osgi.service.jdbc.DataSourceFactory', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + ] + +subprojects { + apply plugin: 'elasticsearch.standalone-rest-test' + configurations { + cliFixture + } + dependencies { + /* Since we're a standalone rest test we actually get transitive + * dependencies but we don't really want them because they cause + * all kinds of trouble with the jar hell checks. So we suppress + * them explicitly for non-es projects. 
*/ + testCompile(project(':x-pack-elasticsearch:qa:sql')) { + transitive = false + } + testCompile "org.elasticsearch.test:framework:${versions.elasticsearch}" + + // JDBC testing dependencies + testRuntime(project(':x-pack-elasticsearch:sql:jdbc')) { + if (false == isEclipse && false == isIdea) { + /* Skip the transitive dependencies of the server when outside + * of an IDE because outside of an IDE we use the jdbc jar + * which includes all the transitive dependencies *already*. + * If we didn't skip these dependencies the jar hell checks + * would fail. And we need the transitive dependencies to + * run in embedded mode but only do that inside of an IDE. */ + transitive = false + } + } + testRuntime("net.sourceforge.csvjdbc:csvjdbc:1.0.34") { + transitive = false + } + testRuntime("com.h2database:h2:1.4.194") { + transitive = false + } + testRuntime("org.antlr:antlr4-runtime:4.5.3") { + transitive = false + } + + cliFixture project(':x-pack-elasticsearch:test:sql-cli-fixture') + + // Used to support embedded testing mode + testRuntime(project(':x-pack-elasticsearch:sql:server')) { + transitive = false + } + testRuntime "org.elasticsearch.client:transport:${version}" + } + + if (project.name != 'security') { + // The security project just configures it subprojects + apply plugin: 'elasticsearch.rest-test' + + task cliFixture(type: org.elasticsearch.gradle.test.AntFixture) { + Project cli = project(':x-pack-elasticsearch:sql:cli') + dependsOn project.configurations.cliFixture + dependsOn cli.jar + executable = new File(project.javaHome, 'bin/java') + env 'CLASSPATH', "${ -> project.configurations.cliFixture.asPath }" + args 'org.elasticsearch.xpack.sql.cli.fixture.CliFixture', + baseDir, "${ -> cli.jar.outputs.files.singleFile}" + } + + integTestCluster { + distribution = 'zip' + plugin project(':x-pack-elasticsearch:plugin').path + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + 
setting 'script.max_compilations_rate', '1000/1m' + dependsOn cliFixture + } + + integTestRunner { + systemProperty 'tests.cli.fixture', "${ -> cliFixture.addressAndPort }" + finalizedBy cliFixture.stopTask + } + + task run(type: RunTask) { + distribution = 'zip' + plugin project(':x-pack-elasticsearch:plugin').path + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'script.max_compilations_rate', '1000/1m' + dependsOn cliFixture + } + run.finalizedBy cliFixture.stopTask + } +} diff --git a/qa/sql/multinode/build.gradle b/qa/sql/multinode/build.gradle new file mode 100644 index 00000000000..fa294f63335 --- /dev/null +++ b/qa/sql/multinode/build.gradle @@ -0,0 +1,4 @@ +integTestCluster { + numNodes = 2 + setting 'xpack.security.enabled', 'false' +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliErrorsIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliErrorsIT.java new file mode 100644 index 00000000000..49161ac5ae1 --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliErrorsIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.cli.ErrorsTestCase; + +public class CliErrorsIT extends ErrorsTestCase { +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliFetchSizeIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliFetchSizeIT.java new file mode 100644 index 00000000000..903a60ba943 --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliFetchSizeIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.cli.FetchSizeTestCase; + +public class CliFetchSizeIT extends FetchSizeTestCase { +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliSelectIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliSelectIT.java new file mode 100644 index 00000000000..af6f986e1bc --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliSelectIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.cli.SelectTestCase; + +public class CliSelectIT extends SelectTestCase { +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliShowIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliShowIT.java new file mode 100644 index 00000000000..07e544094d5 --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliShowIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.cli.ShowTestCase; + +public class CliShowIT extends ShowTestCase { +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcConnectionIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcConnectionIT.java new file mode 100644 index 00000000000..d092a577646 --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcConnectionIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.jdbc.ConnectionTestCase; + +public class JdbcConnectionIT extends ConnectionTestCase { +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcCsvSpecIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcCsvSpecIT.java new file mode 100644 index 00000000000..e4f17f527c1 --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcCsvSpecIT.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.jdbc.CsvSpecTestCase; + +public class JdbcCsvSpecIT extends CsvSpecTestCase { + public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, testCase); + } +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcDatabaseMetaDataIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcDatabaseMetaDataIT.java new file mode 100644 index 00000000000..9bb663f190a --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcDatabaseMetaDataIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.jdbc.DatabaseMetaDataTestCase; + +public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase { +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcErrorsIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcErrorsIT.java new file mode 100644 index 00000000000..946a9b6c731 --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcErrorsIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.jdbc.ErrorsTestCase; + +public class JdbcErrorsIT extends ErrorsTestCase { +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcFetchSizeIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcFetchSizeIT.java new file mode 100644 index 00000000000..ab623ee1590 --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcFetchSizeIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.jdbc.FetchSizeTestCase; + +public class JdbcFetchSizeIT extends FetchSizeTestCase { +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcShowTablesIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcShowTablesIT.java new file mode 100644 index 00000000000..7af41a5c9d8 --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcShowTablesIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.jdbc.ShowTablesTestCase; + +public class JdbcShowTablesIT extends ShowTablesTestCase { +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcSimpleExampleIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcSimpleExampleIT.java new file mode 100644 index 00000000000..5b4b94f29de --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcSimpleExampleIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.jdbc.SimpleExampleTestCase; + +public class JdbcSimpleExampleIT extends SimpleExampleTestCase { +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcSqlSpecIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcSqlSpecIT.java new file mode 100644 index 00000000000..dddda28f990 --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcSqlSpecIT.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase; + +public class JdbcSqlSpecIT extends SqlSpecTestCase { + public JdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber, query); + } +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlIT.java new file mode 100644 index 00000000000..231cee1f343 --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlIT.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase; + +/** + * Integration test for the rest sql action. 
The one that speaks json directly to a + * user rather than to the JDBC driver or CLI. + */ +public class RestSqlIT extends RestSqlTestCase { +} diff --git a/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java new file mode 100644 index 00000000000..1c743c95a94 --- /dev/null +++ b/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.apache.http.HttpHost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.NotEqualMessageBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.UnsupportedCharsetException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.columnInfo; + +/** + * Tests specific to multiple nodes. + */ +public class RestSqlMultinodeIT extends ESRestTestCase { + /** + * Tests count of index run across multiple nodes. 
+ */ + public void testIndexSpread() throws IOException { + int documents = between(10, 100); + createTestData(documents); + assertCount(client(), documents); + } + + /** + * Tests count against index on a node that doesn't have any shards of the index. + */ + public void testIndexOnWrongNode() throws IOException { + HttpHost firstHost = getClusterHosts().get(0); + String firstHostName = null; + + String match = firstHost.getHostName() + ":" + firstHost.getPort(); + Map nodesInfo = responseToMap(client().performRequest("GET", "/_nodes")); + @SuppressWarnings("unchecked") + Map nodes = (Map) nodesInfo.get("nodes"); + for (Map.Entry node : nodes.entrySet()) { + String name = node.getKey(); + Map nodeEntries = (Map) node.getValue(); + Map http = (Map) nodeEntries.get("http"); + List boundAddress = (List) http.get("bound_address"); + if (boundAddress.contains(match)) { + firstHostName = name; + break; + } + } + assertNotNull("Didn't find first host among published addresses", firstHostName); + + XContentBuilder index = JsonXContent.contentBuilder().prettyPrint().startObject(); + index.startObject("settings"); { + index.field("routing.allocation.exclude._name", firstHostName); + } + index.endObject(); + index.endObject(); + client().performRequest("PUT", "/test", emptyMap(), new StringEntity(index.string(), ContentType.APPLICATION_JSON)); + int documents = between(10, 100); + createTestData(documents); + + try (RestClient firstNodeClient = buildClient(restClientSettings(), new HttpHost[] {firstHost})) { + assertCount(firstNodeClient, documents); + } + } + + private void createTestData(int documents) throws UnsupportedCharsetException, IOException { + StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < documents; i++) { + int a = 3 * i; + int b = a + 1; + int c = b + 1; + bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n"); + bulk.append("{\"a\": " + a + ", \"b\": " + b + ", \"c\": " + c + "}\n"); + } + client().performRequest("PUT", "/test/test/_bulk", 
singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + } + + private Map responseToMap(Response response) throws IOException { + try (InputStream content = response.getEntity().getContent()) { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); + } + } + + private void assertCount(RestClient client, int count) throws IOException { + Map expected = new HashMap<>(); + expected.put("columns", singletonList(columnInfo("COUNT(1)", "long"))); + expected.put("rows", singletonList(singletonList(count))); + expected.put("size", 1); + + Map actual = responseToMap(client.performRequest("POST", "/_xpack/sql", singletonMap("format", "json"), + new StringEntity("{\"query\": \"SELECT COUNT(*) FROM test\"}", ContentType.APPLICATION_JSON))); + + if (false == expected.equals(actual)) { + NotEqualMessageBuilder message = new NotEqualMessageBuilder(); + message.compareMaps(actual, expected); + fail("Response does not match:\n" + message.toString()); + } + } +} diff --git a/qa/sql/no-security/build.gradle b/qa/sql/no-security/build.gradle new file mode 100644 index 00000000000..e1351ae803b --- /dev/null +++ b/qa/sql/no-security/build.gradle @@ -0,0 +1,7 @@ +integTestCluster { + setting 'xpack.security.enabled', 'false' +} + +run { + setting 'xpack.security.enabled', 'false' +} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliErrorsIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliErrorsIT.java new file mode 100644 index 00000000000..be6c003c746 --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliErrorsIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.cli.ErrorsTestCase; + +public class CliErrorsIT extends ErrorsTestCase { +} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java new file mode 100644 index 00000000000..93a410b7d21 --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.cli.CliIntegrationTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.startsWith; + +public class CliExplainIT extends CliIntegrationTestCase { + public void testExplainBasic() throws IOException { + index("test", body -> body.field("test_field", "test_value")); + + assertThat(command("EXPLAIN (PLAN PARSED) SELECT * FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("With[{}]")); + assertThat(readLine(), startsWith("\\_Project[[?*]]")); + assertThat(readLine(), startsWith(" \\_UnresolvedRelation[[index=test],null,Unknown index [test]]")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN " + (randomBoolean() ? 
"" : "(PLAN ANALYZED) ") + "SELECT * FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("Project[[test_field{r}#")); + assertThat(readLine(), startsWith("\\_SubQueryAlias[test]")); + assertThat(readLine(), startsWith(" \\_EsRelation[test][test_field{r}#")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN (PLAN OPTIMIZED) SELECT * FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("Project[[test_field{r}#")); + assertThat(readLine(), startsWith("\\_EsRelation[test][test_field{r}#")); + assertEquals("", readLine()); + + // TODO in this case we should probably remove the source filtering entirely. Right? It costs but we don't need it. + assertThat(command("EXPLAIN (PLAN EXECUTABLE) SELECT * FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("EsQueryExec[test,{")); + assertThat(readLine(), startsWith(" \"_source\" : {")); + assertThat(readLine(), startsWith(" \"includes\" : [")); + assertThat(readLine(), startsWith(" \"test_field\"")); + assertThat(readLine(), startsWith(" ],")); + assertThat(readLine(), startsWith(" \"excludes\" : [ ]")); + assertThat(readLine(), startsWith(" }")); + assertThat(readLine(), startsWith("}]")); + assertEquals("", readLine()); + } + + public void testExplainWithWhere() throws IOException { + index("test", body -> body.field("test_field", "test_value1").field("i", 1)); + index("test", body -> body.field("test_field", "test_value2").field("i", 2)); + + assertThat(command("EXPLAIN (PLAN PARSED) SELECT * FROM test WHERE i = 2"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("With[{}]")); + assertThat(readLine(), startsWith("\\_Project[[?*]]")); + assertThat(readLine(), startsWith(" \\_Filter[?i = 2]")); + assertThat(readLine(), startsWith(" 
\\_UnresolvedRelation[[index=test],null,Unknown index [test]]")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN " + (randomBoolean() ? "" : "(PLAN ANALYZED) ") + "SELECT * FROM test WHERE i = 2"), + containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("Project[[i{r}#")); + assertThat(readLine(), startsWith("\\_Filter[i{r}#")); + assertThat(readLine(), startsWith(" \\_SubQueryAlias[test]")); + assertThat(readLine(), startsWith(" \\_EsRelation[test][i{r}#")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN (PLAN OPTIMIZED) SELECT * FROM test WHERE i = 2"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("Project[[i{r}#")); + assertThat(readLine(), startsWith("\\_Filter[i{r}#")); + assertThat(readLine(), startsWith(" \\_EsRelation[test][i{r}#")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN (PLAN EXECUTABLE) SELECT * FROM test WHERE i = 2"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("EsQueryExec[test,{")); + assertThat(readLine(), startsWith(" \"query\" : {")); + assertThat(readLine(), startsWith(" \"term\" : {")); + assertThat(readLine(), startsWith(" \"i\" : {")); + assertThat(readLine(), startsWith(" \"value\" : 2,")); + assertThat(readLine(), startsWith(" \"boost\" : 1.0")); + assertThat(readLine(), startsWith(" }")); + assertThat(readLine(), startsWith(" }")); + assertThat(readLine(), startsWith(" },")); + assertThat(readLine(), startsWith(" \"_source\" : {")); + assertThat(readLine(), startsWith(" \"includes\" : [")); + assertThat(readLine(), startsWith(" \"test_field\"")); + assertThat(readLine(), startsWith(" ],")); + assertThat(readLine(), startsWith(" \"excludes\" : [ ]")); + assertThat(readLine(), startsWith(" },")); + assertThat(readLine(), startsWith(" \"docvalue_fields\" : [")); + assertThat(readLine(), 
startsWith(" \"i\"")); + assertThat(readLine(), startsWith(" ]")); + assertThat(readLine(), startsWith("}]")); + assertEquals("", readLine()); + } + + public void testExplainWithCount() throws IOException { + index("test", body -> body.field("test_field", "test_value1").field("i", 1)); + index("test", body -> body.field("test_field", "test_value2").field("i", 2)); + + assertThat(command("EXPLAIN (PLAN PARSED) SELECT COUNT(*) FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("With[{}]")); + assertThat(readLine(), startsWith("\\_Project[[?COUNT(?*)]]")); + assertThat(readLine(), startsWith(" \\_UnresolvedRelation[[index=test],null,Unknown index [test]]")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN " + (randomBoolean() ? "" : "(PLAN ANALYZED) ") + "SELECT COUNT(*) FROM test"), + containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("Aggregate[[],[COUNT(1)#")); + assertThat(readLine(), startsWith("\\_SubQueryAlias[test]")); + assertThat(readLine(), startsWith(" \\_EsRelation[test][i{r}#")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN (PLAN OPTIMIZED) SELECT COUNT(*) FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("Aggregate[[],[COUNT(1)#")); + assertThat(readLine(), startsWith("\\_EsRelation[test][i{r}#")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN (PLAN EXECUTABLE) SELECT COUNT(*) FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("EsQueryExec[test,{")); + assertThat(readLine(), startsWith(" \"size\" : 0,")); + assertThat(readLine(), startsWith(" \"_source\" : false,")); + assertThat(readLine(), startsWith(" \"stored_fields\" : \"_none_\"")); + assertThat(readLine(), startsWith("}]")); + assertEquals("", readLine()); + } 
+} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliFetchSizeIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliFetchSizeIT.java new file mode 100644 index 00000000000..e4d2ef1a0e2 --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliFetchSizeIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.cli.FetchSizeTestCase; + +public class CliFetchSizeIT extends FetchSizeTestCase { +} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliSelectIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliSelectIT.java new file mode 100644 index 00000000000..56a112df021 --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliSelectIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.cli.SelectTestCase; + +public class CliSelectIT extends SelectTestCase { +} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliShowIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliShowIT.java new file mode 100644 index 00000000000..2720b45d2a7 --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliShowIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.cli.ShowTestCase; + +public class CliShowIT extends ShowTestCase { +} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcConnectionIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcConnectionIT.java new file mode 100644 index 00000000000..e75cf6d059d --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcConnectionIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.ConnectionTestCase; + +public class JdbcConnectionIT extends ConnectionTestCase { +} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcCsvSpecIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcCsvSpecIT.java new file mode 100644 index 00000000000..09860d375b5 --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcCsvSpecIT.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.CsvSpecTestCase; + +public class JdbcCsvSpecIT extends CsvSpecTestCase { + public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, testCase); + } +} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDatabaseMetaDataIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDatabaseMetaDataIT.java new file mode 100644 index 00000000000..f653049b9a1 --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDatabaseMetaDataIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.DatabaseMetaDataTestCase; + +public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase { +} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcErrorsIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcErrorsIT.java new file mode 100644 index 00000000000..21a52b609bb --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcErrorsIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.ErrorsTestCase; + +public class JdbcErrorsIT extends ErrorsTestCase { +} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcFetchSizeIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcFetchSizeIT.java new file mode 100644 index 00000000000..b64290957c0 --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcFetchSizeIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.FetchSizeTestCase; + +public class JdbcFetchSizeIT extends FetchSizeTestCase { +} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcShowTablesIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcShowTablesIT.java new file mode 100644 index 00000000000..f68b022f6ad --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcShowTablesIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.ShowTablesTestCase; + +public class JdbcShowTablesIT extends ShowTablesTestCase { +} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSimpleExampleIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSimpleExampleIT.java new file mode 100644 index 00000000000..08539667cf9 --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSimpleExampleIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.SimpleExampleTestCase; + +public class JdbcSimpleExampleIT extends SimpleExampleTestCase { +} diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSqlSpecIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSqlSpecIT.java new file mode 100644 index 00000000000..fb658270729 --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSqlSpecIT.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase; + +public class JdbcSqlSpecIT extends SqlSpecTestCase { + public JdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber, query); + } +} \ No newline at end of file diff --git a/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/RestSqlIT.java b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/RestSqlIT.java new file mode 100644 index 00000000000..e22c8fb0852 --- /dev/null +++ b/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/RestSqlIT.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase; + +/** + * Integration test for the rest sql action. 
The one that speaks json directly to a + * user rather than to the JDBC driver or CLI. + */ +public class RestSqlIT extends RestSqlTestCase { +} diff --git a/qa/sql/security/build.gradle b/qa/sql/security/build.gradle new file mode 100644 index 00000000000..7cc6b3f6d7e --- /dev/null +++ b/qa/sql/security/build.gradle @@ -0,0 +1,57 @@ +dependencies { + testCompile(project(path: ':x-pack-elasticsearch:plugin', configuration: 'runtime')) { + transitive = false + } +} + +Project mainProject = project + +subprojects { + // Use resources from the parent project in subprojects + sourceSets { + test { + java { + srcDirs = ["${mainProject.projectDir}/src/test/java"] + } + resources { + srcDirs = ["${mainProject.projectDir}/src/test/resources"] + } + } + } + + dependencies { + testCompile(project(path: ':x-pack-elasticsearch:plugin', configuration: 'runtime')) { + transitive = false + } + } + + integTestCluster { + // Setup auditing so we can use it in some tests + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.security.audit.outputs', 'logfile' + // Setup roles used by tests + extraConfigFile 'x-pack/roles.yml', '../roles.yml' + /* Setup the one admin user that we run the tests as. + * Tests use "run as" to get different users. */ + setupCommand 'setupUser#test_admin', + 'bin/x-pack/users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' + // Subprojects override the wait condition to work properly with security + } + + integTestRunner { + systemProperty 'tests.audit.logfile', + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_access.log" + } + + run { + // Setup auditing so we can use it in some tests + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.security.audit.outputs', 'logfile' + // Setup roles used by tests + extraConfigFile 'x-pack/roles.yml', '../roles.yml' + /* Setup the one admin user that we run the tests as. + * Tests use "run as" to get different users. 
*/ + setupCommand 'setupUser#test_admin', + 'bin/x-pack/users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' + } +} diff --git a/qa/sql/security/no-ssl/build.gradle b/qa/sql/security/no-ssl/build.gradle new file mode 100644 index 00000000000..9ea95bb4a57 --- /dev/null +++ b/qa/sql/security/no-ssl/build.gradle @@ -0,0 +1,16 @@ +integTestRunner { + systemProperty 'tests.ssl.enabled', 'false' +} + +integTestCluster { + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/qa/sql/security/roles.yml b/qa/sql/security/roles.yml new file mode 100644 index 00000000000..ff7d8f1b035 --- /dev/null +++ b/qa/sql/security/roles.yml @@ -0,0 +1,60 @@ +read_all: + cluster: + - "cluster:monitor/main" # Used by JDBC's MetaData + indices: + - names: test + privileges: [read, "indices:admin/get"] + - names: bort + privileges: [read, "indices:admin/get"] + +read_something_else: + cluster: + - "cluster:monitor/main" # Used by JDBC's MetaData + indices: + - names: something_that_isnt_test + privileges: [read, "indices:admin/get"] + +read_test_a: + cluster: + - "cluster:monitor/main" # Used by JDBC's MetaData + indices: + - names: test + privileges: [read, "indices:admin/get"] + field_security: + grant: [a] + +read_test_a_and_b: + cluster: + - "cluster:monitor/main" # Used by JDBC's MetaData + indices: + - names: test + privileges: [read, "indices:admin/get"] + field_security: + grant: ["*"] + except: [c] + +read_test_without_c_3: + cluster: + - "cluster:monitor/main" # Used by JDBC's MetaData + indices: + - names: test + privileges: [read, "indices:admin/get"] + query: | + { + "bool": { + "must_not": [ + { + "match": { + "c": 3 + } + } + ] + } + } + 
+read_bort: + cluster: + - "cluster:monitor/main" # Used by JDBC's MetaData + indices: + - names: bort + privileges: [read, "indices:admin/get"] diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliErrorsIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliErrorsIT.java new file mode 100644 index 00000000000..b59c9f91282 --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliErrorsIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.cli.ErrorsTestCase; +import org.elasticsearch.xpack.qa.sql.cli.RemoteCli.SecurityConfig; + +public class CliErrorsIT extends ErrorsTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected SecurityConfig securityConfig() { + return CliSecurityIT.adminSecurityConfig(); + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliFetchSizeIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliFetchSizeIT.java new file mode 100644 index 00000000000..8420f8a2a7c --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliFetchSizeIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.cli.FetchSizeTestCase; +import org.elasticsearch.xpack.qa.sql.cli.RemoteCli.SecurityConfig; + +public class CliFetchSizeIT extends FetchSizeTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected SecurityConfig securityConfig() { + return CliSecurityIT.adminSecurityConfig(); + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSecurityIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSecurityIT.java new file mode 100644 index 00000000000..76b66ebb8ee --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSecurityIT.java @@ -0,0 +1,171 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.xpack.qa.sql.cli.RemoteCli; +import org.elasticsearch.xpack.qa.sql.cli.RemoteCli.SecurityConfig; +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.qa.sql.cli.CliIntegrationTestCase.elasticsearchAddress; +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.startsWith; + +public class CliSecurityIT extends SqlSecurityTestCase { + static SecurityConfig adminSecurityConfig() { + String keystoreLocation; + String keystorePassword; + if (RestSqlIT.SSL_ENABLED) { + Path keyStore; + try { + keyStore = PathUtils.get(RestSqlIT.class.getResource("/test-node.jks").toURI()); + } catch (URISyntaxException e) { + throw new RuntimeException("exception while reading the store", e); + } + if (!Files.exists(keyStore)) { + throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist."); + } + keystoreLocation = keyStore.toAbsolutePath().toString(); + keystorePassword = "keypass"; + } else { + keystoreLocation = null; + keystorePassword = null; + } + return new SecurityConfig(RestSqlIT.SSL_ENABLED, "test_admin", "x-pack-test-password", keystoreLocation, keystorePassword); + } + + /** + * Perform security test actions using the CLI. 
+ */ + private static class CliActions implements Actions { + private SecurityConfig userSecurity(String user) { + SecurityConfig admin = adminSecurityConfig(); + if (user == null) { + return admin; + } + return new SecurityConfig(RestSqlIT.SSL_ENABLED, user, "testpass", admin.keystoreLocation(), admin.keystorePassword()); + } + + @Override + public void queryWorksAsAdmin() throws Exception { + try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, adminSecurityConfig())) { + assertThat(cli.command("SELECT * FROM test ORDER BY a"), containsString("a | b | c")); + assertEquals("---------------+---------------+---------------", cli.readLine()); + assertThat(cli.readLine(), containsString("1 |2 |3")); + assertThat(cli.readLine(), containsString("4 |5 |6")); + assertEquals("", cli.readLine()); + } + } + + @Override + public void expectMatchesAdmin(String adminSql, String user, String userSql) throws Exception { + expectMatchesAdmin(adminSql, user, userSql, cli -> {}); + } + + @Override + public void expectScrollMatchesAdmin(String adminSql, String user, String userSql) throws Exception { + expectMatchesAdmin(adminSql, user, userSql, cli -> { + assertEquals("fetch size set to [90m1[0m", cli.command("fetch size = 1")); + assertEquals("fetch separator set to \"[90m -- fetch sep -- [0m\"", + cli.command("fetch separator = \" -- fetch sep -- \"")); + }); + } + + public void expectMatchesAdmin(String adminSql, String user, String userSql, + CheckedConsumer customizer) throws Exception { + List adminResult = new ArrayList<>(); + try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, adminSecurityConfig())) { + customizer.accept(cli); + adminResult.add(cli.command(adminSql)); + String line; + do { + line = cli.readLine(); + adminResult.add(line); + } while (false == (line.equals("[0m") || line.equals(""))); + adminResult.add(line); + } + + Iterator expected = adminResult.iterator(); + try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, 
userSecurity(user))) { + customizer.accept(cli); + assertTrue(expected.hasNext()); + assertEquals(expected.next(), cli.command(userSql)); + String line; + do { + line = cli.readLine(); + assertTrue(expected.hasNext()); + assertEquals(expected.next(), line); + } while (false == (line.equals("[0m") || line.equals(""))); + assertTrue(expected.hasNext()); + assertEquals(expected.next(), line); + assertFalse(expected.hasNext()); + } + } + + @Override + public void expectDescribe(Map columns, String user) throws Exception { + try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, userSecurity(user))) { + assertThat(cli.command("DESCRIBE test"), containsString("column | type")); + assertEquals("---------------+---------------", cli.readLine()); + for (Map.Entry column : columns.entrySet()) { + assertThat(cli.readLine(), both(startsWith(column.getKey())).and(containsString("|" + column.getValue()))); + } + assertEquals("", cli.readLine()); + } + } + + @Override + public void expectShowTables(List tables, String user) throws Exception { + try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, userSecurity(user))) { + assertThat(cli.command("SHOW TABLES"), containsString("table")); + assertEquals("---------------", cli.readLine()); + for (String table : tables) { + assertThat(cli.readLine(), containsString(table)); + } + assertEquals("", cli.readLine()); + } + } + + @Override + public void expectUnknownIndex(String user, String sql) throws Exception { + try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, userSecurity(user))) { + assertThat(cli.command(sql), containsString("Bad request")); + assertThat(cli.readLine(), containsString("Unknown index")); + } + } + + @Override + public void expectForbidden(String user, String sql) throws Exception { + /* + * Cause the CLI to skip its connection test on startup so we + * can get a forbidden exception when we run the query. 
+ */ + try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), false, userSecurity(user))) { + assertThat(cli.command(sql), containsString("is unauthorized for user [" + user + "]")); + } + } + + @Override + public void expectUnknownColumn(String user, String sql, String column) throws Exception { + try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, userSecurity(user))) { + assertThat(cli.command(sql), containsString("[1;31mBad request")); + assertThat(cli.readLine(), containsString("Unknown column [" + column + "][1;23;31m][0m")); + } + } + } + + public CliSecurityIT() { + super(new CliActions()); + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSelectIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSelectIT.java new file mode 100644 index 00000000000..706fbf13f66 --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSelectIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.cli.RemoteCli.SecurityConfig; +import org.elasticsearch.xpack.qa.sql.cli.SelectTestCase; + +public class CliSelectIT extends SelectTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? 
"https" : "http"; + } + + @Override + protected SecurityConfig securityConfig() { + return CliSecurityIT.adminSecurityConfig(); + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliShowIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliShowIT.java new file mode 100644 index 00000000000..f1f9d2a6258 --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliShowIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.cli.RemoteCli.SecurityConfig; +import org.elasticsearch.xpack.qa.sql.cli.ShowTestCase; + +public class CliShowIT extends ShowTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected SecurityConfig securityConfig() { + return CliSecurityIT.adminSecurityConfig(); + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcConnectionIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcConnectionIT.java new file mode 100644 index 00000000000..08aa73f68b9 --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcConnectionIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.ConnectionTestCase; + +import java.util.Properties; + +public class JdbcConnectionIT extends ConnectionTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties properties = super.connectionProperties(); + properties.putAll(JdbcSecurityIT.adminProperties()); + return properties; + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcCsvSpecIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcCsvSpecIT.java new file mode 100644 index 00000000000..3375b663404 --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcCsvSpecIT.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.CsvSpecTestCase; + +import java.util.Properties; + +public class JdbcCsvSpecIT extends CsvSpecTestCase { + public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, testCase); + } + + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? 
"https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties sp = super.connectionProperties(); + sp.putAll(JdbcSecurityIT.adminProperties()); + return sp; + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcDatabaseMetaDataIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcDatabaseMetaDataIT.java new file mode 100644 index 00000000000..f4aafe4090b --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcDatabaseMetaDataIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.DatabaseMetaDataTestCase; + +import java.util.Properties; + +public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties properties = super.connectionProperties(); + properties.putAll(JdbcSecurityIT.adminProperties()); + return properties; + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcErrorsIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcErrorsIT.java new file mode 100644 index 00000000000..2ed8ac7941f --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcErrorsIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.ErrorsTestCase; + +import java.util.Properties; + +public class JdbcErrorsIT extends ErrorsTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties properties = super.connectionProperties(); + properties.putAll(JdbcSecurityIT.adminProperties()); + return properties; + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcFetchSizeIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcFetchSizeIT.java new file mode 100644 index 00000000000..ac239193e99 --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcFetchSizeIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.FetchSizeTestCase; + +import java.util.Properties; + +public class JdbcFetchSizeIT extends FetchSizeTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? 
"https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties properties = super.connectionProperties(); + properties.putAll(JdbcSecurityIT.adminProperties()); + return properties; + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSecurityIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSecurityIT.java new file mode 100644 index 00000000000..2391946ceaa --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSecurityIT.java @@ -0,0 +1,359 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.action.admin.indices.get.GetIndexAction; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.xpack.qa.sql.jdbc.LocalH2; +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; +import java.util.Map; +import java.util.Properties; + +import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert.assertResultSets; +import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcIntegrationTestCase.elasticsearchAddress; +import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcIntegrationTestCase.randomKnownTimeZone; +import static org.elasticsearch.xpack.qa.sql.security.RestSqlIT.SSL_ENABLED; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; + +public class JdbcSecurityIT extends SqlSecurityTestCase { + 
static Properties adminProperties() { + // tag::admin_properties + Properties properties = new Properties(); + properties.put("user", "test_admin"); + properties.put("password", "x-pack-test-password"); + // end::admin_properties + addSslPropertiesIfNeeded(properties); + return properties; + } + + static Connection es(Properties properties) throws SQLException { + Properties props = new Properties(); + props.put("timezone", randomKnownTimeZone()); + props.putAll(properties); + String scheme = SSL_ENABLED ? "https" : "http"; + return DriverManager.getConnection("jdbc:es://" + scheme + "://" + elasticsearchAddress(), props); + } + + static Properties userProperties(String user) { + if (user == null) { + return adminProperties(); + } + Properties prop = new Properties(); + prop.put("user", user); + prop.put("password", "testpass"); + addSslPropertiesIfNeeded(prop); + return prop; + } + + private static void addSslPropertiesIfNeeded(Properties properties) { + if (false == SSL_ENABLED) { + return; + } + Path keyStore; + try { + keyStore = PathUtils.get(RestSqlIT.class.getResource("/test-node.jks").toURI()); + } catch (URISyntaxException e) { + throw new RuntimeException("exception while reading the store", e); + } + if (!Files.exists(keyStore)) { + throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist."); + } + String keyStoreStr = keyStore.toAbsolutePath().toString(); + + properties.put("ssl", "true"); + properties.put("ssl.keystore.location", keyStoreStr); + properties.put("ssl.keystore.pass", "keypass"); + properties.put("ssl.truststore.location", keyStoreStr); + properties.put("ssl.truststore.pass", "keypass"); + } + + static void expectActionMatchesAdmin(CheckedFunction adminAction, + String user, CheckedFunction userAction) throws Exception { + try (Connection adminConnection = es(adminProperties()); + Connection userConnection = es(userProperties(user))) { + assertResultSets(adminAction.apply(adminConnection), 
userAction.apply(userConnection)); + } + } + + static void expectForbidden(String user, CheckedConsumer action) throws Exception { + expectError(user, action, "is unauthorized for user [" + user + "]"); + } + + static void expectUnknownIndex(String user, CheckedConsumer action) throws Exception { + expectError(user, action, "Unknown index"); + } + + static void expectError(String user, CheckedConsumer action, String errorMessage) throws Exception { + SQLException e; + try (Connection connection = es(userProperties(user))) { + e = expectThrows(SQLException.class, () -> action.accept(connection)); + } + assertThat(e.getMessage(), containsString(errorMessage)); + } + + static void expectActionThrowsUnknownColumn(String user, + CheckedConsumer action, String column) throws Exception { + SQLException e; + try (Connection connection = es(userProperties(user))) { + e = expectThrows(SQLException.class, () -> action.accept(connection)); + } + assertThat(e.getMessage(), containsString("Unknown column [" + column + "]")); + } + + private static class JdbcActions implements Actions { + @Override + public void queryWorksAsAdmin() throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = es(adminProperties())) { + h2.createStatement().executeUpdate("CREATE TABLE test (a BIGINT, b BIGINT, c BIGINT)"); + h2.createStatement().executeUpdate("INSERT INTO test (a, b, c) VALUES (1, 2, 3), (4, 5, 6)"); + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM test ORDER BY a"); + assertResultSets(expected, es.createStatement().executeQuery("SELECT * FROM test ORDER BY a")); + } + } + + @Override + public void expectMatchesAdmin(String adminSql, String user, String userSql) throws Exception { + expectActionMatchesAdmin( + con -> con.createStatement().executeQuery(adminSql), + user, + con -> con.createStatement().executeQuery(userSql)); + } + + @Override + public void expectScrollMatchesAdmin(String adminSql, String user, String userSql) throws 
Exception { + expectActionMatchesAdmin( + con -> { + Statement st = con.createStatement(); + st.setFetchSize(1); + return st.executeQuery(adminSql); + }, + user, + con -> { + Statement st = con.createStatement(); + st.setFetchSize(1); + return st.executeQuery(userSql); + }); + } + + @Override + public void expectDescribe(Map columns, String user) throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = es(userProperties(user))) { + // h2 doesn't have the same sort of DESCRIBE that we have so we emulate it + h2.createStatement().executeUpdate("CREATE TABLE mock (column VARCHAR, type VARCHAR)"); + if (columns.size() > 0) { + StringBuilder insert = new StringBuilder(); + insert.append("INSERT INTO mock (column, type) VALUES "); + boolean first = true; + for (Map.Entry column : columns.entrySet()) { + if (first) { + first = false; + } else { + insert.append(", "); + } + insert.append("('").append(column.getKey()).append("', '").append(column.getValue()).append("')"); + } + h2.createStatement().executeUpdate(insert.toString()); + } + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock"); + assertResultSets(expected, es.createStatement().executeQuery("DESCRIBE test")); + } + } + + @Override + public void expectShowTables(List tables, String user) throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = es(userProperties(user))) { + // h2 doesn't spit out the same columns we do so we emulate + h2.createStatement().executeUpdate("CREATE TABLE mock (table VARCHAR)"); + StringBuilder insert = new StringBuilder(); + insert.append("INSERT INTO mock (table) VALUES "); + boolean first = true; + for (String table : tables) { + if (first) { + first = false; + } else { + insert.append(", "); + } + insert.append("('").append(table).append("')"); + } + h2.createStatement().executeUpdate(insert.toString()); + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock ORDER BY table"); + 
assertResultSets(expected, es.createStatement().executeQuery("SHOW TABLES")); + } + } + + @Override + public void expectForbidden(String user, String sql) throws Exception { + JdbcSecurityIT.expectForbidden(user, con -> con.createStatement().executeQuery(sql)); + } + + @Override + public void expectUnknownIndex(String user, String sql) throws Exception { + JdbcSecurityIT.expectUnknownIndex(user, con -> con.createStatement().executeQuery(sql)); + } + + @Override + public void expectUnknownColumn(String user, String sql, String column) throws Exception { + expectActionThrowsUnknownColumn( + user, + con -> con.createStatement().executeQuery(sql), + column); + } + } + + public JdbcSecurityIT() { + super(new JdbcActions()); + } + + // Metadata methods only available to JDBC + public void testMetaDataGetTablesWithFullAccess() throws Exception { + createUser("full_access", "read_all"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getTables("%", "%", "%", null), + "full_access", + con -> con.getMetaData().getTables("%", "%", "%", null)); + new AuditLogAsserter() + .expect(true, GetIndexAction.NAME, "test_admin", contains("bort", "test")) + .expect(true, GetIndexAction.NAME, "full_access", contains("bort", "test")) + .assertLogs(); + } + + public void testMetaDataGetTablesWithNoAccess() throws Exception { + createUser("no_access", "read_nothing"); + + expectForbidden("no_access", con -> con.getMetaData().getTables("%", "%", "%", null)); + new AuditLogAsserter() + // TODO figure out why this generates *no* logs + // .expect(false, GetIndexAction.NAME, "no_access", contains("bort", "test")) + .assertLogs(); + } + + public void testMetaDataGetTablesWithLimitedAccess() throws Exception { + createUser("read_bort", "read_bort"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getTables("%", "%", "bort", null), + "read_bort", + con -> con.getMetaData().getTables("%", "%", "%", null)); + new AuditLogAsserter() + .expect(true, GetIndexAction.NAME, "test_admin", 
contains("bort")) + .expect(true, GetIndexAction.NAME, "read_bort", contains("bort")) + .assertLogs(); + } + + public void testMetaDataGetTablesWithInAccessibleIndex() throws Exception { + createUser("read_bort", "read_bort"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getTables("%", "%", "not_created", null), + "read_bort", + con -> con.getMetaData().getTables("%", "%", "test", null)); + new AuditLogAsserter() + .expect(true, GetIndexAction.NAME, "test_admin", contains("*", "-*")) + .expect(true, GetIndexAction.NAME, "read_bort", contains("*", "-*")) + .assertLogs(); + } + + public void testMetaDataGetColumnsWorksAsFullAccess() throws Exception { + createUser("full_access", "read_all"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getColumns("%", "%", "%", "%"), + "full_access", + con -> con.getMetaData().getColumns("%", "%", "%", "%")); + new AuditLogAsserter() + .expect(true, GetIndexAction.NAME, "test_admin", contains("bort", "test")) + .expect(true, GetIndexAction.NAME, "full_access", contains("bort", "test")) + .assertLogs(); + } + + public void testMetaDataGetColumnsWithNoAccess() throws Exception { + createUser("no_access", "read_nothing"); + + expectForbidden("no_access", con -> con.getMetaData().getColumns("%", "%", "%", "%")); + new AuditLogAsserter() + // TODO figure out why this generates *no* logs + // .expect(false, GetIndexAction.NAME, "no_access", contains("bort", "test")) + .assertLogs(); + } + + public void testMetaDataGetColumnsWithWrongAccess() throws Exception { + createUser("wrong_access", "read_something_else"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getColumns("%", "%", "not_created", "%"), + "wrong_access", + con -> con.getMetaData().getColumns("%", "%", "test", "%")); + new AuditLogAsserter() + .expect(true, GetIndexAction.NAME, "test_admin", contains("*", "-*")) + .expect(true, GetIndexAction.NAME, "wrong_access", contains("*", "-*")) + .assertLogs(); + } + + public void 
testMetaDataGetColumnsSingleFieldGranted() throws Exception { + createUser("only_a", "read_test_a"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getColumns("%", "%", "test", "a"), + "only_a", + con -> con.getMetaData().getColumns("%", "%", "test", "%")); + new AuditLogAsserter() + .expect(true, GetIndexAction.NAME, "test_admin", contains("test")) + .expect(true, GetIndexAction.NAME, "only_a", contains("test")) + .assertLogs(); + } + + public void testMetaDataGetColumnsSingleFieldExcepted() throws Exception { + createUser("not_c", "read_test_a_and_b"); + + /* Since there is no easy way to get a result from the admin side with + * both 'a' and 'b' we'll have to roll our own assertion here, but we + * are intentionally much less restrictive then the tests elsewhere. */ + try (Connection con = es(userProperties("not_c"))) { + ResultSet result = con.getMetaData().getColumns("%", "%", "test", "%"); + assertTrue(result.next()); + String columnName = result.getString(4); + assertEquals("a", columnName); + assertTrue(result.next()); + columnName = result.getString(4); + assertEquals("b", columnName); + assertFalse(result.next()); + } + new AuditLogAsserter() + .expect(true, GetIndexAction.NAME, "not_c", contains("test")) + .assertLogs(); + } + + public void testMetaDataGetColumnsDocumentExcluded() throws Exception { + createUser("no_3s", "read_test_without_c_3"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getColumns("%", "%", "test", "%"), + "no_3s", + con -> con.getMetaData().getColumns("%", "%", "test", "%")); + new AuditLogAsserter() + .expect(true, GetIndexAction.NAME, "test_admin", contains("test")) + .expect(true, GetIndexAction.NAME, "no_3s", contains("test")) + .assertLogs(); + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcShowTablesIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcShowTablesIT.java new file mode 100644 index 00000000000..ab76b3f33a1 --- 
/dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcShowTablesIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.ShowTablesTestCase; + +import java.util.Properties; + +public class JdbcShowTablesIT extends ShowTablesTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties sp = super.connectionProperties(); + sp.putAll(JdbcSecurityIT.adminProperties()); + return sp; + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSimpleExampleIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSimpleExampleIT.java new file mode 100644 index 00000000000..b01fe72333b --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSimpleExampleIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.SimpleExampleTestCase; + +import java.util.Properties; + +public class JdbcSimpleExampleIT extends SimpleExampleTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties properties = super.connectionProperties(); + properties.putAll(JdbcSecurityIT.adminProperties()); + return properties; + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java new file mode 100644 index 00000000000..609847f513e --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase; + +import java.util.Properties; + +public class JdbcSqlSpecIT extends SqlSpecTestCase { + public JdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber, query); + } + + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? 
"https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties sp = super.connectionProperties(); + sp.putAll(JdbcSecurityIT.adminProperties()); + return sp; + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlIT.java new file mode 100644 index 00000000000..bbcc47bb0da --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlIT.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase; + +import static org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Path; + +/** + * Integration test for the rest sql action. The one that speaks json directly to a + * user rather than to the JDBC driver or CLI. 
+ */ +public class RestSqlIT extends RestSqlTestCase { + static final boolean SSL_ENABLED = Booleans.parseBoolean(System.getProperty("tests.ssl.enabled")); + + static Settings securitySettings() { + String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray())); + Settings.Builder builder = Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token); + if (SSL_ENABLED) { + Path keyStore; + try { + keyStore = PathUtils.get(RestSqlIT.class.getResource("/test-node.jks").toURI()); + } catch (URISyntaxException e) { + throw new RuntimeException("exception while reading the store", e); + } + if (!Files.exists(keyStore)) { + throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist."); + } + builder.put(ESRestTestCase.TRUSTSTORE_PATH, keyStore) + .put(ESRestTestCase.TRUSTSTORE_PASSWORD, "keypass"); + } + return builder.build(); + } + + @Override + protected Settings restClientSettings() { + return securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java new file mode 100644 index 00000000000..57283b892e2 --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.NotEqualMessageBuilder; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.columnInfo; +import static java.util.Collections.singletonList; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; + +public class RestSqlSecurityIT extends SqlSecurityTestCase { + private static class RestActions implements Actions { + @Override + public void queryWorksAsAdmin() throws Exception { + Map expected = new HashMap<>(); + expected.put("columns", Arrays.asList( + columnInfo("a", "long"), + columnInfo("b", "long"), + columnInfo("c", "long"))); + expected.put("rows", Arrays.asList( + Arrays.asList(1, 2, 3), + Arrays.asList(4, 5, 6))); + expected.put("size", 2); + + assertResponse(expected, runSql(null, "SELECT * FROM test ORDER BY a")); + } + + @Override + public void expectMatchesAdmin(String adminSql, String user, String userSql) throws Exception { + assertResponse(runSql(null, adminSql), runSql(user, userSql)); + } + + @Override + public void expectScrollMatchesAdmin(String adminSql, String user, String userSql) throws Exception { + Map adminResponse = runSql(null, + new StringEntity("{\"query\": \"" + 
adminSql + "\", \"fetch_size\": 1}", ContentType.APPLICATION_JSON)); + Map otherResponse = runSql(user, + new StringEntity("{\"query\": \"" + adminSql + "\", \"fetch_size\": 1}", ContentType.APPLICATION_JSON)); + + String adminCursor = (String) adminResponse.remove("cursor"); + String otherCursor = (String) otherResponse.remove("cursor"); + assertNotNull(adminCursor); + assertNotNull(otherCursor); + assertResponse(adminResponse, otherResponse); + while (true) { + adminResponse = runSql(null, new StringEntity("{\"cursor\": \"" + adminCursor + "\"}", ContentType.APPLICATION_JSON)); + otherResponse = runSql(user, new StringEntity("{\"cursor\": \"" + otherCursor + "\"}", ContentType.APPLICATION_JSON)); + adminCursor = (String) adminResponse.remove("cursor"); + otherCursor = (String) otherResponse.remove("cursor"); + assertResponse(adminResponse, otherResponse); + if (adminCursor == null) { + assertNull(otherCursor); + return; + } + assertNotNull(otherCursor); + } + } + + @Override + public void expectDescribe(Map columns, String user) throws Exception { + Map expected = new HashMap<>(3); + expected.put("columns", Arrays.asList( + columnInfo("column", "keyword"), + columnInfo("type", "keyword"))); + List> rows = new ArrayList<>(columns.size()); + for (Map.Entry column : columns.entrySet()) { + rows.add(Arrays.asList(column.getKey(), column.getValue())); + } + expected.put("rows", rows); + expected.put("size", columns.size()); + + assertResponse(expected, runSql(user, "DESCRIBE test")); + } + + @Override + public void expectShowTables(List tables, String user) throws Exception { + Map expected = new HashMap<>(); + expected.put("columns", singletonList(columnInfo("table", "keyword"))); + List> rows = new ArrayList<>(); + for (String table : tables) { + rows.add(singletonList(table)); + } + expected.put("rows", rows); + expected.put("size", tables.size()); + assertResponse(expected, runSql(user, "SHOW TABLES")); + } + + @Override + public void expectForbidden(String user, 
String sql) { + ResponseException e = expectThrows(ResponseException.class, () -> runSql(user, sql)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat(e.getMessage(), containsString("unauthorized")); + } + + @Override + public void expectUnknownIndex(String user, String sql) { + ResponseException e = expectThrows(ResponseException.class, () -> runSql(user, sql)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(e.getMessage(), containsString("Unknown index")); + } + + @Override + public void expectUnknownColumn(String user, String sql, String column) throws Exception { + ResponseException e = expectThrows(ResponseException.class, () -> runSql(user, sql)); + assertThat(e.getMessage(), containsString("Unknown column [" + column + "]")); + } + + private static Map runSql(@Nullable String asUser, String sql) throws IOException { + return runSql(asUser, new StringEntity("{\"query\": \"" + sql + "\"}", ContentType.APPLICATION_JSON)); + } + + private static Map runSql(@Nullable String asUser, HttpEntity entity) throws IOException { + Header[] headers = asUser == null ? 
new Header[0] : new Header[] {new BasicHeader("es-security-runas-user", asUser)}; + Response response = client().performRequest("POST", "/_xpack/sql", singletonMap("format", "json"), entity, headers); + return toMap(response); + } + + private static void assertResponse(Map expected, Map actual) { + if (false == expected.equals(actual)) { + NotEqualMessageBuilder message = new NotEqualMessageBuilder(); + message.compareMaps(actual, expected); + fail("Response does not match:\n" + message.toString()); + } + } + + private static Map toMap(Response response) throws IOException { + try (InputStream content = response.getEntity().getContent()) { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); + } + } + } + + public RestSqlSecurityIT() { + super(new RestActions()); + } + + /** + * Test the hijacking a scroll fails. This test is only implemented for + * REST because it is the only API where it is simple to hijack a scroll. + * It should excercise the same code as the other APIs but if we were truly + * paranoid we'd hack together something to test the others as well. 
+ */ + public void testHijackScrollFails() throws Exception { + createUser("full_access", "read_all"); + + Map adminResponse = RestActions.runSql(null, + new StringEntity("{\"query\": \"SELECT * FROM test\", \"fetch_size\": 1}", ContentType.APPLICATION_JSON)); + + String cursor = (String) adminResponse.remove("cursor"); + assertNotNull(cursor); + + ResponseException e = expectThrows(ResponseException.class, () -> + RestActions.runSql("full_access", new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON))); + // TODO return a better error message for bad scrolls + assertThat(e.getMessage(), containsString("No search context found for id")); + assertEquals(404, e.getResponse().getStatusLine().getStatusCode()); + + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expect(true, SQL_ACTION_NAME, "full_access", empty()) + // One scroll access denied per shard + .expect(false, SQL_ACTION_NAME, "full_access", empty(), "InternalScrollSearchRequest") + .expect(false, SQL_ACTION_NAME, "full_access", empty(), "InternalScrollSearchRequest") + .expect(false, SQL_ACTION_NAME, "full_access", empty(), "InternalScrollSearchRequest") + .expect(false, SQL_ACTION_NAME, "full_access", empty(), "InternalScrollSearchRequest") + .expect(false, SQL_ACTION_NAME, "full_access", empty(), "InternalScrollSearchRequest") + .assertLogs(); + } +} diff --git a/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java new file mode 100644 index 00000000000..fed2e93dc07 --- /dev/null +++ b/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java @@ -0,0 +1,619 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.lucene.util.SuppressForbidden; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.action.admin.indices.get.GetIndexAction; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.hamcrest.Matcher; +import org.junit.AfterClass; +import org.junit.Before; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.regex.Pattern; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasItems; + +public abstract class SqlSecurityTestCase extends ESRestTestCase { + /** + * Actions taken by this test. + *

+ * For methods that take {@code user} a {@code null} user means "use the admin". + */ + protected interface Actions { + void queryWorksAsAdmin() throws Exception; + /** + * Assert that running some sql as a user returns the same result as running it as + * the administrator. + */ + void expectMatchesAdmin(String adminSql, String user, String userSql) throws Exception; + /** + * Same as {@link #expectMatchesAdmin(String, String, String)} but sets the scroll size + * to 1 and completely scrolls the results. + */ + void expectScrollMatchesAdmin(String adminSql, String user, String userSql) throws Exception; + void expectDescribe(Map columns, String user) throws Exception; + void expectShowTables(List tables, String user) throws Exception; + void expectForbidden(String user, String sql) throws Exception; + void expectUnknownIndex(String user, String sql) throws Exception; + void expectUnknownColumn(String user, String sql, String column) throws Exception; + } + + protected static final String SQL_ACTION_NAME = "indices:data/read/sql"; + /** + * Location of the audit log file. We could technically figure this out by reading the admin + * APIs but it isn't worth doing because we also have to give ourselves permission to read + * the file and that must be done by setting a system property and reading it in + * {@code plugin-security.policy}. So we may as well have gradle set the property. + */ + private static final Path AUDIT_LOG_FILE = lookupAuditLog(); + + @SuppressForbidden(reason="security doesn't work with mock filesystem") + private static Path lookupAuditLog() { + String auditLogFileString = System.getProperty("tests.audit.logfile"); + if (null == auditLogFileString) { + throw new IllegalStateException("tests.audit.logfile must be set to run this test. It is automatically " + + "set by gradle. 
If you must set it yourself then it should be the absolute path to the audit " + + "log file generated by running x-pack with audit logging enabled."); + } + return Paths.get(auditLogFileString); + } + + private static boolean oneTimeSetup = false; + private static boolean auditFailure = false; + + /** + * The actions taken by this test. + */ + private final Actions actions; + + /** + * How much of the audit log was written before the test started. + */ + private long auditLogWrittenBeforeTestStart; + + public SqlSecurityTestCase(Actions actions) { + this.actions = actions; + } + + /** + * All tests run as a an administrative user but use + * es-security-runas-user to become a less privileged user when needed. + */ + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected boolean preserveIndicesUponCompletion() { + /* We can't wipe the cluster between tests because that nukes the audit + * trail index which makes the auditing flaky. Instead we wipe all + * indices after the entire class is finished. */ + return true; + } + + @Before + public void oneTimeSetup() throws Exception { + if (oneTimeSetup) { + /* Since we don't wipe the cluster between tests we only need to + * write the test data once. 
*/ + return; + } + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_index\": \"test\", \"_type\": \"doc\", \"_id\":\"1\"}\n"); + bulk.append("{\"a\": 1, \"b\": 2, \"c\": 3}\n"); + bulk.append("{\"index\":{\"_index\": \"test\", \"_type\": \"doc\", \"_id\":\"2\"}\n"); + bulk.append("{\"a\": 4, \"b\": 5, \"c\": 6}\n"); + bulk.append("{\"index\":{\"_index\": \"bort\", \"_type\": \"doc\", \"_id\":\"1\"}\n"); + bulk.append("{\"a\": \"test\"}\n"); + client().performRequest("PUT", "/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + oneTimeSetup = true; + } + + @Before + public void setInitialAuditLogOffset() { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new SpecialPermission()); + } + AccessController.doPrivileged((PrivilegedAction) () -> { + if (false == Files.exists(AUDIT_LOG_FILE)) { + auditLogWrittenBeforeTestStart = 0; + return null; + } + if (false == Files.isRegularFile(AUDIT_LOG_FILE)) { + throw new IllegalStateException("expected tests.audit.logfile [" + AUDIT_LOG_FILE + "]to be a plain file but wasn't"); + } + try { + auditLogWrittenBeforeTestStart = Files.size(AUDIT_LOG_FILE); + } catch (IOException e) { + throw new RuntimeException(e); + } + return null; + }); + } + + @AfterClass + public static void wipeIndicesAfterTests() throws IOException { + try { + adminClient().performRequest("DELETE", "*"); + } catch (ResponseException e) { + // 404 here just means we had no indexes + if (e.getResponse().getStatusLine().getStatusCode() != 404) { + throw e; + } + } finally { + // Clear the static state so other subclasses can reuse it later + oneTimeSetup = false; + auditFailure = false; + } + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? 
"https" : "http"; + } + + public void testQueryWorksAsAdmin() throws Exception { + actions.queryWorksAsAdmin(); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .assertLogs(); + } + + public void testQueryWithFullAccess() throws Exception { + createUser("full_access", "read_all"); + + actions.expectMatchesAdmin("SELECT * FROM test ORDER BY a", "full_access", "SELECT * FROM test ORDER BY a"); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expectSqlCompositeAction("full_access", "test") + .assertLogs(); + } + + public void testScrollWithFullAccess() throws Exception { + createUser("full_access", "read_all"); + + actions.expectScrollMatchesAdmin("SELECT * FROM test ORDER BY a", "full_access", "SELECT * FROM test ORDER BY a"); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + /* Scrolling doesn't have to access the index again, at least not through sql. + * If we asserted query and scroll logs then we would see the scroll. 
*/ + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expectSqlCompositeAction("full_access", "test") + .expect(true, SQL_ACTION_NAME, "full_access", empty()) + .expect(true, SQL_ACTION_NAME, "full_access", empty()) + .assertLogs(); + } + + public void testQueryNoAccess() throws Exception { + createUser("no_access", "read_nothing"); + + actions.expectForbidden("no_access", "SELECT * FROM test"); + new AuditLogAsserter() + .expect(false, SQL_ACTION_NAME, "no_access", empty()) + .assertLogs(); + } + + public void testQueryWrongAccess() throws Exception { + createUser("wrong_access", "read_something_else"); + + actions.expectUnknownIndex("wrong_access", "SELECT * FROM test"); + new AuditLogAsserter() + //This user has permission to run sql queries so they are given preliminary authorization + .expect(true, SQL_ACTION_NAME, "wrong_access", empty()) + //the following get index is granted too but against the no indices placeholder, as ignore_unavailable=true + .expect(true, GetIndexAction.NAME, "wrong_access", hasItems("*", "-*")) + .assertLogs(); + } + + public void testQuerySingleFieldGranted() throws Exception { + createUser("only_a", "read_test_a"); + + actions.expectMatchesAdmin("SELECT a FROM test ORDER BY a", "only_a", "SELECT * FROM test ORDER BY a"); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expectSqlCompositeAction("only_a", "test") + .assertLogs(); + } + + public void testScrollWithSingleFieldGranted() throws Exception { + createUser("only_a", "read_test_a"); + + actions.expectScrollMatchesAdmin("SELECT a FROM test ORDER BY a", "only_a", "SELECT * FROM test ORDER BY a"); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + /* Scrolling doesn't have to access the index again, at least not through sql. + * If we asserted query and scroll logs then we would see the scoll. 
*/ + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expectSqlCompositeAction("only_a", "test") + .expect(true, SQL_ACTION_NAME, "only_a", empty()) + .expect(true, SQL_ACTION_NAME, "only_a", empty()) + .assertLogs(); + } + + public void testQueryStringSingeFieldGrantedWrongRequested() throws Exception { + createUser("only_a", "read_test_a"); + + actions.expectUnknownColumn("only_a", "SELECT c FROM test", "c"); + /* The user has permission to query the index but one of the + * columns that they explicitly mention is hidden from them + * by field level access control. This *looks* like a successful + * query from the audit side because all the permissions checked + * out but it failed in SQL because it couldn't compile the + * query without the metadata for the missing field. */ + new AuditLogAsserter() + .expectSqlCompositeAction("only_a", "test") + .assertLogs(); + } + + public void testQuerySingleFieldExcepted() throws Exception { + createUser("not_c", "read_test_a_and_b"); + + actions.expectMatchesAdmin("SELECT a, b FROM test ORDER BY a", "not_c", "SELECT * FROM test ORDER BY a"); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expectSqlCompositeAction("not_c", "test") + .assertLogs(); + } + + public void testScrollWithSingleFieldExcepted() throws Exception { + createUser("not_c", "read_test_a_and_b"); + + actions.expectScrollMatchesAdmin("SELECT a, b FROM test ORDER BY a", "not_c", "SELECT * FROM test ORDER BY a"); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + /* Scrolling doesn't have to access the index again, at least not through sql. + * If we asserted query and scroll logs then we would see the scroll. 
*/ + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expectSqlCompositeAction("not_c", "test") + .expect(true, SQL_ACTION_NAME, "not_c", empty()) + .expect(true, SQL_ACTION_NAME, "not_c", empty()) + .assertLogs(); + } + + public void testQuerySingleFieldExceptionedWrongRequested() throws Exception { + createUser("not_c", "read_test_a_and_b"); + + actions.expectUnknownColumn("not_c", "SELECT c FROM test", "c"); + /* The user has permission to query the index but one of the + * columns that they explicitly mention is hidden from them + * by field level access control. This *looks* like a successful + * query from the audit side because all the permissions checked + * out but it failed in SQL because it couldn't compile the + * query without the metadata for the missing field. */ + new AuditLogAsserter() + .expectSqlCompositeAction("not_c", "test") + .assertLogs(); + } + + public void testQueryDocumentExcluded() throws Exception { + createUser("no_3s", "read_test_without_c_3"); + + actions.expectMatchesAdmin("SELECT * FROM test WHERE c != 3 ORDER BY a", "no_3s", "SELECT * FROM test ORDER BY a"); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expectSqlCompositeAction("no_3s", "test") + .assertLogs(); + } + + public void testShowTablesWorksAsAdmin() throws Exception { + actions.expectShowTables(Arrays.asList("bort", "test"), null); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "bort", "test") + .assertLogs(); + } + + public void testShowTablesWorksAsFullAccess() throws Exception { + createUser("full_access", "read_all"); + + actions.expectMatchesAdmin("SHOW TABLES", "full_access", "SHOW TABLES"); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "bort", "test") + .expectSqlCompositeAction("full_access", "bort", "test") + .assertLogs(); + } + + public void testShowTablesWithNoAccess() throws Exception { + createUser("no_access", 
"read_nothing"); + + actions.expectForbidden("no_access", "SHOW TABLES"); + new AuditLogAsserter() + .expect(false, SQL_ACTION_NAME, "no_access", empty()) + .assertLogs(); + } + + public void testShowTablesWithLimitedAccess() throws Exception { + createUser("read_bort", "read_bort"); + + actions.expectMatchesAdmin("SHOW TABLES LIKE 'bort'", "read_bort", "SHOW TABLES"); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "bort") + .expectSqlCompositeAction("read_bort", "bort") + .assertLogs(); + } + + public void testShowTablesWithLimitedAccessUnaccessableIndex() throws Exception { + createUser("read_bort", "read_bort"); + + actions.expectMatchesAdmin("SHOW TABLES LIKE 'not_created'", "read_bort", "SHOW TABLES LIKE 'test'"); + new AuditLogAsserter() + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expect(true, GetIndexAction.NAME, "test_admin", contains("*", "-*")) + .expect(true, SQL_ACTION_NAME, "read_bort", empty()) + .expect(true, GetIndexAction.NAME, "read_bort", contains("*", "-*")) + .assertLogs(); + } + + public void testDescribeWorksAsAdmin() throws Exception { + Map expected = new TreeMap<>(); + expected.put("a", "BIGINT"); + expected.put("b", "BIGINT"); + expected.put("c", "BIGINT"); + actions.expectDescribe(expected, null); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .assertLogs(); + } + + public void testDescribeWorksAsFullAccess() throws Exception { + createUser("full_access", "read_all"); + + actions.expectMatchesAdmin("DESCRIBE test", "full_access", "DESCRIBE test"); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expectSqlCompositeAction("full_access", "test") + .assertLogs(); + } + + public void testDescribeWithNoAccess() throws Exception { + createUser("no_access", "read_nothing"); + + actions.expectForbidden("no_access", "DESCRIBE test"); + new AuditLogAsserter() + .expect(false, SQL_ACTION_NAME, "no_access", empty()) + .assertLogs(); + } + + public void 
testDescribeWithWrongAccess() throws Exception { + createUser("wrong_access", "read_something_else"); + + actions.expectDescribe(Collections.emptyMap(), "wrong_access"); + new AuditLogAsserter() + //This user has permission to run sql queries so they are given preliminary authorization + .expect(true, SQL_ACTION_NAME, "wrong_access", empty()) + //the following get index is granted too but against the no indices placeholder, as ignore_unavailable=true + .expect(true, GetIndexAction.NAME, "wrong_access", hasItems("*", "-*")) + .assertLogs(); + } + + public void testDescribeSingleFieldGranted() throws Exception { + createUser("only_a", "read_test_a"); + + actions.expectDescribe(singletonMap("a", "BIGINT"), "only_a"); + new AuditLogAsserter() + .expectSqlCompositeAction("only_a", "test") + .assertLogs(); + } + + public void testDescribeSingleFieldExcepted() throws Exception { + createUser("not_c", "read_test_a_and_b"); + + Map expected = new TreeMap<>(); + expected.put("a", "BIGINT"); + expected.put("b", "BIGINT"); + actions.expectDescribe(expected, "not_c"); + new AuditLogAsserter() + .expectSqlCompositeAction("not_c", "test") + .assertLogs(); + } + + public void testDescribeDocumentExcluded() throws Exception { + createUser("no_3s", "read_test_without_c_3"); + + actions.expectMatchesAdmin("DESCRIBE test", "no_3s", "DESCRIBE test"); + new AuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expectSqlCompositeAction("no_3s", "test") + .assertLogs(); + } + + protected static void createUser(String name, String role) throws IOException { + XContentBuilder user = JsonXContent.contentBuilder().prettyPrint().startObject(); { + user.field("password", "testpass"); + user.field("roles", role); + } + user.endObject(); + client().performRequest("PUT", "/_xpack/security/user/" + name, emptyMap(), + new StringEntity(user.string(), ContentType.APPLICATION_JSON)); + } + + /** + * Used to assert audit logs. 
Logs are asserted to match in any order because + * we don't always scroll in the same order but each log checker must match a + * single log and all logs must be matched. + */ + protected final class AuditLogAsserter { + private final List, Boolean>> logCheckers = new ArrayList<>(); + + public AuditLogAsserter expectSqlCompositeAction(String user, String... indices) { + expect(true, SQL_ACTION_NAME, user, empty()); + expect(true, GetIndexAction.NAME, user, hasItems(indices)); + return this; + } + + public AuditLogAsserter expect(boolean granted, String action, String principal, + Matcher> indicesMatcher) { + String request; + switch (action) { + case SQL_ACTION_NAME: + request = "SqlRequest"; + break; + case GetIndexAction.NAME: + request = GetIndexRequest.class.getSimpleName(); + break; + default: + throw new IllegalArgumentException("Unknown action [" + action + "]"); + } + return expect(granted, action, principal, indicesMatcher, request); + } + + public AuditLogAsserter expect(boolean granted, String action, String principal, + Matcher> indicesMatcher, String request) { + String eventType = granted ? "access_granted" : "access_denied"; + logCheckers.add(m -> eventType.equals(m.get("event_type")) + && action.equals(m.get("action")) + && principal.equals(m.get("principal")) + && indicesMatcher.matches(m.get("indices")) + && request.equals(m.get("request")) + ); + return this; + } + + public void assertLogs() throws Exception { + assertFalse("Previous test had an audit-related failure. 
All subsequent audit related assertions are bogus because we can't " + + "guarantee that we fully cleaned up after the last test.", auditFailure); + try { + assertBusy(() -> { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new SpecialPermission()); + } + BufferedReader logReader = AccessController.doPrivileged((PrivilegedAction) () -> { + try { + return Files.newBufferedReader(AUDIT_LOG_FILE, StandardCharsets.UTF_8); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + logReader.skip(auditLogWrittenBeforeTestStart); + + List> logs = new ArrayList<>(); + String line; + Pattern logPattern = Pattern.compile( + ("PART PART PART origin_type=PART, origin_address=PART, " + + "principal=PART, (?:run_as_principal=PART, )?(?:run_by_principal=PART, )?" + + "roles=PART, action=\\[(.*?)\\], (?:indices=PART, )?request=PART") + .replace(" ", "\\s+").replace("PART", "\\[([^\\]]*)\\]")); + // fail(logPattern.toString()); + while ((line = logReader.readLine()) != null) { + java.util.regex.Matcher m = logPattern.matcher(line); + if (false == m.matches()) { + throw new IllegalArgumentException("Unrecognized log: " + line); + } + int i = 1; + Map log = new HashMap<>(); + /* We *could* parse the date but leaving it in the original format makes it + * easier to find the lines in the file that this log comes from. 
*/ + log.put("time", m.group(i++)); + log.put("origin", m.group(i++)); + String eventType = m.group(i++); + if (false == ("access_denied".equals(eventType) || "access_granted".equals(eventType))) { + continue; + } + log.put("event_type", eventType); + log.put("origin_type", m.group(i++)); + log.put("origin_address", m.group(i++)); + String principal = m.group(i++); + log.put("principal", principal); + log.put("run_as_principal", m.group(i++)); + log.put("run_by_principal", m.group(i++)); + log.put("roles", m.group(i++)); + String action = m.group(i++); + if (false == (SQL_ACTION_NAME.equals(action) || GetIndexAction.NAME.equals(action))) { + //TODO we may want to extend this and the assertions to SearchAction.NAME as well + continue; + } + log.put("action", action); + // Use a sorted list for indices for consistent error reporting + List indices = new ArrayList<>(Strings.tokenizeByCommaToSet(m.group(i++))); + Collections.sort(indices); + if ("test_admin".equals(principal)) { + /* Sometimes we accidentally sneak access to the security tables. This is fine, SQL + * drops them from the interface. So we might have access to them, but we don't show + * them. */ + indices.remove(".security"); + indices.remove(".security-6"); + } + log.put("indices", indices); + log.put("request", m.group(i)); + logs.add(log); + } + List> allLogs = new ArrayList<>(logs); + List notMatching = new ArrayList<>(); + checker: for (int c = 0; c < logCheckers.size(); c++) { + Function, Boolean> logChecker = logCheckers.get(c); + for (Iterator> logsItr = logs.iterator(); logsItr.hasNext();) { + Map log = logsItr.next(); + if (logChecker.apply(log)) { + logsItr.remove(); + continue checker; + } + } + notMatching.add(c); + } + if (false == notMatching.isEmpty()) { + fail("Some checkers " + notMatching + " didn't match any logs. All logs:" + logsMessage(allLogs) + + "\nRemaining logs:" + logsMessage(logs)); + } + if (false == logs.isEmpty()) { + fail("Not all logs matched. 
Unmatched logs:" + logsMessage(logs)); + } + }); + } catch (AssertionError e) { + auditFailure = true; + logger.warn("Failed to find an audit log. Skipping remaining tests in this class after this the missing audit" + + "logs could turn up later."); + throw e; + } + } + + private String logsMessage(List> logs) { + if (logs.isEmpty()) { + return " none!"; + } + StringBuilder logsMessage = new StringBuilder(); + for (Map log : logs) { + logsMessage.append('\n').append(log); + } + return logsMessage.toString(); + } + } +} diff --git a/qa/sql/security/src/test/resources/plugin-security.policy b/qa/sql/security/src/test/resources/plugin-security.policy new file mode 100644 index 00000000000..d013547b9fd --- /dev/null +++ b/qa/sql/security/src/test/resources/plugin-security.policy @@ -0,0 +1,8 @@ +grant { + // Needed to read the audit log file + permission java.io.FilePermission "${tests.audit.logfile}", "read"; + + //// Required by ssl subproject: + // Required for the net client to setup ssl rather than use global ssl. 
+ permission java.lang.RuntimePermission "setFactory"; +}; diff --git a/qa/sql/security/ssl/build.gradle b/qa/sql/security/ssl/build.gradle new file mode 100644 index 00000000000..ffe66b816df --- /dev/null +++ b/qa/sql/security/ssl/build.gradle @@ -0,0 +1,366 @@ +import org.elasticsearch.gradle.LoggedExec +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.test.NodeInfo + +import javax.net.ssl.HttpsURLConnection +import javax.net.ssl.KeyManagerFactory +import javax.net.ssl.SSLContext +import javax.net.ssl.TrustManagerFactory +import java.nio.charset.StandardCharsets +import java.security.KeyStore +import java.security.SecureRandom + +// Tell the tests we're running with ssl enabled +integTestRunner { + systemProperty 'tests.ssl.enabled', 'true' +} + +// needed to be consistent with ssl host checking +Object san = new SanEvaluator() + +// location of generated keystores and certificates +File keystoreDir = new File(project.buildDir, 'keystore') + +// Generate the node's keystore +File nodeKeystore = new File(keystoreDir, 'test-node.jks') +task createNodeKeyStore(type: LoggedExec) { + doFirst { + if (nodeKeystore.parentFile.exists() == false) { + nodeKeystore.parentFile.mkdirs() + } + if (nodeKeystore.exists()) { + delete nodeKeystore + } + } + executable = new File(project.javaHome, 'bin/keytool') + standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8')) + args '-genkey', + '-alias', 'test-node', + '-keystore', nodeKeystore, + '-keyalg', 'RSA', + '-keysize', '2048', + '-validity', '712', + '-dname', 'CN=smoke-test-plugins-ssl', + '-keypass', 'keypass', + '-storepass', 'keypass', + '-ext', san +} + +// Generate the client's keystore +File clientKeyStore = new File(keystoreDir, 'test-client.jks') +task createClientKeyStore(type: LoggedExec) { + doFirst { + if (clientKeyStore.parentFile.exists() == false) { + clientKeyStore.parentFile.mkdirs() + } + if 
(clientKeyStore.exists()) { + delete clientKeyStore + } + } + executable = new File(project.javaHome, 'bin/keytool') + standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8')) + args '-genkey', + '-alias', 'test-client', + '-keystore', clientKeyStore, + '-keyalg', 'RSA', + '-keysize', '2048', + '-validity', '712', + '-dname', 'CN=smoke-test-plugins-ssl', + '-keypass', 'keypass', + '-storepass', 'keypass', + '-ext', san +} + +// Export the node's certificate +File nodeCertificate = new File(keystoreDir, 'test-node.cert') +task exportNodeCertificate(type: LoggedExec) { + doFirst { + if (nodeCertificate.parentFile.exists() == false) { + nodeCertificate.parentFile.mkdirs() + } + if (nodeCertificate.exists()) { + delete nodeCertificate + } + } + executable = new File(project.javaHome, 'bin/keytool') + args '-export', + '-alias', 'test-node', + '-keystore', nodeKeystore, + '-storepass', 'keypass', + '-file', nodeCertificate +} + +// Import the node certificate in the client's keystore +task importNodeCertificateInClientKeyStore(type: LoggedExec) { + dependsOn exportNodeCertificate + executable = new File(project.javaHome, 'bin/keytool') + args '-import', + '-alias', 'test-node', + '-keystore', clientKeyStore, + '-storepass', 'keypass', + '-file', nodeCertificate, + '-noprompt' +} + +// Export the client's certificate +File clientCertificate = new File(keystoreDir, 'test-client.cert') +task exportClientCertificate(type: LoggedExec) { + doFirst { + if (clientCertificate.parentFile.exists() == false) { + clientCertificate.parentFile.mkdirs() + } + if (clientCertificate.exists()) { + delete clientCertificate + } + } + executable = new File(project.javaHome, 'bin/keytool') + args '-export', + '-alias', 'test-client', + '-keystore', clientKeyStore, + '-storepass', 'keypass', + '-file', clientCertificate +} + +// Import the client certificate in the node's keystore +task 
importClientCertificateInNodeKeyStore(type: LoggedExec) { + dependsOn exportClientCertificate + executable = new File(project.javaHome, 'bin/keytool') + args '-import', + '-alias', 'test-client', + '-keystore', nodeKeystore, + '-storepass', 'keypass', + '-file', clientCertificate, + '-noprompt' +} + +forbiddenPatterns { + exclude '**/*.cert' +} + +// Add keystores to test classpath: it expects it there +sourceSets.test.resources.srcDir(keystoreDir) +processTestResources.dependsOn( + createNodeKeyStore, createClientKeyStore, + importNodeCertificateInClientKeyStore, importClientCertificateInNodeKeyStore +) + +integTestCluster.dependsOn(importClientCertificateInNodeKeyStore) + + +integTestCluster { + // The setup that we actually want + setting 'xpack.security.http.ssl.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + + // ceremony to set up ssl + setting 'xpack.ssl.keystore.path', 'test-node.jks' + keystoreSetting 'xpack.ssl.keystore.secure_password', 'keypass' + + // copy keystores into config/ + extraConfigFile nodeKeystore.name, nodeKeystore + extraConfigFile clientKeyStore.name, clientKeyStore + + // Override the wait condition to work properly with security and SSL + waitCondition = { NodeInfo node, AntBuilder ant -> + File tmpFile = new File(node.cwd, 'wait.success') + KeyStore keyStore = KeyStore.getInstance("JKS"); + keyStore.load(clientKeyStore.newInputStream(), 'keypass'.toCharArray()); + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(keyStore, 'keypass'.toCharArray()); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(keyStore); + SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); + sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); + for (int i = 0; i < 10; i++) { + // we use custom wait logic here for HTTPS + HttpsURLConnection httpURLConnection = null; + try { + 
httpURLConnection = (HttpsURLConnection) new URL("https://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}&wait_for_status=yellow").openConnection(); + httpURLConnection.setSSLSocketFactory(sslContext.getSocketFactory()); + httpURLConnection.setRequestProperty("Authorization", "Basic " + + Base64.getEncoder().encodeToString("test_admin:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); + httpURLConnection.setRequestMethod("GET"); + httpURLConnection.connect(); + if (httpURLConnection.getResponseCode() == 200) { + tmpFile.withWriter StandardCharsets.UTF_8.name(), { + it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) + } + } + } catch (IOException e) { + if (i == 9) { + logger.error("final attempt of calling cluster health failed", e) + } else { + logger.debug("failed to call cluster health", e) + } + } finally { + if (httpURLConnection != null) { + httpURLConnection.disconnect(); + } + } + + // did not start, so wait a bit before trying again + Thread.sleep(500L); + } + + return tmpFile.exists() + } +} + + + + +/** A lazy evaluator to find the san to use for certificate generation. 
*/
class SanEvaluator {

    private static String san = null

    String toString() {
        // Lazily compute the SAN once; synchronized because Gradle may evaluate
        // this from more than one thread.
        synchronized (SanEvaluator.class) {
            if (san == null) {
                san = getSubjectAlternativeNameString()
            }
        }
        return san
    }

    // Code stolen from NetworkUtils/InetAddresses/NetworkAddress to support SAN
    /** Return all interfaces (and subinterfaces) on the system */
    private static List<NetworkInterface> getInterfaces() throws SocketException {
        List<NetworkInterface> all = new ArrayList<>();
        addAllInterfaces(all, Collections.list(NetworkInterface.getNetworkInterfaces()));
        Collections.sort(all, new Comparator<NetworkInterface>() {
            @Override
            public int compare(NetworkInterface left, NetworkInterface right) {
                return Integer.compare(left.getIndex(), right.getIndex());
            }
        });
        return all;
    }

    /** Helper for getInterfaces, recursively adds subinterfaces to {@code target} */
    private static void addAllInterfaces(List<NetworkInterface> target, List<NetworkInterface> level) {
        if (!level.isEmpty()) {
            target.addAll(level);
            for (NetworkInterface intf : level) {
                addAllInterfaces(target, Collections.list(intf.getSubInterfaces()));
            }
        }
    }

    /**
     * Build the {@code san=...} argument for keytool's {@code -ext} flag from
     * every address bound to an up-and-running loopback interface.
     */
    private static String getSubjectAlternativeNameString() {
        List<InetAddress> list = new ArrayList<>();
        for (NetworkInterface intf : getInterfaces()) {
            if (intf.isUp()) {
                // NOTE: some operating systems (e.g. BSD stack) assign a link local address to the loopback interface
                // while technically not a loopback address, some of these treat them as one (e.g. OS X "localhost") so we must too,
                // otherwise things just won't work out of box. So we include all addresses from loopback interfaces.
                for (InetAddress address : Collections.list(intf.getInetAddresses())) {
                    if (intf.isLoopback() || address.isLoopbackAddress()) {
                        list.add(address);
                    }
                }
            }
        }
        if (list.isEmpty()) {
            throw new IllegalArgumentException("no up-and-running loopback addresses found, got " + getInterfaces());
        }

        StringBuilder builder = new StringBuilder("san=");
        for (int i = 0; i < list.size(); i++) {
            InetAddress address = list.get(i);
            String hostAddress;
            if (address instanceof Inet6Address) {
                hostAddress = compressedIPV6Address((Inet6Address) address);
            } else {
                hostAddress = address.getHostAddress();
            }
            builder.append("ip:").append(hostAddress);
            String hostname = address.getHostName();
            // Only add a dns: entry when the reverse lookup produced a real name
            // rather than echoing the literal address back.
            if (hostname.equals(address.getHostAddress()) == false) {
                builder.append(",dns:").append(hostname);
            }

            if (i != (list.size() - 1)) {
                builder.append(",");
            }
        }

        return builder.toString();
    }

    private static String compressedIPV6Address(Inet6Address inet6Address) {
        byte[] bytes = inet6Address.getAddress();
        int[] hextets = new int[8];
        for (int i = 0; i < hextets.length; i++) {
            // Reassemble each 16-bit hextet from two unsigned bytes.
            hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255;
        }
        compressLongestRunOfZeroes(hextets);
        return hextetsToIPv6String(hextets);
    }

    /**
     * Identify and mark the longest run of zeroes in an IPv6 address.
     *
     * <p>Only runs of two or more hextets are considered. In case of a tie, the
     * leftmost run wins. If a qualifying run is found, its hextets are replaced
     * by the sentinel value -1.
     *
     * @param hextets {@code int[]} mutable array of eight 16-bit hextets
     */
    private static void compressLongestRunOfZeroes(int[] hextets) {
        int bestRunStart = -1;
        int bestRunLength = -1;
        int runStart = -1;
        // Iterate one past the end so a run touching the last hextet is closed.
        for (int i = 0; i < hextets.length + 1; i++) {
            if (i < hextets.length && hextets[i] == 0) {
                if (runStart < 0) {
                    runStart = i;
                }
            } else if (runStart >= 0) {
                int runLength = i - runStart;
                if (runLength > bestRunLength) {
                    bestRunStart = runStart;
                    bestRunLength = runLength;
                }
                runStart = -1;
            }
        }
        if (bestRunLength >= 2) {
            Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1);
        }
    }

    /**
     * Convert a list of hextets into a human-readable IPv6 address.
     *
     * <p>In order for "::" compression to work, the input should contain negative
     * sentinel values in place of the elided zeroes.
     *
     * @param hextets {@code int[]} array of eight 16-bit hextets, or -1s
     */
    private static String hextetsToIPv6String(int[] hextets) {
        /*
         * While scanning the array, handle these state transitions:
         * start->num => "num" start->gap => "::"
         * num->num => ":num" num->gap => "::"
         * gap->num => "num" gap->gap => ""
         */
        StringBuilder buf = new StringBuilder(39);
        boolean lastWasNumber = false;
        for (int i = 0; i < hextets.length; i++) {
            boolean thisIsNumber = hextets[i] >= 0;
            if (thisIsNumber) {
                if (lastWasNumber) {
                    buf.append(':');
                }
                buf.append(Integer.toHexString(hextets[i]));
            } else {
                if (i == 0 || lastWasNumber) {
                    buf.append("::");
                }
            }
            lastWasNumber = thisIsNumber;
        }
        return buf.toString();
    }
}
diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/ErrorsTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/ErrorsTestCase.java
new file mode 100644
index 00000000000..8c31b7a6445
--- /dev/null
+++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/ErrorsTestCase.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.qa.sql;
+
+/**
+ * Interface implemented once per SQL access method to ensure that we
+ * test the same minimal set of error cases. Note that this does not
+ * include security related failures, those are tracked in another test.
+ */
+public interface ErrorsTestCase {
+    /** Syntactically/semantically invalid SQL (e.g. {@code SELECT * FRO}) must produce a parse/analysis error, not a server failure. */
+    void testSelectInvalidSql() throws Exception;
+    /** Querying an index that does not exist must report an unknown-index error. */
+    void testSelectFromMissingIndex() throws Exception;
+    /** Selecting a column absent from the index mapping must report an unknown-column error. */
+    void testSelectMissingField() throws Exception;
+    /** Calling a function SQL does not define must report an unknown-function error. */
+    void testSelectMissingFunction() throws Exception;
+}
diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java
new file mode 100644
index 00000000000..d890b5ac76d
--- /dev/null
+++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.qa.sql.cli;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.entity.ContentType;
+import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.CheckedConsumer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.transport.client.PreBuiltTransportClient;
+import org.elasticsearch.xpack.qa.sql.cli.RemoteCli.SecurityConfig;
+import org.elasticsearch.xpack.qa.sql.embed.CliHttpServer;
+import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.rules.ExternalResource;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.security.AccessControlException;
+import java.util.function.Supplier;
+
+import static
java.util.Collections.singletonMap;
import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts;

public abstract class CliIntegrationTestCase extends ESRestTestCase {
    /**
     * Should the HTTP server that serves SQL be embedded in the test
     * process (true) or should the JDBC driver connect to Elasticsearch
     * running at {@code tests.rest.cluster}. Note that to use embedded
     * HTTP you have to have Elasticsearch's transport protocol open on
     * port 9300 but the Elasticsearch running there does not need to have
     * the SQL plugin installed. Note also that embedded HTTP is faster
     * but is not canonical because it runs against a different HTTP server
     * than JDBC will use in production. Gradle always uses non-embedded.
     */
    private static final boolean EMBED_SQL = Booleans.parseBoolean(System.getProperty("tests.embed.sql", "false"));

    @ClassRule
    public static final EmbeddedCliServer EMBEDDED = EMBED_SQL ? new EmbeddedCliServer() : null;
    /** Supplies the address the CLI should connect to, either embedded or external. */
    public static final Supplier<String> ES = EMBED_SQL ? EMBEDDED::address : CliIntegrationTestCase::elasticsearchAddress;

    /**
     * Read an address for Elasticsearch suitable for the CLI from the system properties.
     */
    public static String elasticsearchAddress() {
        String cluster = System.getProperty("tests.rest.cluster");
        // CLI only supports a single node at a time so we just give it one.
        return cluster.split(",")[0];
    }

    private RemoteCli cli;

    /**
     * Asks the CLI Fixture to start a CLI instance.
     */
    @Before
    public void startCli() throws IOException {
        cli = new RemoteCli(ES.get(), true, securityConfig());
    }

    @After
    public void orderlyShutdown() throws Exception {
        if (cli == null) {
            // failed to connect to the cli so there is nothing to do here
            return;
        }
        cli.close();
        assertNoSearchContexts();
    }

    /**
     * Override to add security configuration to the cli.
     *
     * @return {@code null} (the default) to run without security
     */
    protected SecurityConfig securityConfig() {
        return null;
    }

    /**
     * Index a single document with id 1 into {@code index}, refreshing so it
     * is immediately visible to queries.
     *
     * @param body callback that fills in the document's fields
     */
    protected void index(String index, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
        XContentBuilder builder = JsonXContent.contentBuilder().startObject();
        body.accept(builder);
        builder.endObject();
        HttpEntity doc = new StringEntity(builder.string(), ContentType.APPLICATION_JSON);
        client().performRequest("PUT", "/" + index + "/doc/1", singletonMap("refresh", "true"), doc);
    }

    public String command(String command) throws IOException {
        return cli.command(command);
    }

    public String readLine() throws IOException {
        return cli.readLine();
    }

    /**
     * Embedded CLI server that runs against a running Elasticsearch
     * server using the transport protocol.
     */
    private static class EmbeddedCliServer extends ExternalResource {
        private Client client;
        private CliHttpServer server;

        @Override
        @SuppressWarnings("resource")
        protected void before() throws Throwable {
            try {
                Settings settings = Settings.builder()
                        .put("client.transport.ignore_cluster_name", true)
                        .build();
                client = new PreBuiltTransportClient(settings)
                        .addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), 9300));
            } catch (ExceptionInInitializerError e) {
                // PreBuiltTransportClient's static init trips over the test security manager
                if (e.getCause() instanceof AccessControlException) {
                    throw new RuntimeException(getClass().getSimpleName() + " is not available with the security manager", e);
                } else {
                    throw e;
                }
            }
            server = new CliHttpServer(client);

            server.start(0);
        }

        @Override
        protected void after() {
            client.close();
            client = null;
            server.stop();
            server = null;
        }

        private String address() {
            return server.address().getAddress().getHostAddress() + ":" + server.address().getPort();
        }
    }
}
diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java
new file mode 100644
index 00000000000..f25bca75b75
--- 
/dev/null
+++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.qa.sql.cli;
+
+import java.io.IOException;
+
+/**
+ * Tests for error messages.
+ */
+public abstract class ErrorsTestCase extends CliIntegrationTestCase implements org.elasticsearch.xpack.qa.sql.ErrorsTestCase {
+    // NOTE(review): the expected strings below look like ANSI color sequences with
+    // the ESC byte missing. RemoteCli.readLine() strips \u001B from every line it
+    // returns, so the assertions intentionally compare against the escape-less text.
+    @Override
+    public void testSelectInvalidSql() throws Exception {
+        // Parser error: "*" cannot be resolved without a FROM clause
+        assertEquals("[1;31mBad request [[22;3;33mFound 1 problem(s)", command("SELECT * FRO"));
+        assertEquals("line 1:8: Cannot determine columns for *[1;23;31m][0m", readLine());
+    }
+
+    @Override
+    public void testSelectFromMissingIndex() throws IOException {
+        // No documents were indexed, so [test] does not exist
+        assertEquals("[1;31mBad request [[22;3;33mFound 1 problem(s)", command("SELECT * FROM test"));
+        assertEquals("line 1:15: Unknown index [test][1;23;31m][0m", readLine());
+    }
+
+    @Override
+    public void testSelectMissingField() throws IOException {
+        // Index exists but has no [missing] field in its mapping
+        index("test", body -> body.field("test", "test"));
+        assertEquals("[1;31mBad request [[22;3;33mFound 1 problem(s)", command("SELECT missing FROM test"));
+        assertEquals("line 1:8: Unknown column [missing][1;23;31m][0m", readLine());
+    }
+
+    @Override
+    public void testSelectMissingFunction() throws Exception {
+        // [missing] parses as a function call but no such function is registered
+        index("test", body -> body.field("foo", 1));
+        assertEquals("[1;31mBad request [[22;3;33mFound 1 problem(s)", command("SELECT missing(foo) FROM test"));
+        assertEquals("line 1:8: Unknown function [missing][1;23;31m][0m", readLine());
+    }
+}
diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java
new file mode 100644
index 00000000000..92344e6cd28
--- /dev/null
+++ 
b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.qa.sql.cli;
+
+import org.apache.http.entity.ContentType;
+import org.apache.http.entity.StringEntity;
+
+import java.io.IOException;
+
+import static java.util.Collections.singletonMap;
+import static org.hamcrest.Matchers.containsString;
+
+/**
+ * Test for setting the fetch size.
+ */
+public abstract class FetchSizeTestCase extends CliIntegrationTestCase {
+    // NOTE(review): expected strings are ANSI color sequences with the ESC byte
+    // removed -- RemoteCli.readLine() strips \u001B from every returned line.
+    public void testSelect() throws IOException {
+        // Bulk-index 20 docs (auto ids) with test_field = 0..19, refreshed so the
+        // query below sees them all.
+        StringBuilder bulk = new StringBuilder();
+        for (int i = 0; i < 20; i++) {
+            bulk.append("{\"index\":{}}\n");
+            bulk.append("{\"test_field\":" + i + "}\n");
+        }
+        client().performRequest("PUT", "/test/doc/_bulk", singletonMap("refresh", "true"),
+                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
+        assertEquals("fetch size set to [90m4[0m", command("fetch size = 4"));
+        assertEquals("fetch separator set to \"[90m -- fetch sep -- [0m\"", command("fetch separator = \" -- fetch sep -- \""));
+        assertThat(command("SELECT * FROM test ORDER BY test_field ASC"), containsString("test_field"));
+        assertThat(readLine(), containsString("----------"));
+        int i = 0;
+        // 20 rows at fetch size 4 -> 5 pages of 4 rows, each page followed by the
+        // custom separator line configured above.
+        while (i < 20) {
+            assertThat(readLine(), containsString(Integer.toString(i++)));
+            assertThat(readLine(), containsString(Integer.toString(i++)));
+            assertThat(readLine(), containsString(Integer.toString(i++)));
+            assertThat(readLine(), containsString(Integer.toString(i++)));
+            assertThat(readLine(), containsString(" -- fetch sep -- "));
+        }
+        assertEquals("", readLine());
+    }
+
+    public void testInvalidFetchSize() throws IOException {
+        // Non-numeric, non-positive, and larger-than-int values must all be rejected
+        assertEquals("[1;31mInvalid fetch size [[22;3;33mcat[1;23;31m][0m", command("fetch size = cat"));
+        assertEquals("[1;31mInvalid fetch size [[22;3;33m0[1;23;31m]. Must be > 0.[0m", command("fetch size = 0"));
+        assertEquals("[1;31mInvalid fetch size [[22;3;33m-1231[1;23;31m]. Must be > 0.[0m", command("fetch size = -1231"));
+        assertEquals("[1;31mInvalid fetch size [[22;3;33m" + Long.MAX_VALUE + "[1;23;31m][0m", command("fetch size = " + Long.MAX_VALUE));
+    }
+}
diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/RemoteCli.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/RemoteCli.java
new file mode 100644
index 00000000000..5b805933f5e
--- /dev/null
+++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/RemoteCli.java
@@ -0,0 +1,233 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.qa.sql.cli;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.SpecialPermission;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.io.BufferedReader;
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.net.InetAddress;
+import java.net.Socket;
+import java.net.UnknownHostException;
+import java.nio.charset.StandardCharsets;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Predicate;
+
+import static org.elasticsearch.test.ESTestCase.randomBoolean;
+import static org.hamcrest.Matchers.empty;
+import static org.hamcrest.Matchers.endsWith;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.startsWith;
+import static org.junit.Assert.assertEquals;
+import static
org.junit.Assert.assertThat; + +public class RemoteCli implements Closeable { + private static final Logger logger = Loggers.getLogger(RemoteCli.class); + + private static final InetAddress CLI_FIXTURE_ADDRESS; + private static final int CLI_FIXTURE_PORT; + static { + String addressAndPort = System.getProperty("tests.cli.fixture"); + if (addressAndPort == null) { + throw new IllegalArgumentException("Must set the [tests.cli.fixture] property. Gradle handles this for you " + + " in regular tests. In embedded mode the easiest thing to do is run " + + "`gradle :x-pack-elasticsearch:qa:sql:no-security:run` and to set the property to the contents of " + + "`qa/sql/no-security/build/fixtures/cliFixture/ports`"); + } + int split = addressAndPort.lastIndexOf(':'); + try { + CLI_FIXTURE_ADDRESS = InetAddress.getByName(addressAndPort.substring(0, split)); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + CLI_FIXTURE_PORT = Integer.parseInt(addressAndPort.substring(split + 1)); + } + + private final Socket socket; + private final PrintWriter out; + private final BufferedReader in; + + public RemoteCli(String elasticsearchAddress, boolean checkConnectionOnStartup, + @Nullable SecurityConfig security) throws IOException { + // Connect + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new SpecialPermission()); + } + logger.info("connecting to the cli fixture at {}:{}", CLI_FIXTURE_ADDRESS, CLI_FIXTURE_PORT); + socket = AccessController.doPrivileged(new PrivilegedAction() { + @Override + public Socket run() { + try { + return new Socket(CLI_FIXTURE_ADDRESS, CLI_FIXTURE_PORT); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + }); + logger.info("connected"); + socket.setSoTimeout(10000); + out = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8), true); + in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); + + 
// Start the CLI + String command; + if (security == null) { + command = elasticsearchAddress; + } else { + command = security.user + "@" + elasticsearchAddress; + if (security.https) { + command = "https://" + command; + } else if (randomBoolean()) { + command = "http://" + command; + } + if (security.keystoreLocation != null) { + command = command + " -keystore_location " + security.keystoreLocation; + } + } + if (false == checkConnectionOnStartup) { + command += " -check false"; + } + out.println(command); + + // Feed it passwords if needed + if (security != null && security.keystoreLocation != null) { + assertEquals("keystore password: ", readUntil(s -> s.endsWith(": "))); + out.println(security.keystorePassword); + } + if (security != null) { + assertEquals("password: ", readUntil(s -> s.endsWith(": "))); + out.println(security.password); + } + + // Throw out the logo and warnings about making a dumb terminal + while (false == readLine().contains("SQL")); + // Throw out the empty line before all the good stuff + assertEquals("", readLine()); + } + + /** + * Attempts an orderly shutdown of the CLI, reporting any unconsumed lines as errors. + */ + @Override + public void close() throws IOException { + try { + // Try and shutdown the client normally + /* Don't use println because it enits \r\n on windows but we put the + * terminal in unix mode to make the tests consistent. */ + out.print("quit;\n"); + out.flush(); + List nonQuit = new ArrayList<>(); + String line; + while (false == (line = readLine()).startsWith("[?1h=[33msql> [0mquit;[90mBye![0m")) { + if (false == line.isEmpty()) { + nonQuit.add(line); + } + } + assertThat("unconsumed lines", nonQuit, empty()); + } finally { + out.close(); + in.close(); + // Most importantly, close the socket so the next test can use the fixture + socket.close(); + } + } + + /** + * Send a command and assert the echo. 
+ */ + public String command(String command) throws IOException { + assertThat("; automatically added", command, not(endsWith(";"))); + logger.info("out: {};", command); + /* Don't use println because it enits \r\n on windows but we put the + * terminal in unix mode to make the tests consistent. */ + out.print(command + ";\n"); + out.flush(); + String firstResponse = "[?1h=[33msql> [0m" + command + ";"; + String firstLine = readLine(); + assertThat(firstLine, startsWith(firstResponse)); + return firstLine.substring(firstResponse.length()); + } + + public String readLine() throws IOException { + /* Since we can't *see* esc in the error messages we just + * remove it here and pretend it isn't required. Hopefully + * `[` is enough for us to assert on. */ + String line = in.readLine().replace("\u001B", ""); + logger.info("in : {}", line); + return line; + } + + private String readUntil(Predicate end) throws IOException { + StringBuilder b = new StringBuilder(); + String result; + do { + int c = in.read(); + if (c == -1) { + throw new IOException("got eof before end"); + } + b.append((char) c); + result = b.toString(); + } while (false == end.test(result)); + logger.info("in : {}", result); + return result; + } + + public static class SecurityConfig { + private final boolean https; + private final String user; + private final String password; + @Nullable + private final String keystoreLocation; + @Nullable + private final String keystorePassword; + + public SecurityConfig(boolean https, String user, String password, + @Nullable String keystoreLocation, @Nullable String keystorePassword) { + if (user == null) { + throw new IllegalArgumentException( + "[user] is required. Send [null] instead of a SecurityConfig to run without security."); + } + if (password == null) { + throw new IllegalArgumentException( + "[password] is required. 
Send [null] instead of a SecurityConfig to run without security."); + } + if (keystoreLocation == null) { + if (keystorePassword != null) { + throw new IllegalArgumentException("[keystorePassword] cannot be specified if [keystoreLocation] is not specified"); + } + } else { + if (keystorePassword == null) { + throw new IllegalArgumentException("[keystorePassword] is required if [keystoreLocation] is specified"); + } + } + + this.https = https; + this.user = user; + this.password = password; + this.keystoreLocation = keystoreLocation; + this.keystorePassword = keystorePassword; + } + + public String keystoreLocation() { + return keystoreLocation; + } + + public String keystorePassword() { + return keystorePassword; + } + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/SelectTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/SelectTestCase.java new file mode 100644 index 00000000000..3add41db6d5 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/SelectTestCase.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.cli; + +import org.elasticsearch.test.hamcrest.RegexMatcher; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; + +public abstract class SelectTestCase extends CliIntegrationTestCase { + public void testSelect() throws IOException { + index("test", body -> body.field("test_field", "test_value")); + assertThat(command("SELECT * FROM test"), containsString("test_field")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), containsString("test_value")); + assertEquals("", readLine()); + } + + public void testSelectWithWhere() throws IOException { + index("test", body -> body.field("test_field", "test_value1").field("i", 1)); + index("test", body -> body.field("test_field", "test_value2").field("i", 2)); + assertThat(command("SELECT * FROM test WHERE i = 2"), RegexMatcher.matches("\\s*i\\s*\\|\\s*test_field\\s*")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), RegexMatcher.matches("\\s*2\\s*\\|\\s*test_value2\\s*")); + assertEquals("", readLine()); + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java new file mode 100644 index 00000000000..2e12b44df1a --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.cli; + +import org.elasticsearch.test.hamcrest.RegexMatcher; + +import java.io.IOException; +import java.util.regex.Pattern; + +import static org.hamcrest.Matchers.containsString; + +public abstract class ShowTestCase extends CliIntegrationTestCase { + public void testShowTables() throws IOException { + index("test1", body -> body.field("test_field", "test_value")); + index("test2", body -> body.field("test_field", "test_value")); + assertThat(command("SHOW TABLES"), RegexMatcher.matches("\\s*table\\s*")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), RegexMatcher.matches("\\s*test[12]\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*test[12]\\s*")); + assertEquals("", readLine()); + } + + public void testShowFunctions() throws IOException { + assertThat(command("SHOW FUNCTIONS"), RegexMatcher.matches("\\s*name\\s*\\|\\s*type\\s*")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), RegexMatcher.matches("\\s*AVG\\s*\\|\\s*AGGREGATE\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*COUNT\\s*\\|\\s*AGGREGATE\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*MAX\\s*\\|\\s*AGGREGATE\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*MIN\\s*\\|\\s*AGGREGATE\\s*")); + String line = readLine(); + Pattern aggregateFunction = Pattern.compile("\\s*[A-Z0-9_~]+\\s*\\|\\s*AGGREGATE\\s*"); + while (aggregateFunction.matcher(line).matches()) { + line = readLine(); + } + Pattern scalarFunction = Pattern.compile("\\s*[A-Z0-9_~]+\\s*\\|\\s*SCALAR\\s*"); + while (scalarFunction.matcher(line).matches()) { + line = readLine(); + } + assertEquals("", line); + } + + public void testShowFunctionsLikePrefix() throws IOException { + assertThat(command("SHOW FUNCTIONS LIKE 'L%'"), RegexMatcher.matches("\\s*name\\s*\\|\\s*type\\s*")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), 
RegexMatcher.matches("\\s*LOG\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*LOG10\\s*\\|\\s*SCALAR\\s*")); + assertEquals("", readLine()); + } + + public void testShowFunctionsLikeInfix() throws IOException { + assertThat(command("SHOW FUNCTIONS LIKE '%DAY%'"), RegexMatcher.matches("\\s*name\\s*\\|\\s*type\\s*")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), RegexMatcher.matches("\\s*DAY_OF_MONTH\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*DAY\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*DAY_OF_WEEK\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*DAY_OF_YEAR\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*HOUR_OF_DAY\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*MINUTE_OF_DAY\\s*\\|\\s*SCALAR\\s*")); + assertEquals("", readLine()); + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/package-info.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/package-info.java new file mode 100644 index 00000000000..b77e050cc34 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Support for integration tests for the Elasticsearch SQL CLI client + * and integration tests shared between multiple qa projects. 
+ */ +package org.elasticsearch.xpack.qa.sql.cli; diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/CliHttpServer.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/CliHttpServer.java new file mode 100644 index 00000000000..073325a8258 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/CliHttpServer.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.embed; + +import org.elasticsearch.client.Client; + +/** + * Internal server used for testing without starting a new Elasticsearch instance. + */ +public class CliHttpServer extends ProtoHttpServer { + public CliHttpServer(Client client) { + super(client, new CliProtoHandler(client), "/_xpack/sql/cli"); + } + + @Override + public String url() { + return "http://" + super.url(); + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/CliProtoHandler.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/CliProtoHandler.java new file mode 100644 index 00000000000..a4f0883c116 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/CliProtoHandler.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.embed; + +import com.sun.net.httpserver.HttpExchange; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.test.rest.FakeRestChannel; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto; +import org.elasticsearch.xpack.sql.plugin.RestSqlCliAction; + +import java.io.DataInput; +import java.io.IOException; + +import static org.mockito.Mockito.mock; + +class CliProtoHandler extends ProtoHandler { + private final RestSqlCliAction action; + + CliProtoHandler(Client client) { + super(client); + action = new RestSqlCliAction(Settings.EMPTY, mock(RestController.class)); + } + + @Override + protected void handle(HttpExchange http, DataInput in) throws IOException { + FakeRestChannel channel = new FakeRestChannel(new FakeRestRequest(), true, 1); + try { + action.operation(Proto.INSTANCE.readRequest(in), client).accept(channel); + while (false == channel.await()) {} + sendHttpResponse(http, channel.capturedResponse().content()); + } catch (Exception e) { + fail(http, e); + } + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/EmbeddedJdbcServer.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/EmbeddedJdbcServer.java new file mode 100644 index 00000000000..1ade058de63 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/EmbeddedJdbcServer.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.embed; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.transport.client.PreBuiltTransportClient; +import org.junit.rules.ExternalResource; + +import java.net.InetAddress; +import java.security.AccessControlException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Properties; + +import static org.junit.Assert.assertNotNull; + +/** + * Embedded JDBC server that uses the transport client to power + * the jdbc endpoints in the same JVM as the tests. + */ +public class EmbeddedJdbcServer extends ExternalResource { + + private Client client; + private JdbcHttpServer server; + private String jdbcUrl; + private final Properties properties; + + public EmbeddedJdbcServer() { + this(false); + } + + public EmbeddedJdbcServer(boolean debug) { + properties = new Properties(); + if (debug) { + properties.setProperty("debug", "true"); + } + } + + @Override + @SuppressWarnings("resource") + protected void before() throws Throwable { + try { + Settings settings = Settings.builder() + .put("client.transport.ignore_cluster_name", true) + .build(); + client = new PreBuiltTransportClient(settings) + .addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), 9300)); + } catch (ExceptionInInitializerError e) { + if (e.getCause() instanceof AccessControlException) { + throw new RuntimeException(getClass().getSimpleName() + " is not available with the security manager", e); + } else { + throw e; + } + } + server = new JdbcHttpServer(client); + + server.start(0); + jdbcUrl = server.url(); + } + + @Override + protected void after() { + client.close(); + client = null; + server.stop(); + server = null; + } + + public Connection connection(Properties props) throws SQLException { + assertNotNull("ES JDBC Server is null - make sure ES is 
properly run as a @ClassRule", server); + Properties p = new Properties(properties); + p.putAll(props); + return DriverManager.getConnection(jdbcUrl, p); + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/EmbeddedModeFilterClient.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/EmbeddedModeFilterClient.java new file mode 100644 index 00000000000..c63643e68f2 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/EmbeddedModeFilterClient.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.embed; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.FilterClient; +import org.elasticsearch.xpack.sql.execution.PlanExecutor; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlRequest; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse; +import org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction; + +import java.util.Objects; + +/** + * Implements embedded sql mode by intercepting requests to SQL APIs and executing them locally. 
+ */ +public class EmbeddedModeFilterClient extends FilterClient { + private PlanExecutor planExecutor; + + public EmbeddedModeFilterClient(Client in) { + super(in); + } + + public void setPlanExecutor(PlanExecutor executor) { + this.planExecutor = executor; + } + + @Override + @SuppressWarnings("unchecked") + protected < Request extends ActionRequest, + Response extends ActionResponse, + RequestBuilder extends ActionRequestBuilder> + void doExecute(Action action, + Request request, ActionListener listener) { + Objects.requireNonNull(planExecutor, "plan executor not set on EmbeddedClient"); + + if (action == SqlAction.INSTANCE) { + TransportSqlAction.operation(planExecutor, (SqlRequest) request, (ActionListener) listener); + } else { + super.doExecute(action, request, listener); + } + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/JdbcHttpServer.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/JdbcHttpServer.java new file mode 100644 index 00000000000..efea3c052d5 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/JdbcHttpServer.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.embed; + +import org.elasticsearch.client.Client; + +/** + * Internal server used for testing without starting a new Elasticsearch instance. 
+ */ +public class JdbcHttpServer extends ProtoHttpServer { + + public JdbcHttpServer(Client client) { + super(client, new JdbcProtoHandler(client), "/_xpack/sql/jdbc"); + } + + @Override + public String url() { + return "jdbc:es://" + super.url(); + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/JdbcProtoHandler.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/JdbcProtoHandler.java new file mode 100644 index 00000000000..9e76b9f6343 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/JdbcProtoHandler.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.embed; + +import com.sun.net.httpserver.HttpExchange; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.test.rest.FakeRestChannel; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto; +import org.elasticsearch.xpack.sql.plugin.RestSqlJdbcAction; +import org.elasticsearch.xpack.sql.plugin.SqlLicenseChecker; + +import java.io.DataInput; +import java.io.IOException; + +import static org.mockito.Mockito.mock; + +class JdbcProtoHandler extends ProtoHandler { + private final RestSqlJdbcAction action; + + JdbcProtoHandler(Client client) { + super(client); + action = new RestSqlJdbcAction(Settings.EMPTY, mock(RestController.class), new SqlLicenseChecker(() -> {}, () -> {}), + new IndexResolver(client)); + } + + @Override + protected void handle(HttpExchange http, DataInput in) throws IOException { + FakeRestChannel channel = new 
FakeRestChannel(new FakeRestRequest(), true, 1); + try { + action.operation(Proto.INSTANCE.readRequest(in), client).accept(channel); + while (false == channel.await()) {} + sendHttpResponse(http, channel.capturedResponse().content()); + } catch (Exception e) { + fail(http, e); + } + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/ProtoHandler.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/ProtoHandler.java new file mode 100644 index 00000000000..f75b1bbb5af --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/ProtoHandler.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.embed; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.execution.PlanExecutor; + +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.IOException; + +public abstract class ProtoHandler implements HttpHandler, AutoCloseable { + + private static PlanExecutor planExecutor(EmbeddedModeFilterClient client) { + return new PlanExecutor(client, new IndexResolver(client)); + } + + protected static final Logger log = 
ESLoggerFactory.getLogger(ProtoHandler.class.getName()); + + private final TimeValue TV = TimeValue.timeValueSeconds(5); + protected final EmbeddedModeFilterClient client; + protected final NodeInfo info; + protected final String clusterName; + + protected ProtoHandler(Client client) { + NodesInfoResponse niResponse = client.admin().cluster().prepareNodesInfo("_local").clear().get(TV); + this.client = !(client instanceof EmbeddedModeFilterClient) ? new EmbeddedModeFilterClient( + client) : (EmbeddedModeFilterClient) client; + this.client.setPlanExecutor(planExecutor(this.client)); + info = niResponse.getNodes().get(0); + clusterName = niResponse.getClusterName().value(); + } + + @Override + public void handle(HttpExchange http) throws IOException { + log.debug("Received query call..."); + + if ("HEAD".equals(http.getRequestMethod())) { + http.sendResponseHeaders(RestStatus.OK.getStatus(), 0); + http.close(); + return; + } + + try (DataInputStream in = new DataInputStream(http.getRequestBody())) { + handle(http, in); + } catch (Exception ex) { + fail(http, ex); + } + } + + protected abstract void handle(HttpExchange http, DataInput in) throws IOException; + + protected void sendHttpResponse(HttpExchange http, BytesReference response) throws IOException { + // first do the conversion in case an exception is triggered + if (http.getResponseHeaders().isEmpty()) { + http.sendResponseHeaders(RestStatus.OK.getStatus(), 0); + } + response.writeTo(http.getResponseBody()); + http.close(); + } + + protected void fail(HttpExchange http, Exception ex) { + log.error("Caught error while transmitting response", ex); + try { + // the error conversion has failed, halt + if (http.getResponseHeaders().isEmpty()) { + http.sendResponseHeaders(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), -1); + } + } catch (IOException ioEx) { + log.error("Caught error while trying to catch error", ex); + } finally { + http.close(); + } + } + + @Override + public void close() { + // no-op + } +} \ No 
newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/ProtoHttpServer.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/ProtoHttpServer.java new file mode 100644 index 00000000000..6708c7751f4 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/ProtoHttpServer.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.embed; + +import com.sun.net.httpserver.HttpServer; + +import org.elasticsearch.client.Client; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +public abstract class ProtoHttpServer { + + private final ProtoHandler handler; + private final String protoSuffix; + private final Client client; + private HttpServer server; + private ExecutorService executor; + + public ProtoHttpServer(Client client, ProtoHandler handler, String protoSuffix) { + this.client = client; + this.handler = handler; + this.protoSuffix = protoSuffix; + } + + public void start(int port) throws IOException { + // similar to Executors.newCached but with a smaller bound and much smaller keep-alive + executor = new ThreadPoolExecutor(0, 10, 250, TimeUnit.MILLISECONDS, new SynchronousQueue()); + + server = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), port), 0); + server.createContext("/", new RootHandler()); + server.createContext(protoSuffix, handler); + server.setExecutor(executor); + server.start(); + } + + public void stop() { + server.stop(1); + server = null; + executor.shutdownNow(); + executor = 
null; + } + + public InetSocketAddress address() { + return server != null ? server.getAddress() : null; + } + + public String url() { + return server != null ? "localhost:" + address().getPort() : ""; + } + + public Client client() { + return client; + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/RootHandler.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/RootHandler.java new file mode 100644 index 00000000000..175f1321637 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/RootHandler.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.embed; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; + +class RootHandler implements HttpHandler { + + private static final Logger log = ESLoggerFactory.getLogger(RootHandler.class.getName()); + + @Override + public void handle(HttpExchange http) throws IOException { + log.debug("Received query call..."); + + if ("HEAD".equals(http.getRequestMethod())) { + http.sendResponseHeaders(RestStatus.OK.getStatus(), 0); + http.close(); + return; + } + + fail(http, new UnsupportedOperationException("only HEAD allowed")); + } + + protected void fail(HttpExchange http, Exception ex) { + log.error("Caught error while transmitting response", ex); + try { + // the error conversion has failed, halt + if (http.getResponseHeaders().isEmpty()) { + http.sendResponseHeaders(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), -1); + } + } catch (IOException ioEx) { + log.error("Caught error 
while trying to catch error", ex); + } finally { + http.close(); + } + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/package-info.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/package-info.java new file mode 100644 index 00000000000..19cde8c678a --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/embed/package-info.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Support for testing in embedded mode. + */ +package org.elasticsearch.xpack.qa.sql.embed; \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ConnectionTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ConnectionTestCase.java new file mode 100644 index 00000000000..444142b7138 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ConnectionTestCase.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.elasticsearch.Version; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.SQLException; + +/** + * Test the jdbc {@link Connection} implementation. 
+ */ +public abstract class ConnectionTestCase extends JdbcIntegrationTestCase { + public void testConnectionProperties() throws SQLException { + try (Connection c = esJdbc()) { + assertFalse(c.isClosed()); + assertTrue(c.isReadOnly()); + DatabaseMetaData md = c.getMetaData(); + assertEquals(Version.CURRENT.major, md.getDatabaseMajorVersion()); + assertEquals(Version.CURRENT.minor, md.getDatabaseMinorVersion()); + } + } + + public void testIsValid() throws SQLException { + try (Connection c = esJdbc()) { + assertTrue(c.isValid(10)); + } + } + + /** + * Tests that we throw report no transaction isolation and throw sensible errors if you ask for any. + */ + public void testTransactionIsolation() throws Exception { + try (Connection c = esJdbc()) { + assertEquals(Connection.TRANSACTION_NONE, c.getTransactionIsolation()); + SQLException e = expectThrows(SQLException.class, () -> c.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE)); + assertEquals("Transactions not supported", e.getMessage()); + assertEquals(Connection.TRANSACTION_NONE, c.getTransactionIsolation()); + } + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java new file mode 100644 index 00000000000..81ad152a779 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; +import org.elasticsearch.xpack.sql.util.CollectionUtils; +import org.relique.io.TableReader; +import org.relique.jdbc.csv.CsvConnection; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.IOException; +import java.io.Reader; +import java.io.StringReader; +import java.io.StringWriter; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; +import java.util.Locale; +import java.util.Properties; + +import static org.hamcrest.Matchers.arrayWithSize; + +/** + * Tests comparing sql queries executed against our jdbc client + * with hard coded result sets. + */ +public abstract class CsvSpecTestCase extends SpecBaseIntegrationTestCase { + private final CsvTestCase testCase; + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = specParser(); + return CollectionUtils.combine( + readScriptSpec("/command.csv-spec", parser), + readScriptSpec("/fulltext.csv-spec", parser), + readScriptSpec("/agg.csv-spec", parser), + readScriptSpec("/columns.csv-spec", parser), + readScriptSpec("/datetime.csv-spec", parser) + ); + } + + public CsvSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber); + this.testCase = testCase; + } + + @Override + protected final void doTest() throws Throwable { + assertMatchesCsv(testCase.query, testName, testCase.expectedResults); + } + + private void assertMatchesCsv(String query, String csvTableName, String expectedResults) throws 
SQLException, IOException { + Properties csvProperties = new Properties(); + csvProperties.setProperty("charset", "UTF-8"); + csvProperties.setProperty("separator", "|"); + csvProperties.setProperty("trimValues", "true"); + Tuple resultsAndTypes = extractColumnTypes(expectedResults); + csvProperties.setProperty("columnTypes", resultsAndTypes.v2()); + Reader reader = new StringReader(resultsAndTypes.v1()); + TableReader tableReader = new TableReader() { + @Override + public Reader getReader(Statement statement, String tableName) throws SQLException { + return reader; + } + + @Override + public List getTableNames(Connection connection) throws SQLException { + throw new UnsupportedOperationException(); + } + }; + try (Connection csv = new CsvConnection(tableReader, csvProperties, "") {}; + Connection es = esJdbc()) { + + // pass the testName as table for debugging purposes (in case the underlying reader is missing) + ResultSet expected = csv.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY) + .executeQuery("SELECT * FROM " + csvTableName); + // trigger data loading for type inference + expected.beforeFirst(); + ResultSet elasticResults = executeJdbcQuery(es, query); + assertResults(expected, elasticResults); + } + } + + // make sure ES uses UTC (otherwise JDBC driver picks up the JVM timezone per spec/convention) + @Override + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); + return connectionProperties; + } + + private Tuple extractColumnTypes(String expectedResults) throws IOException { + try (StringReader reader = new StringReader(expectedResults)){ + try (BufferedReader bufferedReader = new BufferedReader(reader)){ + String header = bufferedReader.readLine(); + if (!header.contains(":")) { + // No type information in headers, no need to parse columns - trigger auto-detection + return new 
Tuple<>(expectedResults,""); + } + try (StringWriter writer = new StringWriter()) { + try (BufferedWriter bufferedWriter = new BufferedWriter(writer)){ + Tuple headerAndColumns = extractColumnTypesFromHeader(header); + bufferedWriter.write(headerAndColumns.v1()); + bufferedWriter.newLine(); + bufferedWriter.flush(); + // Copy the rest of test + Streams.copy(bufferedReader, bufferedWriter); + return new Tuple<>(writer.toString(), headerAndColumns.v2()); + } + } + } + } + } + + private Tuple extractColumnTypesFromHeader(String header) { + String[] columnTypes = Strings.delimitedListToStringArray(header, "|", " \t"); + StringBuilder types = new StringBuilder(); + StringBuilder columns = new StringBuilder(); + for(String column : columnTypes) { + String[] nameType = Strings.delimitedListToStringArray(column, ":"); + assertThat("If at least one column has a type associated with it, all columns should have types", nameType, arrayWithSize(2)); + if(types.length() > 0) { + types.append(","); + columns.append("|"); + } + columns.append(nameType[0]); + types.append(resolveColumnType(nameType[1])); + } + return new Tuple<>(columns.toString(), types.toString()); + } + + private String resolveColumnType(String type) { + switch (type.toLowerCase(Locale.ROOT)) { + case "s": return "string"; + case "b": return "boolean"; + case "i": return "integer"; + case "l": return "long"; + case "f": return "float"; + case "d": return "double"; + default: return type; + } + } + + static CsvSpecParser specParser() { + return new CsvSpecParser(); + } + + private static class CsvSpecParser implements Parser { + private final StringBuilder data = new StringBuilder(); + private CsvTestCase testCase; + + @Override + public Object parse(String line) { + // beginning of the section + if (testCase == null) { + // pick up the query + testCase = new CsvTestCase(); + testCase.query = line.endsWith(";") ? 
line.substring(0, line.length() - 1) : line; + } + else { + // read data + if (line.startsWith(";")) { + testCase.expectedResults = data.toString(); + // clean-up and emit + CsvTestCase result = testCase; + testCase = null; + data.setLength(0); + return result; + } + else { + data.append(line); + data.append("\r\n"); + } + } + + return null; + } + } + + protected static class CsvTestCase { + String query; + String expectedResults; + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java new file mode 100644 index 00000000000..b3724fbcede --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.http.HttpHost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.List; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; + +public class DataLoader { + + public static void main(String[] args) throws Exception { + try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) { + loadDatasetIntoEs(client); + Loggers.getLogger(DataLoader.class).info("Data loaded"); + } + } + + protected static void loadDatasetIntoEs(RestClient client) throws Exception { + XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); + createIndex.startObject("settings"); + { + createIndex.field("number_of_shards", 1); + } + createIndex.endObject(); + createIndex.startObject("mappings"); + { + createIndex.startObject("emp"); + { + createIndex.startObject("properties"); + { + createIndex.startObject("emp_no").field("type", "integer").endObject(); + createIndex.startObject("first_name").field("type", "text").endObject(); + createIndex.startObject("last_name").field("type", "text").endObject(); + createIndex.startObject("gender").field("type", "keyword").endObject(); + createIndex.startObject("birth_date").field("type", "date").endObject(); + createIndex.startObject("hire_date").field("type", "date").endObject(); + 
createIndex.startObject("salary").field("type", "integer").endObject(); + createIndex.startObject("languages").field("type", "byte").endObject(); + } + createIndex.endObject(); + } + createIndex.endObject(); + } + createIndex.endObject().endObject(); + client.performRequest("PUT", "/test_emp", emptyMap(), new StringEntity(createIndex.string(), ContentType.APPLICATION_JSON)); + + StringBuilder bulk = new StringBuilder(); + csvToLines("employees", (titles, fields) -> { + bulk.append("{\"index\":{}}\n"); + bulk.append('{'); + for (int f = 0; f < fields.size(); f++) { + if (f != 0) { + bulk.append(','); + } + bulk.append('"').append(titles.get(f)).append("\":\"").append(fields.get(f)).append('"'); + } + bulk.append("}\n"); + }); + client.performRequest("POST", "/test_emp/emp/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + } + + private static void csvToLines(String name, CheckedBiConsumer, List, Exception> consumeLine) throws Exception { + String location = "/" + name + ".csv"; + URL dataSet = SqlSpecTestCase.class.getResource(location); + if (dataSet == null) { + throw new IllegalArgumentException("Can't find [" + location + "]"); + } + + try (BufferedReader reader = new BufferedReader(new InputStreamReader(readFromJarUrl(dataSet), StandardCharsets.UTF_8))) { + String titlesString = reader.readLine(); + if (titlesString == null) { + throw new IllegalArgumentException("[" + location + "] must contain at least a title row"); + } + List titles = Arrays.asList(titlesString.split(",")); + + String line; + while ((line = reader.readLine()) != null) { + consumeLine.accept(titles, Arrays.asList(line.split(","))); + } + } + } + + @SuppressForbidden(reason = "test reads from jar") + public static InputStream readFromJarUrl(URL source) throws IOException { + return source.openStream(); + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DatabaseMetaDataTestCase.java 
b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DatabaseMetaDataTestCase.java new file mode 100644 index 00000000000..f30722f6c40 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DatabaseMetaDataTestCase.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.elasticsearch.common.CheckedSupplier; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; + +import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert.assertResultSets; + +/** + * Tests for our implementation of {@link DatabaseMetaData}. + */ +public class DatabaseMetaDataTestCase extends JdbcIntegrationTestCase { + /** + * We do not support procedures so we return an empty set for + * {@link DatabaseMetaData#getProcedures(String, String, String)}. + */ + public void testGetProcedures() throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_metadata_get_procedures.sql'"); + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock"); + assertResultSets(expected, es.getMetaData().getProcedures( + randomBoolean() ? null : randomAlphaOfLength(5), + randomBoolean() ? null : randomAlphaOfLength(5), + randomBoolean() ? null : randomAlphaOfLength(5))); + } + } + + /** + * We do not support procedures so we return an empty set for + * {@link DatabaseMetaData#getProcedureColumns(String, String, String, String)}. 
+ */ + public void testGetProcedureColumns() throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_metadata_get_procedure_columns.sql'"); + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock"); + assertResultSets(expected, es.getMetaData().getProcedureColumns( + randomBoolean() ? null : randomAlphaOfLength(5), + randomBoolean() ? null : randomAlphaOfLength(5), + randomBoolean() ? null : randomAlphaOfLength(5), + randomBoolean() ? null : randomAlphaOfLength(5))); + } + } + + public void testGetTables() throws Exception { + index("test1", body -> body.field("name", "bob")); + index("test2", body -> body.field("name", "bob")); + + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_metadata_get_tables.sql'"); + + CheckedSupplier all = () -> + h2.createStatement().executeQuery("SELECT '" + clusterName() + "' AS TABLE_CAT, * FROM mock"); + assertResultSets(all.get(), es.getMetaData().getTables("%", "%", "%", null)); + assertResultSets(all.get(), es.getMetaData().getTables("%", "%", "te%", null)); + assertResultSets( + h2.createStatement().executeQuery("SELECT '" + clusterName() + "' AS TABLE_CAT, * FROM mock WHERE TABLE_NAME = 'test1'"), + es.getMetaData().getTables("%", "%", "test1.d%", null)); + } + } + + public void testColumns() throws Exception { + index("test1", body -> body.field("name", "bob")); + index("test2", body -> { + body.field("number", 7); + body.field("date", "2017-01-01T01:01:01Z"); + body.field("float", 42.0); + }); + + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_metadata_get_columns.sql'"); + + ResultSet expected = h2.createStatement().executeQuery("SELECT '" + clusterName() + "' AS TABLE_CAT, * FROM mock"); 
+ assertResultSets(expected, es.getMetaData().getColumns("%", "%", "%", null)); + } + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java new file mode 100644 index 00000000000..22924e3e648 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.List; + +@TestLogging(JdbcTestUtils.SQL_TRACE) +public class DebugCsvSpec extends CsvSpecTestCase { + + @ParametersFactory(shuffle = false, argumentFormatting = SqlSpecTestCase.PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = specParser(); + return readScriptSpec("/debug.csv-spec", parser); + } + + public DebugCsvSpec(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, testCase); + } + + @Override + protected void assertResults(ResultSet expected, ResultSet elastic) throws SQLException { + Logger log = logEsResultSet() ? 
logger : null; + + // + // uncomment this to printout the result set and create new CSV tests + // + //JdbcTestUtils.logResultSetMetadata(elastic, log); + //JdbcTestUtils.logResultSetData(elastic, log); + JdbcAssert.assertResultSets(expected, elastic, log); + } + + @Override + protected boolean logEsResultSet() { + return true; + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugSqlSpec.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugSqlSpec.java new file mode 100644 index 00000000000..1fd4a98bba8 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugSqlSpec.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.util.List; + +@TestLogging(JdbcTestUtils.SQL_TRACE) +public class DebugSqlSpec extends SqlSpecTestCase { + @ParametersFactory(shuffle = false, argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = specParser(); + return readScriptSpec("/debug.sql-spec", parser); + } + + public DebugSqlSpec(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber, query); + } + + @Override + protected boolean logEsResultSet() { + return true; + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java new file mode 100644 index 00000000000..7e995e69363 --- /dev/null +++ 
b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import java.sql.Connection; +import java.sql.SQLException; + +/** + * Tests for exceptions and their messages. + */ +public class ErrorsTestCase extends JdbcIntegrationTestCase implements org.elasticsearch.xpack.qa.sql.ErrorsTestCase { + @Override + public void testSelectInvalidSql() throws Exception { + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT * FRO").executeQuery()); + assertEquals("Found 1 problem(s)\nline 1:8: Cannot determine columns for *", e.getMessage()); + } + } + + @Override + public void testSelectFromMissingIndex() throws SQLException { + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT * FROM test").executeQuery()); + assertEquals("Found 1 problem(s)\nline 1:15: Unknown index [test]", e.getMessage()); + } + } + + @Override + public void testSelectMissingField() throws Exception { + index("test", body -> body.field("test", "test")); + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT missing FROM test").executeQuery()); + assertEquals("Found 1 problem(s)\nline 1:8: Unknown column [missing]", e.getMessage()); + } + } + + @Override + public void testSelectMissingFunction() throws Exception { + index("test", body -> body.field("foo", 1)); + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT missing(foo) FROM test").executeQuery()); + assertEquals("Found 1 problem(s)\nline 1:8: Unknown function 
[missing]", e.getMessage()); + } + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java new file mode 100644 index 00000000000..8d395c05709 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts; + +/** + * Tests for setting {@link Statement#setFetchSize(int)} and + * {@link ResultSet#getFetchSize()}. + */ +public class FetchSizeTestCase extends JdbcIntegrationTestCase { + @Before + public void createTestIndex() throws IOException { + StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < 20; i++) { + bulk.append("{\"index\":{}}\n"); + bulk.append("{\"test_field\":" + i + "}\n"); + } + client().performRequest("PUT", "/test/doc/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + } + + /** + * Test for {@code SELECT} that is implemented as a scroll query. + * In this case the fetch size should be entirely respected. 
+ */ + public void testScroll() throws SQLException { + try (Connection c = esJdbc(); + Statement s = c.createStatement()) { + s.setFetchSize(4); + try (ResultSet rs = s.executeQuery("SELECT * FROM test ORDER BY test_field ASC")) { + for (int i = 0; i < 20; i++) { + assertEquals(4, rs.getFetchSize()); + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + } + assertFalse(rs.next()); + } + } + } + + /** + * Test for {@code SELECT} that is implemented as a scroll query. + * In this test we don't retrieve all records and rely on close() to clean the cursor + */ + public void testIncompleteScroll() throws Exception { + try (Connection c = esJdbc(); + Statement s = c.createStatement()) { + s.setFetchSize(4); + try (ResultSet rs = s.executeQuery("SELECT * FROM test ORDER BY test_field ASC")) { + for (int i = 0; i < 10; i++) { + assertEquals(4, rs.getFetchSize()); + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + } + assertTrue(rs.next()); + } + } + assertNoSearchContexts(); + } + + + /** + * Test for {@code SELECT} that is implemented as an aggregation. + * In this case the fetch size should be entirely ignored. + */ + public void testAggregation() throws SQLException { + try (Connection c = esJdbc(); + Statement s = c.createStatement()) { + s.setFetchSize(4); + try (ResultSet rs = s.executeQuery("SELECT test_field, COUNT(*) FROM test GROUP BY test_field")) { + for (int i = 0; i < 20; i++) { + assertEquals(20, rs.getFetchSize()); + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + } + assertFalse(rs.next()); + } + } + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java new file mode 100644 index 00000000000..fbf0944471f --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.logging.log4j.Logger; + +import java.sql.JDBCType; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +import java.util.Locale; +import java.util.TimeZone; + +import static java.lang.String.format; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class JdbcAssert { + private static final Calendar UTC_CALENDAR = Calendar.getInstance(TimeZone.getTimeZone("UTC"), Locale.ROOT); + + public static void assertResultSets(ResultSet expected, ResultSet actual) throws SQLException { + assertResultSets(expected, actual, null); + } + + public static void assertResultSets(ResultSet expected, ResultSet actual, Logger logger) throws SQLException { + assertResultSetMetadata(expected, actual, logger); + assertResultSetData(expected, actual, logger); + } + + public static void assertResultSetMetadata(ResultSet expected, ResultSet actual) throws SQLException { + assertResultSetMetadata(expected, actual, null); + } + + public static void assertResultSetMetadata(ResultSet expected, ResultSet actual, Logger logger) throws SQLException { + ResultSetMetaData expectedMeta = expected.getMetaData(); + ResultSetMetaData actualMeta = actual.getMetaData(); + + if (logger != null) { + JdbcTestUtils.logResultSetMetadata(actual, logger); + } + + if (expectedMeta.getColumnCount() != actualMeta.getColumnCount()) { + List expectedCols = new ArrayList<>(); + for (int i = 1; i <= expectedMeta.getColumnCount(); i++) { + expectedCols.add(expectedMeta.getColumnName(i)); + + } + + List actualCols = new ArrayList<>(); + for (int i = 1; i <= 
actualMeta.getColumnCount(); i++) { + actualCols.add(actualMeta.getColumnName(i)); + } + + assertEquals(format(Locale.ROOT, "Different number of columns returned (expected %d but was %d);", + expectedMeta.getColumnCount(), actualMeta.getColumnCount()), + expectedCols.toString(), actualCols.toString()); + } + + for (int column = 1; column <= expectedMeta.getColumnCount(); column++) { + String expectedName = expectedMeta.getColumnName(column); + String actualName = actualMeta.getColumnName(column); + + if (!expectedName.equals(actualName)) { + // to help debugging, indicate the previous column (which also happened to match and thus was correct) + String expectedSet = expectedName; + String actualSet = actualName; + if (column > 1) { + expectedSet = expectedMeta.getColumnName(column - 1) + "," + expectedName; + actualSet = actualMeta.getColumnName(column - 1) + "," + actualName; + } + + assertEquals("Different column name [" + column + "]", expectedSet, actualSet); + } + + // use the type not the name (timestamp with timezone returns spaces for example) + int expectedType = expectedMeta.getColumnType(column); + int actualType = actualMeta.getColumnType(column); + + // since H2 cannot use a fixed timezone, the data is stored in UTC (and thus with timezone) + if (expectedType == Types.TIMESTAMP_WITH_TIMEZONE) { + expectedType = Types.TIMESTAMP; + } + assertEquals("Different column type for column [" + expectedName + "] (" + JDBCType.valueOf(expectedType) + " != " + + JDBCType.valueOf(actualType) + ")", expectedType, actualType); + } + } + + public static void assertResultSetData(ResultSet expected, ResultSet actual, Logger logger) throws SQLException { + ResultSetMetaData metaData = expected.getMetaData(); + int columns = metaData.getColumnCount(); + + long count = 0; + for (count = 0; expected.next(); count++) { + assertTrue("Expected more data but no more entries found after [" + count + "]", actual.next()); + + if (logger != null) { + 
logger.info(JdbcTestUtils.resultSetCurrentData(actual)); + } + + for (int column = 1; column <= columns; column++) { + Object expectedObject = expected.getObject(column); + Object actualObject = actual.getObject(column); + int type = metaData.getColumnType(column); + + String msg = "Different result for column [" + metaData.getColumnName(column) + "], entry [" + count + "]"; + + if (type == Types.TIMESTAMP || type == Types.TIMESTAMP_WITH_TIMEZONE) { + assertEquals(getTime(expected, column), getTime(actual, column)); + } + + else if (type == Types.DOUBLE) { + // the 1d/1f difference is used due to rounding/flooring + assertEquals(msg, (double) expectedObject, (double) actualObject, 1d); + } else if (type == Types.FLOAT) { + assertEquals(msg, (float) expectedObject, (float) actualObject, 1f); + } else { + assertEquals(msg, expectedObject, actualObject); + } + } + } + + if (actual.next()) { + fail("Elasticsearch [" + actual + "] still has data after [" + count + "] entries:\n" + + JdbcTestUtils.resultSetCurrentData(actual)); + } + } + + private static Object getTime(ResultSet rs, int column) throws SQLException { + return rs.getTime(column, UTC_CALENDAR).getTime(); + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java new file mode 100644 index 00000000000..ee3192c9f2f --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.qa.sql.embed.EmbeddedJdbcServer; +import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; +import org.elasticsearch.xpack.sql.jdbc.jdbcx.JdbcDataSource; +import org.joda.time.DateTimeZone; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.TimeZone; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts; + +public abstract class JdbcIntegrationTestCase extends ESRestTestCase { + /** + * Should the HTTP server that serves SQL be embedded in the test + * process (true) or should the JDBC driver connect to Elasticsearch + * running at {@code tests.rest.cluster}. Note that to use embedded + * HTTP you have to have Elasticsearch's transport protocol open on + * port 9300 but the Elasticsearch running there does not need to have + * the SQL plugin installed. 
Note also that embedded HTTP is faster + * but is not canonical because it runs against a different HTTP server + * then JDBC will use in production. Gradle always uses non-embedded. + */ + protected static final boolean EMBED_SQL = Booleans.parseBoolean(System.getProperty("tests.embed.sql", "false")); + + @ClassRule + public static final EmbeddedJdbcServer EMBEDDED_SERVER = EMBED_SQL ? new EmbeddedJdbcServer() : null; + + @After + public void checkSearchContent() throws Exception { + // Some context might linger due to fire and forget nature of scroll cleanup + assertNoSearchContexts(); + } + + /** + * Read an address for Elasticsearch suitable for the JDBC driver from the system properties. + */ + public static String elasticsearchAddress() { + String cluster = System.getProperty("tests.rest.cluster"); + // JDBC only supports a single node at a time so we just give it one. + return cluster.split(",")[0]; + /* This doesn't include "jdbc:es://" because we want the example in + * esJdbc to be obvious and because we want to use getProtocol to add + * https if we are running against https. */ + } + + public Connection esJdbc() throws SQLException { + if (EMBED_SQL) { + return EMBEDDED_SERVER.connection(connectionProperties()); + } + return randomBoolean() ? 
useDriverManager() : useDataSource(); + } + + protected Connection useDriverManager() throws SQLException { + String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); + // tag::connect-dm + String address = "jdbc:es://" + elasticsearchAddress; // <1> + Properties connectionProperties = connectionProperties(); // <2> + Connection connection = DriverManager.getConnection(address, connectionProperties); + // end::connect-dm + assertNotNull("The timezone should be specified", connectionProperties.getProperty(JdbcConfiguration.TIME_ZONE)); + return connection; + } + + protected Connection useDataSource() throws SQLException { + String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); + // tag::connect-ds + JdbcDataSource dataSource = new JdbcDataSource(); + String address = "jdbc:es://" + elasticsearchAddress; // <1> + dataSource.setUrl(address); + Properties connectionProperties = connectionProperties(); // <2> + dataSource.setProperties(connectionProperties); + Connection connection = dataSource.getConnection(); + // end::connect-ds + assertNotNull("The timezone should be specified", connectionProperties.getProperty(JdbcConfiguration.TIME_ZONE)); + return connection; + } + + public static void index(String index, CheckedConsumer body) throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder().startObject(); + body.accept(builder); + builder.endObject(); + HttpEntity doc = new StringEntity(builder.string(), ContentType.APPLICATION_JSON); + client().performRequest("PUT", "/" + index + "/doc/1", singletonMap("refresh", "true"), doc); + } + + protected String clusterName() { + try { + String response = EntityUtils.toString(client().performRequest("GET", "/").getEntity()); + return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false).get("cluster_name").toString(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * The properties used to build the connection. 
+ */ + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.put(JdbcConfiguration.TIME_ZONE, randomKnownTimeZone()); + return connectionProperties; + } + + public static String randomKnownTimeZone() { + // We use system default timezone for the connection that is selected randomly by TestRuleSetupAndRestoreClassEnv + // from all available JDK timezones. While Joda and JDK are generally in sync, some timezones might not be known + // to the current version of Joda and in this case the test might fail. To avoid that, we specify a timezone + // known for both Joda and JDK + Set timeZones = new HashSet<>(DateTimeZone.getAvailableIDs()); + timeZones.retainAll(Arrays.asList(TimeZone.getAvailableIDs())); + List ids = new ArrayList<>(timeZones); + Collections.sort(ids); + return randomFrom(ids); + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java new file mode 100644 index 00000000000..5062525f2b3 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.logging.log4j.Logger; + +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; + +public abstract class JdbcTestUtils { + + public static final String SQL_TRACE = "org.elasticsearch.xpack.sql:TRACE"; + + public static void logResultSetMetadata(ResultSet rs, Logger logger) throws SQLException { + ResultSetMetaData metaData = rs.getMetaData(); + // header + StringBuilder sb = new StringBuilder(); + StringBuilder column = new StringBuilder(); + + int columns = metaData.getColumnCount(); + for (int i = 1; i <= columns; i++) { + if (i > 1) { + sb.append(" | "); + } + column.setLength(0); + column.append(metaData.getColumnName(i)); + column.append("("); + column.append(metaData.getColumnTypeName(i)); + column.append(")"); + + sb.append(trimOrPad(column)); + } + + int l = sb.length(); + logger.info(sb.toString()); + sb.setLength(0); + for (int i = 0; i < l; i++) { + sb.append("-"); + } + + logger.info(sb.toString()); + } + + private static final int MAX_WIDTH = 20; + + public static void logResultSetData(ResultSet rs, Logger log) throws SQLException { + ResultSetMetaData metaData = rs.getMetaData(); + StringBuilder sb = new StringBuilder(); + StringBuilder column = new StringBuilder(); + + int columns = metaData.getColumnCount(); + + while (rs.next()) { + sb.setLength(0); + for (int i = 1; i <= columns; i++) { + column.setLength(0); + if (i > 1) { + sb.append(" | "); + } + sb.append(trimOrPad(column.append(rs.getString(i)))); + } + log.info(sb); + } + } + + public static String resultSetCurrentData(ResultSet rs) throws SQLException { + ResultSetMetaData metaData = rs.getMetaData(); + StringBuilder column = new StringBuilder(); + + int columns = metaData.getColumnCount(); + + StringBuilder sb = new StringBuilder(); + for (int i = 1; i <= columns; i++) { + column.setLength(0); + if (i > 1) { + sb.append(" | "); + } + 
sb.append(trimOrPad(column.append(rs.getString(i)))); + } + return sb.toString(); + } + + private static StringBuilder trimOrPad(StringBuilder buffer) { + if (buffer.length() > MAX_WIDTH) { + buffer.setLength(MAX_WIDTH - 1); + buffer.append("~"); + } + else { + for (int i = buffer.length(); i < MAX_WIDTH; i++) { + buffer.append(" "); + } + } + return buffer; + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/LocalH2.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/LocalH2.java new file mode 100644 index 00000000000..796682976e7 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/LocalH2.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.xpack.sql.client.shared.SuppressForbidden; +import org.junit.rules.ExternalResource; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Calendar; +import java.util.Locale; +import java.util.Properties; + +public class LocalH2 extends ExternalResource implements CheckedSupplier { + private final Logger logger = Loggers.getLogger(getClass()); + + static { + try { + // Initialize h2 so we can use it for testing + Class.forName("org.h2.Driver"); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates an in memory anonymous database and returns the only connection to it. + * Closing the connection will remove the db. 
+ */ + public static Connection anonymousDb() throws SQLException { + return DriverManager.getConnection("jdbc:h2:mem:;DATABASE_TO_UPPER=false;ALIAS_COLUMN_NAME=true"); + } + + private static final Properties DEFAULTS = new Properties(); + + private final String url; + // H2 in-memory will keep the db alive as long as this connection is opened + private Connection keepAlive; + Locale locale; + + /* + * The syntax on the connection string is fairly particular: + * mem:; creates an anonymous database in memory. The `;` is + * technically the separator that comes after the name. + * DATABASE_TO_UPPER=false turns *off* H2's Oracle-like habit + * of upper-casing everything that isn't quoted. + * ALIAS_COLUMN_NAME=true turns *on* returning alias names in + * result set metadata which is what most DBs do except + * for MySQL and, by default, H2. Our jdbc driver does it. + */ + // http://www.h2database.com/html/features.html#in_memory_databases + public LocalH2() { + this.url = "jdbc:h2:mem:essql;DATABASE_TO_UPPER=false;ALIAS_COLUMN_NAME=true"; + } + + @Override + @SuppressForbidden(reason = "H2 gets really confused with non Gregorian calendars") + protected void before() throws Throwable { + if ("japanese".equals(Calendar.getInstance().getCalendarType())) { + logger.info("Japanese calendar is detected. 
Overriding locale."); + locale = Locale.getDefault(); + Locale.setDefault(locale.stripExtensions()); // removes the calendar setting + assert "gregory".equals(Calendar.getInstance().getCalendarType()); + } + keepAlive = get(); + keepAlive.createStatement().execute("RUNSCRIPT FROM 'classpath:/setup_test_emp.sql'"); + } + + @Override + protected void after() { + try { + keepAlive.close(); + } catch (SQLException ex) { + // close + } + if (locale != null) { + logger.info("Restoring locale."); + Locale.setDefault(locale); + locale = null; + } + } + + @Override + public Connection get() throws SQLException { + return DriverManager.getConnection(url, DEFAULTS); + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ShowTablesTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ShowTablesTestCase.java new file mode 100644 index 00000000000..4ddc2338c61 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ShowTablesTestCase.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.util.Locale; + +import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert.assertResultSets; + +public class ShowTablesTestCase extends JdbcIntegrationTestCase { + public void testShowTablesWithoutAnyIndexes() throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_show_tables.sql'"); + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock"); + assertResultSets(expected, es.createStatement().executeQuery("SHOW TABLES")); + } + } + + public void testShowTablesWithManyIndices() throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_show_tables.sql'"); + int indices = between(2, 20); + for (int i = 0; i < indices; i++) { + String index = String.format(Locale.ROOT, "test%02d", i); + index(index, builder -> builder.field("name", "bob")); + h2.createStatement().executeUpdate("INSERT INTO mock VALUES ('" + index + "');"); + } + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock ORDER BY table"); + assertResultSets(expected, es.createStatement().executeQuery("SHOW TABLES")); + } + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java new file mode 100644 index 00000000000..adabf47809b --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; + +public class SimpleExampleTestCase extends JdbcIntegrationTestCase { + public void testSimpleExample() throws Exception { + index("library", builder -> { + builder.field("name", "Don Quixote"); + builder.field("page_count", 1072); + }); + try (Connection connection = esJdbc()) { + // tag::simple_example + try (Statement statement = connection.createStatement(); + ResultSet results = statement.executeQuery( + "SELECT name, page_count FROM library ORDER BY page_count DESC LIMIT 1")) { + assertTrue(results.next()); + assertEquals("Don Quixote", results.getString(1)); + assertEquals(1072, results.getInt(2)); + assertFalse(results.next()); + } + // end::simple_example + } + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java new file mode 100644 index 00000000000..3b8ff080c23 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.junit.AfterClass; +import org.junit.Before; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Tests that compare the Elasticsearch JDBC client to some other JDBC client + * after loading a specific set of test data. + */ +public abstract class SpecBaseIntegrationTestCase extends JdbcIntegrationTestCase { + protected static final String PARAM_FORMATTING = "%2$s.test%3$s"; + + protected final String fileName; + protected final String groupName; + protected final String testName; + protected final Integer lineNumber; + + public SpecBaseIntegrationTestCase(String fileName, String groupName, String testName, Integer lineNumber) { + this.fileName = fileName; + this.groupName = groupName; + this.testName = testName; + this.lineNumber = lineNumber; + } + + @Before + public void setupTestDataIfNeeded() throws Exception { + if (client().performRequest("HEAD", "/test_emp").getStatusLine().getStatusCode() == 404) { + DataLoader.loadDatasetIntoEs(client()); + } + } + + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @AfterClass + public static void wipeTestData() throws IOException { + if (false == EMBED_SQL) { + try { + adminClient().performRequest("DELETE", "/*"); + } catch (ResponseException e) { + // 404 here just means we had no indexes + if (e.getResponse().getStatusLine().getStatusCode() 
!= 404) { + throw e; + } + } + } + } + + public final void test() throws Throwable { + try { + doTest(); + } catch (AssertionError ae) { + throw reworkException(ae); + } + } + + /** + * Implementations should pay attention on using + * {@link #executeJdbcQuery(Connection, String)} (typically for + * ES connections) and {@link #assertResults(ResultSet, ResultSet)} + * which takes into account logging/debugging results (through + * {@link #logEsResultSet()}. + */ + protected abstract void doTest() throws Throwable; + + protected ResultSet executeJdbcQuery(Connection con, String query) throws SQLException { + Statement statement = con.createStatement(); + statement.setFetchSize(between(1, 500)); + return statement.executeQuery(query); + } + + protected boolean logEsResultSet() { + return false; + } + + protected void assertResults(ResultSet expected, ResultSet elastic) throws SQLException { + Logger log = logEsResultSet() ? logger : null; + JdbcAssert.assertResultSets(expected, elastic, log); + } + + private Throwable reworkException(Throwable th) { + StackTraceElement[] stackTrace = th.getStackTrace(); + StackTraceElement[] redone = new StackTraceElement[stackTrace.length + 1]; + System.arraycopy(stackTrace, 0, redone, 1, stackTrace.length); + redone[0] = new StackTraceElement(getClass().getName(), groupName + ".test" + testName, fileName, lineNumber); + + th.setStackTrace(redone); + return th; + } + + // + // spec reader + // + + // returns source file, groupName, testName, its line location, and the custom object (based on each test parser) + protected static List readScriptSpec(String url, Parser parser) throws Exception { + URL source = SpecBaseIntegrationTestCase.class.getResource(url); + Objects.requireNonNull(source, "Cannot find resource " + url); + + String fileName = source.getFile().substring(source.getFile().lastIndexOf("/") + 1); + String groupName = fileName.substring(fileName.lastIndexOf('/') + 1, fileName.lastIndexOf(".")); + + Map testNames = new 
LinkedHashMap<>(); + List testCases = new ArrayList<>(); + + String testName = null; + try (BufferedReader reader = new BufferedReader(new InputStreamReader(readFromJarUrl(source), StandardCharsets.UTF_8))) { + String line; + int lineNumber = 1; + while ((line = reader.readLine()) != null) { + line = line.trim(); + // ignore comments + if (!line.isEmpty() && !line.startsWith("//")) { + // parse test name + if (testName == null) { + if (testNames.keySet().contains(line)) { + throw new IllegalStateException("Duplicate test name '" + line + "' at line " + lineNumber + + " (previously seen at line " + testNames.get(line) + ")"); + } else { + testName = Strings.capitalize(line); + testNames.put(testName, Integer.valueOf(lineNumber)); + } + } else { + Object result = parser.parse(line); + // only if the parser is ready, add the object - otherwise keep on serving it lines + if (result != null) { + testCases.add(new Object[] { fileName, groupName, testName, Integer.valueOf(lineNumber), result }); + testName = null; + } + } + } + lineNumber++; + } + if (testName != null) { + throw new IllegalStateException("Read a test without a body at the end of [" + fileName + "]."); + } + } + assertNull("Cannot find spec for test " + testName, testName); + + return testCases; + } + + public interface Parser { + Object parse(String line); + } + + @SuppressForbidden(reason = "test reads from jar") + public static InputStream readFromJarUrl(URL source) throws IOException { + return source.openStream(); + } +} diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java new file mode 100644 index 00000000000..3fd9f0b7712 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; +import org.elasticsearch.xpack.sql.util.CollectionUtils; +import org.junit.ClassRule; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.util.List; +import java.util.Properties; + +/** + * Tests comparing sql queries executed against our jdbc client + * with those executed against H2's jdbc client. + */ +public abstract class SqlSpecTestCase extends SpecBaseIntegrationTestCase { + private String query; + + @ClassRule + public static LocalH2 H2 = new LocalH2(); + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = specParser(); + return CollectionUtils.combine( + readScriptSpec("/select.sql-spec", parser), + readScriptSpec("/filter.sql-spec", parser), + readScriptSpec("/datetime.sql-spec", parser), + readScriptSpec("/math.sql-spec", parser), + readScriptSpec("/agg.sql-spec", parser), + readScriptSpec("/arithmetic.sql-spec", parser) + ); + } + + // TODO: add tests for nested docs when interplug communication is enabled + // "DESCRIBE emp.emp", + // "SELECT dep FROM emp.emp", + // "SELECT dep.dept_name, first_name, last_name FROM emp.emp WHERE emp_no = 10020", + // "SELECT first_name f, last_name l, dep.from_date FROM emp.emp WHERE dep.dept_name = 'Production' ORDER BY dep.from_date", + // "SELECT first_name f, last_name l, YEAR(dep.from_date) start " + // + "FROM emp.emp WHERE dep.dept_name = 'Production' AND tenure > 30 ORDER BY start" + + private static class SqlSpecParser implements Parser { + @Override + public Object parse(String line) { + return line.endsWith(";") ? 
line.substring(0, line.length() - 1) : line; + } + } + + static SqlSpecParser specParser() { + return new SqlSpecParser(); + } + + public SqlSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber); + this.query = query; + } + + @Override + protected final void doTest() throws Throwable { + try (Connection h2 = H2.get(); + Connection es = esJdbc()) { + + ResultSet expected, elasticResults; + expected = executeJdbcQuery(h2, query); + elasticResults = executeJdbcQuery(es, query); + + assertResults(expected, elasticResults); + } + } + + // TODO: use UTC for now until deciding on a strategy for handling date extraction + @Override + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); + return connectionProperties; + } +} \ No newline at end of file diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/package-info.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/package-info.java new file mode 100644 index 00000000000..1825d9033c8 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Support for integration tests for the Elasticsearch SQL JDBC client + * and integration tests shared between multiple qa projects. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; diff --git a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java new file mode 100644 index 00000000000..5f390b0d73e --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java @@ -0,0 +1,378 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.rest; + +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.NotEqualMessageBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.qa.sql.ErrorsTestCase; +import org.hamcrest.Matcher; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.TreeMap; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.Collections.unmodifiableMap; +import static org.hamcrest.Matchers.containsString; + +/** + * Integration test for the rest sql action. The one that speaks json directly to a + * user rather than to the JDBC driver or CLI. 
+ */ +public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTestCase { + /** + * Builds that map that is returned in the header for each column. + */ + public static Map columnInfo(String name, String type) { + Map column = new HashMap<>(); + column.put("name", name); + column.put("type", type); + return unmodifiableMap(column); + } + + public void testBasicQuery() throws IOException { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"test\":\"test\"}\n"); + bulk.append("{\"index\":{\"_id\":\"2\"}}\n"); + bulk.append("{\"test\":\"test\"}\n"); + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + Map expected = new HashMap<>(); + expected.put("columns", singletonList(columnInfo("test", "text"))); + expected.put("rows", Arrays.asList(singletonList("test"), singletonList("test"))); + expected.put("size", 2); + assertResponse(expected, runSql("SELECT * FROM test")); + } + + public void testNextPage() throws IOException { + StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < 20; i++) { + bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n"); + bulk.append("{\"text\":\"text" + i + "\", \"number\":" + i + "}\n"); + } + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + String request = "{\"query\":\"SELECT text, number, SIN(number) AS s FROM test ORDER BY number\", \"fetch_size\":2}"; + + String cursor = null; + for (int i = 0; i < 20; i += 2) { + Map response; + if (i == 0) { + response = runSql(new StringEntity(request, ContentType.APPLICATION_JSON)); + } else { + response = runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON)); + } + + Map expected = new HashMap<>(); + if (i == 0) { + expected.put("columns", Arrays.asList( + 
columnInfo("text", "text"), + columnInfo("number", "long"), + columnInfo("s", "double"))); + } + expected.put("rows", Arrays.asList( + Arrays.asList("text" + i, i, Math.sin(i)), + Arrays.asList("text" + (i + 1), i + 1, Math.sin(i + 1)))); + expected.put("size", 2); + cursor = (String) response.remove("cursor"); + assertResponse(expected, response); + assertNotNull(cursor); + } + Map expected = new HashMap<>(); + expected.put("size", 0); + expected.put("rows", emptyList()); + assertResponse(expected, runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON))); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/2074") + public void testTimeZone() throws IOException { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"test\":\"2017-07-27 00:00:00\"}\n"); + bulk.append("{\"index\":{\"_id\":\"2\"}}\n"); + bulk.append("{\"test\":\"2017-07-27 01:00:00\"}\n"); + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + Map expected = new HashMap<>(); + expected.put("columns", singletonMap("test", singletonMap("type", "text"))); + expected.put("rows", Arrays.asList(singletonMap("test", "test"), singletonMap("test", "test"))); + expected.put("size", 2); + + // Default TimeZone is UTC + assertResponse(expected, runSql( + new StringEntity("{\"query\":\"SELECT DAY_OF_YEAR(test), COUNT(*) FROM test\"}", ContentType.APPLICATION_JSON))); + } + + @Override + public void testSelectInvalidSql() { + expectBadRequest(() -> runSql("SELECT * FRO"), containsString("1:8: Cannot determine columns for *")); + } + + @Override + public void testSelectFromMissingIndex() { + expectBadRequest(() -> runSql("SELECT * FROM missing"), containsString("1:15: Unknown index [missing]")); + } + + @Override + public void testSelectMissingField() throws IOException { + StringBuilder bulk = 
new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"test\":\"test\"}\n"); + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + expectBadRequest(() -> runSql("SELECT foo FROM test"), containsString("1:8: Unknown column [foo]")); + } + + @Override + public void testSelectMissingFunction() throws Exception { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"foo\":1}\n"); + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + expectBadRequest(() -> runSql("SELECT missing(foo) FROM test"), containsString("1:8: Unknown function [missing]")); + } + + private void expectBadRequest(ThrowingRunnable code, Matcher errorMessageMatcher) { + ResponseException e = expectThrows(ResponseException.class, code); + assertEquals(e.getMessage(), 400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), errorMessageMatcher); + } + + private Map runSql(String sql) throws IOException { + return runSql(sql, ""); + } + + private Map runSql(String sql, String suffix) throws IOException { + return runSql(suffix, new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON)); + } + + private Map runSql(HttpEntity sql) throws IOException { + return runSql("", sql); + } + + private Map runSql(String suffix, HttpEntity sql) throws IOException { + Map params = new TreeMap<>(); + params.put("error_trace", "true"); // Helps with debugging in case something crazy happens on the server. 
+ params.put("pretty", "true"); // Improves error reporting readability + params.put("format", "json"); // JSON is easier to parse than a table + Response response = client().performRequest("POST", "/_xpack/sql" + suffix, params, sql); + try (InputStream content = response.getEntity().getContent()) { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); + } + } + + public void testBasicTranslateQuery() throws IOException { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"test\":\"test\"}\n"); + bulk.append("{\"index\":{\"_id\":\"2\"}}\n"); + bulk.append("{\"test\":\"test\"}\n"); + client().performRequest("POST", "/test_translate/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + Map response = runSql("SELECT * FROM test_translate", "/translate/"); + assertEquals(response.get("size"), 1000); + @SuppressWarnings("unchecked") + Map source = (Map) response.get("_source"); + assertNotNull(source); + assertEquals(emptyList(), source.get("excludes")); + assertEquals(singletonList("test"), source.get("includes")); + } + + public void testBasicQueryWithFilter() throws IOException { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"test\":\"foo\"}\n"); + bulk.append("{\"index\":{\"_id\":\"2\"}}\n"); + bulk.append("{\"test\":\"bar\"}\n"); + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + Map expected = new HashMap<>(); + expected.put("columns", singletonList(columnInfo("test", "text"))); + expected.put("rows", singletonList(singletonList("foo"))); + expected.put("size", 1); + assertResponse(expected, runSql(new StringEntity("{\"query\":\"SELECT * FROM test\", \"filter\":{\"match\": {\"test\": \"foo\"}}}", + ContentType.APPLICATION_JSON))); + } + + public 
void testBasicTranslateQueryWithFilter() throws IOException { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"test\":\"foo\"}\n"); + bulk.append("{\"index\":{\"_id\":\"2\"}}\n"); + bulk.append("{\"test\":\"bar\"}\n"); + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + Map response = runSql("/translate/", + new StringEntity("{\"query\":\"SELECT * FROM test\", \"filter\":{\"match\": {\"test\": \"foo\"}}}", + ContentType.APPLICATION_JSON)); + + assertEquals(response.get("size"), 1000); + @SuppressWarnings("unchecked") + Map source = (Map) response.get("_source"); + assertNotNull(source); + assertEquals(emptyList(), source.get("excludes")); + assertEquals(singletonList("test"), source.get("includes")); + + @SuppressWarnings("unchecked") + Map query = (Map) response.get("query"); + assertNotNull(query); + + @SuppressWarnings("unchecked") + Map constantScore = (Map) query.get("constant_score"); + assertNotNull(constantScore); + + @SuppressWarnings("unchecked") + Map filter = (Map) constantScore.get("filter"); + assertNotNull(filter); + + @SuppressWarnings("unchecked") + Map match = (Map) filter.get("match"); + assertNotNull(match); + + @SuppressWarnings("unchecked") + Map matchQuery = (Map) match.get("test"); + assertNotNull(matchQuery); + assertEquals("foo", matchQuery.get("query")); + } + + public void testBasicQueryText() throws IOException { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"test\":\"test\"}\n"); + bulk.append("{\"index\":{\"_id\":\"2\"}}\n"); + bulk.append("{\"test\":\"test\"}\n"); + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + String expected = + "test \n" + + "---------------\n" + + "test \n" + + "test \n"; + Tuple 
response = runSqlAsText("SELECT * FROM test"); + logger.warn(expected); + logger.warn(response.v1()); + } + + public void testNextPageText() throws IOException { + StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < 20; i++) { + bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n"); + bulk.append("{\"text\":\"text" + i + "\", \"number\":" + i + "}\n"); + } + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + String request = "{\"query\":\"SELECT text, number, number + 5 AS sum FROM test ORDER BY number\", \"fetch_size\":2}"; + + String cursor = null; + for (int i = 0; i < 20; i += 2) { + Tuple response; + if (i == 0) { + response = runSqlAsText("", new StringEntity(request, ContentType.APPLICATION_JSON)); + } else { + response = runSqlAsText("", new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON)); + } + + StringBuilder expected = new StringBuilder(); + if (i == 0) { + expected.append(" text | number | sum \n"); + expected.append("---------------+---------------+---------------\n"); + } + expected.append(String.format(Locale.ROOT, "%-15s|%-15d|%-15d\n", "text" + i, i, i + 5)); + expected.append(String.format(Locale.ROOT, "%-15s|%-15d|%-15d\n", "text" + (i + 1), i + 1, i + 6)); + cursor = response.v2(); + assertEquals(expected.toString(), response.v1()); + assertNotNull(cursor); + } + Map expected = new HashMap<>(); + expected.put("size", 0); + expected.put("rows", emptyList()); + assertResponse(expected, runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON))); + + Map response = runSql("/close", new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON)); + assertEquals(true, response.get("succeeded")); + + assertEquals(0, getNumberOfSearchContexts("test")); + } + + private Tuple runSqlAsText(String sql) throws IOException { + return runSqlAsText("", new 
StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON)); + } + + private Tuple runSqlAsText(String suffix, HttpEntity sql) throws IOException { + Response response = client().performRequest("POST", "/_xpack/sql" + suffix, singletonMap("error_trace", "true"), sql); + return new Tuple<>( + Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)), + response.getHeader("Cursor") + ); + } + + private void assertResponse(Map expected, Map actual) { + if (false == expected.equals(actual)) { + NotEqualMessageBuilder message = new NotEqualMessageBuilder(); + message.compareMaps(actual, expected); + fail("Response does not match:\n" + message.toString()); + } + } + + public static int getNumberOfSearchContexts(String index) throws IOException { + Response response = client().performRequest("GET", "/_stats/search"); + Map stats; + try (InputStream content = response.getEntity().getContent()) { + stats = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); + } + return getOpenContexts(stats, index); + } + + public static void assertNoSearchContexts() throws IOException { + Response response = client().performRequest("GET", "/_stats/search"); + Map stats; + try (InputStream content = response.getEntity().getContent()) { + stats = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); + } + @SuppressWarnings("unchecked") + Map indexStats = (Map) stats.get("indices"); + for (String index : indexStats.keySet()) { + if (index.startsWith(".") == false) { // We are not interested in internal indices + assertEquals(index + " should have no search contexts", 0, getOpenContexts(stats, index)); + } + } + } + + @SuppressWarnings("unchecked") + public static int getOpenContexts(Map indexStats, String index) { + return (int) ((Map) ((Map) ((Map) ((Map) + indexStats.get("indices")).get(index)).get("total")).get("search")).get("open_contexts"); + } + +} diff --git 
a/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/package-info.java b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/package-info.java new file mode 100644 index 00000000000..1a061730c60 --- /dev/null +++ b/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/package-info.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Integration tests shared between multiple qa projects. + */ +package org.elasticsearch.xpack.qa.sql.rest; diff --git a/qa/sql/src/main/resources/agg.csv-spec b/qa/sql/src/main/resources/agg.csv-spec new file mode 100644 index 00000000000..b84ba44bffc --- /dev/null +++ b/qa/sql/src/main/resources/agg.csv-spec @@ -0,0 +1,67 @@ +// +// Aggs not supported by H2 / traditional SQL stores +// + +singlePercentileWithoutComma +SELECT gender, PERCENTILE(emp_no, 97) p1 FROM test_emp GROUP BY gender; + +gender:s | p1:d +M | 10095.6112 +F | 10099.1936 +; + +singlePercentileWithComma +SELECT gender, PERCENTILE(emp_no, 97.76) p1 FROM test_emp GROUP BY gender; + +gender:s | p1:d +M | 10095.6112 +F | 10099.1936 +; + +multiplePercentilesOneWithCommaOneWithout +SELECT gender, PERCENTILE(emp_no, 92.45) p1, PERCENTILE(emp_no, 91) p2 FROM test_emp GROUP BY gender; + +gender:s | p1:d | p2:d +M | 10090.319 | 10087.68 +F | 10095.128 | 10093.52 +; + +multiplePercentilesWithoutComma +SELECT gender, PERCENTILE(emp_no, 91) p1, PERCENTILE(emp_no, 89) p2 FROM test_emp GROUP BY gender; + +gender:s | p1:d | p2:d +M | 10087.68 | 10085.18 +F | 10093.52 | 10092.08 +; + +multiplePercentilesWithComma +SELECT gender, PERCENTILE(emp_no, 85.7) p1, PERCENTILE(emp_no, 94.3) p2 FROM test_emp GROUP BY gender; + +gender:s | p1:d | p2:d +M | 10083.134 | 10091.932 +F | 10088.852 | 10097.792 +; + +percentileRank +SELECT gender, 
PERCENTILE_RANK(emp_no, 10025) rank FROM test_emp GROUP BY gender; + +gender:s | rank:d +M | 23.41269841269841 +F | 26.351351351351347 +; + +multiplePercentileRanks +SELECT gender, PERCENTILE_RANK(emp_no, 10030.0) rank1, PERCENTILE_RANK(emp_no, 10025) rank2 FROM test_emp GROUP BY gender; + +gender:s | rank1:d | rank2:d +M | 29.365079365079367 | 23.41269841269841 +F | 29.93762993762994 | 26.351351351351347 +; + +multiplePercentilesAndPercentileRank +SELECT gender, PERCENTILE(emp_no, 97.76) p1, PERCENTILE(emp_no, 93.3) p2, PERCENTILE_RANK(emp_no, 10025) rank FROM test_emp GROUP BY gender; + +gender:s | p1:d | p2:d | rank:d +M | 10095.6112 | 10090.846 | 23.41269841269841 +F | 10099.1936 | 10096.351999999999 | 26.351351351351347 +; \ No newline at end of file diff --git a/qa/sql/src/main/resources/agg.sql-spec b/qa/sql/src/main/resources/agg.sql-spec new file mode 100644 index 00000000000..00e691cf378 --- /dev/null +++ b/qa/sql/src/main/resources/agg.sql-spec @@ -0,0 +1,253 @@ +// +// Group-By +// + +groupByOnText +SELECT gender g FROM "test_emp" GROUP BY gender; +groupByOnTextWithWhereClause +SELECT gender g FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender; +groupByOnTextWithWhereAndLimit +SELECT gender g FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender LIMIT 1; +groupByOnTextOnAlias +SELECT gender g FROM "test_emp" WHERE emp_no < 10020 GROUP BY g; +groupByOnTextOnAliasOrderDesc +SELECT gender g FROM "test_emp" WHERE emp_no < 10020 GROUP BY g ORDER BY g DESC; + +groupByOnDate +SELECT birth_date b FROM "test_emp" GROUP BY birth_date ORDER BY birth_date DESC; +groupByOnDateWithWhereClause +SELECT birth_date b FROM "test_emp" WHERE emp_no < 10020 GROUP BY birth_date ORDER BY birth_date DESC; +groupByOnDateWithWhereAndLimit +SELECT birth_date b FROM "test_emp" WHERE emp_no < 10020 GROUP BY birth_date ORDER BY birth_date DESC LIMIT 1; +groupByOnDateOnAlias +SELECT birth_date b FROM "test_emp" WHERE emp_no < 10020 GROUP BY b ORDER BY birth_date DESC; + 
+groupByOnNumber +SELECT emp_no e FROM "test_emp" GROUP BY emp_no ORDER BY emp_no DESC; +groupByOnNumberWithWhereClause +SELECT emp_no e FROM "test_emp" WHERE emp_no < 10020 GROUP BY emp_no ORDER BY emp_no DESC; +groupByOnNumberWithWhereAndLimit +SELECT emp_no e FROM "test_emp" WHERE emp_no < 10020 GROUP BY emp_no ORDER BY emp_no DESC LIMIT 1; +groupByOnNumberOnAlias +SELECT emp_no e FROM "test_emp" WHERE emp_no < 10020 GROUP BY e ORDER BY emp_no DESC; + +// group by scalar +groupByAddScalar +SELECT emp_no + 1 AS e FROM test_emp GROUP BY e ORDER BY e; +groupByMinScalarDesc +SELECT emp_no - 1 AS e FROM test_emp GROUP BY e ORDER BY e DESC; +groupByAddScalarDesc +SELECT emp_no % 2 AS e FROM test_emp GROUP BY e ORDER BY e DESC; +groupByMulScalar +SELECT emp_no * 2 AS e FROM test_emp GROUP BY e ORDER BY e; +groupByModScalar +SELECT (emp_no % 3) + 1 AS e FROM test_emp GROUP BY e ORDER BY e; + +// +// Aggregate Functions +// + +// COUNT +aggCountImplicit +SELECT COUNT(*) c FROM "test_emp"; +aggCountImplicitWithCast +SELECT CAST(COUNT(*) AS INT) c FROM "test_emp"; +aggCountImplicitWithConstant +SELECT COUNT(1) FROM "test_emp"; +aggCountImplicitWithConstantAndFilter +SELECT COUNT(1) FROM "test_emp" WHERE emp_no < 10010; +aggCountAliasAndWhereClause +SELECT gender g, COUNT(*) c FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender; +aggCountAliasAndWhereClauseAndLimit +SELECT gender g, COUNT(*) c FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender LIMIT 1; +aggCountAliasWithCastAndFilter +SELECT gender g, CAST(COUNT(*) AS INT) c FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender; +aggCountWithAlias +SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g; + +// Conditional COUNT +aggCountAndHaving +SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g HAVING COUNT(*) > 10; +aggCountOnColumnAndHaving +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING COUNT(gender) > 10; +// NOT supported yet since Having introduces a new agg 
+//aggCountOnColumnAndWildcardAndHaving +//SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g HAVING COUNT(gender) > 10; +aggCountAndHavingOnAlias +SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g HAVING c > 10; +aggCountOnColumnAndHavingOnAlias +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10; +aggCountOnColumnAndMultipleHaving +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND c < 70; +aggCountOnColumnAndMultipleHavingWithLimit +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND c < 70 LIMIT 1; +aggCountOnColumnAndHavingBetween +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c BETWEEN 10 AND 70; +aggCountOnColumnAndHavingBetweenWithLimit +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c BETWEEN 10 AND 70 LIMIT 1; + +aggCountOnColumnAndHavingOnAliasAndFunction +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND COUNT(gender) < 70; +// NOT supported yet since Having introduces a new agg +//aggCountOnColumnAndHavingOnAliasAndFunctionWildcard -> COUNT(*/1) vs COUNT(gender) +//SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND COUNT(*) < 70; +//aggCountOnColumnAndHavingOnAliasAndFunctionConstant +//SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND COUNT(1) < 70; + + +// MIN +aggMinImplicit +SELECT MIN(emp_no) m FROM "test_emp"; +aggMinImplicitWithCast +SELECT CAST(MIN(emp_no) AS SMALLINT) m FROM "test_emp"; +aggMin +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY gender; +aggMinWithCast +SELECT CAST(MIN(emp_no) AS SMALLINT) m FROM "test_emp" GROUP BY gender; +aggMinAndCount +SELECT MIN(emp_no) m, COUNT(1) c FROM "test_emp" GROUP BY gender; +aggMinAndCountWithFilter +SELECT MIN(emp_no) m, COUNT(1) c FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender ; +aggMinAndCountWithFilterAndLimit +SELECT MIN(emp_no) m, COUNT(1) c FROM "test_emp" WHERE emp_no < 
10020 GROUP BY gender LIMIT 1; +aggMinWithCastAndFilter +SELECT gender g, CAST(MIN(emp_no) AS SMALLINT) m, COUNT(1) c FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender; +aggMinWithAlias +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g; + +// Conditional MIN +aggMinWithHaving +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING MIN(emp_no) > 10; +aggMinWithHavingOnAlias +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10; +aggMinWithMultipleHaving +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND m < 99999; +aggMinWithMultipleHavingBetween +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING m BETWEEN 10 AND 99999; +aggMinWithMultipleHavingWithLimit +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND m < 99999 LIMIT 1; +aggMinWithMultipleHavingBetween +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING m BETWEEN 10 AND 99999 LIMIT 1; +aggMinWithMultipleHavingOnAliasAndFunction +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND MIN(emp_no) < 99999; + +// MAX +aggMaxImplicit +SELECT MAX(emp_no) c FROM "test_emp"; +aggMaxImplicitWithCast +SELECT CAST(MAX(emp_no) AS SMALLINT) c FROM "test_emp"; +aggMax +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY gender ; +aggMaxWithCast +SELECT gender g, CAST(MAX(emp_no) AS SMALLINT) m FROM "test_emp" GROUP BY gender ; +aggMaxAndCount +SELECT MAX(emp_no) m, COUNT(1) c FROM "test_emp" GROUP BY gender; +aggMaxAndCountWithFilter +SELECT gender g, MAX(emp_no) m, COUNT(1) c FROM "test_emp" WHERE emp_no > 10000 GROUP BY gender; +aggMaxAndCountWithFilterAndLimit +SELECT gender g, MAX(emp_no) m, COUNT(1) c FROM "test_emp" WHERE emp_no > 10000 GROUP BY gender LIMIT 1; +aggMaxWithAlias +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g; + +// Conditional MAX +aggMaxWithHaving +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING MAX(emp_no) > 10; 
+aggMaxWithHavingOnAlias +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10; +aggMaxWithMultipleHaving +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND m < 99999; +aggMaxWithMultipleHavingBetween +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m BETWEEN 10 AND 99999; +aggMaxWithMultipleHavingWithLimit +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND m < 99999 LIMIT 1; +aggMaxWithMultipleHavingBetweenWithLimit +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m BETWEEN 10 AND 99999 LIMIT 1; +aggMaxWithMultipleHavingOnAliasAndFunction +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND MAX(emp_no) < 99999; + +// SUM +aggSumImplicitWithCast +SELECT CAST(SUM(emp_no) AS BIGINT) s FROM "test_emp"; +aggSumWithCast +SELECT gender g, CAST(SUM(emp_no) AS BIGINT) s FROM "test_emp" GROUP BY gender; +aggSumWithCastAndCount +SELECT gender g, CAST(SUM(emp_no) AS BIGINT) s, COUNT(1) c FROM "test_emp" GROUP BY gender; +aggSumWithCastAndCountWithFilter +SELECT gender g, CAST(SUM(emp_no) AS BIGINT) s, COUNT(1) c FROM "test_emp" WHERE emp_no > 10000 GROUP BY gender; +aggSumWithCastAndCountWithFilterAndLimit +SELECT gender g, CAST(SUM(emp_no) AS BIGINT) s, COUNT(1) c FROM "test_emp" WHERE emp_no > 10000 GROUP BY gender LIMIT 1; +aggSumWithAlias +SELECT gender g, CAST(SUM(emp_no) AS BIGINT) s FROM "test_emp" GROUP BY g; + +// Conditional SUM +aggSumWithHaving +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING SUM(emp_no) > 10; +aggSumWithHavingOnAlias +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s > 10; +aggSumWithMultipleHaving +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s > 10 AND s < 10000000; +aggSumWithMultipleHavingBetween +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s BETWEEN 10 AND 10000000; 
+aggSumWithMultipleHavingWithLimit +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s > 10 AND s < 10000000 LIMIT 1; +aggSumWithMultipleHavingBetweenWithLimit +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s BETWEEN 10 AND 10000000 LIMIT 1; +aggSumWithMultipleHavingOnAliasAndFunction +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s > 10 AND SUM(emp_no) > 10000000; + +// AVG +aggAvgImplicitWithCast +SELECT CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp"; +aggAvgWithCastToFloat +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY gender; +// casting to an exact type - varchar, bigint, etc... will likely fail due to rounding error +aggAvgWithCastToDouble +SELECT gender g, CAST(AVG(emp_no) AS DOUBLE) a FROM "test_emp" GROUP BY gender; +aggAvgWithCastAndCount +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a, COUNT(1) c FROM "test_emp" GROUP BY gender; +aggAvgWithCastAndCountWithFilter +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a, COUNT(1) c FROM "test_emp" WHERE emp_no > 10000 GROUP BY gender ; +aggAvgWithCastAndCountWithFilterAndLimit +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a, COUNT(1) c FROM "test_emp" WHERE emp_no > 10000 GROUP BY gender LIMIT 1; +aggAvgWithAlias +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g; + +// Conditional AVG +aggAvgWithHaving +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING AVG(emp_no) > 10; +aggAvgWithHavingOnAlias +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING a > 10; +aggAvgWithMultipleHaving +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING a > 10 AND a < 10000000; +aggAvgWithMultipleHavingBetween +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING a BETWEEN 10 AND 10000000; +aggAvgWithMultipleHavingWithLimit +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP 
BY g HAVING a > 10 AND a < 10000000 LIMIT 1; +aggAvgWithMultipleHavingBetweenWithLimit +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING a BETWEEN 10 AND 10000000 LIMIT 1; +aggAvgWithMultipleHavingOnAliasAndFunction +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING a > 10 AND AVG(emp_no) > 10000000; + +// +// GroupBy on Scalar plus Having +// +aggGroupByOnScalarWithHaving +SELECT emp_no + 1 AS e FROM test_emp GROUP BY e HAVING AVG(salary) BETWEEN 1 AND 10010 ORDER BY e; + +// +// Mixture of Aggs that triggers promotion of aggs to stats +// +aggMultiIncludingScalarFunction +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages ORDER BY languages; +aggHavingWithAggNotInGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages HAVING AVG(salary) > 30000 ORDER BY languages; +aggHavingWithAliasOnScalarFromGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages HAVING d BETWEEN 50 AND 10000 AND AVG(salary) > 30000 ORDER BY languages; +aggHavingWithScalarFunctionBasedOnAliasFromGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages HAVING ma % mi > 1 AND AVG(salary) > 30000 ORDER BY languages; +aggHavingWithMultipleScalarFunctionsBasedOnAliasFromGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages HAVING d - ma % mi > 0 AND AVG(salary) > 30000 ORDER BY languages; +aggHavingWithMultipleScalarFunctionsBasedOnAliasFromGroupByAndAggNotInGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages HAVING ROUND(d - ABS(ma % mi)) + AVG(salary) > 0 AND AVG(salary) > 30000 ORDER BY languages; +aggHavingScalarOnAggFunctionsWithoutAliasesInAndNotInGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, 
MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages HAVING MAX(salary) % MIN(salary) + AVG(salary) > 3000 ORDER BY languages; diff --git a/qa/sql/src/main/resources/arithmetic.csv-spec b/qa/sql/src/main/resources/arithmetic.csv-spec new file mode 100644 index 00000000000..4d8a9fc3fc2 --- /dev/null +++ b/qa/sql/src/main/resources/arithmetic.csv-spec @@ -0,0 +1,13 @@ +// +// Arithmetic tests outside H2 +// + +// the standard behavior here is to return the constant for each element +// the weird thing is that an actual query needs to be ran +arithmeticWithFrom +SELECT 5 - 2 x FROM test_emp; + +x +3 +; + diff --git a/qa/sql/src/main/resources/arithmetic.sql-spec b/qa/sql/src/main/resources/arithmetic.sql-spec new file mode 100644 index 00000000000..857b1045a0e --- /dev/null +++ b/qa/sql/src/main/resources/arithmetic.sql-spec @@ -0,0 +1,75 @@ +// +// Arithmetic tests +// + +unaryMinus +SELECT - 1 AS x; +plus +SELECT 1 + 1 AS x; +minus +SELECT 1 - 1 AS x; +divide +SELECT 6 / 3 AS x; +multiply +SELECT 2 * 3 AS x; +mod +SELECT 5 % 2 AS x; +operatorsPriority +SELECT 1 + 3 * 4 / 2 - 2 AS x; +operatorsPriorityWithParanthesis +SELECT ((1 + 3) * 2 / (3 - 1)) * 2 AS x; +literalAliasing +SELECT 2 + 3 AS x, 'foo' y; + +// variable scalar arithmetic +scalarVariablePlus +SELECT emp_no + 10000 AS x FROM test_emp; +scalarVariableMinus +SELECT emp_no - 10000 AS x FROM test_emp; +scalarVariableMul +SELECT emp_no * 10000 AS x FROM test_emp; +scalarVariableDiv +SELECT emp_no / 10000 AS x FROM test_emp; +scalarVariableMod +SELECT emp_no % 10000 AS x FROM test_emp; +scalarVariableMultipleInputs +SELECT (emp_no % 10000) + YEAR(hire_date) AS x FROM test_emp; +scalarVariableTwoInputs +SELECT (emp_no % 10000) + YEAR(hire_date) AS x FROM test_emp; +scalarVariableThreeInputs +SELECT ((emp_no % 10000) + YEAR(hire_date)) / MONTH(birth_date) AS x FROM test_emp; +scalarVariableArithmeticAndEntry +SELECT emp_no, emp_no % 10000 AS x FROM test_emp; +scalarVariableTwoInputsAndEntry +SELECT 
emp_no, (emp_no % 10000) + YEAR(hire_date) AS x FROM test_emp; +scalarVariableThreeInputsAndEntry +SELECT emp_no, ((emp_no % 10000) + YEAR(hire_date)) / MONTH(birth_date) AS x FROM test_emp; + + +// variable scalar agg +aggVariablePlus +SELECT COUNT(*) + 10000 AS x FROM test_emp GROUP BY gender; +aggVariableMinus +SELECT COUNT(*) - 10000 AS x FROM test_emp GROUP BY gender; +aggVariableMul +SELECT COUNT(*) * 2 AS x FROM test_emp GROUP BY gender; +aggVariableDiv +SELECT COUNT(*) / 5000 AS x FROM test_emp GROUP BY gender; +aggVariableMod +SELECT COUNT(*) % 10000 AS x FROM test_emp GROUP BY gender; +aggVariableTwoInputs +SELECT MAX(emp_no) - MIN(emp_no) AS x FROM test_emp GROUP BY gender; +aggVariableThreeInputs +SELECT (MAX(emp_no) - MIN(emp_no)) + AVG(emp_no) AS x FROM test_emp GROUP BY gender; + +// ordering +orderByPlus +SELECT emp_no FROM test_emp ORDER BY emp_no + 2 LIMIT 10; +orderByNegative +SELECT emp_no FROM test_emp ORDER BY -emp_no LIMIT 10; +orderByMinusDesc +SELECT emp_no FROM test_emp ORDER BY -emp_no DESC LIMIT 10; +orderByModulo +SELECT emp_no FROM test_emp ORDER BY emp_no % 10000 LIMIT 10; +orderByMul +SELECT emp_no FROM test_emp ORDER BY emp_no * 2 LIMIT 10; \ No newline at end of file diff --git a/qa/sql/src/main/resources/columns.csv-spec b/qa/sql/src/main/resources/columns.csv-spec new file mode 100644 index 00000000000..331c4a705ff --- /dev/null +++ b/qa/sql/src/main/resources/columns.csv-spec @@ -0,0 +1,15 @@ +// +// Test of explicit column types +// the columns can be specified as or as +// if at least one column has an explicit column type, all columns should have an explicit type +// type might be missing in which case it will be autodetected or can be one of the following +// d - double, f - float, i - int, b - byte, l - long, t - timestamp, date + + +columnDetectionOverride +SELECT gender, FLOOR(PERCENTILE(emp_no, 97.76)) p1 FROM test_emp GROUP BY gender; + +gender:s | p1:l +M | 10095 +F | 10099 +; \ No newline at end of file diff --git 
a/qa/sql/src/main/resources/command.csv-spec b/qa/sql/src/main/resources/command.csv-spec new file mode 100644 index 00000000000..921828c1999 --- /dev/null +++ b/qa/sql/src/main/resources/command.csv-spec @@ -0,0 +1,124 @@ +// +// Commands +// + +// SHOW_FUNCTIONS +showFunctions +SHOW FUNCTIONS; + + name:s | type:s +AVG |AGGREGATE +COUNT |AGGREGATE +MAX |AGGREGATE +MIN |AGGREGATE +SUM |AGGREGATE +MEAN |AGGREGATE +STDDEV_POP |AGGREGATE +VAR_POP |AGGREGATE +SUM_OF_SQUARES |AGGREGATE +SKEWNESS |AGGREGATE +KURTOSIS |AGGREGATE +PERCENTILE |AGGREGATE +PERCENTILE_RANK |AGGREGATE +DAY_OF_MONTH |SCALAR +DAY |SCALAR +DOM |SCALAR +DAY_OF_WEEK |SCALAR +DOW |SCALAR +DAY_OF_YEAR |SCALAR +DOY |SCALAR +HOUR_OF_DAY |SCALAR +HOUR |SCALAR +MINUTE_OF_DAY |SCALAR +MINUTE_OF_HOUR |SCALAR +MINUTE |SCALAR +SECOND_OF_MINUTE|SCALAR +SECOND |SCALAR +MONTH_OF_YEAR |SCALAR +MONTH |SCALAR +YEAR |SCALAR +ABS |SCALAR +ACOS |SCALAR +ASIN |SCALAR +ATAN |SCALAR +CBRT |SCALAR +CEIL |SCALAR +COS |SCALAR +COSH |SCALAR +DEGREES |SCALAR +E |SCALAR +EXP |SCALAR +EXPM1 |SCALAR +FLOOR |SCALAR +LOG |SCALAR +LOG10 |SCALAR +PI |SCALAR +RADIANS |SCALAR +ROUND |SCALAR +SIN |SCALAR +SINH |SCALAR +SQRT |SCALAR +TAN |SCALAR +; + +showFunctionsWithExactMatch +SHOW FUNCTIONS LIKE 'ABS'; + + name:s | type:s +ABS |SCALAR +; + + +showFunctionsWithPatternWildcard +SHOW FUNCTIONS LIKE 'A%'; + + name:s | type:s +AVG |AGGREGATE +ABS |SCALAR +ACOS |SCALAR +ASIN |SCALAR +ATAN |SCALAR +; + +showFunctionsWithPatternChar +SHOW FUNCTIONS LIKE 'A__'; + + name:s | type:s +AVG |AGGREGATE +ABS |SCALAR +; + +showFunctions +SHOW FUNCTIONS '%DAY%'; + + name:s | type:s +DAY_OF_MONTH |SCALAR +DAY |SCALAR +DAY_OF_WEEK |SCALAR +DAY_OF_YEAR |SCALAR +HOUR_OF_DAY |SCALAR +MINUTE_OF_DAY |SCALAR +; + +showTables +SHOW TABLES; + + table:s +test_emp +; + +// DESCRIBE + +describe +DESCRIBE "test_emp"; + + column:s | type:s +birth_date |TIMESTAMP +emp_no |INTEGER +first_name |VARCHAR +gender |VARCHAR +hire_date |TIMESTAMP +languages |TINYINT 
+last_name |VARCHAR +salary |INTEGER +; diff --git a/qa/sql/src/main/resources/datetime.csv-spec b/qa/sql/src/main/resources/datetime.csv-spec new file mode 100644 index 00000000000..459fa052d7a --- /dev/null +++ b/qa/sql/src/main/resources/datetime.csv-spec @@ -0,0 +1,165 @@ +// +// DateTime +// + +// +// Time (H2 doesn't support these for Timezone with timestamp) +// +// + +dateTimeSecond +SELECT SECOND(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + +d:i | l:s +0 | Facello +0 | Simmel +0 | Bamford +0 | Koblick +0 | Maliniak +0 | Preusig +0 | Zielinski +0 | Kalloufi +0 | Peac +; + +dateTimeMinute +SELECT MINUTE(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + +d:i | l:s +0 | Facello +0 | Simmel +0 | Bamford +0 | Koblick +0 | Maliniak +0 | Preusig +0 | Zielinski +0 | Kalloufi +0 | Peac +; + +dateTimeHour +SELECT HOUR(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + +d:i | l:s +0 | Facello +0 | Simmel +0 | Bamford +0 | Koblick +0 | Maliniak +0 | Preusig +0 | Zielinski +0 | Kalloufi +0 | Peac + +; + +// +// Date (in H2 these start at 0 instead of 1...) 
+// +dateTimeDayOfWeek +SELECT DAY_OF_WEEK(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY DAY_OF_WEEK(birth_date); + +d:i | l:s +1 | Preusig +2 | Simmel +3 | Facello +3 | Kalloufi +4 | Bamford +4 | Zielinski +5 | Maliniak +6 | Koblick +6 | Peac +; + +dateTimeDayOfYear +SELECT DAY_OF_YEAR(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + +d:i | l:s +245 | Facello +154 | Simmel +337 | Bamford +121 | Koblick +21 | Maliniak +110 | Preusig +143 | Zielinski +50 | Kalloufi +110 | Peac +; + +// +// Aggregate +// + +dateTimeAggByYear +SELECT YEAR(birth_date) AS d, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY YEAR(birth_date) ORDER BY YEAR(birth_date) LIMIT 13; + +d:i | s:i +1952 | 90472 +1953 | 110398 +1954 | 80447 +1955 | 40240 +1956 | 60272 +1957 | 50280 +1958 | 70225 +1959 | 110517 +1960 | 100501 +1961 | 100606 +1962 | 60361 +1963 | 80372 +1964 | 40264 +; + +dateTimeAggByMonth +SELECT MONTH(birth_date) AS d, COUNT(*) AS c, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY MONTH(birth_date) ORDER BY MONTH(birth_date) DESC; + +d:i | c:l | s:i +12 | 7 | 70325 +11 | 8 | 80439 +10 | 9 | 90517 +9 | 13 | 130688 +8 | 8 | 80376 +7 | 11 | 110486 +6 | 8 | 80314 +5 | 10 | 100573 +4 | 9 | 90450 +3 | 2 | 20164 +2 | 9 | 90430 +1 | 6 | 60288 +; + +dateTimeAggByDayOfMonth +SELECT DAY_OF_MONTH(birth_date) AS d, COUNT(*) AS c, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY DAY_OF_MONTH(birth_date) ORDER BY DAY_OF_MONTH(birth_date) DESC; + +d:i | c:l | s:i +31 | 1 | 10025 +30 | 2 | 20147 +29 | 3 | 30104 +28 | 2 | 20125 +27 | 3 | 30169 +26 | 4 | 40190 +25 | 5 | 50443 +24 | 2 | 20069 +23 | 7 | 70413 +22 | 1 | 10037 +21 | 6 | 60359 +20 | 4 | 40135 +19 | 8 | 80299 +18 | 2 | 20169 +17 | 1 | 10081 +16 | 1 | 10096 +15 | 2 | 20132 +14 | 4 | 40173 +13 | 5 | 50264 +12 | 1 | 10014 +11 | 2 | 20141 +10 | 2 | 20063 +9 | 3 | 30189 +8 | 2 | 20057 +7 | 5 | 50240 +6 | 4 | 40204 +5 | 2 | 20103 +4 | 3 | 30157 +3 | 4 | 40204 +2 | 4 | 40081 
+1 | 5 | 50167 +; diff --git a/qa/sql/src/main/resources/datetime.sql-spec b/qa/sql/src/main/resources/datetime.sql-spec new file mode 100644 index 00000000000..20ea8329c8f --- /dev/null +++ b/qa/sql/src/main/resources/datetime.sql-spec @@ -0,0 +1,45 @@ +// +// DateTime +// + +// +// Time NOT IMPLEMENTED in H2 on TIMESTAMP WITH TIME ZONE - hence why these are moved to CSV +// + +// +// Date +// + +dateTimeDay +SELECT DAY(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +dateTimeDayOfMonth +SELECT DAY_OF_MONTH(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +dateTimeMonth +SELECT MONTH(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +dateTimeYear +SELECT YEAR(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + +// +// Filter +// +dateTimeFilterDayOfMonth +SELECT DAY_OF_MONTH(birth_date) AS d, last_name l FROM "test_emp" WHERE DAY_OF_MONTH(birth_date) <= 10 ORDER BY emp_no LIMIT 5; +dateTimeFilterMonth +SELECT MONTH(birth_date) AS d, last_name l FROM "test_emp" WHERE MONTH(birth_date) <= 5 ORDER BY emp_no LIMIT 5; +dateTimeFilterYear +SELECT YEAR(birth_date) AS d, last_name l FROM "test_emp" WHERE YEAR(birth_date) <= 1960 ORDER BY emp_no LIMIT 5; + + +// +// Aggregate +// + + +dateTimeAggByYear +SELECT YEAR(birth_date) AS d, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY YEAR(birth_date) ORDER BY YEAR(birth_date) LIMIT 13; + +dateTimeAggByMonth +SELECT MONTH(birth_date) AS d, COUNT(*) AS c, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY MONTH(birth_date) ORDER BY MONTH(birth_date) DESC; + +dateTimeAggByDayOfMonth +SELECT DAY_OF_MONTH(birth_date) AS d, COUNT(*) AS c, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY DAY_OF_MONTH(birth_date) ORDER BY DAY_OF_MONTH(birth_date) DESC; diff --git a/qa/sql/src/main/resources/debug.csv-spec b/qa/sql/src/main/resources/debug.csv-spec new file mode 100644 index 00000000000..30d0048aa6b --- 
/dev/null +++ b/qa/sql/src/main/resources/debug.csv-spec @@ -0,0 +1,10 @@ +// +// Spec used for debugging a certain test (without having to alter the spec suite of which it might be part) +// + +debug +SELECT int FROM test GROUP BY AVG(int) + 2; + +table:s +test_emp +; \ No newline at end of file diff --git a/qa/sql/src/main/resources/debug.sql-spec b/qa/sql/src/main/resources/debug.sql-spec new file mode 100644 index 00000000000..ff2e2c5adf7 --- /dev/null +++ b/qa/sql/src/main/resources/debug.sql-spec @@ -0,0 +1,6 @@ +// +// Spec used for debugging a certain test (without having to alter the spec suite of which it might be part) +// + +debug +SELECT int FROM test GROUP BY AVG(int) + 2; diff --git a/qa/sql/src/main/resources/employees.csv b/qa/sql/src/main/resources/employees.csv new file mode 100644 index 00000000000..4425a4b592f --- /dev/null +++ b/qa/sql/src/main/resources/employees.csv @@ -0,0 +1,101 @@ +birth_date,emp_no,first_name,gender,hire_date,languages,last_name,salary +1953-09-02T00:00:00Z,10001,Georgi,M,1986-06-26T00:00:00Z,2,Facello,57305 +1964-06-02T00:00:00Z,10002,Bezalel,F,1985-11-21T00:00:00Z,5,Simmel,56371 +1959-12-03T00:00:00Z,10003,Parto,M,1986-08-28T00:00:00Z,4,Bamford,61805 +1954-05-01T00:00:00Z,10004,Chirstian,M,1986-12-01T00:00:00Z,5,Koblick,36174 +1955-01-21T00:00:00Z,10005,Kyoichi,M,1989-09-12T00:00:00Z,1,Maliniak,63528 +1953-04-20T00:00:00Z,10006,Anneke,F,1989-06-02T00:00:00Z,3,Preusig,60335 +1957-05-23T00:00:00Z,10007,Tzvetan,F,1989-02-10T00:00:00Z,4,Zielinski,74572 +1958-02-19T00:00:00Z,10008,Saniya,M,1994-09-15T00:00:00Z,2,Kalloufi,43906 +1952-04-19T00:00:00Z,10009,Sumant,F,1985-02-18T00:00:00Z,1,Peac,66174 +1963-06-01T00:00:00Z,10010,Duangkaew,F,1989-08-24T00:00:00Z,4,Piveteau,45797 +1953-11-07T00:00:00Z,10011,Mary,F,1990-01-22T00:00:00Z,5,Sluis,31120 +1960-10-04T00:00:00Z,10012,Patricio,M,1992-12-18T00:00:00Z,5,Bridgland,48942 +1963-06-07T00:00:00Z,10013,Eberhardt,M,1985-10-20T00:00:00Z,1,Terkki,48735
+1956-02-12T00:00:00Z,10014,Berni,M,1987-03-11T00:00:00Z,5,Genin,37137 +1959-08-19T00:00:00Z,10015,Guoxiang,M,1987-07-02T00:00:00Z,5,Nooteboom,25324 +1961-05-02T00:00:00Z,10016,Kazuhito,M,1995-01-27T00:00:00Z,2,Cappelletti,61358 +1958-07-06T00:00:00Z,10017,Cristinel,F,1993-08-03T00:00:00Z,2,Bouloucos,58715 +1954-06-19T00:00:00Z,10018,Kazuhide,F,1987-04-03T00:00:00Z,2,Peha,56760 +1953-01-23T00:00:00Z,10019,Lillian,M,1999-04-30T00:00:00Z,1,Haddadi,73717 +1952-12-24T00:00:00Z,10020,Mayuko,M,1991-01-26T00:00:00Z,3,Warwick,40031 +1960-02-20T00:00:00Z,10021,Ramzi,M,1988-02-10T00:00:00Z,5,Erde,60408 +1952-07-08T00:00:00Z,10022,Shahaf,M,1995-08-22T00:00:00Z,3,Famili,48233 +1953-09-29T00:00:00Z,10023,Bojan,F,1989-12-17T00:00:00Z,2,Montemayor,47896 +1958-09-05T00:00:00Z,10024,Suzette,F,1997-05-19T00:00:00Z,3,Pettey,64675 +1958-10-31T00:00:00Z,10025,Prasadram,M,1987-08-17T00:00:00Z,5,Heyers,47411 +1953-04-03T00:00:00Z,10026,Yongqiao,M,1995-03-20T00:00:00Z,3,Berztiss,28336 +1962-07-10T00:00:00Z,10027,Divier,F,1989-07-07T00:00:00Z,5,Reistad,73851 +1963-11-26T00:00:00Z,10028,Domenick,M,1991-10-22T00:00:00Z,1,Tempesti,39356 +1956-12-13T00:00:00Z,10029,Otmar,M,1985-11-20T00:00:00Z,3,Herbst,74999 +1958-07-14T00:00:00Z,10030,Elvis,M,1994-02-17T00:00:00Z,3,Demeyer,67492 +1959-01-27T00:00:00Z,10031,Karsten,M,1991-09-01T00:00:00Z,4,Joslin,37716 +1960-08-09T00:00:00Z,10032,Jeong,F,1990-06-20T00:00:00Z,3,Reistad,62233 +1956-11-14T00:00:00Z,10033,Arif,M,1987-03-18T00:00:00Z,1,Merlo,70011 +1962-12-29T00:00:00Z,10034,Bader,M,1988-09-21T00:00:00Z,1,Swan,39878 +1953-02-08T00:00:00Z,10035,Alain,M,1988-09-05T00:00:00Z,5,Chappelet,25945 +1959-08-10T00:00:00Z,10036,Adamantios,M,1992-01-03T00:00:00Z,4,Portugali,60781 +1963-07-22T00:00:00Z,10037,Pradeep,M,1990-12-05T00:00:00Z,2,Makrucki,37691 +1960-07-20T00:00:00Z,10038,Huan,M,1989-09-20T00:00:00Z,4,Lortz,35222 +1959-10-01T00:00:00Z,10039,Alejandro,M,1988-01-19T00:00:00Z,2,Brender,36051 
+1959-09-13T00:00:00Z,10040,Weiyi,F,1993-02-14T00:00:00Z,4,Meriste,37112 +1959-08-27T00:00:00Z,10041,Uri,F,1989-11-12T00:00:00Z,1,Lenart,56415 +1956-02-26T00:00:00Z,10042,Magy,F,1993-03-21T00:00:00Z,3,Stamatiou,30404 +1960-09-19T00:00:00Z,10043,Yishay,M,1990-10-20T00:00:00Z,1,Tzvieli,34341 +1961-09-21T00:00:00Z,10044,Mingsen,F,1994-05-21T00:00:00Z,1,Casley,39728 +1957-08-14T00:00:00Z,10045,Moss,M,1989-09-02T00:00:00Z,3,Shanbhogue,74970 +1960-07-23T00:00:00Z,10046,Lucien,M,1992-06-20T00:00:00Z,4,Rosenbaum,50064 +1952-06-29T00:00:00Z,10047,Zvonko,M,1989-03-31T00:00:00Z,4,Nyanchama,42716 +1963-07-11T00:00:00Z,10048,Florian,M,1985-02-24T00:00:00Z,3,Syrotiuk,26436 +1961-04-24T00:00:00Z,10049,Basil,F,1992-05-04T00:00:00Z,5,Tramer,37853 +1958-05-21T00:00:00Z,10050,Yinghua,M,1990-12-25T00:00:00Z,2,Dredge,43026 +1953-07-28T00:00:00Z,10051,Hidefumi,M,1992-10-15T00:00:00Z,3,Caine,58121 +1961-02-26T00:00:00Z,10052,Heping,M,1988-05-21T00:00:00Z,1,Nitsch,55360 +1954-09-13T00:00:00Z,10053,Sanjiv,F,1986-02-04T00:00:00Z,3,Zschoche,54462 +1957-04-04T00:00:00Z,10054,Mayumi,M,1995-03-13T00:00:00Z,4,Schueller,65367 +1956-06-06T00:00:00Z,10055,Georgy,M,1992-04-27T00:00:00Z,5,Dredge,49281 +1961-09-01T00:00:00Z,10056,Brendon,F,1990-02-01T00:00:00Z,2,Bernini,33370 +1954-05-30T00:00:00Z,10057,Ebbe,F,1992-01-15T00:00:00Z,4,Callaway,27215 +1954-10-01T00:00:00Z,10058,Berhard,M,1987-04-13T00:00:00Z,3,McFarlin,38376 +1953-09-19T00:00:00Z,10059,Alejandro,F,1991-06-26T00:00:00Z,2,McAlpine,44307 +1961-10-15T00:00:00Z,10060,Breannda,M,1987-11-02T00:00:00Z,2,Billingsley,29175 +1962-10-19T00:00:00Z,10061,Tse,M,1985-09-17T00:00:00Z,1,Herber,49095 +1961-11-02T00:00:00Z,10062,Anoosh,M,1991-08-30T00:00:00Z,3,Peyn,65030 +1952-08-06T00:00:00Z,10063,Gino,F,1989-04-08T00:00:00Z,3,Leonhardt,52121 +1959-04-07T00:00:00Z,10064,Udi,M,1985-11-20T00:00:00Z,5,Jansch,33956 +1963-04-14T00:00:00Z,10065,Satosi,M,1988-05-18T00:00:00Z,2,Awdeh,50249 +1952-11-13T00:00:00Z,10066,Kwee,M,1986-02-26T00:00:00Z,5,Schusler,31897 
+1953-01-07T00:00:00Z,10067,Claudi,M,1987-03-04T00:00:00Z,2,Stavenow,52044 +1962-11-26T00:00:00Z,10068,Charlene,M,1987-08-07T00:00:00Z,3,Brattka,28941 +1960-09-06T00:00:00Z,10069,Margareta,F,1989-11-05T00:00:00Z,5,Bierman,41933 +1955-08-20T00:00:00Z,10070,Reuven,M,1985-10-14T00:00:00Z,3,Garigliano,54329 +1958-01-21T00:00:00Z,10071,Hisao,M,1987-10-01T00:00:00Z,2,Lipner,40612 +1952-05-15T00:00:00Z,10072,Hironoby,F,1988-07-21T00:00:00Z,5,Sidou,54518 +1954-02-23T00:00:00Z,10073,Shir,M,1991-12-01T00:00:00Z,4,McClurg,32568 +1955-08-28T00:00:00Z,10074,Mokhtar,F,1990-08-13T00:00:00Z,5,Bernatsky,38992 +1960-03-09T00:00:00Z,10075,Gao,F,1987-03-19T00:00:00Z,5,Dolinsky,51956 +1952-06-13T00:00:00Z,10076,Erez,F,1985-07-09T00:00:00Z,3,Ritzmann,62405 +1964-04-18T00:00:00Z,10077,Mona,M,1990-03-02T00:00:00Z,5,Azuma,46595 +1959-12-25T00:00:00Z,10078,Danel,F,1987-05-26T00:00:00Z,2,Mondadori,69904 +1961-10-05T00:00:00Z,10079,Kshitij,F,1986-03-27T00:00:00Z,2,Gils,32263 +1957-12-03T00:00:00Z,10080,Premal,M,1985-11-19T00:00:00Z,5,Baek,52833 +1960-12-17T00:00:00Z,10081,Zhongwei,M,1986-10-30T00:00:00Z,2,Rosen,50128 +1963-09-09T00:00:00Z,10082,Parviz,M,1990-01-03T00:00:00Z,4,Lortz,49818 +1959-07-23T00:00:00Z,10083,Vishv,M,1987-03-31T00:00:00Z,1,Zockler,39110 +1960-05-25T00:00:00Z,10084,Tuval,M,1995-12-15T00:00:00Z,1,Kalloufi,28035 +1962-11-07T00:00:00Z,10085,Kenroku,M,1994-04-09T00:00:00Z,5,Malabarba,35742 +1962-11-19T00:00:00Z,10086,Somnath,M,1990-02-16T00:00:00Z,1,Foote,68547 +1959-07-23T00:00:00Z,10087,Xinglin,F,1986-09-08T00:00:00Z,5,Eugenio,32272 +1954-02-25T00:00:00Z,10088,Jungsoon,F,1988-09-02T00:00:00Z,5,Syrzycki,39638 +1963-03-21T00:00:00Z,10089,Sudharsan,F,1986-08-12T00:00:00Z,4,Flasterstein,43602 +1961-05-30T00:00:00Z,10090,Kendra,M,1986-03-14T00:00:00Z,2,Hofting,44956 +1955-10-04T00:00:00Z,10091,Amabile,M,1992-11-18T00:00:00Z,3,Gomatam,38645 +1964-10-18T00:00:00Z,10092,Valdiodio,F,1989-09-22T00:00:00Z,1,Niizuma,25976 
+1964-06-11T00:00:00Z,10093,Sailaja,M,1996-11-05T00:00:00Z,3,Desikan,45656 +1957-05-25T00:00:00Z,10094,Arumugam,F,1987-04-18T00:00:00Z,5,Ossenbruggen,66817 +1965-01-03T00:00:00Z,10095,Hilari,M,1986-07-15T00:00:00Z,4,Morton,37702 +1954-09-16T00:00:00Z,10096,Jayson,M,1990-01-14T00:00:00Z,4,Mandell,43889 +1952-02-27T00:00:00Z,10097,Remzi,M,1990-09-15T00:00:00Z,3,Waschkowski,71165 +1961-09-23T00:00:00Z,10098,Sreekrishna,F,1985-05-13T00:00:00Z,4,Servieres,44817 +1956-05-25T00:00:00Z,10099,Valter,F,1988-10-18T00:00:00Z,2,Sullins,73578 +1953-04-21T00:00:00Z,10100,Hironobu,F,1987-09-21T00:00:00Z,4,Haraldson,68431 diff --git a/qa/sql/src/main/resources/example.csv-spec b/qa/sql/src/main/resources/example.csv-spec new file mode 100644 index 00000000000..a7964cec8d2 --- /dev/null +++ b/qa/sql/src/main/resources/example.csv-spec @@ -0,0 +1,22 @@ +// some comment + +// name of the test - translated into 'testName' +name + +// ES SQL query +SELECT COUNT(*) FROM "emp"; + +// +// expected result in CSV format +// + +// list of +// type might be missing in which case it will be autodetected or can be one of the following +// d - double, f - float, i - int, b - byte, l - long, t - timestamp, date +A,B:d,C:i +// actual values +foo,2.5,3 +bar,3.5,4 +tar,4.5,5 +; +// repeat the above \ No newline at end of file diff --git a/qa/sql/src/main/resources/example.sql-spec b/qa/sql/src/main/resources/example.sql-spec new file mode 100644 index 00000000000..8408dc58b1a --- /dev/null +++ b/qa/sql/src/main/resources/example.sql-spec @@ -0,0 +1,8 @@ +// some comment + +// name of the test - translated into 'testName' +name +// SQL query to be executed against H2 and ES +SELECT COUNT(*) FROM "emp"; + +// repeat the above \ No newline at end of file diff --git a/qa/sql/src/main/resources/filter.sql-spec b/qa/sql/src/main/resources/filter.sql-spec new file mode 100644 index 00000000000..31d1830dfc6 --- /dev/null +++ b/qa/sql/src/main/resources/filter.sql-spec @@ -0,0 +1,63 @@ +// +// Filter +// + 
+whereFieldEquality +SELECT last_name l FROM "test_emp" WHERE emp_no = 10000 LIMIT 5; +whereFieldNonEquality +SELECT last_name l FROM "test_emp" WHERE emp_no <> 10000 ORDER BY emp_no LIMIT 5; +whereFieldNonEqualityJavaSyntax +SELECT last_name l FROM "test_emp" WHERE emp_no != 10000 ORDER BY emp_no LIMIT 5; +whereFieldLessThan +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 ORDER BY emp_no LIMIT 5; +whereFieldAndComparison +SELECT last_name l FROM "test_emp" WHERE emp_no > 10000 AND emp_no < 10005 ORDER BY emp_no LIMIT 5; +whereFieldOrComparison +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 OR emp_no = 10005 ORDER BY emp_no LIMIT 5; + +whereFieldEqualityNot +SELECT last_name l FROM "test_emp" WHERE NOT emp_no = 10000 LIMIT 5; +whereFieldNonEqualityNot +SELECT last_name l FROM "test_emp" WHERE NOT emp_no <> 10000 ORDER BY emp_no LIMIT 5; +whereFieldNonEqualityJavaSyntaxNot +SELECT last_name l FROM "test_emp" WHERE NOT emp_no != 10000 ORDER BY emp_no LIMIT 5; +whereFieldLessThanNot +SELECT last_name l FROM "test_emp" WHERE NOT emp_no < 10003 ORDER BY emp_no LIMIT 5; +whereFieldAndComparisonNot +SELECT last_name l FROM "test_emp" WHERE NOT (emp_no > 10000 AND emp_no < 10005) ORDER BY emp_no LIMIT 5; +whereFieldOrComparisonNot +SELECT last_name l FROM "test_emp" WHERE NOT (emp_no < 10003 OR emp_no = 10005) ORDER BY emp_no LIMIT 5; + +whereFieldWithOrder +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 ORDER BY emp_no; +whereFieldWithExactMatchOnString +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND gender = 'M'; +whereFieldWithNotEqualsOnString +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND gender <> 'M'; +whereFieldWithLikeMatch +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND last_name LIKE 'K%'; + +whereFieldWithOrderNot +SELECT last_name l FROM "test_emp" WHERE NOT emp_no < 10003 ORDER BY emp_no LIMIT 5; +whereFieldWithExactMatchOnStringNot +SELECT last_name l FROM "test_emp" WHERE NOT (emp_no < 
10003 AND gender = 'M') ORDER BY emp_no LIMIT 5; +whereFieldWithNotEqualsOnStringNot +SELECT last_name l FROM "test_emp" WHERE NOT (emp_no < 10003 AND gender <> 'M') ORDER BY emp_no LIMIT 5; +whereFieldWithLikeMatchNot +SELECT last_name l FROM "test_emp" WHERE NOT (emp_no < 10003 AND last_name NOT LIKE 'K%') ORDER BY emp_no LIMIT 5; + +whereFieldOnMatchWithAndAndOr +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND (gender = 'M' AND NOT FALSE OR last_name LIKE 'K%') ORDER BY emp_no; + +// TODO: (NOT) RLIKE in particular and more NOT queries in general + +whereIsNotNullAndComparison +SELECT last_name l FROM "test_emp" WHERE emp_no IS NOT NULL AND emp_no < 10005 ORDER BY emp_no; +whereIsNull +SELECT last_name l FROM "test_emp" WHERE emp_no IS NULL; +whereIsNotNullAndIsNull +SELECT last_name l FROM "test_emp" WHERE emp_no IS NOT NULL AND gender IS NULL; +whereBetween +SELECT last_name l FROM "test_emp" WHERE emp_no BETWEEN 9990 AND 10003 ORDER BY emp_no; +whereNotBetween +SELECT last_name l FROM "test_emp" WHERE emp_no NOT BETWEEN 10010 AND 10020 ORDER BY emp_no LIMIT 5; diff --git a/qa/sql/src/main/resources/fulltext.csv-spec b/qa/sql/src/main/resources/fulltext.csv-spec new file mode 100644 index 00000000000..73ef6363e8a --- /dev/null +++ b/qa/sql/src/main/resources/fulltext.csv-spec @@ -0,0 +1,31 @@ +// +// Full-text +// + +simpleQueryAllFields +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE QUERY('Baek fox') LIMIT 3; + + emp_no:i | first_name:s | gender:s | last_name:s +10080 |Premal |M |Baek +; + +simpleQueryDedicatedField +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE QUERY('Man*', 'fields=last_name') LIMIT 5; + + emp_no:i | first_name:s | gender:s | last_name:s +10096 |Jayson |M |Mandell +; + +matchQuery +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez'); + + emp_no:i | first_name:s | gender:s | last_name:s +10076 |Erez |F |Ritzmann +; + +multiMatchQuery +SELECT emp_no, 
first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 'type=best_fields;default_operator=OR'); + + emp_no:i | first_name:s | gender:s | last_name:s +10095 |Hilari |M |Morton +; diff --git a/qa/sql/src/main/resources/math.sql-spec b/qa/sql/src/main/resources/math.sql-spec new file mode 100644 index 00000000000..7f38a8a1a88 --- /dev/null +++ b/qa/sql/src/main/resources/math.sql-spec @@ -0,0 +1,78 @@ +// +// Math +// + +mathAbs +SELECT ABS(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathACos +SELECT ACOS(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathASin +SELECT ASIN(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathATan +SELECT ATAN(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +//mathCbrt +//SELECT CBRT(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathCeil +// H2 returns CEIL as a double despite the value being an integer; we return a long as the other DBs +SELECT CAST(CEIL(emp_no) AS INT) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathCos +SELECT COS(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathCosh +SELECT COSH(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathDegrees +SELECT DEGREES(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathFloor +SELECT CAST(FLOOR(emp_no) AS INT) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathLog +SELECT LOG(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathLog10 +SELECT LOG10(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathRadians +SELECT RADIANS(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathRound +SELECT CAST(ROUND(emp_no) AS INT) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; 
+mathSin +SELECT SIN(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathSinH +SELECT SINH(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathSqrt +SELECT SQRT(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathTan +SELECT TAN(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + +// +// Combined methods +// + +mathAbsOfSin +SELECT ABS(SIN(emp_no)) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathAbsOfCeilOfSin +SELECT EXP(ABS(CEIL(SIN(DEGREES(emp_no))))) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathAbsOfCeilOfSinWithFilter +SELECT EXP(ABS(CEIL(SIN(DEGREES(emp_no))))) m, first_name FROM "test_emp" WHERE EXP(ABS(CEIL(SIN(DEGREES(emp_no))))) < 10 ORDER BY emp_no; + +// +// Filter by Scalar +// +mathAbsFilterAndOrder +SELECT emp_no, ABS(emp_no) m, first_name FROM "test_emp" WHERE ABS(emp_no) < 10010 ORDER BY ABS(emp_no); +mathACosFilterAndOrder +SELECT emp_no, ACOS(emp_no) m, first_name FROM "test_emp" WHERE ACOS(emp_no) < 10010 ORDER BY ACOS(emp_no); +mathASinFilterAndOrder +SELECT emp_no, ASIN(emp_no) m, first_name FROM "test_emp" WHERE ASIN(emp_no) < 10010 ORDER BY ASIN(emp_no); +//mathATanFilterAndOrder +//SELECT emp_no, ATAN(emp_no) m, first_name FROM "test_emp" WHERE ATAN(emp_no) < 10010 ORDER BY ATAN(emp_no); +mathCeilFilterAndOrder +SELECT emp_no, CAST(CEIL(emp_no) AS INT) m, first_name FROM "test_emp" WHERE CEIL(emp_no) < 10010 ORDER BY CEIL(emp_no); +//mathCosFilterAndOrder +//SELECT emp_no, COS(emp_no) m, first_name FROM "test_emp" WHERE COS(emp_no) < 10010 ORDER BY COS(emp_no); +//mathCoshFilterAndOrder +//SELECT emp_no, COSH(emp_no) m, first_name FROM "test_emp" WHERE COSH(emp_no) < 10010 ORDER BY COSH(emp_no); + +// +// constants +// +mathConstantPI +SELECT ABS(emp_no) m, PI() as pi, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathConstant +SELECT 5 + 2 * 3 / 2 % 2 AS c, 
PI() as e, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; \ No newline at end of file diff --git a/qa/sql/src/main/resources/plugin-security.policy b/qa/sql/src/main/resources/plugin-security.policy new file mode 100644 index 00000000000..bb58eb4270d --- /dev/null +++ b/qa/sql/src/main/resources/plugin-security.policy @@ -0,0 +1,4 @@ +grant { + // Policy is required for tests to connect to testing Elasticsearch instances. + permission java.net.SocketPermission "*", "connect,resolve"; +}; diff --git a/qa/sql/src/main/resources/select.sql-spec b/qa/sql/src/main/resources/select.sql-spec new file mode 100644 index 00000000000..76562a07c86 --- /dev/null +++ b/qa/sql/src/main/resources/select.sql-spec @@ -0,0 +1,58 @@ +// +// Basic SELECT +// + +wildcardWithOrder +// tag::wildcardWithOrder +SELECT * FROM test_emp ORDER BY emp_no; +// end::wildcardWithOrder +column +SELECT last_name FROM "test_emp" ORDER BY emp_no; +columnWithAlias +SELECT last_name AS l FROM "test_emp" ORDER BY emp_no; +columnWithAliasNoAs +SELECT last_name l FROM "test_emp" ORDER BY emp_no; +multipleColumnsNoAlias +SELECT first_name, last_name FROM "test_emp" ORDER BY emp_no; +multipleColumnWithAliasWithAndWithoutAs +SELECT first_name f, last_name AS l FROM "test_emp" ORDER BY emp_no; + +// +// SELECT with LIMIT +// + +wildcardWithLimit +SELECT * FROM "test_emp" ORDER BY emp_no LIMIT 5; +wildcardWithOrderWithLimit +SELECT * FROM "test_emp" ORDER BY emp_no LIMIT 5; +columnWithLimit +SELECT last_name FROM "test_emp" ORDER BY emp_no LIMIT 5; +columnWithAliasWithLimit +SELECT last_name AS l FROM "test_emp" ORDER BY emp_no LIMIT 5; +columnWithAliasNoAsWithLimit +SELECT last_name l FROM "test_emp" ORDER BY emp_no LIMIT 5; +multipleColumnsNoAliasWithLimit +SELECT first_name, last_name FROM "test_emp" ORDER BY emp_no LIMIT 5; +multipleColumnWithAliasWithAndWithoutAsWithLimit +SELECT first_name f, last_name AS l FROM "test_emp" ORDER BY emp_no LIMIT 5; + + +// +// SELECT with CAST +// 
+//castWithLiteralToInt +//SELECT CAST(1 AS INT); +castOnColumnNumberToVarchar +SELECT CAST(emp_no AS VARCHAR) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; +castOnColumnNumberToLong +SELECT CAST(emp_no AS BIGINT) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; +castOnColumnNumberToSmallint +SELECT CAST(emp_no AS SMALLINT) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; +castOnColumnNumberWithAliasToInt +SELECT CAST(emp_no AS INT) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; +castOnColumnNumberToReal +SELECT CAST(emp_no AS REAL) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; +castOnColumnNumberToDouble +SELECT CAST(emp_no AS DOUBLE) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; +castOnColumnNumberToBoolean +SELECT CAST(emp_no AS BOOL) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; diff --git a/qa/sql/src/main/resources/setup_mock_metadata_get_columns.sql b/qa/sql/src/main/resources/setup_mock_metadata_get_columns.sql new file mode 100644 index 00000000000..1d1aef14a80 --- /dev/null +++ b/qa/sql/src/main/resources/setup_mock_metadata_get_columns.sql @@ -0,0 +1,45 @@ +CREATE TABLE mock ( + TABLE_SCHEM VARCHAR, + TABLE_NAME VARCHAR, + COLUMN_NAME VARCHAR, + DATA_TYPE INTEGER, + TYPE_NAME VARCHAR, + COLUMN_SIZE INTEGER, + BUFFER_LENGTH NULL, + DECIMAL_DIGITS INTEGER, + NUM_PREC_RADIX INTEGER, + NULLABLE INTEGER, + REMARKS VARCHAR, + COLUMN_DEF VARCHAR, + SQL_DATA_TYPE INTEGER, + SQL_DATETIME_SUB INTEGER, + CHAR_OCTET_LENGTH INTEGER, + ORDINAL_POSITION INTEGER, + IS_NULLABLE VARCHAR, + SCOPE_CATALOG VARCHAR, + SCOPE_SCHEMA VARCHAR, + SCOPE_TABLE VARCHAR, + SOURCE_DATA_TYPE SMALLINT, + IS_AUTOINCREMENT VARCHAR, + IS_GENERATEDCOLUMN VARCHAR +) AS +SELECT '', 'test1', 'name', 12, 'VARCHAR', 2147483647, null, null, null, + 1, -- columnNullable + null, null, null, null, null, 1, 'YES', null, null, null, null, '', '' +FROM DUAL +UNION ALL +SELECT '', 'test2', 'date', 93, 'TIMESTAMP', 19, null, null, null, + 1, 
-- columnNullable + null, null, null, null, null, 1, 'YES', null, null, null, null, '', '' +FROM DUAL +UNION ALL +SELECT '', 'test2', 'float', 7, 'REAL', 7, null, null, 2, + 1, -- columnNullable + null, null, null, null, null, 2, 'YES', null, null, null, null, '', '' +FROM DUAL +UNION ALL +SELECT '', 'test2', 'number', -5, 'BIGINT', 19, null, null, 10, + 1, -- columnNullable + null, null, null, null, null, 3, 'YES', null, null, null, null, '', '' +FROM DUAL +; diff --git a/qa/sql/src/main/resources/setup_mock_metadata_get_procedure_columns.sql b/qa/sql/src/main/resources/setup_mock_metadata_get_procedure_columns.sql new file mode 100644 index 00000000000..bf2b741c1cd --- /dev/null +++ b/qa/sql/src/main/resources/setup_mock_metadata_get_procedure_columns.sql @@ -0,0 +1,22 @@ +CREATE TABLE mock ( + PROCEDURE_CAT VARCHAR, + PROCEDURE_SCHEM VARCHAR, + PROCEDURE_NAME VARCHAR, + COLUMN_NAME VARCHAR, + COLUMN_TYPE SMALLINT, + DATA_TYPE INTEGER, + TYPE_NAME VARCHAR, + PRECISION INTEGER, + LENGTH INTEGER, + SCALE SMALLINT, + RADIX SMALLINT, + NULLABLE SMALLINT, + REMARKS VARCHAR, + COLUMN_DEF VARCHAR, + SQL_DATA_TYPE INTEGER, + SQL_DATETIME_SUB INTEGER, + CHAR_OCTET_LENGTH INTEGER, + ORDINAL_POSITION INTEGER, + IS_NULLABLE VARCHAR, + SPECIFIC_NAME VARCHAR +); diff --git a/qa/sql/src/main/resources/setup_mock_metadata_get_procedures.sql b/qa/sql/src/main/resources/setup_mock_metadata_get_procedures.sql new file mode 100644 index 00000000000..72bc81259e4 --- /dev/null +++ b/qa/sql/src/main/resources/setup_mock_metadata_get_procedures.sql @@ -0,0 +1,11 @@ +CREATE TABLE mock ( + PROCEDURE_CAT VARCHAR, + PROCEDURE_SCHEM VARCHAR, + PROCEDURE_NAME VARCHAR, + NUM_INPUT_PARAMS INTEGER, + NUM_OUTPUT_PARAMS INTEGER, + NUM_RESULT_SETS INTEGER, + REMARKS VARCHAR, + PROCEDURE_TYPE SMALLINT, + SPECIFIC_NAME VARCHAR +); diff --git a/qa/sql/src/main/resources/setup_mock_metadata_get_tables.sql b/qa/sql/src/main/resources/setup_mock_metadata_get_tables.sql new file mode 100644 index 
00000000000..c99989bca82 --- /dev/null +++ b/qa/sql/src/main/resources/setup_mock_metadata_get_tables.sql @@ -0,0 +1,15 @@ +CREATE TABLE mock ( + TABLE_SCHEM VARCHAR, + TABLE_NAME VARCHAR, + TABLE_TYPE VARCHAR, + REMARKS VARCHAR, + TYPE_CAT VARCHAR, + TYPE_SCHEM VARCHAR, + TYPE_NAME VARCHAR, + SELF_REFERENCING_COL_NAME VARCHAR, + REF_GENERATION VARCHAR +) AS +SELECT '', 'test1', 'TABLE', '', null, null, null, null, null FROM DUAL +UNION ALL +SELECT '', 'test2', 'TABLE', '', null, null, null, null, null FROM DUAL +; diff --git a/qa/sql/src/main/resources/setup_mock_show_tables.sql b/qa/sql/src/main/resources/setup_mock_show_tables.sql new file mode 100644 index 00000000000..eec1624bf7c --- /dev/null +++ b/qa/sql/src/main/resources/setup_mock_show_tables.sql @@ -0,0 +1,3 @@ +CREATE TABLE mock ( + "table" VARCHAR +); diff --git a/qa/sql/src/main/resources/setup_test_emp.sql b/qa/sql/src/main/resources/setup_test_emp.sql new file mode 100644 index 00000000000..3b79b3037f1 --- /dev/null +++ b/qa/sql/src/main/resources/setup_test_emp.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS "test_emp"; +CREATE TABLE "test_emp" ( + "birth_date" TIMESTAMP WITH TIME ZONE, + "emp_no" INT, + "first_name" VARCHAR(50), + "gender" VARCHAR(1), + "hire_date" TIMESTAMP WITH TIME ZONE, + "languages" TINYINT, + "last_name" VARCHAR(50), + "salary" INT + ) + AS SELECT * FROM CSVREAD('classpath:/employees.csv'); \ No newline at end of file diff --git a/sql/build.gradle b/sql/build.gradle new file mode 100644 index 00000000000..68227109ba3 --- /dev/null +++ b/sql/build.gradle @@ -0,0 +1,11 @@ +description = 'SQL for Elasticsearch' + +subprojects { + group = 'org.elasticsearch.sql' + + apply plugin: 'elasticsearch.build' + + dependencies { + testCompile "org.elasticsearch.test:framework:${version}" + } +} diff --git a/sql/cli-proto/build.gradle b/sql/cli-proto/build.gradle new file mode 100644 index 00000000000..98ab91a53d5 --- /dev/null +++ b/sql/cli-proto/build.gradle @@ -0,0 +1,17 @@ +description = 
'Request and response objects shared by the cli and ' + + 'its backend in :sql:server' + +dependencies { + compile project(':x-pack-elasticsearch:sql:shared-proto') + testCompile project(':x-pack-elasticsearch:sql:test-utils') +} + +forbiddenApisMain { + // does not depend on core, so only jdk and http signatures should be checked + signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] +} + +dependencyLicenses { + mapping from: /shared-proto.*/, to: 'elasticsearch' + ignoreSha 'shared-proto' +} diff --git a/sql/cli-proto/licenses/elasticsearch-LICENSE.txt b/sql/cli-proto/licenses/elasticsearch-LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/sql/cli-proto/licenses/elasticsearch-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/sql/cli-proto/licenses/elasticsearch-NOTICE.txt b/sql/cli-proto/licenses/elasticsearch-NOTICE.txt new file mode 100644 index 00000000000..643a060cd05 --- /dev/null +++ b/sql/cli-proto/licenses/elasticsearch-NOTICE.txt @@ -0,0 +1,5 @@ +Elasticsearch +Copyright 2009-2017 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). diff --git a/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/InfoRequest.java b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/InfoRequest.java new file mode 100644 index 00000000000..0716de49a15 --- /dev/null +++ b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/InfoRequest.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractInfoRequest; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; + +import java.io.IOException; + +/** + * Request general information about the server. 
+ */ +public class InfoRequest extends AbstractInfoRequest { + /** + * Build the info request containing information about the current JVM. + */ + public InfoRequest() { + super(); + } + + public InfoRequest(String jvmVersion, String jvmVendor, String jvmClassPath, String osName, String osVersion) { + super(jvmVersion, jvmVendor, jvmClassPath, osName, osVersion); + } + + InfoRequest(SqlDataInput in) throws IOException { + super(in); + } + + @Override + public RequestType requestType() { + return RequestType.INFO; + } +} diff --git a/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/InfoResponse.java b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/InfoResponse.java new file mode 100644 index 00000000000..52de73d97e3 --- /dev/null +++ b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/InfoResponse.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.ResponseType; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractInfoResponse; +import org.elasticsearch.xpack.sql.protocol.shared.Request; + +import java.io.DataInput; +import java.io.IOException; + +/** + * General information about the server. 
+ */ +public class InfoResponse extends AbstractInfoResponse { + public InfoResponse(String nodeName, String clusterName, byte versionMajor, byte versionMinor, String version, + String versionHash, String versionDate) { + super(nodeName, clusterName, versionMajor, versionMinor, version, versionHash, versionDate); + } + + InfoResponse(Request request, DataInput in) throws IOException { + super(request, in); + } + + @Override + public RequestType requestType() { + return RequestType.INFO; + } + + @Override + public ResponseType responseType() { + return ResponseType.INFO; + } +} \ No newline at end of file diff --git a/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/Proto.java b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/Proto.java new file mode 100644 index 00000000000..d99404b4e11 --- /dev/null +++ b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/Proto.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * Binary protocol for the CLI. All backwards compatibility is done using the + * version number sent in the header. 
+ */ +public final class Proto extends AbstractProto { + public static final Proto INSTANCE = new Proto(); + + private Proto() {} + + @Override + protected RequestType readRequestType(DataInput in) throws IOException { + return RequestType.readFrom(in); + } + + @Override + protected ResponseType readResponseType(DataInput in) throws IOException { + return ResponseType.readFrom(in); + } + + public enum RequestType implements AbstractProto.RequestType { + INFO(InfoRequest::new), + QUERY_INIT(QueryInitRequest::new), + QUERY_PAGE(QueryPageRequest::new), + QUERY_CLOSE(QueryCloseRequest::new); + + private final RequestReader reader; + + RequestType(RequestReader reader) { + this.reader = reader; + } + + static RequestType readFrom(DataInput in) throws IOException { + byte b = in.readByte(); + try { + return values()[b]; + } catch (ArrayIndexOutOfBoundsException e) { + throw new IllegalArgumentException("Unknown request type [" + b + "]", e); + } + } + + @Override + public void writeTo(DataOutput out) throws IOException { + out.writeByte(ordinal()); + } + + @Override + public RequestReader reader() { + return reader; + } + } + + public enum ResponseType implements AbstractProto.ResponseType { + INFO(InfoResponse::new), + QUERY_INIT(QueryInitResponse::new), + QUERY_PAGE(QueryPageResponse::new), + QUERY_CLOSE(QueryCloseResponse::new); + + private final ResponseReader reader; + + ResponseType(ResponseReader reader) { + this.reader = reader; + } + + static ResponseType readFrom(DataInput in) throws IOException { + byte b = in.readByte(); + try { + return values()[b]; + } catch (ArrayIndexOutOfBoundsException e) { + throw new IllegalArgumentException("Unknown response type [" + b + "]", e); + } + } + + @Override + public void writeTo(DataOutput out) throws IOException { + out.writeByte(ordinal()); + } + + @Override + public ResponseReader reader() { + return reader; + } + } +} diff --git 
a/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryCloseRequest.java b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryCloseRequest.java new file mode 100644 index 00000000000..5d372092825 --- /dev/null +++ b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryCloseRequest.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryCloseRequest; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; + +import java.io.IOException; + +public class QueryCloseRequest extends AbstractQueryCloseRequest { + public QueryCloseRequest(String cursor) { + super(cursor); + } + + QueryCloseRequest(SqlDataInput in) throws IOException { + super(in); + } + + @Override + public Proto.RequestType requestType() { + return Proto.RequestType.QUERY_CLOSE; + } +} diff --git a/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryCloseResponse.java b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryCloseResponse.java new file mode 100644 index 00000000000..dbb9d4e85e5 --- /dev/null +++ b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryCloseResponse.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.ResponseType; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryCloseResponse; +import org.elasticsearch.xpack.sql.protocol.shared.Request; + +import java.io.DataInput; +import java.io.IOException; + +public class QueryCloseResponse extends AbstractQueryCloseResponse { + public QueryCloseResponse(boolean succeeded) { + super(succeeded); + } + + QueryCloseResponse(Request request, DataInput in) throws IOException { + super(request, in); + } + + @Override + public RequestType requestType() { + return RequestType.QUERY_CLOSE; + } + + @Override + public ResponseType responseType() { + return ResponseType.QUERY_CLOSE; + } +} diff --git a/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryInitRequest.java b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryInitRequest.java new file mode 100644 index 00000000000..becd3fc551b --- /dev/null +++ b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryInitRequest.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryInitRequest; +import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; + +import java.io.IOException; +import java.util.TimeZone; + +/** + * Request to start a query. 
+ */ +public class QueryInitRequest extends AbstractQueryInitRequest { + public QueryInitRequest(String query, int fetchSize, TimeZone timeZone, TimeoutInfo timeout) { + super(query, fetchSize, timeZone, timeout); + } + + QueryInitRequest(SqlDataInput in) throws IOException { + super(in); + } + + @Override + public RequestType requestType() { + return RequestType.QUERY_INIT; + } +} diff --git a/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryInitResponse.java b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryInitResponse.java new file mode 100644 index 00000000000..18cde69a15a --- /dev/null +++ b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryInitResponse.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.ResponseType; +import org.elasticsearch.xpack.sql.protocol.shared.Request; + +import java.io.DataInput; +import java.io.IOException; + +public class QueryInitResponse extends QueryResponse { + public QueryInitResponse(long tookNanos, String cursor, String data) { + super(tookNanos, cursor, data); + } + + QueryInitResponse(Request request, DataInput in) throws IOException { + super(request, in); + } + + @Override + public RequestType requestType() { + return RequestType.QUERY_INIT; + } + + @Override + public ResponseType responseType() { + return ResponseType.QUERY_INIT; + } +} diff --git a/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryPageRequest.java b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryPageRequest.java new file mode 100644 index 00000000000..e30417bc58e --- /dev/null +++ b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryPageRequest.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryPageRequest; +import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; + +import java.io.IOException; + +public class QueryPageRequest extends AbstractQueryPageRequest { + public QueryPageRequest(String cursor, TimeoutInfo timeout) { + super(cursor, timeout); + } + + QueryPageRequest(SqlDataInput in) throws IOException { + super(in); + } + + @Override + public RequestType requestType() { + return RequestType.QUERY_PAGE; + } +} diff --git a/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryPageResponse.java b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryPageResponse.java new file mode 100644 index 00000000000..5bce5284460 --- /dev/null +++ b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryPageResponse.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.ResponseType; +import org.elasticsearch.xpack.sql.protocol.shared.Request; + +import java.io.DataInput; +import java.io.IOException; + +public class QueryPageResponse extends QueryResponse { + public QueryPageResponse(long tookNanos, String cursor, String data) { + super(tookNanos, cursor, data); + } + + QueryPageResponse(Request request, DataInput in) throws IOException { + super(request, in); + } + + @Override + public RequestType requestType() { + return RequestType.QUERY_PAGE; + } + + @Override + public ResponseType responseType() { + return ResponseType.QUERY_PAGE; + } +} diff --git a/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryResponse.java b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryResponse.java new file mode 100644 index 00000000000..1cd48f212d0 --- /dev/null +++ b/sql/cli-proto/src/main/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryResponse.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryResponse; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput; + +import java.io.DataInput; +import java.io.IOException; +import java.util.Objects; + +public abstract class QueryResponse extends AbstractQueryResponse { + public final String data; + + protected QueryResponse(long tookNanos, String cursor, String data) { + super(tookNanos, cursor); + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + this.data = data; + } + + protected QueryResponse(Request request, DataInput in) throws IOException { + super(request, in); + data = in.readUTF(); + } + + @Override + protected void writeTo(SqlDataOutput out) throws IOException { + super.writeTo(out); + out.writeUTF(data); + } + + @Override + protected String toStringBody() { + return super.toStringBody() + " data=[" + data + "]"; + } + + @Override + public boolean equals(Object obj) { + if (false == super.equals(obj)) { + return false; + } + QueryResponse other = (QueryResponse) obj; + return data.equals(other.data); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), data); + } +} diff --git a/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/CliRoundTripTestUtils.java b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/CliRoundTripTestUtils.java new file mode 100644 index 00000000000..7093f5ab129 --- /dev/null +++ b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/CliRoundTripTestUtils.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.Response; +import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo; +import org.elasticsearch.xpack.sql.test.RoundTripTestUtils; + +import java.io.IOException; + +import static org.elasticsearch.test.ESTestCase.randomNonNegativeLong; + +public final class CliRoundTripTestUtils { + private CliRoundTripTestUtils() { + // Just static utilities + } + + static void assertRoundTripCurrentVersion(Request request) throws IOException { + RoundTripTestUtils.assertRoundTrip(request, Proto.INSTANCE::writeRequest, Proto.INSTANCE::readRequest); + } + + static void assertRoundTripCurrentVersion(Request request, Response response) throws IOException { + RoundTripTestUtils.assertRoundTrip(response, + (r, out) -> Proto.INSTANCE.writeResponse(r, Proto.CURRENT_VERSION, out), + in -> Proto.INSTANCE.readResponse(request, in)); + } + + static TimeoutInfo randomTimeoutInfo() { + return new TimeoutInfo(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); + } +} diff --git a/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/InfoRequestTests.java b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/InfoRequestTests.java new file mode 100644 index 00000000000..d0b035b1ccf --- /dev/null +++ b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/InfoRequestTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.assertRoundTripCurrentVersion; + +public class InfoRequestTests extends ESTestCase { + static InfoRequest randomInfoRequest() { + return new InfoRequest(randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5), + randomAlphaOfLength(5), randomAlphaOfLength(5)); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomInfoRequest()); + } + + public void testToString() { + assertEquals("InfoRequest", + new InfoRequest("1.8.0_131", "testvendor", "testcp", "Mac OS X", "10.12.5").toString()); + } +} diff --git a/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/InfoResponseTests.java b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/InfoResponseTests.java new file mode 100644 index 00000000000..5f3b08480ef --- /dev/null +++ b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/InfoResponseTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.assertRoundTripCurrentVersion; +import static org.elasticsearch.xpack.sql.cli.net.protocol.InfoRequestTests.randomInfoRequest; + +public class InfoResponseTests extends ESTestCase { + static InfoResponse randomInfoResponse() { + return new InfoResponse(randomAlphaOfLength(5), randomAlphaOfLength(5), randomByte(), randomByte(), + randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5)); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomInfoRequest(), randomInfoResponse()); + } + + public void testToString() { + assertEquals("InfoResponse", + new InfoResponse("adsf", "test_cluster", (byte) 6, (byte) 0, "6.0.0", "feed", "date").toString()); + } +} diff --git a/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryCloseRequestTests.java b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryCloseRequestTests.java new file mode 100644 index 00000000000..2c9bec200d0 --- /dev/null +++ b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryCloseRequestTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.assertRoundTripCurrentVersion; + +public class QueryCloseRequestTests extends ESTestCase { + static QueryCloseRequest randomQueryCloseRequest() { + String cursor = randomAlphaOfLength(10); + return new QueryCloseRequest(cursor); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomQueryCloseRequest()); + } + + public void testToString() { + assertEquals("QueryCloseRequest<0320>", new QueryCloseRequest("0320").toString()); + } +} diff --git a/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryCloseResponseTests.java b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryCloseResponseTests.java new file mode 100644 index 00000000000..e888b78dba3 --- /dev/null +++ b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryCloseResponseTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.assertRoundTripCurrentVersion; +import static org.elasticsearch.xpack.sql.cli.net.protocol.QueryCloseRequestTests.randomQueryCloseRequest; + +public class QueryCloseResponseTests extends ESTestCase { + static QueryCloseResponse randomQueryCloseResponse() { + return new QueryCloseResponse(randomBoolean()); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomQueryCloseRequest(), randomQueryCloseResponse()); + } + + public void testToString() { + assertEquals("QueryCloseResponse", + new QueryCloseResponse(true).toString()); + } +} diff --git a/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryInitRequestTests.java b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryInitRequestTests.java new file mode 100644 index 00000000000..c27eb3f04b8 --- /dev/null +++ b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryInitRequestTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo; + +import java.io.IOException; +import java.util.TimeZone; + +import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.assertRoundTripCurrentVersion; +import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.randomTimeoutInfo; + +public class QueryInitRequestTests extends ESTestCase { + static QueryInitRequest randomQueryInitRequest() { + return new QueryInitRequest(randomAlphaOfLength(5), between(0, Integer.MAX_VALUE), randomTimeZone(random()), randomTimeoutInfo()); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomQueryInitRequest()); + } + + public void testToString() { + assertEquals("QueryInitRequest", + new QueryInitRequest("SELECT * FROM test.doc", 10, TimeZone.getTimeZone("UTC"), new TimeoutInfo(1, 1, 1)).toString()); + assertEquals("QueryInitRequest", + new QueryInitRequest("SELECT * FROM test.doc", 10, TimeZone.getTimeZone("GMT-5"), new TimeoutInfo(1, 1, 1)).toString()); + } +} diff --git a/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryInitResponseTests.java b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryInitResponseTests.java new file mode 100644 index 00000000000..8facca8b723 --- /dev/null +++ b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryInitResponseTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.assertRoundTripCurrentVersion; +import static org.elasticsearch.xpack.sql.cli.net.protocol.QueryInitRequestTests.randomQueryInitRequest; + +public class QueryInitResponseTests extends ESTestCase { + static QueryInitResponse randomQueryInitResponse() { + String cursor = randomAlphaOfLength(10); + return new QueryInitResponse(randomNonNegativeLong(), cursor, randomAlphaOfLength(5)); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomQueryInitRequest(), randomQueryInitResponse()); + } + + public void testToString() { + assertEquals("QueryInitResponse", + new QueryInitResponse(123, "0103", "test").toString()); + } +} diff --git a/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryPageRequestTests.java b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryPageRequestTests.java new file mode 100644 index 00000000000..8680b848046 --- /dev/null +++ b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryPageRequestTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.assertRoundTripCurrentVersion; +import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.randomTimeoutInfo; + +public class QueryPageRequestTests extends ESTestCase { + static QueryPageRequest randomQueryPageRequest() { + String cursor = randomAlphaOfLength(10); + return new QueryPageRequest(cursor, randomTimeoutInfo()); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomQueryPageRequest()); + } + + public void testToString() { + assertEquals("QueryPageRequest<0320>", new QueryPageRequest("0320", new TimeoutInfo(1, 1, 1)).toString()); + } +} diff --git a/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryPageResponseTests.java b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryPageResponseTests.java new file mode 100644 index 00000000000..bd7413b6eda --- /dev/null +++ b/sql/cli-proto/src/test/java/org/elasticsearch/xpack/sql/cli/net/protocol/QueryPageResponseTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.assertRoundTripCurrentVersion; +import static org.elasticsearch.xpack.sql.cli.net.protocol.QueryPageRequestTests.randomQueryPageRequest; + +public class QueryPageResponseTests extends ESTestCase { + static QueryPageResponse randomQueryPageResponse() { + String cursor = randomAlphaOfLength(10); + return new QueryPageResponse(randomNonNegativeLong(), cursor, randomAlphaOfLength(5)); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomQueryPageRequest(), randomQueryPageResponse()); + } + + public void testToString() { + assertEquals("QueryPageResponse", + new QueryPageResponse(123, "0103", "test").toString()); + } +} diff --git a/sql/cli/build.gradle b/sql/cli/build.gradle new file mode 100644 index 00000000000..361da9c02b4 --- /dev/null +++ b/sql/cli/build.gradle @@ -0,0 +1,106 @@ +apply plugin: 'elasticsearch.build' +/* We don't use the 'application' plugin because it builds a zip and tgz which + * we don't want. 
*/ + +description = 'Command line interface to Elasticsearch that speaks SQL' + +dependencies { + compile "org.jline:jline:3.3.1" + compile project(':x-pack-elasticsearch:sql:shared-client') + compile project(':x-pack-elasticsearch:sql:cli-proto') + compile project(':x-pack-elasticsearch:sql:shared-proto') + compile project(':core:cli') + + runtime "org.fusesource.jansi:jansi:1.16" + runtime "org.elasticsearch:jna:4.4.0-1" + runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" +} + +dependencyLicenses { + mapping from: /cli-proto.*/, to: 'elasticsearch' + mapping from: /shared-client.*/, to: 'elasticsearch' + mapping from: /shared-proto.*/, to: 'elasticsearch' + mapping from: /elasticsearch-cli.*/, to: 'elasticsearch' + mapping from: /jackson-.*/, to: 'jackson' + ignoreSha 'cli-proto' + ignoreSha 'shared-client' + ignoreSha 'shared-proto' + ignoreSha 'elasticsearch-cli' +} + +forbiddenApisMain { + // does not depend on core, so only jdk and http signatures should be checked + signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] +} + +jar { + // Bundle all dependencies into the jar. + from { + configurations.compile.collect { it.isDirectory() ? it : zipTree(it) } + configurations.runtime.collect { it.isDirectory() ? 
it : zipTree(it) } + } + // Make the jar "executable" with `java -jar` + manifest { + attributes 'Main-Class': 'org.elasticsearch.xpack.sql.cli.Cli' + } +} + +// Needed so we can launch graphviz if it is installed +project.compactProfile = 'full' + +thirdPartyAudit.excludes = [ + 'org.apache.sshd.client.SshClient', + 'org.apache.sshd.client.auth.keyboard.UserInteraction', + 'org.apache.sshd.client.channel.ChannelShell', + 'org.apache.sshd.client.channel.ClientChannel', + 'org.apache.sshd.client.channel.ClientChannelEvent', + 'org.apache.sshd.client.future.AuthFuture', + 'org.apache.sshd.client.future.ConnectFuture', + 'org.apache.sshd.client.future.OpenFuture', + 'org.apache.sshd.client.session.ClientSession', + 'org.apache.sshd.common.Factory', + 'org.apache.sshd.common.channel.PtyMode', + 'org.apache.sshd.common.config.keys.FilePasswordProvider', + 'org.apache.sshd.common.util.io.NoCloseInputStream', + 'org.apache.sshd.common.util.io.NoCloseOutputStream', + 'org.apache.sshd.server.Command', + 'org.apache.sshd.server.Environment', + 'org.apache.sshd.server.ExitCallback', + 'org.apache.sshd.server.SessionAware', + 'org.apache.sshd.server.Signal', + 'org.apache.sshd.server.SshServer', + 'org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider', + 'org.apache.sshd.server.scp.ScpCommandFactory$Builder', + 'org.apache.sshd.server.session.ServerSession', + 'org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory$Builder', + 'org.mozilla.universalchardet.UniversalDetector' +] + +task run { + description = 'Run the CLI and connect to elasticsearch running on 9200' + dependsOn 'assemble' + doLast { + List command = [new File(project.javaHome, 'bin/java').absolutePath] + if ('true'.equals(System.getProperty('debug', 'false'))) { + command += '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000' + } + command += ['-jar', jar.archivePath.absolutePath] + logger.info("running the cli with: ${command}") + + new ProcessBuilder(command) + 
.redirectOutput(ProcessBuilder.Redirect.INHERIT) + .redirectInput(ProcessBuilder.Redirect.INHERIT) + .redirectError(ProcessBuilder.Redirect.INHERIT) + .start() + .waitFor() + } +} + +// Use the jar for testing so we can get the proper version information +test { + classpath -= compileJava.outputs.files + classpath -= configurations.compile + classpath -= configurations.runtime + classpath += jar.outputs.files + dependsOn jar +} \ No newline at end of file diff --git a/sql/cli/licenses/elasticsearch-LICENSE.txt b/sql/cli/licenses/elasticsearch-LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/sql/cli/licenses/elasticsearch-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/sql/cli/licenses/elasticsearch-NOTICE.txt b/sql/cli/licenses/elasticsearch-NOTICE.txt new file mode 100644 index 00000000000..643a060cd05 --- /dev/null +++ b/sql/cli/licenses/elasticsearch-NOTICE.txt @@ -0,0 +1,5 @@ +Elasticsearch +Copyright 2009-2017 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). diff --git a/sql/cli/licenses/jackson-LICENSE b/sql/cli/licenses/jackson-LICENSE new file mode 100644 index 00000000000..f5f45d26a49 --- /dev/null +++ b/sql/cli/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/sql/cli/licenses/jackson-NOTICE b/sql/cli/licenses/jackson-NOTICE new file mode 100644 index 00000000000..4c976b7b4cc --- /dev/null +++ b/sql/cli/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. 
+It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/sql/cli/licenses/jackson-core-2.8.10.jar.sha1 b/sql/cli/licenses/jackson-core-2.8.10.jar.sha1 new file mode 100644 index 00000000000..a322d371e26 --- /dev/null +++ b/sql/cli/licenses/jackson-core-2.8.10.jar.sha1 @@ -0,0 +1 @@ +eb21a035c66ad307e66ec8fce37f5d50fd62d039 \ No newline at end of file diff --git a/sql/cli/licenses/jansi-1.16.jar.sha1 b/sql/cli/licenses/jansi-1.16.jar.sha1 new file mode 100644 index 00000000000..8adc5c7977c --- /dev/null +++ b/sql/cli/licenses/jansi-1.16.jar.sha1 @@ -0,0 +1 @@ +b1aaf0028852164ab6b4057192ccd0ba7dedd3a5 \ No newline at end of file diff --git a/sql/cli/licenses/jansi-LICENSE.txt b/sql/cli/licenses/jansi-LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/sql/cli/licenses/jansi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/sql/cli/licenses/jansi-NOTICE.txt b/sql/cli/licenses/jansi-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sql/cli/licenses/jline-3.3.1.jar.sha1 b/sql/cli/licenses/jline-3.3.1.jar.sha1 new file mode 100644 index 00000000000..85661fb7fda --- /dev/null +++ b/sql/cli/licenses/jline-3.3.1.jar.sha1 @@ -0,0 +1 @@ +d8a30137fe4ee2246b71b3915baac767d348c5bb \ No newline at end of file diff --git a/sql/cli/licenses/jline-LICENSE.txt b/sql/cli/licenses/jline-LICENSE.txt new file mode 100644 index 00000000000..d4defddda4c --- /dev/null +++ b/sql/cli/licenses/jline-LICENSE.txt @@ -0,0 +1,35 @@ +Copyright (c) 2002-2016, the original author or authors. +All rights reserved. 
+ +http://www.opensource.org/licenses/bsd-license.php + +Redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following +conditions are met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with +the distribution. + +Neither the name of JLine nor the names of its contributors +may be used to endorse or promote products derived from this +software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, +OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/sql/cli/licenses/jline-NOTICE.txt b/sql/cli/licenses/jline-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sql/cli/licenses/jna-4.4.0-1.jar.sha1 b/sql/cli/licenses/jna-4.4.0-1.jar.sha1 new file mode 100644 index 00000000000..6b564834b57 --- /dev/null +++ b/sql/cli/licenses/jna-4.4.0-1.jar.sha1 @@ -0,0 +1 @@ +c9dfcec6f07ee4b1d7a6c09a7eaa9dd4fb6d2c79 \ No newline at end of file diff --git a/sql/cli/licenses/jna-LICENSE.txt b/sql/cli/licenses/jna-LICENSE.txt new file mode 100644 index 00000000000..f433b1a53f5 --- /dev/null +++ b/sql/cli/licenses/jna-LICENSE.txt @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/sql/cli/licenses/jna-NOTICE.txt b/sql/cli/licenses/jna-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sql/cli/licenses/jopt-simple-5.0.2.jar.sha1 b/sql/cli/licenses/jopt-simple-5.0.2.jar.sha1 new file mode 100644 index 00000000000..b50ed4fea3b --- /dev/null +++ b/sql/cli/licenses/jopt-simple-5.0.2.jar.sha1 @@ -0,0 +1 @@ +98cafc6081d5632b61be2c9e60650b64ddbc637c \ No newline at end of file diff --git a/sql/cli/licenses/jopt-simple-LICENSE.txt b/sql/cli/licenses/jopt-simple-LICENSE.txt new file mode 100644 index 00000000000..85f923a9526 --- /dev/null +++ b/sql/cli/licenses/jopt-simple-LICENSE.txt @@ -0,0 +1,24 @@ +/* + The MIT License + + Copyright (c) 2004-2015 Paul R. Holser, Jr. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ diff --git a/sql/cli/licenses/jopt-simple-NOTICE.txt b/sql/cli/licenses/jopt-simple-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java new file mode 100644 index 00000000000..3ac065f53d9 --- /dev/null +++ b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.xpack.sql.cli.command.ClearScreenCliCommand; +import org.elasticsearch.xpack.sql.cli.command.CliCommand; +import org.elasticsearch.xpack.sql.cli.command.CliCommands; +import org.elasticsearch.xpack.sql.cli.command.CliSession; +import org.elasticsearch.xpack.sql.cli.command.FetchSeparatorCliCommand; +import org.elasticsearch.xpack.sql.cli.command.FetchSizeCliCommand; +import org.elasticsearch.xpack.sql.cli.command.PrintLogoCommand; +import org.elasticsearch.xpack.sql.cli.command.ServerInfoCliCommand; +import org.elasticsearch.xpack.sql.cli.command.ServerQueryCliCommand; +import org.elasticsearch.xpack.sql.client.shared.ClientException; +import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; +import org.elasticsearch.xpack.sql.client.shared.Version; + +import java.io.IOException; +import java.net.ConnectException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.List; +import 
java.util.logging.LogManager; + +public class Cli extends Command { + private final OptionSpec debugOption; + private final OptionSpec keystoreLocation; + private final OptionSpec checkOption; + private final OptionSpec connectionString; + + private Cli() { + super("Elasticsearch SQL CLI", Cli::configureLogging); + this.debugOption = parser.acceptsAll(Arrays.asList("d", "debug"), + "Enable debug logging") + .withRequiredArg().ofType(Boolean.class) + .defaultsTo(Boolean.parseBoolean(System.getProperty("cli.debug", "false"))); + this.keystoreLocation = parser.acceptsAll( + Arrays.asList("k", "keystore_location"), + "Location of a keystore to use when setting up SSL. " + + "If specified then the CLI will prompt for a keystore password. " + + "If specified when the uri isn't https then an error is thrown.") + .withRequiredArg().ofType(String.class); + this.checkOption = parser.acceptsAll(Arrays.asList("c", "check"), + "Enable initial connection check on startup") + .withRequiredArg().ofType(Boolean.class) + .defaultsTo(Boolean.parseBoolean(System.getProperty("cli.check", "true"))); + this.connectionString = parser.nonOptions("uri"); + } + + /** + * Use this VM Options to run in IntelliJ or Eclipse: + * -Dorg.jline.terminal.type=xterm-256color + * -Dorg.jline.terminal.jna=false + * -Dorg.jline.terminal.jansi=false + * -Dorg.jline.terminal.exec=false + * -Dorg.jline.terminal.dumb=true + */ + public static void main(String[] args) throws Exception { + final Cli cli = new Cli(); + int status = cli.main(args, Terminal.DEFAULT); + if (status != ExitCodes.OK) { + exit(status); + } + } + + private static void configureLogging() { + try { + /* Initialize the logger from the a properties file we bundle. This makes sure + * we get useful error messages from jLine. 
*/ + LogManager.getLogManager().readConfiguration(Cli.class.getResourceAsStream("/logging.properties")); + } catch (IOException ex) { + throw new RuntimeException("cannot setup logging", ex); + } + } + + @Override + protected void execute(org.elasticsearch.cli.Terminal terminal, OptionSet options) throws Exception { + boolean debug = debugOption.value(options); + boolean checkConnection = checkOption.value(options); + List args = connectionString.values(options); + if (args.size() > 1) { + throw new UserException(ExitCodes.USAGE, "expecting a single uri"); + } + String uri = args.size() == 1 ? args.get(0) : null; + args = keystoreLocation.values(options); + if (args.size() > 1) { + throw new UserException(ExitCodes.USAGE, "expecting a single keystore file"); + } + String keystoreLocationValue = args.size() == 1 ? args.get(0) : null; + execute(uri, debug, keystoreLocationValue, checkConnection); + } + + private void execute(String uri, boolean debug, String keystoreLocation, boolean checkConnection) throws Exception { + CliCommand cliCommand = new CliCommands( + new PrintLogoCommand(), + new ClearScreenCliCommand(), + new FetchSizeCliCommand(), + new FetchSeparatorCliCommand(), + new ServerInfoCliCommand(), + new ServerQueryCliCommand() + ); + try (CliTerminal cliTerminal = new JLineTerminal()) { + ConnectionBuilder connectionBuilder = new ConnectionBuilder(cliTerminal); + ConnectionConfiguration con = connectionBuilder.buildConnection(uri, keystoreLocation); + CliSession cliSession = new CliSession(new CliHttpClient(con)); + cliSession.setDebug(debug); + if (checkConnection) { + checkConnection(cliSession, cliTerminal, con); + } + new CliRepl(cliTerminal, cliSession, cliCommand).execute(); + } + } + + private void checkConnection(CliSession cliSession, CliTerminal cliTerminal, ConnectionConfiguration con) throws UserException { + try { + cliSession.checkConnection(); + } catch (ClientException ex) { + if (cliSession.isDebug()) { + cliTerminal.error("Client 
Exception", ex.getMessage()); + cliTerminal.println(); + cliTerminal.printStackTrace(ex); + cliTerminal.flush(); + } + if (ex.getCause() != null && ex.getCause() instanceof ConnectException) { + // Most likely Elasticsearch is not running + throw new UserException(ExitCodes.IO_ERROR, + "Cannot connect to the server " + con.connectionString() + " - " + ex.getCause().getMessage()); + } else { + // Most likely we connected to an old version of Elasticsearch or not Elasticsearch at all + throw new UserException(ExitCodes.DATA_ERROR, + "Cannot communicate with the server " + con.connectionString() + + ". This version of CLI only works with Elasticsearch version " + Version.version()); + } + } + + } +} diff --git a/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliHttpClient.java b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliHttpClient.java new file mode 100644 index 00000000000..e32a013de02 --- /dev/null +++ b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliHttpClient.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.xpack.sql.cli.net.protocol.InfoRequest; +import org.elasticsearch.xpack.sql.cli.net.protocol.InfoResponse; +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto; +import org.elasticsearch.xpack.sql.cli.net.protocol.QueryCloseRequest; +import org.elasticsearch.xpack.sql.cli.net.protocol.QueryCloseResponse; +import org.elasticsearch.xpack.sql.cli.net.protocol.QueryInitRequest; +import org.elasticsearch.xpack.sql.cli.net.protocol.QueryPageRequest; +import org.elasticsearch.xpack.sql.cli.net.protocol.QueryResponse; +import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; +import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection; +import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection.ResponseOrException; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.Response; +import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.sql.SQLException; +import java.time.Instant; +import java.util.Collections; +import java.util.Map; +import java.util.TimeZone; + +public class CliHttpClient { + private final ConnectionConfiguration cfg; + + public CliHttpClient(ConnectionConfiguration cfg) { + this.cfg = cfg; + } + + public InfoResponse serverInfo() throws SQLException { + InfoRequest request = new InfoRequest(); + return (InfoResponse) post(request); + } + + public QueryResponse queryInit(String query, int fetchSize) throws SQLException { + // TODO allow customizing the time zone - this is what session set/reset/get should be about + QueryInitRequest request = new QueryInitRequest(query, fetchSize, TimeZone.getTimeZone("UTC"), timeout()); + return (QueryResponse) post(request); + } + + public QueryResponse nextPage(String cursor) throws SQLException { + QueryPageRequest request = new 
QueryPageRequest(cursor, timeout()); + return (QueryResponse) post(request); + } + + public QueryCloseResponse queryClose(String cursor) throws SQLException { + QueryCloseRequest request = new QueryCloseRequest(cursor); + return (QueryCloseResponse) post(request); + } + + private TimeoutInfo timeout() { + long clientTime = Instant.now().toEpochMilli(); + return new TimeoutInfo(clientTime, cfg.queryTimeout(), cfg.pageTimeout()); + } + + private Response post(Request request) throws SQLException { + return AccessController.doPrivileged((PrivilegedAction>) () -> + JreHttpUrlConnection.http("_xpack/sql/cli", "error_trace", cfg, con -> + con.post( + out -> Proto.INSTANCE.writeRequest(request, out), + in -> Proto.INSTANCE.readResponse(request, in) + ) + ) + ).getResponseOrThrowException(); + } +} diff --git a/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java new file mode 100644 index 00000000000..9fe4dece230 --- /dev/null +++ b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.xpack.sql.cli.command.CliCommand; +import org.elasticsearch.xpack.sql.cli.command.CliSession; + +import java.util.Locale; + +public class CliRepl { + + private CliTerminal cliTerminal; + private CliCommand cliCommand; + private CliSession cliSession; + + public CliRepl(CliTerminal cliTerminal, CliSession cliSession, CliCommand cliCommand) { + this.cliTerminal = cliTerminal; + this.cliCommand = cliCommand; + this.cliSession = cliSession; + } + + public void execute() { + String DEFAULT_PROMPT = "sql> "; + String MULTI_LINE_PROMPT = " | "; + + StringBuilder multiLine = new StringBuilder(); + String prompt = DEFAULT_PROMPT; + + cliTerminal.flush(); + cliCommand.handle(cliTerminal, cliSession, "logo"); + + while (true) { + String line = cliTerminal.readLine(prompt); + if (line == null) { + return; + } + line = line.trim(); + + if (!line.endsWith(";")) { + multiLine.append(" "); + multiLine.append(line); + prompt = MULTI_LINE_PROMPT; + continue; + } + + line = line.substring(0, line.length() - 1); + + prompt = DEFAULT_PROMPT; + if (multiLine.length() > 0) { + // append the line without trailing ; + multiLine.append(line); + line = multiLine.toString().trim(); + multiLine.setLength(0); + } + + // special case to handle exit + if (isExit(line)) { + cliTerminal.line().em("Bye!").ln(); + cliTerminal.flush(); + return; + } + if (cliCommand.handle(cliTerminal, cliSession, line) == false) { + cliTerminal.error("Unrecognized command", line); + } + cliTerminal.println(); + } + } + + private static boolean isExit(String line) { + line = line.toLowerCase(Locale.ROOT); + return line.equals("exit") || line.equals("quit"); + } + +} diff --git a/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliTerminal.java b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliTerminal.java new file mode 100644 index 00000000000..017a7598ae6 --- /dev/null +++ 
b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliTerminal.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import java.io.IOException; + +/** + * Represents a terminal endpoint + */ +public interface CliTerminal extends AutoCloseable { + + /** + * Prints line with plain text + */ + void print(String text); + + /** + * Prints line with plain text followed by a new line + */ + void println(String text); + + /** + * Prints a formatted error message + */ + void error(String type, String message); + + /** + * Prints a new line + */ + void println(); + + /** + * Clears the terminal + */ + void clear(); + + /** + * Flushes the terminal + */ + void flush(); + + /** + * Prints the stacktrace of the exception + */ + void printStackTrace(Exception ex); + + /** + * Prompts the user to enter the password and returns it. + */ + String readPassword(String prompt); + + /** + * Reads the line from the terminal. + */ + String readLine(String prompt); + + /** + * Creates a new line builder, which allows building a formatted lines. + * + * The line is not displayed until it is closed with ln() or end(). + */ + LineBuilder line(); + + interface LineBuilder { + /** + * Adds a plain text to the line + */ + LineBuilder text(String text); + + /** + * Adds a text with emphasis to the line + */ + LineBuilder em(String text); + + /** + * Adds a text representing the error message + */ + LineBuilder error(String text); + + /** + * Adds a text representing a parameter of the error message + */ + LineBuilder param(String text); + + /** + * Adds '\n' to the line and send it to the screen. + */ + void ln(); + + /** + * Sends the line to the screen. 
+ */ + void end(); + } + + @Override + void close() throws IOException; +} diff --git a/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/Completers.java b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/Completers.java new file mode 100644 index 00000000000..591ef56fd33 --- /dev/null +++ b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/Completers.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import org.jline.reader.Completer; +import org.jline.reader.impl.completer.AggregateCompleter; +import org.jline.reader.impl.completer.ArgumentCompleter; +import org.jline.reader.impl.completer.StringsCompleter; + +class Completers { + //TODO: need tree structure + static final Completer INSTANCE = new AggregateCompleter( + new ArgumentCompleter(new StringsCompleter("", "EXPLAIN", "SHOW", "SELECT", "SET")), + new ArgumentCompleter(new StringsCompleter("SHOW", "TABLE", "COLUMNS", "FUNCTIONS"))); + +} diff --git a/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilder.java b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilder.java new file mode 100644 index 00000000000..8f209e6d98a --- /dev/null +++ b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilder.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; + +import java.net.URI; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Properties; + +import static org.elasticsearch.xpack.sql.client.shared.UriUtils.parseURI; +import static org.elasticsearch.xpack.sql.client.shared.UriUtils.removeQuery; + +/** + * Connection Builder. Can interactively ask users for the password if it is not provided + */ +public class ConnectionBuilder { + public static String DEFAULT_CONNECTION_STRING = "http://localhost:9200/"; + public static URI DEFAULT_URI = URI.create(DEFAULT_CONNECTION_STRING); + + private CliTerminal cliTerminal; + + public ConnectionBuilder(CliTerminal cliTerminal) { + this.cliTerminal = cliTerminal; + } + + /** + * Build the connection. + * @param connectionStringArg the connection string to connect to + * @param keystoreLocation the location of the keystore to configure. If null then use the system keystore. 
+ */ + public ConnectionConfiguration buildConnection(String connectionStringArg, String keystoreLocation) throws UserException { + final URI uri; + final String connectionString; + Properties properties = new Properties(); + String user = null; + String password = null; + if (connectionStringArg != null) { + connectionString = connectionStringArg; + uri = removeQuery(parseURI(connectionString, DEFAULT_URI), connectionString, DEFAULT_URI); + user = uri.getUserInfo(); + if (user != null) { + int colonIndex = user.indexOf(':'); + if (colonIndex >= 0) { + password = user.substring(colonIndex + 1); + user = user.substring(0, colonIndex); + } + } + } else { + uri = DEFAULT_URI; + connectionString = DEFAULT_CONNECTION_STRING; + } + + if (keystoreLocation != null) { + if (false == "https".equals(uri.getScheme())) { + throw new UserException(ExitCodes.USAGE, "keystore file specified without https"); + } + Path p = Paths.get(keystoreLocation); + checkIfExists("keystore file", p); + String keystorePassword = cliTerminal.readPassword("keystore password: "); + + /* + * Set both the keystore and truststore settings which is required + * to everything work smoothly. I'm not totally sure why we have + * two settings but that is a problem for another day. 
+ */ + properties.put("ssl.keystore.location", keystoreLocation); + properties.put("ssl.keystore.pass", keystorePassword); + properties.put("ssl.truststore.location", keystoreLocation); + properties.put("ssl.truststore.pass", keystorePassword); + } + + if ("https".equals(uri.getScheme())) { + properties.put("ssl", "true"); + } + + if (user != null) { + if (password == null) { + password = cliTerminal.readPassword("password: "); + } + properties.setProperty(ConnectionConfiguration.AUTH_USER, user); + properties.setProperty(ConnectionConfiguration.AUTH_PASS, password); + } + + return newConnectionConfiguration(uri, connectionString, properties); + } + + protected ConnectionConfiguration newConnectionConfiguration(URI uri, String connectionString, Properties properties) { + return new ConnectionConfiguration(uri, connectionString, properties); + } + + protected void checkIfExists(String name, Path p) throws UserException { + if (false == Files.exists(p)) { + throw new UserException(ExitCodes.USAGE, name + " [" + p + "] doesn't exist"); + } + if (false == Files.isRegularFile(p)) { + throw new UserException(ExitCodes.USAGE, name + " [" + p + "] isn't a regular file"); + } + } + +} diff --git a/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/FatalCliException.java b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/FatalCliException.java new file mode 100644 index 00000000000..c314ac1009e --- /dev/null +++ b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/FatalCliException.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli;

import org.jline.reader.EndOfFileException;
import org.jline.reader.LineReader;
import org.jline.reader.LineReaderBuilder;
import org.jline.reader.UserInterruptException;
import org.jline.terminal.Terminal;
import org.jline.terminal.TerminalBuilder;
import org.jline.utils.AttributedString;
import org.jline.utils.AttributedStringBuilder;
import org.jline.utils.InfoCmp;

import java.io.BufferedReader;
import java.io.IOException;

import static org.jline.utils.AttributedStyle.BOLD;
import static org.jline.utils.AttributedStyle.BRIGHT;
import static org.jline.utils.AttributedStyle.DEFAULT;
import static org.jline.utils.AttributedStyle.RED;
import static org.jline.utils.AttributedStyle.YELLOW;

/**
 * {@link CliTerminal} implementation backed by jline.
 */
public class JLineTerminal implements CliTerminal {

    private Terminal terminal;
    private LineReader reader;

    protected JLineTerminal() {
        try {
            terminal = TerminalBuilder.builder().build();
            reader = LineReaderBuilder.builder()
                    .terminal(terminal)
                    .completer(Completers.INSTANCE)
                    .build();
        } catch (IOException ex) {
            // Without a working terminal the CLI cannot run at all.
            throw new FatalCliException("Cannot use terminal", ex);
        }
    }

    @Override
    public LineBuilder line() {
        return new LineBuilder();
    }

    @Override
    public void print(String text) {
        terminal.writer().print(text);
    }

    @Override
    public void println(String text) {
        terminal.writer().println(text);
    }

    @Override
    public void error(String type, String message) {
        // Render "type [message]" with the brackets in red and the message in yellow.
        AttributedStringBuilder styled = new AttributedStringBuilder();
        styled.append(type + " [", BOLD.foreground(RED));
        styled.append(message, DEFAULT.boldOff().italic().foreground(YELLOW));
        styled.append("]", BOLD.underlineOff().foreground(RED));
        terminal.writer().print(styled.toAnsi(terminal));
        terminal.flush();
    }

    @Override
    public void println() {
        terminal.writer().println();
    }

    @Override
    public void clear() {
        terminal.puts(InfoCmp.Capability.clear_screen);
    }

    @Override
    public void flush() {
        terminal.flush();
    }

    @Override
    public void printStackTrace(Exception ex) {
        ex.printStackTrace(terminal.writer());
    }

    @Override
    public String readPassword(String prompt) {
        terminal.writer().print(prompt);
        terminal.writer().flush();
        // Disable echo while the password is typed, and make sure it comes back on.
        terminal.echo(false);
        try {
            return new BufferedReader(terminal.reader()).readLine();
        } catch (IOException ex) {
            throw new FatalCliException("Error reading password", ex);
        } finally {
            terminal.echo(true);
        }
    }

    @Override
    public String readLine(String prompt) {
        String coloredPrompt = new AttributedString(prompt, DEFAULT.foreground(YELLOW)).toAnsi(terminal);
        try {
            return reader.readLine(coloredPrompt);
        } catch (UserInterruptException ex) {
            // Ctrl-C: abandon the current line but keep the CLI running.
            return "";
        } catch (EndOfFileException ex) {
            // Ctrl-D / end of input: signal the caller to stop.
            return null;
        }
    }

    @Override
    public void close() throws IOException {
        terminal.close();
    }

    /**
     * Builds a single styled output line out of text, emphasized, error and
     * parameter fragments.
     */
    public final class LineBuilder implements CliTerminal.LineBuilder {
        AttributedStringBuilder line;

        private LineBuilder() {
            line = new AttributedStringBuilder();
        }

        public LineBuilder text(String text) {
            line.append(text, DEFAULT);
            return this;
        }

        public LineBuilder em(String text) {
            line.append(text, DEFAULT.foreground(BRIGHT));
            return this;
        }

        public LineBuilder error(String text) {
            line.append(text, BOLD.foreground(RED));
            return this;
        }

        public LineBuilder param(String text) {
            line.append(text, DEFAULT.italic().foreground(YELLOW));
            return this;
        }

        /** Terminates the line and writes it with a trailing newline. */
        public void ln() {
            terminal.writer().println(line.toAnsi(terminal));
        }

        /** Writes the line without a trailing newline and flushes. */
        public void end() {
            terminal.writer().print(line.toAnsi(terminal));
            terminal.writer().flush();
        }
    }

}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli.command;

import org.elasticsearch.xpack.sql.cli.CliTerminal;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Base class for simple commands that trigger when the input line matches a
 * regular expression.
 */
public abstract class AbstractCliCommand implements CliCommand {

    protected final Pattern pattern;

    AbstractCliCommand(Pattern pattern) {
        this.pattern = pattern;
    }

    @Override
    public boolean handle(CliTerminal terminal, CliSession cliSession, String line) {
        Matcher matcher = pattern.matcher(line);
        if (matcher.matches() == false) {
            // Not our command; let the next handler try.
            return false;
        }
        return doHandle(terminal, cliSession, matcher, line);
    }

    /**
     * Performs the command for a line that matched {@link #pattern}.
     *
     * @return true if the command handled the line, false otherwise
     */
    protected abstract boolean doHandle(CliTerminal terminal, CliSession cliSession, Matcher m, String line);
}
+ */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.xpack.sql.cli.CliTerminal; + +public abstract class AbstractServerCliCommand implements CliCommand { + + public AbstractServerCliCommand() { + } + + @Override + public final boolean handle(CliTerminal terminal, CliSession cliSession, String line) { + try { + return doHandle(terminal, cliSession, line); + } catch (RuntimeException e) { + handleExceptionWhileCommunicatingWithServer(terminal, cliSession, e); + } + return true; + } + + protected abstract boolean doHandle(CliTerminal cliTerminal, CliSession cliSession, String line); + + /** + * Handle an exception while communication with the server. Extracted + * into a method so that tests can bubble the failure. + */ + protected void handleExceptionWhileCommunicatingWithServer(CliTerminal terminal, CliSession cliSession, RuntimeException e) { + terminal.line().error("Communication error [").param(e.getMessage()).error("]").ln(); + if (cliSession.isDebug()) { + terminal.printStackTrace(e); + } + } + + +} diff --git a/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ClearScreenCliCommand.java b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ClearScreenCliCommand.java new file mode 100644 index 00000000000..ffde1ec556a --- /dev/null +++ b/sql/cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ClearScreenCliCommand.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli.command;

import org.elasticsearch.xpack.sql.cli.CliTerminal;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * {@code cls} command that clears the screen.
 */
public class ClearScreenCliCommand extends AbstractCliCommand {

    public ClearScreenCliCommand() {
        // Match "cls" in any capitalization.
        super(Pattern.compile("cls", Pattern.CASE_INSENSITIVE));
    }

    @Override
    protected boolean doHandle(CliTerminal terminal, CliSession cliSession, Matcher m, String line) {
        terminal.clear();
        return true;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli.command;

import org.elasticsearch.xpack.sql.cli.CliTerminal;

import java.util.Arrays;
import java.util.List;

/**
 * Wrapper for several commands: delegates to each wrapped command in order
 * until one of them handles the line.
 */
public class CliCommands implements CliCommand {

    // Parameterized type instead of the raw List the original declared, which
    // discarded the element type and forced unchecked access.
    private final List<CliCommand> commands;

    public CliCommands(CliCommand... commands) {
        this.commands = Arrays.asList(commands);
    }

    @Override
    public boolean handle(CliTerminal terminal, CliSession cliSession, String line) {
        for (CliCommand cliCommand : commands) {
            if (cliCommand.handle(terminal, cliSession, line)) {
                return true;
            }
        }
        // No wrapped command recognized the line.
        return false;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli.command;

import org.elasticsearch.xpack.sql.cli.CliHttpClient;
import org.elasticsearch.xpack.sql.cli.net.protocol.InfoResponse;
import org.elasticsearch.xpack.sql.client.shared.ClientException;
import org.elasticsearch.xpack.sql.client.shared.Version;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryInitRequest;

import java.sql.SQLException;

/**
 * Holds the mutable state of the current CLI session: the client, the fetch
 * size, the fetch separator and the debug flag.
 */
public class CliSession {
    private final CliHttpClient cliHttpClient;
    // How many rows to pull per server round trip.
    private int fetchSize = AbstractQueryInitRequest.DEFAULT_FETCH_SIZE;
    // Printed between consecutive fetches; empty means no separator.
    private String fetchSeparator = "";
    private boolean debug;

    public CliSession(CliHttpClient cliHttpClient) {
        this.cliHttpClient = cliHttpClient;
    }

    public CliHttpClient getClient() {
        return cliHttpClient;
    }

    /**
     * @throws IllegalArgumentException if {@code fetchSize} is not positive
     */
    public void setFetchSize(int fetchSize) {
        if (fetchSize <= 0) {
            throw new IllegalArgumentException("Must be > 0.");
        }
        this.fetchSize = fetchSize;
    }

    public int getFetchSize() {
        return fetchSize;
    }

    public void setFetchSeparator(String fetchSeparator) {
        this.fetchSeparator = fetchSeparator;
    }

    public String getFetchSeparator() {
        return fetchSeparator;
    }

    public void setDebug(boolean debug) {
        this.debug = debug;
    }

    public boolean isDebug() {
        return debug;
    }

    /**
     * Verifies that the server is reachable and runs a compatible version.
     *
     * @throws ClientException if the server cannot be reached or its
     *         major/minor version differs from this client's
     */
    public void checkConnection() throws ClientException {
        InfoResponse response;
        try {
            response = cliHttpClient.serverInfo();
        } catch (SQLException ex) {
            throw new ClientException(ex);
        }
        // TODO: We can relax compatibility requirement later when we have a better idea about protocol compatibility guarantees
        boolean sameVersion = response.majorVersion == Version.versionMajor()
                && response.minorVersion == Version.versionMinor();
        if (sameVersion == false) {
            throw new ClientException("This alpha version of CLI is only compatible with Elasticsearch version " + Version.version());
        }
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli.command;

import org.elasticsearch.xpack.sql.cli.CliTerminal;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * {@code fetch_separator} command that changes the string printed between
 * consecutive fetches.
 */
public class FetchSeparatorCliCommand extends AbstractCliCommand {

    public FetchSeparatorCliCommand() {
        // Accepts both "fetch separator" and "fetch_separator"; the quoted value is group 1.
        super(Pattern.compile("fetch(?: |_)separator *= *\"(.+)\"", Pattern.CASE_INSENSITIVE));
    }

    @Override
    protected boolean doHandle(CliTerminal terminal, CliSession cliSession, Matcher m, String line) {
        cliSession.setFetchSeparator(m.group(1));
        terminal.line().text("fetch separator set to \"").em(cliSession.getFetchSeparator()).text("\"").end();
        return true;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli.command;

import org.elasticsearch.xpack.sql.cli.CliTerminal;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * {@code fetch_size} command that changes how many rows are pulled per fetch.
 */
public class FetchSizeCliCommand extends AbstractCliCommand {

    public FetchSizeCliCommand() {
        // Accepts both "fetch size" and "fetch_size"; the value is group 1.
        super(Pattern.compile("fetch(?: |_)size *= *(.+)", Pattern.CASE_INSENSITIVE));
    }

    @Override
    protected boolean doHandle(CliTerminal terminal, CliSession cliSession, Matcher m, String line) {
        String value = m.group(1);
        try {
            cliSession.setFetchSize(Integer.parseInt(value));
        } catch (NumberFormatException e) {
            // Not a number at all.
            terminal.line().error("Invalid fetch size [").param(value).error("]").end();
            return true;
        } catch (IllegalArgumentException e) {
            // A number, but rejected by the session (e.g. not positive).
            terminal.line().error("Invalid fetch size [").param(value).error("]. " + e.getMessage()).end();
            return true;
        }
        terminal.line().text("fetch size set to ").em(Integer.toString(cliSession.getFetchSize())).end();
        return true;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli.command;

import org.elasticsearch.xpack.sql.cli.Cli;
import org.elasticsearch.xpack.sql.cli.CliTerminal;
import org.elasticsearch.xpack.sql.cli.FatalCliException;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * {@code logo} command that clears the screen and prints the logo.
 */
public class PrintLogoCommand extends AbstractCliCommand {

    public PrintLogoCommand() {
        super(Pattern.compile("logo", Pattern.CASE_INSENSITIVE));
    }

    @Override
    protected boolean doHandle(CliTerminal terminal, CliSession cliSession, Matcher m, String line) {
        printLogo(terminal);
        return true;
    }

    /**
     * Clears the screen and prints the logo bundled as the {@code /logo.txt}
     * classpath resource, line by line.
     */
    public void printLogo(CliTerminal terminal) {
        terminal.clear();
        try (InputStream in = Cli.class.getResourceAsStream("/logo.txt")) {
            if (in == null) {
                // The resource should always be packaged with the CLI.
                throw new FatalCliException("Could not find logo!");
            }
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
                for (String logoLine = reader.readLine(); logoLine != null; logoLine = reader.readLine()) {
                    terminal.println(logoLine);
                }
            }
        } catch (IOException e) {
            throw new FatalCliException("Could not load logo!", e);
        }

        terminal.println();
    }

}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli.command;

import org.elasticsearch.xpack.sql.cli.CliTerminal;
import org.elasticsearch.xpack.sql.cli.net.protocol.InfoResponse;

import java.sql.SQLException;
import java.util.Locale;

/**
 * {@code info} command that prints node, cluster and version information
 * fetched from the server.
 */
public class ServerInfoCliCommand extends AbstractServerCliCommand {

    public ServerInfoCliCommand() {
    }

    @Override
    public boolean doHandle(CliTerminal terminal, CliSession cliSession, String line) {
        // Only the exact (case-insensitive) word "info" triggers this command.
        if (false == "info".equals(line.toLowerCase(Locale.ROOT))) {
            return false;
        }
        InfoResponse info;
        try {
            info = cliSession.getClient().serverInfo();
        } catch (SQLException e) {
            terminal.error("Error fetching server info", e.getMessage());
            return true;
        }
        terminal.line()
                .text("Node:").em(info.node)
                .text(" Cluster:").em(info.cluster)
                .text(" Version:").em(info.versionString)
                .ln();
        return true;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli.command;

import org.elasticsearch.xpack.sql.cli.CliHttpClient;
import org.elasticsearch.xpack.sql.cli.CliTerminal;
import org.elasticsearch.xpack.sql.cli.net.protocol.QueryResponse;
import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.SQLException;

/**
 * Fallback command that sends the line to the server as a query and pages
 * through the results, printing each fetch.
 */
public class ServerQueryCliCommand extends AbstractServerCliCommand {

    @Override
    protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String line) {
        QueryResponse response = null;
        CliHttpClient cliClient = cliSession.getClient();
        try {
            response = cliClient.queryInit(line, cliSession.getFetchSize());
            if (response.data.startsWith("digraph ")) {
                // Graphviz output (e.g. from debug commands) goes to a file instead of the screen.
                handleGraphviz(terminal, response.data);
                return true;
            }
            while (true) {
                handleText(terminal, response.data);
                if (response.cursor().isEmpty()) {
                    // Successfully finished the entire query!
                    terminal.flush();
                    return true;
                }
                if (cliSession.getFetchSeparator().equals("") == false) {
                    terminal.println(cliSession.getFetchSeparator());
                }
                response = cliClient.nextPage(response.cursor());
            }
        } catch (SQLException e) {
            if (JreHttpUrlConnection.SQL_STATE_BAD_SERVER.equals(e.getSQLState())) {
                terminal.error("Server error", e.getMessage());
            } else {
                terminal.error("Bad request", e.getMessage());
            }
            // Best effort: release the server-side cursor if one is still open.
            if (response != null && response.cursor().isEmpty() == false) {
                try {
                    cliClient.queryClose(response.cursor());
                } catch (SQLException ex) {
                    terminal.error("Could not close cursor", ex.getMessage());
                }
            }
        }
        return true;
    }

    /** Prints one fetch worth of tabular text. */
    private void handleText(CliTerminal terminal, String str) {
        terminal.print(str);
    }

    /** Saves graphviz output to a temp file in the working directory. */
    private void handleGraphviz(CliTerminal terminal, String str) {
        try {
            Path dotTempFile = Files.createTempFile(Paths.get("."), "sql-gv", ".dot");
            Files.write(dotTempFile, str.getBytes(StandardCharsets.UTF_8));
            terminal.println("Saved graph file at " + dotTempFile);
        } catch (IOException ex) {
            terminal.error("Cannot save graph file ", ex.getMessage());
        }
    }

}
+ .yXXXXXXXXXXXXXXo` `oXXXXXXXXXXXXXXX- +.XXXXXXXXXXXXXXXXXXo` `oXXXXXXXXXXXXXXXXXX. +.XXXXXXXXXXXXXXXXXXXXo. .oXXXXXXXXXXXXXXXXXXXXh +.XXXXXXXXXXXXXXXXXXXXXXo``oXXXXXXXXXXXXXXXXXXXXXXy +`yXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX. + `oXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXXXXXo` + .XXXXXXXXXXXXXXXXXXXXXXXXXo` + .oXXXXXXXXXXXXXXXXXXXXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXo` `odo` + `oXXXXXXXXXXXXXXXXXXXXXXXXo` `oXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXo` `oXXXXXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXo` `oXXXXXXXXXXXXXo` +`yXXXXXXXXXXXXXXXXXXXXXXXo` oXXXXXXXXXXXXXXXXX. +.XXXXXXXXXXXXXXXXXXXXXXo` `oXXXXXXXXXXXXXXXXXXXy +.XXXXXXXXXXXXXXXXXXXXo` /XXXXXXXXXXXXXXXXXXXXX +.XXXXXXXXXXXXXXXXXXo` `oXXXXXXXXXXXXXXXXXX- + -XXXXXXXXXXXXXXXo` `oXXXXXXXXXXXXXXXo` + .oXXXXXXXXXXXo` `oXXXXXXXXXXXo. + `.sshXXyso` SQL `.sshXhss.` \ No newline at end of file diff --git a/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java new file mode 100644 index 00000000000..2397418256a --- /dev/null +++ b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.cli.command.CliCommand; +import org.elasticsearch.xpack.sql.cli.command.CliSession; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class CliReplTests extends ESTestCase { + + public void testBasicCliFunctionality() throws Exception { + CliTerminal cliTerminal = new TestTerminal( + "test;", + "notest;", + "exit;" + ); + CliSession mockSession = mock(CliSession.class); + CliCommand mockCommand = mock(CliCommand.class); + when(mockCommand.handle(cliTerminal, mockSession, "logo")).thenReturn(true); + when(mockCommand.handle(cliTerminal, mockSession, "test")).thenReturn(true); + when(mockCommand.handle(cliTerminal, mockSession, "notest")).thenReturn(false); + + CliRepl cli = new CliRepl(cliTerminal, mockSession, mockCommand); + cli.execute(); + + verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "test"); + verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "logo"); + verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "notest"); + verifyNoMoreInteractions(mockCommand, mockSession); + } + + + public void testFatalCliExceptionHandling() throws Exception { + CliTerminal cliTerminal = new TestTerminal( + "test;", + "fail;" + ); + + CliSession mockSession = mock(CliSession.class); + CliCommand mockCommand = mock(CliCommand.class); + when(mockCommand.handle(cliTerminal, mockSession, "logo")).thenReturn(true); + when(mockCommand.handle(cliTerminal, mockSession, "test")).thenReturn(true); + when(mockCommand.handle(cliTerminal, mockSession, "fail")).thenThrow(new FatalCliException("die")); + + CliRepl cli = new CliRepl(cliTerminal, mockSession, mockCommand); + expectThrows(FatalCliException.class, cli::execute); + + 
verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "logo"); + verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "test"); + verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "fail"); + verifyNoMoreInteractions(mockCommand, mockSession); + } + +} diff --git a/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java new file mode 100644 index 00000000000..766649e0e89 --- /dev/null +++ b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.cli.command.CliSession; +import org.elasticsearch.xpack.sql.cli.net.protocol.InfoResponse; +import org.elasticsearch.xpack.sql.client.shared.ClientException; +import org.elasticsearch.xpack.sql.client.shared.Version; + +import java.sql.SQLException; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class CliSessionTests extends ESTestCase { + + public void testProperConnection() throws Exception { + CliHttpClient cliHttpClient = mock(CliHttpClient.class); + when(cliHttpClient.serverInfo()).thenReturn(new InfoResponse(randomAlphaOfLength(5), randomAlphaOfLength(5), + (byte) Version.versionMajor(), (byte) Version.versionMinor(), + randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5))); + CliSession cliSession = new CliSession(cliHttpClient); + cliSession.checkConnection(); + verify(cliHttpClient, 
times(1)).serverInfo(); + verifyNoMoreInteractions(cliHttpClient); + } + + public void testConnection() throws Exception { + CliHttpClient cliHttpClient = mock(CliHttpClient.class); + when(cliHttpClient.serverInfo()).thenThrow(new SQLException("Cannot connect")); + CliSession cliSession = new CliSession(cliHttpClient); + expectThrows(ClientException.class, cliSession::checkConnection); + verify(cliHttpClient, times(1)).serverInfo(); + verifyNoMoreInteractions(cliHttpClient); + } + + public void testWrongServerVersion() throws Exception { + CliHttpClient cliHttpClient = mock(CliHttpClient.class); + byte minor; + byte major; + if (randomBoolean()) { + minor = (byte) Version.versionMinor(); + major = (byte) (Version.versionMajor() + 1); + } else { + minor = (byte) (Version.versionMinor() + 1); + major = (byte) Version.versionMajor(); + + } + when(cliHttpClient.serverInfo()).thenReturn(new InfoResponse(randomAlphaOfLength(5), randomAlphaOfLength(5), + minor, major, randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5))); + CliSession cliSession = new CliSession(cliHttpClient); + expectThrows(ClientException.class, cliSession::checkConnection); + verify(cliHttpClient, times(1)).serverInfo(); + verifyNoMoreInteractions(cliHttpClient); + } +} \ No newline at end of file diff --git a/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilderTests.java b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilderTests.java new file mode 100644 index 00000000000..d56c821d63a --- /dev/null +++ b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilderTests.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli;

import org.elasticsearch.cli.UserException;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration;
import org.elasticsearch.xpack.sql.client.shared.SslConfig;

import java.net.URI;
import java.nio.file.Path;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;

/**
 * Unit tests for {@link ConnectionBuilder}: connection-string parsing,
 * default configuration values, and interactive password prompting.
 */
public class ConnectionBuilderTests extends ESTestCase {

    public void testDefaultConnection() throws Exception {
        CliTerminal terminal = mock(CliTerminal.class);
        ConnectionBuilder builder = new ConnectionBuilder(terminal);
        ConnectionConfiguration config = builder.buildConnection(null, null);
        // No connection string means localhost over http with no credentials.
        assertNull(config.authUser());
        assertNull(config.authPass());
        assertEquals("http://localhost:9200/", config.connectionString());
        assertEquals(URI.create("http://localhost:9200/"), config.baseUri());
        // Built-in timeout and paging defaults.
        assertEquals(30000, config.connectTimeout());
        assertEquals(60000, config.networkTimeout());
        assertEquals(45000, config.pageTimeout());
        assertEquals(90000, config.queryTimeout());
        assertEquals(1000, config.pageSize());
        verifyNoMoreInteractions(terminal);
    }

    public void testBasicConnection() throws Exception {
        CliTerminal terminal = mock(CliTerminal.class);
        ConnectionBuilder builder = new ConnectionBuilder(terminal);
        ConnectionConfiguration config = builder.buildConnection("http://foobar:9242/", null);
        assertNull(config.authUser());
        assertNull(config.authPass());
        assertEquals("http://foobar:9242/", config.connectionString());
        assertEquals(URI.create("http://foobar:9242/"), config.baseUri());
        verifyNoMoreInteractions(terminal);
    }

    public void testUserAndPasswordConnection() throws Exception {
        CliTerminal terminal = mock(CliTerminal.class);
        ConnectionBuilder builder = new ConnectionBuilder(terminal);
        ConnectionConfiguration config = builder.buildConnection("http://user:pass@foobar:9242/", null);
        // Credentials embedded in the URI are picked up without prompting.
        assertEquals("user", config.authUser());
        assertEquals("pass", config.authPass());
        assertEquals("http://user:pass@foobar:9242/", config.connectionString());
        // The user info is stripped from the base URI.
        assertEquals(URI.create("http://foobar:9242/"), config.baseUri());
        verifyNoMoreInteractions(terminal);
    }

    public void testUserInteractiveConnection() throws Exception {
        CliTerminal terminal = mock(CliTerminal.class);
        when(terminal.readPassword("password: ")).thenReturn("password");
        ConnectionBuilder builder = new ConnectionBuilder(terminal);
        // A user without a password triggers a single interactive prompt.
        ConnectionConfiguration config = builder.buildConnection("http://user@foobar:9242/", null);
        assertEquals("user", config.authUser());
        assertEquals("password", config.authPass());
        assertEquals("http://user@foobar:9242/", config.connectionString());
        assertEquals(URI.create("http://foobar:9242/"), config.baseUri());
        verify(terminal, times(1)).readPassword(any());
        verifyNoMoreInteractions(terminal);
    }

    public void testKeystoreAndUserInteractiveConnection() throws Exception {
        CliTerminal terminal = mock(CliTerminal.class);
        when(terminal.readPassword("keystore password: ")).thenReturn("keystore password");
        when(terminal.readPassword("password: ")).thenReturn("password");
        AtomicBoolean called = new AtomicBoolean(false);
        ConnectionBuilder builder = new ConnectionBuilder(terminal) {
            @Override
            protected void checkIfExists(String name, Path p) {
                // Stubbed so we don't need permission to read the file
            }

            @Override
            protected ConnectionConfiguration newConnectionConfiguration(URI uri, String connectionString,
                    Properties properties) {
                // Stub building the actual configuration because we don't have
                // permission to read the keystore.
                assertEquals("true", properties.get(SslConfig.SSL));
                assertEquals("keystore_location", properties.get(SslConfig.SSL_KEYSTORE_LOCATION));
                assertEquals("keystore password", properties.get(SslConfig.SSL_KEYSTORE_PASS));
                assertEquals("keystore_location", properties.get(SslConfig.SSL_TRUSTSTORE_LOCATION));
                assertEquals("keystore password", properties.get(SslConfig.SSL_TRUSTSTORE_PASS));
                called.set(true);
                return null;
            }
        };
        assertNull(builder.buildConnection("https://user@foobar:9242/", "keystore_location"));
        assertTrue(called.get());
        // One prompt for the keystore password and one for the user password.
        verify(terminal, times(2)).readPassword(any());
        verifyNoMoreInteractions(terminal);
    }
}

// ---- Separate source file in the original patch: TestTerminal.java ----

/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli;

import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

/**
 * A scripted {@link CliTerminal} for tests: all output is captured into a
 * buffer exposed through {@link #toString()}, and {@link #readLine(String)}
 * replays the lines supplied at construction time.
 */
public class TestTerminal implements CliTerminal {

    private StringBuilder stringBuilder = new StringBuilder();
    private boolean closed = false;
    // FIX: was the raw type `Iterator`, which does not compile - next() on a
    // raw Iterator returns Object where readLine() must return String.
    private Iterator<String> inputLines;

    public TestTerminal(String... inputLines) {
        this.inputLines = Arrays.asList(inputLines).iterator();
    }

    @Override
    public LineBuilder line() {
        return new LineBuilder() {

            @Override
            public LineBuilder text(String text) {
                stringBuilder.append(text);
                return this;
            }

            @Override
            public LineBuilder em(String text) {
                // NOTE(review): the surrounding markers appear to have been
                // stripped from this patch; kept as empty strings to preserve
                // the captured output - confirm against upstream.
                stringBuilder.append("").append(text).append("");
                return this;
            }

            @Override
            public LineBuilder error(String text) {
                stringBuilder.append("").append(text).append("");
                return this;
            }

            @Override
            public LineBuilder param(String text) {
                stringBuilder.append("").append(text).append("");
                return this;
            }

            @Override
            public void ln() {
                stringBuilder.append("\n");
            }

            @Override
            public void end() {
                stringBuilder.append("");
            }
        };
    }

    @Override
    public void print(String text) {
        stringBuilder.append(text);
    }

    @Override
    public void println(String text) {
        stringBuilder.append(text);
        stringBuilder.append("\n");
    }

    @Override
    public void error(String type, String message) {
        stringBuilder.append("").append(type).append(" [");
        stringBuilder.append("").append(message).append("");
        stringBuilder.append("]\n");
    }

    @Override
    public void println() {
        stringBuilder.append("\n");
    }

    @Override
    public void clear() {
        // Start a fresh capture buffer.
        stringBuilder = new StringBuilder();
    }

    @Override
    public void flush() {
        stringBuilder.append("");
    }

    @Override
    public void printStackTrace(Exception ex) {
        stringBuilder.append("");
    }

    @Override
    public String readPassword(String prompt) {
        // Tests always "type" the same password.
        return "password";
    }

    @Override
    public String readLine(String prompt) {
        // Fail loudly if a test reads more lines than it scripted.
        assertTrue(inputLines.hasNext());
        return inputLines.next();
    }

    @Override
    public void close() throws IOException {
        // Guard against double-close in tests.
        assertFalse(closed);
        closed = true;
    }

    @Override
    public String toString() {
        return stringBuilder.toString();
    }
}
b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/VersionTests.java new file mode 100644 index 00000000000..6d34f93568f --- /dev/null +++ b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/VersionTests.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.client.shared.Version; + +public class VersionTests extends ESTestCase { + public void testVersionIsCurrent() { + /* This test will only work properly in gradle because in gradle we run the tests + * using the jar. */ + assertEquals(org.elasticsearch.Version.CURRENT.toString(), Version.versionNumber()); + assertNotNull(Version.versionHash()); + assertEquals(org.elasticsearch.Version.CURRENT.major, Version.versionMajor()); + assertEquals(org.elasticsearch.Version.CURRENT.minor, Version.versionMinor()); + } + +} diff --git a/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/BuiltinCommandTests.java b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/BuiltinCommandTests.java new file mode 100644 index 00000000000..99696b24888 --- /dev/null +++ b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/BuiltinCommandTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.cli.CliHttpClient; +import org.elasticsearch.xpack.sql.cli.TestTerminal; + +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyNoMoreInteractions; + + +public class BuiltinCommandTests extends ESTestCase { + + public void testInvalidCommand() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + CliHttpClient cliHttpClient = mock(CliHttpClient.class); + CliSession cliSession = new CliSession(cliHttpClient); + assertFalse(new ClearScreenCliCommand().handle(testTerminal, cliSession, "something")); + assertFalse(new FetchSeparatorCliCommand().handle(testTerminal, cliSession, "something")); + assertFalse(new FetchSizeCliCommand().handle(testTerminal, cliSession, "something")); + assertFalse(new PrintLogoCommand().handle(testTerminal, cliSession, "something")); + verifyNoMoreInteractions(cliHttpClient); + } + + public void testClearScreen() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + CliHttpClient cliHttpClient = mock(CliHttpClient.class); + CliSession cliSession = new CliSession(cliHttpClient); + testTerminal.print("not clean"); + assertTrue(new ClearScreenCliCommand().handle(testTerminal, cliSession, "cls")); + assertEquals("", testTerminal.toString()); + verifyNoMoreInteractions(cliHttpClient); + } + + public void testFetchSeparator() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + CliHttpClient cliHttpClient = mock(CliHttpClient.class); + CliSession cliSession = new CliSession(cliHttpClient); + FetchSeparatorCliCommand cliCommand = new FetchSeparatorCliCommand(); + assertFalse(cliCommand.handle(testTerminal, cliSession, "fetch")); + assertEquals("", cliSession.getFetchSeparator()); + + assertTrue(cliCommand.handle(testTerminal, cliSession, "fetch_separator = \"foo\"")); + 
assertEquals("foo", cliSession.getFetchSeparator()); + assertEquals("fetch separator set to \"foo\"", testTerminal.toString()); + testTerminal.clear(); + + assertTrue(cliCommand.handle(testTerminal, cliSession, "fetch_separator=\"bar\"")); + assertEquals("bar", cliSession.getFetchSeparator()); + assertEquals("fetch separator set to \"bar\"", testTerminal.toString()); + testTerminal.clear(); + + assertTrue(cliCommand.handle(testTerminal, cliSession, "fetch separator=\"baz\"")); + assertEquals("baz", cliSession.getFetchSeparator()); + assertEquals("fetch separator set to \"baz\"", testTerminal.toString()); + verifyNoMoreInteractions(cliHttpClient); + } + + public void testFetchSize() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + CliHttpClient cliHttpClient = mock(CliHttpClient.class); + CliSession cliSession = new CliSession(cliHttpClient); + FetchSizeCliCommand cliCommand = new FetchSizeCliCommand(); + assertFalse(cliCommand.handle(testTerminal, cliSession, "fetch")); + assertEquals(1000L, cliSession.getFetchSize()); + + assertTrue(cliCommand.handle(testTerminal, cliSession, "fetch_size = \"foo\"")); + assertEquals(1000L, cliSession.getFetchSize()); + assertEquals("Invalid fetch size [\"foo\"]", testTerminal.toString()); + testTerminal.clear(); + + assertTrue(cliCommand.handle(testTerminal, cliSession, "fetch_size = 10")); + assertEquals(10L, cliSession.getFetchSize()); + assertEquals("fetch size set to 10", testTerminal.toString()); + + testTerminal.clear(); + + assertTrue(cliCommand.handle(testTerminal, cliSession, "fetch_size = -10")); + assertEquals(10L, cliSession.getFetchSize()); + assertEquals("Invalid fetch size [-10]. 
Must be > 0.", testTerminal.toString()); + verifyNoMoreInteractions(cliHttpClient); + } + + public void testPrintLogo() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + CliHttpClient cliHttpClient = mock(CliHttpClient.class); + CliSession cliSession = new CliSession(cliHttpClient); + testTerminal.print("not clean"); + assertTrue(new PrintLogoCommand().handle(testTerminal, cliSession, "logo")); + assertThat(testTerminal.toString(), containsString("SQL")); + verifyNoMoreInteractions(cliHttpClient); + } + +} diff --git a/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/CliCommandsTests.java b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/CliCommandsTests.java new file mode 100644 index 00000000000..13aee4a4291 --- /dev/null +++ b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/CliCommandsTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.cli.CliHttpClient; +import org.elasticsearch.xpack.sql.cli.TestTerminal; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyNoMoreInteractions; + +public class CliCommandsTests extends ESTestCase { + + public void testCliCommands() { + TestTerminal testTerminal = new TestTerminal(); + CliHttpClient cliHttpClient = mock(CliHttpClient.class); + CliSession cliSession = new CliSession(cliHttpClient); + CliCommands cliCommands = new CliCommands( + (terminal, session, line) -> line.equals("foo"), + (terminal, session, line) -> line.equals("bar"), + (terminal, session, line) -> line.equals("baz") + ); + + assertTrue(cliCommands.handle(testTerminal, cliSession, "foo")); + assertTrue(cliCommands.handle(testTerminal, cliSession, "bar")); + assertTrue(cliCommands.handle(testTerminal, cliSession, "baz")); + assertFalse(cliCommands.handle(testTerminal, cliSession, "")); + assertFalse(cliCommands.handle(testTerminal, cliSession, "something")); + verifyNoMoreInteractions(cliHttpClient); + } +} diff --git a/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java new file mode 100644 index 00000000000..18f281e0368 --- /dev/null +++ b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.cli.CliHttpClient; +import org.elasticsearch.xpack.sql.cli.TestTerminal; +import org.elasticsearch.xpack.sql.cli.net.protocol.InfoResponse; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class ServerInfoCliCommandTests extends ESTestCase { + + public void testInvalidCommand() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + CliHttpClient client = mock(CliHttpClient.class); + CliSession cliSession = new CliSession(client); + ServerInfoCliCommand cliCommand = new ServerInfoCliCommand(); + assertFalse(cliCommand.handle(testTerminal, cliSession, "blah")); + assertEquals(testTerminal.toString(), ""); + verifyNoMoreInteractions(client); + } + + public void testShowInfo() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + CliHttpClient client = mock(CliHttpClient.class); + CliSession cliSession = new CliSession(client); + when(client.serverInfo()).thenReturn(new InfoResponse("my_node", "my_cluster", (byte) 1, (byte) 2, "v1.2", "1234", "Sep 1, 2017")); + ServerInfoCliCommand cliCommand = new ServerInfoCliCommand(); + assertTrue(cliCommand.handle(testTerminal, cliSession, "info")); + assertEquals(testTerminal.toString(), "Node:my_node Cluster:my_cluster Version:v1.2\n"); + verify(client, times(1)).serverInfo(); + verifyNoMoreInteractions(client); + } + +} \ No newline at end of file diff --git a/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java b/sql/cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java new file mode 100644 index 00000000000..f4b7c1b039f --- /dev/null +++ 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.cli.command;

import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.cli.CliHttpClient;
import org.elasticsearch.xpack.sql.cli.TestTerminal;
import org.elasticsearch.xpack.sql.cli.net.protocol.QueryCloseResponse;
import org.elasticsearch.xpack.sql.cli.net.protocol.QueryInitResponse;
import org.elasticsearch.xpack.sql.cli.net.protocol.QueryPageResponse;

import java.sql.SQLException;

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;

/**
 * Tests for {@link ServerQueryCliCommand}: paging through results, the
 * fetch separator, error reporting and cursor cleanup on failure.
 */
public class ServerQueryCliCommandTests extends ESTestCase {

    public void testExceptionHandling() throws Exception {
        TestTerminal terminal = new TestTerminal();
        CliHttpClient client = mock(CliHttpClient.class);
        CliSession session = new CliSession(client);
        when(client.queryInit("blah", 1000)).thenThrow(new SQLException("test exception"));
        ServerQueryCliCommand command = new ServerQueryCliCommand();
        // The line is still "handled" - the failure is reported to the user.
        assertTrue(command.handle(terminal, session, "blah"));
        assertEquals("Bad request [test exception]\n", terminal.toString());
        verify(client, times(1)).queryInit(eq("blah"), eq(1000));
        verifyNoMoreInteractions(client);
    }

    public void testOnePageQuery() throws Exception {
        TestTerminal terminal = new TestTerminal();
        CliHttpClient client = mock(CliHttpClient.class);
        CliSession session = new CliSession(client);
        session.setFetchSize(10);
        // An empty cursor in the init response means there are no more pages.
        when(client.queryInit("test query", 10)).thenReturn(new QueryInitResponse(123, "", "some command response"));
        ServerQueryCliCommand command = new ServerQueryCliCommand();
        assertTrue(command.handle(terminal, session, "test query"));
        assertEquals("some command response", terminal.toString());
        verify(client, times(1)).queryInit(eq("test query"), eq(10));
        verifyNoMoreInteractions(client);
    }

    public void testThreePageQuery() throws Exception {
        TestTerminal terminal = new TestTerminal();
        CliHttpClient client = mock(CliHttpClient.class);
        CliSession session = new CliSession(client);
        session.setFetchSize(10);
        // Non-empty cursors chain the pages together until the last one.
        when(client.queryInit("test query", 10)).thenReturn(new QueryInitResponse(123, "my_cursor1", "first"));
        when(client.nextPage("my_cursor1")).thenReturn(new QueryPageResponse(345, "my_cursor2", "second"));
        when(client.nextPage("my_cursor2")).thenReturn(new QueryPageResponse(678, "", "third"));
        ServerQueryCliCommand command = new ServerQueryCliCommand();
        assertTrue(command.handle(terminal, session, "test query"));
        assertEquals("firstsecondthird", terminal.toString());
        verify(client, times(1)).queryInit(eq("test query"), eq(10));
        verify(client, times(2)).nextPage(any());
        verifyNoMoreInteractions(client);
    }

    public void testTwoPageQueryWithSeparator() throws Exception {
        TestTerminal terminal = new TestTerminal();
        CliHttpClient client = mock(CliHttpClient.class);
        CliSession session = new CliSession(client);
        session.setFetchSize(15);
        // Set a separator
        session.setFetchSeparator("-----");
        when(client.queryInit("test query", 15)).thenReturn(new QueryInitResponse(123, "my_cursor1", "first"));
        when(client.nextPage("my_cursor1")).thenReturn(new QueryPageResponse(345, "", "second"));
        ServerQueryCliCommand command = new ServerQueryCliCommand();
        assertTrue(command.handle(terminal, session, "test query"));
        // The separator (plus newline) is printed between pages only.
        assertEquals("first-----\nsecond", terminal.toString());
        verify(client, times(1)).queryInit(eq("test query"), eq(15));
        verify(client, times(1)).nextPage(any());
        verifyNoMoreInteractions(client);
    }

    public void testCursorCleanupOnError() throws Exception {
        TestTerminal terminal = new TestTerminal();
        CliHttpClient client = mock(CliHttpClient.class);
        CliSession session = new CliSession(client);
        session.setFetchSize(15);
        when(client.queryInit("test query", 15)).thenReturn(new QueryInitResponse(123, "my_cursor1", "first"));
        when(client.nextPage("my_cursor1")).thenThrow(new SQLException("test exception"));
        when(client.queryClose("my_cursor1")).thenReturn(new QueryCloseResponse(true));
        ServerQueryCliCommand command = new ServerQueryCliCommand();
        assertTrue(command.handle(terminal, session, "test query"));
        assertEquals("firstBad request [test exception]\n", terminal.toString());
        verify(client, times(1)).queryInit(eq("test query"), eq(15));
        verify(client, times(1)).nextPage(any());
        // The open cursor must be closed even though paging failed.
        verify(client, times(1)).queryClose(eq("my_cursor1"));
        verifyNoMoreInteractions(client);
    }

}
+ */ +package org.elasticsearch.xpack.sql.cli; diff --git a/sql/jdbc-proto/build.gradle b/sql/jdbc-proto/build.gradle new file mode 100644 index 00000000000..22b0f8639d1 --- /dev/null +++ b/sql/jdbc-proto/build.gradle @@ -0,0 +1,17 @@ +description = 'Request and response objects shared by the jdbc driver and ' + + 'its backend in :sql:server' + +dependencies { + compile project(':x-pack-elasticsearch:sql:shared-proto') + testCompile project(':x-pack-elasticsearch:sql:test-utils') +} + +forbiddenApisMain { + // does not depend on core, so only jdk and http signatures should be checked + signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] +} + +dependencyLicenses { + mapping from: /shared-proto.*/, to: 'elasticsearch' + ignoreSha 'shared-proto' +} diff --git a/sql/jdbc-proto/licenses/elasticsearch-LICENSE.txt b/sql/jdbc-proto/licenses/elasticsearch-LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/sql/jdbc-proto/licenses/elasticsearch-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/sql/jdbc-proto/licenses/elasticsearch-NOTICE.txt b/sql/jdbc-proto/licenses/elasticsearch-NOTICE.txt new file mode 100644 index 00000000000..643a060cd05 --- /dev/null +++ b/sql/jdbc-proto/licenses/elasticsearch-NOTICE.txt @@ -0,0 +1,5 @@ +Elasticsearch +Copyright 2009-2017 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). diff --git a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ColumnInfo.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ColumnInfo.java new file mode 100644 index 00000000000..d1e19656698 --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ColumnInfo.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.sql.JDBCType; +import java.util.Objects; + +public class ColumnInfo { + public String catalog, schema, table, label, name; + public int displaySize; + public JDBCType type; + + public ColumnInfo(String name, JDBCType type, String table, String catalog, String schema, String label, int displaySize) { + if (name == null) { + throw new IllegalArgumentException("[name] must not be null"); + } + if (type == null) { + throw new IllegalArgumentException("[type] must not be null"); + } + if (table == null) { + throw new IllegalArgumentException("[table] must not be null"); + } + if (catalog == null) { + throw new IllegalArgumentException("[catalog] must not be null"); + } + if (schema == null) { + throw new IllegalArgumentException("[schema] must not be null"); + } + if (label == null) { + throw new IllegalArgumentException("[label] must not be null"); + } + this.name = name; + this.type = type; + this.table = table; + this.catalog = catalog; + this.schema = schema; + this.label = label; + this.displaySize = displaySize; + } + + ColumnInfo(DataInput in) throws IOException { + name = in.readUTF(); + type = JDBCType.valueOf(in.readInt()); + table = in.readUTF(); + catalog = in.readUTF(); + schema = in.readUTF(); + label = in.readUTF(); + displaySize = in.readInt(); + } + + void writeTo(DataOutput out) throws IOException { + out.writeUTF(name); + out.writeInt(type.getVendorTypeNumber()); + out.writeUTF(table); + out.writeUTF(catalog); + out.writeUTF(schema); + out.writeUTF(label); + out.writeInt(displaySize); + } + + public int displaySize() { + // 0 - means unknown + return displaySize; + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + if (false == "".equals(table)) { + b.append(table).append('.'); + } + b.append(name).append("').toString(); + } + + @Override + public boolean 
equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + ColumnInfo other = (ColumnInfo) obj; + return name.equals(other.name) + && type.equals(other.type) + && table.equals(other.table) + && catalog.equals(other.catalog) + && schema.equals(other.schema) + && label.equals(other.label) + && displaySize == other.displaySize; + } + + @Override + public int hashCode() { + return Objects.hash(name, type, table, catalog, schema, label, displaySize); + } +} diff --git a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoRequest.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoRequest.java new file mode 100644 index 00000000000..7d0e4dad1c8 --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoRequest.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractInfoRequest; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; + +import java.io.IOException; + +/** + * Request general information about the server. + */ +public class InfoRequest extends AbstractInfoRequest { + /** + * Build the info request containing information about the current JVM. 
+ */ + public InfoRequest() { + super(); + } + + public InfoRequest(String jvmVersion, String jvmVendor, String jvmClassPath, String osName, String osVersion) { + super(jvmVersion, jvmVendor, jvmClassPath, osName, osVersion); + } + + InfoRequest(SqlDataInput in) throws IOException { + super(in); + } + + @Override + public RequestType requestType() { + return RequestType.INFO; + } +} diff --git a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoResponse.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoResponse.java new file mode 100644 index 00000000000..c85e72a2589 --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoResponse.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractInfoResponse; +import org.elasticsearch.xpack.sql.protocol.shared.Request; + +import java.io.DataInput; +import java.io.IOException; + +/** + * General information about the server. 
+ */ +public class InfoResponse extends AbstractInfoResponse { + public InfoResponse(String nodeName, String clusterName, byte versionMajor, byte versionMinor, String version, + String versionHash, String versionDate) { + super(nodeName, clusterName, versionMajor, versionMinor, version, versionHash, versionDate); + } + + InfoResponse(Request request, DataInput in) throws IOException { + super(request, in); + } + + @Override + public RequestType requestType() { + return RequestType.INFO; + } + + @Override + public ResponseType responseType() { + return ResponseType.INFO; + } +} \ No newline at end of file diff --git a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnInfo.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnInfo.java new file mode 100644 index 00000000000..0a50d0fa731 --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnInfo.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.sql.JDBCType; +import java.util.Objects; + +public class MetaColumnInfo { + public final String table, name; + public final JDBCType type; + public final int size, position; + + public MetaColumnInfo(String table, String name, JDBCType type, int size, int position) { + if (table == null) { + throw new IllegalArgumentException("[table] must not be null"); + } + if (name == null) { + throw new IllegalArgumentException("[name] must not be null"); + } + if (type == null) { + throw new IllegalArgumentException("[type] must not be null"); + } + this.table = table; + this.name = name; + this.type = type; + this.size = size; + this.position = position; + } + + MetaColumnInfo(DataInput in) throws IOException { + table = in.readUTF(); + name = in.readUTF(); + type = JDBCType.valueOf(in.readInt()); + size = in.readInt(); + position = in.readInt(); + } + + void writeTo(DataOutput out) throws IOException { + out.writeUTF(table); + out.writeUTF(name); + out.writeInt(type.getVendorTypeNumber()); + out.writeInt(size); + out.writeInt(position); + } + + @Override + public String toString() { + return table + "." 
+ name + + ""; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + MetaColumnInfo other = (MetaColumnInfo) obj; + return table.equals(other.table) + && name.equals(other.name) + && type.equals(other.type) + && size == other.size + && position == other.position; + } + + @Override + public int hashCode() { + return Objects.hash(table, name, type, size, position); + } +} diff --git a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnRequest.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnRequest.java new file mode 100644 index 00000000000..af17b83b4fa --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnRequest.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput; + +import java.io.IOException; +import java.util.Objects; + +public class MetaColumnRequest extends Request { + private final String tablePattern, columnPattern; + + public MetaColumnRequest(String tablePattern, String columnPattern) { + this.tablePattern = tablePattern == null ? "" : tablePattern; + this.columnPattern = columnPattern == null ? 
"" : columnPattern; + } + + MetaColumnRequest(SqlDataInput in) throws IOException { + tablePattern = in.readUTF(); + columnPattern = in.readUTF(); + } + + @Override + protected void writeTo(SqlDataOutput out) throws IOException { + out.writeUTF(tablePattern); + out.writeUTF(columnPattern); + } + + public String tablePattern() { + return tablePattern; + } + + public String columnPattern() { + return columnPattern; + } + + @Override + protected String toStringBody() { + return "table=[" + tablePattern + + "] column=[" + columnPattern + "]"; + } + + @Override + public RequestType requestType() { + return RequestType.META_COLUMN; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + MetaColumnRequest other = (MetaColumnRequest) obj; + return Objects.equals(tablePattern, other.tablePattern) + && Objects.equals(columnPattern, other.columnPattern); + } + + @Override + public int hashCode() { + return Objects.hash(tablePattern, columnPattern); + } +} \ No newline at end of file diff --git a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnResponse.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnResponse.java new file mode 100644 index 00000000000..2737f1b18cc --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnResponse.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.Response; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput; + +import java.io.DataInput; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static java.util.Collections.unmodifiableList; +import static java.util.stream.Collectors.joining; + +public class MetaColumnResponse extends Response { + public final List columns; + + public MetaColumnResponse(List columns) { + if (columns == null) { + throw new IllegalArgumentException("[columns] must not be null"); + } + this.columns = columns; + } + + public MetaColumnResponse(Request request, DataInput in) throws IOException { + int length = in.readInt(); + List list = new ArrayList<>(length); + + for (int i = 0; i < length; i++) { + list.add(new MetaColumnInfo(in)); + } + columns = unmodifiableList(list); + } + + @Override + protected void writeTo(SqlDataOutput out) throws IOException { + out.writeInt(columns.size()); + for (MetaColumnInfo info : columns) { + info.writeTo(out); + } + } + + @Override + protected String toStringBody() { + return columns.stream().map(Object::toString).collect(joining(", ")); + } + + @Override + public RequestType requestType() { + return RequestType.META_COLUMN; + } + + @Override + public ResponseType responseType() { + return ResponseType.META_COLUMN; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + MetaColumnResponse other = (MetaColumnResponse) obj; + return columns.equals(other.columns); + } + + @Override + public int hashCode() { + return columns.hashCode(); + } +} \ No newline at end of file diff --git 
a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaTableRequest.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaTableRequest.java new file mode 100644 index 00000000000..f5828df49ed --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaTableRequest.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput; + +import java.io.IOException; + +public class MetaTableRequest extends Request { + private final String pattern; + + public MetaTableRequest(String pattern) { + this.pattern = pattern == null ? 
"" : pattern; + } + + MetaTableRequest(SqlDataInput in) throws IOException { + this.pattern = in.readUTF(); + } + + @Override + public void writeTo(SqlDataOutput out) throws IOException { + out.writeUTF(pattern); + } + + public String pattern() { + return pattern; + } + + @Override + protected String toStringBody() { + return pattern; + } + + @Override + public RequestType requestType() { + return RequestType.META_TABLE; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + MetaTableRequest other = (MetaTableRequest) obj; + return pattern.equals(other.pattern); + } + + @Override + public int hashCode() { + return pattern.hashCode(); + } +} diff --git a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaTableResponse.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaTableResponse.java new file mode 100644 index 00000000000..0ab4375762f --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaTableResponse.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.Response; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput; + +import java.io.DataInput; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static java.util.Collections.unmodifiableList; + +public class MetaTableResponse extends Response { + public final List tables; + + public MetaTableResponse(List tables) { + if (tables == null) { + throw new IllegalArgumentException("[tables] must not be null"); + } + this.tables = tables; + } + + MetaTableResponse(Request request, DataInput in) throws IOException { + int length = in.readInt(); + List list = new ArrayList<>(length); + for (int i = 0; i < length; i++) { + list.add(in.readUTF()); + } + tables = unmodifiableList(list); + } + + @Override + public void writeTo(SqlDataOutput out) throws IOException { + out.writeInt(tables.size()); + for (String t : tables) { + out.writeUTF(t); + } + } + + @Override + protected String toStringBody() { + return String.join(", ", tables); + } + + @Override + public RequestType requestType() { + return RequestType.META_TABLE; + } + + @Override + public ResponseType responseType() { + return ResponseType.META_TABLE; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + MetaTableResponse other = (MetaTableResponse) obj; + return tables.equals(other.tables); + } + + @Override + public int hashCode() { + return tables.hashCode(); + } + +} diff --git a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/Page.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/Page.java new file mode 100644 
index 00000000000..ac846412150 --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/Page.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput; + +import java.io.IOException; +import java.lang.reflect.Array; +import java.sql.JDBCType; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ProtoUtils.classOf; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ProtoUtils.readValue; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ProtoUtils.writeValue; + +/** + * Stores a page of data in a columnar format. + */ +public class Page implements Payload { + private final List columnInfo; + + /** + * The actual data, one array per column. + */ + private final Object[][] data; + + /** + * The number of rows in this page. The {@link #data} arrays may be larger + * but data after the end of the arrays is garbage. + */ + private int rows; + + private int maxRows; + + /** + * Build empty, call {@link #readFrom(SqlDataInput)} after to fill it. + */ + Page(List columnInfo) { + this.columnInfo = columnInfo; + data = new Object[columnInfo.size()][]; + } + + /** + * Build with a particular set of rows. Use this for testing. + */ + Page(List columnInfo, Object[][] rows) { + this(columnInfo); + makeRoomFor(rows.length); + this.rows = rows.length; + for (int row = 0; row < rows.length; row++) { + if (columnInfo.size() != rows[row].length) { + throw new IllegalArgumentException("Column count mismatch. 
Got [" + columnInfo.size() + + "] ColumnInfos but [" + rows.length + "] columns on the [" + row + "] row."); + } + } + for (int column = 0; column < columnInfo.size(); column++) { + for (int row = 0; row < rows.length; row++) { + data[column][row] = rows[row][column]; + } + } + } + + public int rows() { + return rows; + } + + public List columnInfo() { + return columnInfo; + } + + Object[] column(int index) { + if (index < 0 || index >= data.length) { + // NB: exception is caught higher up in the JDBC driver + throw new IllegalArgumentException("Invalid column [" + index + "] (max is [" + (data.length - 1) + "])"); + } + + return data[index]; + } + + public Object entry(int row, int column) { + if (row < 0 || row >= rows) { + // NB: exception is caught higher up in the JDBC driver + throw new IllegalArgumentException("Invalid row [" + row + "] (max is [" + (rows -1) + "])"); + } + return column(column)[row]; + } + + @Override + public void readFrom(SqlDataInput in) throws IOException { + int rows = in.readInt(); + // this.rows may be less than the number of rows we have space for + if (rows > maxRows) { + makeRoomFor(rows); + } + this.rows = rows; + + for (int row = 0; row < rows; row++) { + for (int column = 0; column < columnInfo.size(); column++) { + data[column][row] = readValue(in, columnInfo.get(column).type); + } + } + } + + @Override + public void writeTo(SqlDataOutput out) throws IOException { + int rows = rows(); + out.writeInt(rows); + for (int row = 0; row < rows; row++) { + for (int column = 0; column < columnInfo.size(); column++) { + JDBCType columnType = columnInfo.get(column).type; + writeValue(out, entry(row, column), columnType); + } + } + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + for (int row = 0; row < rows(); row++) { + for (int column = 0; column < columnInfo.size(); column++) { + if (column > 0) { + b.append(", "); + } + b.append(entry(row, column)); + } + b.append('\n'); + } + return 
b.toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj instanceof Page == false) { + return false; + } + Page other = (Page) obj; + if (rows != other.rows) { + return false; + } + if (false == columnInfo.equals(other.columnInfo)) { + return false; + } + for (int row = 0; row < rows(); row++) { + for (int column = 0; column < columnInfo.size(); column++) { + if (false == Objects.equals(entry(row, column), other.entry(row, column))) { + return false; + } + } + } + return true; + } + + @Override + public int hashCode() { + int result = Objects.hash(rows(), columnInfo.size()); + for (int row = 0; row < rows(); row++) { + for (int column = 0; column < columnInfo.size(); column++) { + Object entry = entry(row, column); + result = result * 31 + (entry == null ? 0 : entry.hashCode()); + } + } + return result; + } + + + private void makeRoomFor(int rows) { + maxRows = rows; + for (int i = 0; i < columnInfo.size(); i++) { + Class type = classOf(columnInfo.get(i).type); + data[i] = (Object[]) Array.newInstance(type, rows); + } + } +} \ No newline at end of file diff --git a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/Payload.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/Payload.java new file mode 100644 index 00000000000..73405ba9d36 --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/Payload.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput; + +import java.io.IOException; + +public interface Payload { + + void readFrom(SqlDataInput in) throws IOException; + + void writeTo(SqlDataOutput out) throws IOException; +} diff --git a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/Proto.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/Proto.java new file mode 100644 index 00000000000..68ce28abd4f --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/Proto.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * Binary protocol for the JDBC. All backwards compatibility is done using the + * version number sent in the header. 
+ */ +public final class Proto extends AbstractProto { + public static final Proto INSTANCE = new Proto(); + + private Proto() {} + + @Override + protected RequestType readRequestType(DataInput in) throws IOException { + return RequestType.readFrom(in); + } + + @Override + protected ResponseType readResponseType(DataInput in) throws IOException { + return ResponseType.readFrom(in); + } + + public enum RequestType implements AbstractProto.RequestType { + INFO(InfoRequest::new), + META_TABLE(MetaTableRequest::new), + META_COLUMN(MetaColumnRequest::new), + QUERY_INIT(QueryInitRequest::new), + QUERY_PAGE(QueryPageRequest::new), + QUERY_CLOSE(QueryCloseRequest::new) + ; + + private final RequestReader reader; + + RequestType(RequestReader reader) { + this.reader = reader; + } + + static RequestType readFrom(DataInput in) throws IOException { + byte b = in.readByte(); + try { + return values()[b]; + } catch (ArrayIndexOutOfBoundsException e) { + throw new IllegalArgumentException("Unknown response type [" + b + "]", e); + } + } + + @Override + public void writeTo(DataOutput out) throws IOException { + out.writeByte(ordinal()); + } + + @Override + public RequestReader reader() { + return reader; + } + } + + public enum ResponseType implements AbstractProto.ResponseType { + INFO(InfoResponse::new), + META_TABLE(MetaTableResponse::new), + META_COLUMN(MetaColumnResponse::new), + QUERY_INIT(QueryInitResponse::new), + QUERY_PAGE(QueryPageResponse::new), + QUERY_CLOSE(QueryCloseResponse::new) + ; + + private final ResponseReader reader; + + ResponseType(ResponseReader reader) { + this.reader = reader; + } + + static ResponseType readFrom(DataInput in) throws IOException { + byte b = in.readByte(); + try { + return values()[b]; + } catch (ArrayIndexOutOfBoundsException e) { + throw new IllegalArgumentException("Unknown response type [" + b + "]", e); + } + } + + @Override + public void writeTo(DataOutput out) throws IOException { + out.writeByte(ordinal()); + } + + @Override + 
public ResponseReader reader() { + return reader; + } + } +} diff --git a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ProtoUtils.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ProtoUtils.java new file mode 100644 index 00000000000..269871d2f84 --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ProtoUtils.java @@ -0,0 +1,184 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.math.BigDecimal; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.JDBCType; + +public class ProtoUtils { + // See Jdbc spec, appendix B + public static Object readValue(DataInput in, JDBCType type) throws IOException { + Object result; + byte hasNext = in.readByte(); + if (hasNext == 0) { // TODO feels like a bitmask at the start of the row would be better. + return null; + } + // TODO we ought to make sure we use all of these + switch (type) { + case NULL: + // used to move the stream forward + // TODO why serialize NULL types at all? 
/**
 * Utilities for reading and writing SQL values over the JDBC wire protocol.
 * The {@link JDBCType}-to-Java mapping follows the JDBC spec, appendix B:
 * REAL maps to {@code float} while FLOAT and DOUBLE map to {@code double}.
 */
public class ProtoUtils {

    private ProtoUtils() {
        // Static utilities only; not meant to be instantiated.
    }

    /**
     * Reads a single value of the given {@code type}. A leading marker byte
     * of {@code 0} means the value is null; {@code 1} means a value follows.
     */
    public static Object readValue(DataInput in, JDBCType type) throws IOException {
        Object result;
        byte hasNext = in.readByte();
        if (hasNext == 0) { // TODO feels like a bitmask at the start of the row would be better.
            return null;
        }
        // TODO we ought to make sure we use all of these
        switch (type) {
        case NULL:
            // used to move the stream forward
            // TODO why serialize NULL types at all?
            in.readBoolean();
            return null;
        case BIT:
        case BOOLEAN:
            result = Boolean.valueOf(in.readBoolean());
            break;
        case TINYINT:
            result = Byte.valueOf(in.readByte());
            break;
        case SMALLINT:
            result = Short.valueOf(in.readShort());
            break;
        case INTEGER:
            result = Integer.valueOf(in.readInt());
            break;
        case BIGINT:
            result = Long.valueOf(in.readLong());
            break;
        case FLOAT:
        case DOUBLE:
            // JDBC FLOAT is double precision (spec appendix B), hence readDouble
            result = Double.valueOf(in.readDouble());
            break;
        case REAL:
            result = Float.valueOf(in.readFloat());
            break;
        case BINARY:
        case VARBINARY:
        case LONGVARBINARY:
            int size = in.readInt();
            byte[] ar = new byte[size];
            in.readFully(ar, 0, size);
            result = ar;
            break;
        case CHAR:
        case VARCHAR:
        case LONGVARCHAR:
            result = in.readUTF();
            break;
        // NB: date/time is kept in its raw form since the JdbcDriver has to do calendar/timezone
        // conversion anyway and thus the long value is relevant
        case TIMESTAMP:
            result = in.readLong();
            break;
        default:
            throw new IOException("Don't know how to read type [" + type + "]");
        }
        return result;
    }

    /**
     * Writes {@code o} as the given {@code type}, prefixed with a marker byte:
     * {@code 0} for null (nothing else follows), {@code 1} otherwise.
     */
    public static void writeValue(DataOutput out, Object o, JDBCType type) throws IOException {
        if (o == null) {
            out.writeByte(0);
            return;
        }
        out.writeByte(1);

        switch (type) {
        // TODO: we ought to make sure we use all of these
        case NULL:
            // used to move the stream forward
            out.writeBoolean(false);
            return;
        case BIT:
        case BOOLEAN:
            out.writeBoolean((Boolean) o);
            return;
        case TINYINT:
            out.writeByte(((Number) o).byteValue());
            return;
        case SMALLINT:
            out.writeShort(((Number) o).shortValue());
            return;
        case INTEGER:
            out.writeInt(((Number) o).intValue());
            return;
        case BIGINT:
            out.writeLong(((Number) o).longValue());
            return;
        case FLOAT:
        case DOUBLE:
            out.writeDouble(((Number) o).doubleValue());
            return;
        case REAL:
            out.writeFloat(((Number) o).floatValue());
            return;
        case BINARY:
        case VARBINARY:
        case LONGVARBINARY:
            // o is known non-null here (checked above); an empty array writes
            // length 0 followed by no bytes, which the old special case did too.
            byte[] a = (byte[]) o;
            out.writeInt(a.length);
            out.write(a);
            return;
        case CHAR:
        case VARCHAR:
        case LONGVARCHAR:
            out.writeUTF(o.toString());
            return;
        case TIMESTAMP:
            out.writeLong(((Number) o).longValue());
            return;
        default:
            throw new IOException("Don't know how to write type [" + type + "]");
        }
    }

    /**
     * The type of the array used to store columns of this type.
     */
    // NB: JDBC requires the use of Objects not primitive
    // (in fact primitives are never used through-out the API)
    public static Class<?> classOf(JDBCType jdbcType) {
        switch (jdbcType) {
        case NUMERIC:
        case DECIMAL:
            return BigDecimal.class;
        case BOOLEAN:
        case BIT:
            return Boolean.class;
        case TINYINT:
            return Byte.class;
        case SMALLINT:
            return Short.class;
        case INTEGER:
            return Integer.class;
        case BIGINT:
            return Long.class;
        case REAL:
            return Float.class;
        case FLOAT:
        case DOUBLE:
            return Double.class;
        case BINARY:
        case VARBINARY:
        case LONGVARBINARY:
            return byte[].class;
        case CHAR:
        case VARCHAR:
        case LONGVARCHAR:
            return String.class;
        case DATE:
        case TIME:
        case TIMESTAMP:
            // raw epoch millis; see readValue's TIMESTAMP handling
            return Long.class;
        case BLOB:
            return Blob.class;
        case CLOB:
            return Clob.class;
        default:
            throw new IllegalArgumentException("Unsupported JDBC type [" + jdbcType + "]");
        }
    }
}
/**
 * Request to close a server-side cursor identified by {@code cursor}.
 */
public class QueryCloseRequest extends AbstractQueryCloseRequest {

    public QueryCloseRequest(String cursor) {
        super(cursor);
    }

    // Deserialization constructor, invoked through RequestType.QUERY_CLOSE's reader.
    QueryCloseRequest(SqlDataInput in) throws IOException {
        super(in);
    }

    @Override
    public RequestType requestType() {
        return RequestType.QUERY_CLOSE;
    }

}
/**
 * Response to a {@link QueryCloseRequest}, carrying whether the close
 * succeeded.
 */
public class QueryCloseResponse extends AbstractQueryCloseResponse {
    public QueryCloseResponse(boolean succeeded) {
        super(succeeded);
    }

    // Deserialization constructor, invoked through ResponseType.QUERY_CLOSE's reader.
    QueryCloseResponse(Request request, DataInput in) throws IOException {
        super(request, in);
    }

    @Override
    public RequestType requestType() {
        return RequestType.QUERY_CLOSE;
    }

    @Override
    public ResponseType responseType() {
        return ResponseType.QUERY_CLOSE;
    }
}
/**
 * Request that starts a new query, carrying the SQL text, the requested page
 * size, the client time zone, and timeout information.
 */
public class QueryInitRequest extends AbstractQueryInitRequest {
    public QueryInitRequest(String query, int fetchSize, TimeZone timeZone, TimeoutInfo timeout) {
        super(query, fetchSize, timeZone, timeout);
    }

    // Deserialization constructor, invoked through RequestType.QUERY_INIT's reader.
    QueryInitRequest(SqlDataInput in) throws IOException {
        super(in);
    }

    @Override
    public RequestType requestType() {
        return RequestType.QUERY_INIT;
    }
}
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryResponse; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.unmodifiableList; + +public class QueryInitResponse extends AbstractQueryResponse { + public final List columns; + public final Payload data; + + public QueryInitResponse(long tookNanos, String cursor, List columns, Payload data) { + super(tookNanos, cursor); + this.columns = columns; + this.data = data; + } + + QueryInitResponse(Request request, SqlDataInput in) throws IOException { + super(request, in); + int size = in.readInt(); + List columns = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + columns.add(new ColumnInfo(in)); + } + this.columns = unmodifiableList(columns); + // TODO - Page is a client class, it shouldn't leak here + Page data = new Page(columns); + data.readFrom(in); + this.data = data; + } + + @Override + public void writeTo(SqlDataOutput out) throws IOException { + super.writeTo(out); + out.writeInt(columns.size()); + for (ColumnInfo c : columns) { + c.writeTo(out); + } + data.writeTo(out); + } + + @Override + protected String toStringBody() { + return super.toStringBody() + + " columns=" + columns + + " data=[\n" + data + "]"; + } + + @Override + public RequestType requestType() { + return RequestType.QUERY_INIT; + } + + @Override + public ResponseType responseType() { + return ResponseType.QUERY_INIT; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), columns, data); + } + + + 
/**
 * Request for the next page of results for an open cursor.
 */
public class QueryPageRequest extends AbstractQueryPageRequest {
    // Client-side only: the Payload the matching response is decoded into
    // (see QueryPageResponse's deserialization constructor). Never sent over
    // the wire, hence transient and excluded from equals/hashCode.
    private final transient Payload data;

    public QueryPageRequest(String cursor, TimeoutInfo timeout, @Nullable Payload data) {
        super(cursor, timeout);
        this.data = data;
    }

    // Deserialization constructor, invoked through RequestType.QUERY_PAGE's reader.
    QueryPageRequest(SqlDataInput in) throws IOException {
        super(in);
        this.data = null; // data isn't used on the server side
    }

    /** The payload the response should decode into; null on the server side. */
    public Payload data() {
        return data;
    }

    @Override
    public RequestType requestType() {
        return RequestType.QUERY_PAGE;
    }

    // not overriding hashCode and equals because we intentionally ignore the data field
}
a/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryPageResponse.java b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryPageResponse.java new file mode 100644 index 00000000000..27b5ba8c1db --- /dev/null +++ b/sql/jdbc-proto/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryPageResponse.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryResponse; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput; + +import java.io.IOException; +import java.util.Objects; + +public class QueryPageResponse extends AbstractQueryResponse { + private final Payload data; + + public QueryPageResponse(long tookNanos, String cursor, Payload data) { + super(tookNanos, cursor); + this.data = data; + } + + QueryPageResponse(Request request, SqlDataInput in) throws IOException { + super(request, in); + QueryPageRequest queryPageRequest = (QueryPageRequest) request; + data = queryPageRequest.data(); + queryPageRequest.data().readFrom(in); + } + + @Override + public void writeTo(SqlDataOutput out) throws IOException { + super.writeTo(out); + data.writeTo(out); + } + + @Override + protected String toStringBody() { + return super.toStringBody() + " data=[\n" + data + "]"; + } + + @Override + public RequestType requestType() { + return RequestType.QUERY_PAGE; + } + + @Override + public ResponseType 
responseType() { + return ResponseType.QUERY_PAGE; + } + + @Override + public boolean equals(Object obj) { + if (false == super.equals(obj)) { + return false; + } + QueryPageResponse other = (QueryPageResponse) obj; + return data.equals(other.data); + } + + @Override + public int hashCode() { + return Objects.hash(data); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ColumnInfoTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ColumnInfoTests.java new file mode 100644 index 00000000000..ab76432e412 --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ColumnInfoTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.sql.JDBCType; + +import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.assertRoundTrip; + +public class ColumnInfoTests extends ESTestCase { + static ColumnInfo varcharInfo(String name) { + return new ColumnInfo(name, JDBCType.VARCHAR, "", "", "", "", 0); + } + + static ColumnInfo intInfo(String name) { + return new ColumnInfo(name, JDBCType.INTEGER, "", "", "", "", 11); + } + + static ColumnInfo doubleInfo(String name) { + return new ColumnInfo(name, JDBCType.DOUBLE, "", "", "", "", 25); + } + + static Object randomValueFor(ColumnInfo info) { + switch (info.type) { + case VARCHAR: return randomAlphaOfLength(5); + case INTEGER: return randomInt(); + case DOUBLE: return randomDouble(); + default: + throw new IllegalArgumentException("Unsupported type [" + info.type + "]"); + } + } + + static ColumnInfo randomColumnInfo() { + return new 
ColumnInfo(randomAlphaOfLength(5), randomFrom(JDBCType.values()), randomAlphaOfLength(5), randomAlphaOfLength(5), + randomAlphaOfLength(5), randomAlphaOfLength(5), randomInt(25)); + } + + public void testRoundTrip() throws IOException { + assertRoundTrip(randomColumnInfo(), ColumnInfo::writeTo, ColumnInfo::new); + } + + public void testToString() { + assertEquals("test.doc.a", + new ColumnInfo("a", JDBCType.VARCHAR, "test.doc", "as", "ads", "lab", 0).toString()); + assertEquals("test.doc.a", + new ColumnInfo("a", JDBCType.VARCHAR, "test.doc", "", "", "", 0).toString()); + assertEquals("string", varcharInfo("string").toString()); + assertEquals("int", intInfo("int").toString()); + assertEquals("d", doubleInfo("d").toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoRequestTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoRequestTests.java new file mode 100644 index 00000000000..7fefba5eba4 --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoRequestTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion; + +public class InfoRequestTests extends ESTestCase { + static InfoRequest randomInfoRequest() { + return new InfoRequest(randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5), + randomAlphaOfLength(5), randomAlphaOfLength(5)); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomInfoRequest()); + } + + public void testToString() { + assertEquals("InfoRequest", + new InfoRequest("1.8.0_131", "testvendor", "testcp", "Mac OS X", "10.12.5").toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoResponseTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoResponseTests.java new file mode 100644 index 00000000000..be2087aeb57 --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoResponseTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion; + +public class InfoResponseTests extends ESTestCase { + static InfoResponse randomInfoResponse() { + return new InfoResponse(randomAlphaOfLength(5), randomAlphaOfLength(5), randomByte(), randomByte(), + randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5)); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(InfoRequestTests::randomInfoRequest, randomInfoResponse()); + } + + public void testToString() { + assertEquals("InfoResponse", + new InfoResponse("adsf", "test_cluster", (byte) 6, (byte) 0, "6.0.0", "feed", "date").toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/JdbcRoundTripTestUtils.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/JdbcRoundTripTestUtils.java new file mode 100644 index 00000000000..68765c10526 --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/JdbcRoundTripTestUtils.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.Response; +import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo; +import org.elasticsearch.xpack.sql.test.RoundTripTestUtils; + +import java.io.IOException; +import java.util.function.Supplier; + +import static org.elasticsearch.test.ESTestCase.randomNonNegativeLong; + +public final class JdbcRoundTripTestUtils { + private JdbcRoundTripTestUtils() { + // Just static utilities + } + + static void assertRoundTripCurrentVersion(Request request) throws IOException { + RoundTripTestUtils.assertRoundTrip(request, Proto.INSTANCE::writeRequest, Proto.INSTANCE::readRequest); + } + + static void assertRoundTripCurrentVersion(Supplier request, Response response) throws IOException { + RoundTripTestUtils.assertRoundTrip(response, + (r, out) -> Proto.INSTANCE.writeResponse(r, Proto.CURRENT_VERSION, out), + in -> Proto.INSTANCE.readResponse(request.get(), in)); + } + + static TimeoutInfo randomTimeoutInfo() { + return new TimeoutInfo(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnInfoTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnInfoTests.java new file mode 100644 index 00000000000..04ba1a01199 --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnInfoTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.sql.JDBCType; + +import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.assertRoundTrip; + +public class MetaColumnInfoTests extends ESTestCase { + static MetaColumnInfo randomMetaColumnInfo() { + return new MetaColumnInfo(randomAlphaOfLength(5), randomAlphaOfLength(5), randomFrom(JDBCType.values()), + between(1, Integer.MAX_VALUE), between(1, Integer.MAX_VALUE)); + } + + public void testRoundTrip() throws IOException { + assertRoundTrip(randomMetaColumnInfo(), MetaColumnInfo::writeTo, MetaColumnInfo::new); + } + + public void testToString() { + assertEquals("test.doc.col", + new MetaColumnInfo("test.doc", "col", JDBCType.VARCHAR, 100, 1).toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnRequestTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnRequestTests.java new file mode 100644 index 00000000000..edfaa0ec322 --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnRequestTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion; + +public class MetaColumnRequestTests extends ESTestCase { + public static MetaColumnRequest randomMetaColumnRequest() { + return new MetaColumnRequest(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomMetaColumnRequest()); + } + + public void testToString() { + assertEquals("MetaColumnRequest", new MetaColumnRequest("test.do%", "d%").toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnResponseTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnResponseTests.java new file mode 100644 index 00000000000..8928cf8a448 --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaColumnResponseTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.sql.JDBCType; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnInfoTests.randomMetaColumnInfo; + + +public class MetaColumnResponseTests extends ESTestCase { + static MetaColumnResponse randomMetaColumnResponse() { + int size = between(0, 10); + List columns = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + columns.add(randomMetaColumnInfo()); + } + return new MetaColumnResponse(columns); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(MetaColumnRequestTests::randomMetaColumnRequest, randomMetaColumnResponse()); + } + + public void testToString() { + assertEquals("MetaColumnResponse<>", new MetaColumnResponse(emptyList()).toString()); + assertEquals("MetaColumnResponse, " + + "a.doc.col2, " + + "b.doc.col1>", new MetaColumnResponse(Arrays.asList( + new MetaColumnInfo("a.doc", "col1", JDBCType.VARCHAR, 100, 1), + new MetaColumnInfo("a.doc", "col2", JDBCType.INTEGER, 16, 2), + new MetaColumnInfo("b.doc", "col1", JDBCType.VARCHAR, 100, 1))).toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaTableRequestTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaTableRequestTests.java new file mode 100644 index 00000000000..52caf21859c --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaTableRequestTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion; + +public class MetaTableRequestTests extends ESTestCase { + public static MetaTableRequest randomMetaTableRequest() { + return new MetaTableRequest(randomAlphaOfLength(10)); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomMetaTableRequest()); + } + + public void testToString() { + assertEquals("MetaTableRequest", new MetaTableRequest("test.do%").toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaTableResponseTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaTableResponseTests.java new file mode 100644 index 00000000000..c0bcfecac9f --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/MetaTableResponseTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion; + +public class MetaTableResponseTests extends ESTestCase { + static MetaTableResponse randomMetaTableResponse() { + int size = between(0, 10); + List tables = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + tables.add(randomAlphaOfLength(5)); + } + return new MetaTableResponse(tables); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(MetaTableRequestTests::randomMetaTableRequest, randomMetaTableResponse()); + } + + public void testToString() { + assertEquals("MetaTableResponse<>", new MetaTableResponse(emptyList()).toString()); + assertEquals("MetaTableResponse", new MetaTableResponse(Arrays.asList("a.doc", "b.doc", "c.doc")).toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/PageTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/PageTests.java new file mode 100644 index 00000000000..6f493c17f34 --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/PageTests.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.doubleInfo; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.intInfo; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.randomValueFor; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.varcharInfo; +import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.assertRoundTrip; +import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.roundTrip; + +public class PageTests extends ESTestCase { + static Page randomPage() { + int columns = between(0, 10); + List columnInfo = new ArrayList<>(); + for (int c = 0; c < columns; c++) { + @SuppressWarnings("unchecked") + Supplier info = randomFrom( + () -> varcharInfo(randomAlphaOfLength(5)), + () -> intInfo(randomAlphaOfLength(5)), + () -> doubleInfo(randomAlphaOfLength(5))); + columnInfo.add(info.get()); + } + return randomPageContents(columnInfo); + } + + static Page randomPageContents(List columnInfo) { + Object[][] rows = new Object[between(0, 10)][]; + for (int r = 0; r < rows.length; r++) { + rows[r] = new Object[columnInfo.size()]; + for (int c = 0; c < columnInfo.size(); c++) { + rows[r][c] = randomValueFor(columnInfo.get(c)); + } + } + return new 
Page(columnInfo, rows); + } + + public void testRoundTripNoReuse() throws IOException { + Page example = randomPage(); + assertRoundTrip(example, writeTo(AbstractProto.CURRENT_VERSION), in -> { + Page page = new Page(example.columnInfo()); + page.readFrom(new SqlDataInput(in, AbstractProto.CURRENT_VERSION)); + return page; + }); + } + + public void testRoundTripReuse() throws IOException { + Page example = randomPage(); + Page target = new Page(example.columnInfo()); + CheckedFunction readFrom = in -> { + target.readFrom(new SqlDataInput(in, AbstractProto.CURRENT_VERSION)); + return null; + }; + roundTrip(example, writeTo(AbstractProto.CURRENT_VERSION), readFrom); + assertEquals(example, target); + + example = randomPageContents(example.columnInfo()); + roundTrip(example, writeTo(AbstractProto.CURRENT_VERSION), readFrom); + assertEquals(example, target); + } + + public void testToString() { + assertEquals("\n\n", + new Page(emptyList(), new Object[][] { + new Object[] {}, + new Object[] {}, + }).toString()); + assertEquals("test\n", + new Page(singletonList(varcharInfo("a")), new Object[][] { + new Object[] {"test"} + }).toString()); + assertEquals("test, 1\n", + new Page(Arrays.asList(varcharInfo("a"), intInfo("b")), new Object[][] { + new Object[] {"test", 1} + }).toString()); + assertEquals("test, 1\nbar, 7\n", + new Page(Arrays.asList(varcharInfo("a"), intInfo("b")), new Object[][] { + new Object[] {"test", 1}, + new Object[] {"bar", 7} + }).toString()); + + } + + private static CheckedBiConsumer writeTo(int version) { + return (page, in) -> + page.writeTo(new SqlDataOutput(in, version)); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryCloseRequestTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryCloseRequestTests.java new file mode 100644 index 00000000000..13e575988f5 --- /dev/null +++ 
b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryCloseRequestTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.randomTimeoutInfo; + +public class QueryCloseRequestTests extends ESTestCase { + static QueryCloseRequest randomQueryCloseRequest() { + String cursor = randomAlphaOfLength(10); + return new QueryCloseRequest(cursor); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomQueryCloseRequest()); + } + + public void testToString() { + assertEquals("QueryCloseRequest<123>", new QueryCloseRequest("123").toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryCloseResponseTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryCloseResponseTests.java new file mode 100644 index 00000000000..45d7ee16d06 --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryCloseResponseTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion; + +public class QueryCloseResponseTests extends ESTestCase { + static QueryCloseResponse randomQueryCloseResponse() { + return new QueryCloseResponse(randomBoolean()); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(QueryCloseRequestTests::randomQueryCloseRequest, randomQueryCloseResponse()); + } + + public void testToString() { + assertEquals("QueryCloseResponse", new QueryCloseResponse(true).toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryInitRequestTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryInitRequestTests.java new file mode 100644 index 00000000000..452c24ec7dd --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryInitRequestTests.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo; + +import java.io.IOException; +import java.util.TimeZone; + +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.randomTimeoutInfo; + + +public class QueryInitRequestTests extends ESTestCase { + static QueryInitRequest randomQueryInitRequest() { + return new QueryInitRequest(randomAlphaOfLength(5), between(0, Integer.MAX_VALUE), randomTimeZone(random()), randomTimeoutInfo()); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomQueryInitRequest()); + } + + public void testToString() { + assertEquals("QueryInitRequest", + new QueryInitRequest("SELECT * FROM test.doc", 10, TimeZone.getTimeZone("UTC"), new TimeoutInfo(1, 1, 1)).toString()); + assertEquals("QueryInitRequest", + new QueryInitRequest("SELECT * FROM test.doc", 10, TimeZone.getTimeZone("GMT-5"), new TimeoutInfo(1, 1, 1)).toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryInitResponseTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryInitResponseTests.java new file mode 100644 index 00000000000..0a83ea15c41 --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryInitResponseTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.varcharInfo; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.PageTests.randomPage; + +public class QueryInitResponseTests extends ESTestCase { + static QueryInitResponse randomQueryInitResponse() { + String cursor = randomAlphaOfLength(10); + Page page = randomPage(); + return new QueryInitResponse(randomNonNegativeLong(), cursor, page.columnInfo(), page); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(QueryInitRequestTests::randomQueryInitRequest, randomQueryInitResponse()); + } + + public void testToString() { + Page page = new Page(singletonList(varcharInfo("a")), new Object[][] { + new Object[] {"test"}, + new Object[] {"string"}, + }); + assertEquals("QueryInitResponse] data=[" + + "\ntest\nstring\n]>", + new QueryInitResponse(123, "0120", page.columnInfo(), page).toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryPageRequestTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryPageRequestTests.java new file mode 100644 index 00000000000..189398b82d9 --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryPageRequestTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.randomTimeoutInfo; + +public class QueryPageRequestTests extends ESTestCase { + static QueryPageRequest randomQueryPageRequest(Page page) { + String cursor = randomAlphaOfLength(10); + return new QueryPageRequest(cursor, randomTimeoutInfo(), page); + } + + public void testRoundTrip() throws IOException { + assertRoundTripCurrentVersion(randomQueryPageRequest(null)); + } + + public void testToString() { + assertEquals("QueryPageRequest<0320>", new QueryPageRequest("0320", new TimeoutInfo(1, 1, 1), null).toString()); + } +} diff --git a/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryPageResponseTests.java b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryPageResponseTests.java new file mode 100644 index 00000000000..1e8f38ba97c --- /dev/null +++ b/sql/jdbc-proto/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/QueryPageResponseTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.varcharInfo; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.PageTests.randomPage; +import static org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageRequestTests.randomQueryPageRequest; + +public class QueryPageResponseTests extends ESTestCase { + static QueryPageResponse randomQueryPageResponse(Page page) { + String cursor = randomAlphaOfLength(10); + return new QueryPageResponse(randomNonNegativeLong(), cursor, page); + } + + public void testRoundTrip() throws IOException { + Page page = randomPage(); + assertRoundTripCurrentVersion(() -> randomQueryPageRequest(new Page(page.columnInfo())), randomQueryPageResponse(page)); + } + + public void testToString() { + Page results = new Page(singletonList(varcharInfo("a")), new Object[][] { + new Object[] {"test"} + }); + assertEquals("QueryPageResponse", + new QueryPageResponse(123, "0810", results).toString()); + } +} diff --git a/sql/jdbc/build.gradle b/sql/jdbc/build.gradle new file mode 100644 index 00000000000..551a9f8d1e5 --- /dev/null +++ b/sql/jdbc/build.gradle @@ -0,0 +1,64 @@ +plugins { + id 'com.github.johnrengelman.shadow' version '2.0.1' +} + +import org.elasticsearch.gradle.test.RunTask + +description = 'JDBC driver for Elasticsearch' + +forbiddenApisMain { + // does not depend on core, so only jdk and http signatures should be checked + signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] +} + +dependencies { + compile project(':x-pack-elasticsearch:sql:shared-client') + compile project(':x-pack-elasticsearch:sql:jdbc-proto') + compile project(':x-pack-elasticsearch:sql:shared-proto') + 
runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + /* We want to limit these dependencies so we don't have a huge jar. + * Since we shadow these dependencies we don't have to be super careful + * but we have to be *somewhat* careful because things like commons logging + * don't shadow properly. */ +} + +dependencyLicenses { + mapping from: /jdbc-proto.*/, to: 'elasticsearch' + mapping from: /shared-client.*/, to: 'elasticsearch' + mapping from: /shared-proto.*/, to: 'elasticsearch' + mapping from: /jackson-.*/, to: 'jackson' + ignoreSha 'jdbc-proto' + ignoreSha 'shared-client' + ignoreSha 'shared-proto' +} + +/* Disable the jar task configured by the java plugin. We're not going to + * distribute an unshaded jar so there is no need making one. */ +jar { + enabled = false +} +configurations.archives.artifacts.removeAll { it.archiveTask.is jar } + +/* Move the shaded jar to the empty classifier because it is the only one + * we're shipping. */ +shadowJar { + classifier = null + // We only need to relocate jackson + relocate 'com.fasterxml.jackson', 'org.elasticsearch.xpack.sql.jdbc.shadow.jacksonp' + manifest { + inheritFrom jar.manifest + } +} +assemble.dependsOn shadowJar +artifacts { + archives shadowJar +} + +// And for better realism let's use the shaded jar for testing +test { + classpath -= compileJava.outputs.files + classpath -= configurations.compile + classpath -= configurations.runtime + classpath += shadowJar.outputs.files + dependsOn shadowJar +} diff --git a/sql/jdbc/licenses/elasticsearch-LICENSE.txt b/sql/jdbc/licenses/elasticsearch-LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/sql/jdbc/licenses/elasticsearch-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/sql/jdbc/licenses/elasticsearch-NOTICE.txt b/sql/jdbc/licenses/elasticsearch-NOTICE.txt new file mode 100644 index 00000000000..643a060cd05 --- /dev/null +++ b/sql/jdbc/licenses/elasticsearch-NOTICE.txt @@ -0,0 +1,5 @@ +Elasticsearch +Copyright 2009-2017 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). diff --git a/sql/jdbc/licenses/jackson-LICENSE b/sql/jdbc/licenses/jackson-LICENSE new file mode 100644 index 00000000000..f5f45d26a49 --- /dev/null +++ b/sql/jdbc/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). 
+See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/sql/jdbc/licenses/jackson-NOTICE b/sql/jdbc/licenses/jackson-NOTICE new file mode 100644 index 00000000000..4c976b7b4cc --- /dev/null +++ b/sql/jdbc/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/sql/jdbc/licenses/jackson-core-2.8.10.jar.sha1 b/sql/jdbc/licenses/jackson-core-2.8.10.jar.sha1 new file mode 100644 index 00000000000..a322d371e26 --- /dev/null +++ b/sql/jdbc/licenses/jackson-core-2.8.10.jar.sha1 @@ -0,0 +1 @@ +eb21a035c66ad307e66ec8fce37f5d50fd62d039 \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcException.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcException.java new file mode 100644 index 00000000000..412c374e8a2 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcException.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc; + +public class JdbcException extends RuntimeException { + + public JdbcException(String message) { + super(message); + } + + public JdbcException(Throwable cause, String message) { + super(message, cause); + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcSQLException.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcSQLException.java new file mode 100644 index 00000000000..352e075a570 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcSQLException.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc; + +import java.sql.SQLException; + +public class JdbcSQLException extends SQLException { + + public JdbcSQLException(String message) { + super(message); + } + + public JdbcSQLException(Throwable cause, String message) { + super(message, cause); + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ConnectionProxy.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ConnectionProxy.java new file mode 100644 index 00000000000..016fcc87d24 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ConnectionProxy.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import java.sql.DatabaseMetaData; +import java.sql.Statement; + +final class ConnectionProxy extends DebuggingInvoker { + + ConnectionProxy(DebugLog log, Object target) { + super(log, target, null); + } + + @Override + protected Object postProcess(Object result, Object proxy) { + if (result instanceof Statement) { + return Debug.proxy(result, new StatementProxy(log, result, proxy)); + } + if (result instanceof DatabaseMetaData) { + return Debug.proxy(new DatabaseMetadataProxy(log, result, proxy)); + } + + return result; + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DatabaseMetadataProxy.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DatabaseMetadataProxy.java new file mode 100644 index 00000000000..0c7cc0dcb4f --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DatabaseMetadataProxy.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import java.sql.ResultSet; + +final class DatabaseMetadataProxy extends DebuggingInvoker { + + DatabaseMetadataProxy(DebugLog log, Object result, Object parent) { + super(log, result, parent); + } + + @Override + protected Object postProcess(Object result, Object proxy) { + if (result instanceof ResultSet) { + return Debug.proxy(new ResultSetProxy(log, result, null)); + } + return result; + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/Debug.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/Debug.java new file mode 100644 index 00000000000..72b6b2abf53 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/Debug.java @@ -0,0 +1,238 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import org.elasticsearch.xpack.sql.client.shared.SuppressForbidden; +import org.elasticsearch.xpack.sql.jdbc.JdbcException; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; + +import java.io.OutputStreamWriter; +import java.io.PrintStream; +import java.io.PrintWriter; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Proxy; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.DriverManager; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.Statement; +import java.util.HashMap; +import java.util.Map; + +import javax.sql.DataSource; + +/** + * Class handling debug logging. 
Typically disabled (hence why it's called debug). + * JDBC carries a lot of legacy conventions, logging being one of them - in JDBC logging was expected to + * be to System.Err/Out since there were no logging frameworks at the time. + * This didn't work so the API was changed through {@link DriverManager#getLogStream()} however that also had issues + * being global and not working well with encoding (hence why {@link DriverManager#getLogWriter()} was introduced) + * and was changed again through {@link DataSource#getLogWriter()}. + * However by then the damage was done and most drivers don't use either and have their own logging implementation. + * + * This class tries to cater to both audience - use the legacy, Writer way if needed though strive to use the + * proper typical approach, that of specifying intention and output (file) in the URL. + * + * For this reason the {@link System#out} and {@link System#err} are being refered in this class though are used only + * when needed. + */ +public final class Debug { + + // cache for streams created by ourselves + private static final Map OUTPUT_CACHE = new HashMap<>(); + // reference counter for a given output + private static final Map OUTPUT_REFS = new HashMap<>(); + // cache of loggers that rely on external/managed printers + private static final Map OUTPUT_MANAGED = new HashMap<>(); + + private static volatile DebugLog ERR = null, OUT = null; + private static volatile PrintStream SYS_ERR = null, SYS_OUT = null; + + /** + * Create a proxied Connection which performs logging of all methods being invoked. + * Typically Debug will read its configuration from the configuration and act accordingly however + * there are two cases where the output is specified programmatically, namely through + * {@link DriverManager#setLogWriter(PrintWriter)} and {@link DataSource#setLogWriter(PrintWriter)}. + * The former is the 'legacy' way, having a global impact on all drivers while the latter allows per + * instance configuration. 
+ * + * As both approaches are not widely used, Debug will take the principle of least surprise and pick its + * own configuration first; if that does not exist it will fallback to the managed approaches (assuming they + * are specified, otherwise logging is simply disabled). + */ + public static Connection proxy(JdbcConfiguration info, Connection connection, PrintWriter managedPrinter) { + return createProxy(Connection.class, new ConnectionProxy(logger(info, managedPrinter), connection)); + } + + static DatabaseMetaData proxy(DatabaseMetadataProxy handler) { + return createProxy(DatabaseMetaData.class, handler); + } + + static ParameterMetaData proxy(ParameterMetaDataProxy handler) { + return createProxy(ParameterMetaData.class, handler); + } + + static ResultSet proxy(ResultSetProxy handler) { + return createProxy(ResultSet.class, handler); + } + + static ResultSetMetaData proxy(ResultSetMetaDataProxy handler) { + return createProxy(ResultSetMetaData.class, handler); + } + + static Statement proxy(Object statement, StatementProxy handler) { + Class i = Statement.class; + + if (statement instanceof PreparedStatement) { + i = PreparedStatement.class; + } + else if (statement instanceof CallableStatement) { + i = CallableStatement.class; + } + + return createProxy(i, handler); + } + + @SuppressWarnings("unchecked") + private static

P createProxy(Class

proxy, InvocationHandler handler) { + return (P) Proxy.newProxyInstance(Debug.class.getClassLoader(), new Class[] { DebugProxy.class, proxy }, handler); + } + + private static DebugLog logger(JdbcConfiguration info, PrintWriter managedPrinter) { + DebugLog log = null; + + if (managedPrinter != null) { + synchronized (Debug.class) { + log = OUTPUT_MANAGED.get(managedPrinter); + if (log == null) { + log = new DebugLog(managedPrinter); + OUTPUT_MANAGED.put(managedPrinter, log); + } + return log; + } + } + + String out = info.debugOut(); + + // System.out/err can be changed so do some checks + if ("err".equals(out)) { + PrintStream sys = stderr(); + + if (SYS_ERR == null) { + SYS_ERR = sys; + } + if (SYS_ERR != sys) { + SYS_ERR.flush(); + SYS_ERR = sys; + ERR = null; + } + if (ERR == null) { + ERR = new DebugLog(new PrintWriter(new OutputStreamWriter(sys, StandardCharsets.UTF_8))); + } + return ERR; + } + + if ("out".equals(out)) { + PrintStream sys = stdout(); + + if (SYS_OUT == null) { + SYS_OUT = sys; + } + + if (SYS_OUT != sys) { + SYS_OUT.flush(); + SYS_OUT = sys; + OUT = null; + } + + if (OUT == null) { + OUT = new DebugLog(new PrintWriter(new OutputStreamWriter(sys, StandardCharsets.UTF_8))); + } + return OUT; + } + + synchronized (Debug.class) { + log = OUTPUT_CACHE.get(out); + if (log == null) { + // must be local file + try { + PrintWriter print = new PrintWriter(Files.newBufferedWriter(Paths.get("").resolve(out), StandardCharsets.UTF_8)); + log = new DebugLog(print); + OUTPUT_CACHE.put(out, log); + OUTPUT_REFS.put(out, Integer.valueOf(0)); + } catch (Exception ex) { + throw new JdbcException(ex, "Cannot open debug output [" + out + "]"); + } + } + OUTPUT_REFS.put(out, Integer.valueOf(OUTPUT_REFS.get(out).intValue() + 1)); + } + + return log; + } + + public static void release(JdbcConfiguration info) { + if (!info.debug()) { + return; + } + + String out = info.debugOut(); + synchronized (Debug.class) { + Integer ref = OUTPUT_REFS.get(out); + if (ref != null) 
{ + int r = ref.intValue(); + if (r < 2) { + OUTPUT_REFS.remove(out); + DebugLog d = OUTPUT_CACHE.remove(out); + if (d != null) { + if (d.print != null) { + d.print.close(); + } + } + } + else { + OUTPUT_REFS.put(out, Integer.valueOf(r - 1)); + } + } + } + } + + public static synchronized void close() { + // clear the ref + OUTPUT_REFS.clear(); + + // clear the streams + for (DebugLog d : OUTPUT_CACHE.values()) { + if (d.print != null) { + d.print.close(); + } + } + OUTPUT_CACHE.clear(); + + // flush the managed ones + for (DebugLog d : OUTPUT_MANAGED.values()) { + d.print.flush(); + } + + OUTPUT_MANAGED.clear(); + } + + @SuppressForbidden(reason = "JDBC drivers allows logging to Sys.out") + private static PrintStream stdout() { + return System.out; + } + + @SuppressForbidden(reason = "JDBC drivers allows logging to Sys.err") + private static PrintStream stderr() { + return System.err; + } +} \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebugLog.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebugLog.java new file mode 100644 index 00000000000..28444c1f8ee --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebugLog.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import org.elasticsearch.xpack.sql.client.shared.StringUtils; + +import java.io.PrintWriter; +import java.lang.reflect.Array; +import java.lang.reflect.Method; +import java.util.Locale; + +// Logging is done through PrintWriter (not PrintStream which maps to System.err/out) to plug into the JDBC API +final class DebugLog { + private static final String HEADER = "%tF/%tT.%tL - "; + + final PrintWriter print; + + DebugLog(PrintWriter print) { + this.print = print; + } + + void logMethod(Method m, Object[] args) { + long time = System.currentTimeMillis(); + print.printf(Locale.ROOT, HEADER + "Invoke %s#%s(%s)%n", + time, time, time, + //m.getReturnType().getSimpleName(), + m.getDeclaringClass().getSimpleName(), + m.getName(), + //array(m.getParameterTypes()), + array(args)); + } + + + void logResult(Method m, Object[] args, Object r) { + long time = System.currentTimeMillis(); + print.printf(Locale.ROOT, HEADER + "%s#%s(%s) returned %s%n", + time, time, time, + //m.getReturnType().getSimpleName(), + m.getDeclaringClass().getSimpleName(), + m.getName(), + //array(m.getParameterTypes()), + array(args), + r); + } + + void logException(Method m, Object[] args, Throwable t) { + long time = System.currentTimeMillis(); + print.printf(Locale.ROOT, HEADER + "%s#%s(%s) threw ", + time, time, time, + m.getDeclaringClass().getSimpleName(), + m.getName(), + array(args)); + t.printStackTrace(print); + print.flush(); + } + + + private static String array(Object[] a) { + if (a == null || a.length == 0) { + return StringUtils.EMPTY; + } + if (a.length == 1) { + return handleArray(a[0]); + } + + StringBuilder b = new StringBuilder(); + int iMax = a.length - 1; + for (int i = 0; ; i++) { + b.append(handleArray(a[i])); + if (i == iMax) { + return b.toString(); + } + b.append(", "); + } + } + + private static String handleArray(Object o) { + if (o != null && o.getClass().isArray()) { + StringBuilder b = new StringBuilder(); + int l 
= Array.getLength(o); + int iMax = l - 1; + + if (iMax == -1) + return "[]"; + + b.append('['); + for (int i = 0; i < l; i++) { + b.append(handleArray(Array.get(o, i))); + if (i == iMax) { + return b.append("]").toString(); + } + b.append(", "); + } + } + return String.valueOf(o); + } +} \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebugProxy.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebugProxy.java new file mode 100644 index 00000000000..9294075cb1d --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebugProxy.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +// Debug marker interface for compatible proxy. +interface DebugProxy { + +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebuggingInvoker.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebuggingInvoker.java new file mode 100644 index 00000000000..4320841471c --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebuggingInvoker.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; + +abstract class DebuggingInvoker implements InvocationHandler { + + private final Object target; + // used by subclasses to indicate the parent instance that creates the object + // for example a PreparedStatement has a Connection as parent + // the instance is kept around instead of reproxying to preserve the semantics (instead of creating a new proxy) + protected final Object parent; + + final DebugLog log; + + DebuggingInvoker(DebugLog log, Object target, Object parent) { + this.log = log; + this.target = target; + this.parent = parent; + } + + @Override + public final Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + String name = method.getName(); + Class[] params = method.getParameterTypes(); + + if ("equals".equals(name) && params.length == 1 && params[0] == Object.class) { + Object o = args[0]; + if (o == null || !(o instanceof DebugProxy)) { + return Boolean.FALSE; + } + InvocationHandler ih = Proxy.getInvocationHandler(o); + return (ih instanceof DebuggingInvoker && target.equals(((DebuggingInvoker) ih).target)); + } + + else if ("hashCode".equals(name) && params.length == 0) { + return System.identityHashCode(proxy); + } + + else if ("toString".equals(name) && params.length == 0) { + return "Debug proxy for " + target; + } + + try { + Object result = method.invoke(target, args); + log.logResult(method, args, result); + return result == null || result instanceof DebugProxy ? 
result : postProcess(result, proxy); + } catch (InvocationTargetException ex) { + log.logException(method, args, ex.getCause()); + throw ex.getCause(); + } catch (Exception ex) { + // should not occur + log.logException(method, args, ex); + throw new JdbcSQLException(ex, "Debugging failed for [" + method + "]"); + } + } + + protected Object postProcess(Object result, Object proxy) { + return result; + } + + Object target() { + return target; + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ParameterMetaDataProxy.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ParameterMetaDataProxy.java new file mode 100644 index 00000000000..22d0cea3cc3 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ParameterMetaDataProxy.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +final class ParameterMetaDataProxy extends DebuggingInvoker { + + ParameterMetaDataProxy(DebugLog log, Object target) { + super(log, target, null); + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ResultSetMetaDataProxy.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ResultSetMetaDataProxy.java new file mode 100644 index 00000000000..18b2e583e86 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ResultSetMetaDataProxy.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +final class ResultSetMetaDataProxy extends DebuggingInvoker { + + ResultSetMetaDataProxy(DebugLog log, Object target) { + super(log, target, null); + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ResultSetProxy.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ResultSetProxy.java new file mode 100644 index 00000000000..417adbb0f2e --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ResultSetProxy.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import java.sql.ResultSetMetaData; +import java.sql.Statement; + +class ResultSetProxy extends DebuggingInvoker { + + ResultSetProxy(DebugLog log, Object target, Object parent) { + super(log, target, parent); + } + + @Override + protected Object postProcess(Object result, Object proxy) { + if (result instanceof ResultSetMetaData) { + return Debug.proxy(new ResultSetMetaDataProxy(log, result)); + } + if (result instanceof Statement) { + return parent; + } + return result; + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/StatementProxy.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/StatementProxy.java new file mode 100644 index 00000000000..7c5d19553fc --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/StatementProxy.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import java.sql.Connection; +import java.sql.ParameterMetaData; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; + +// handles Statement, PreparedStatement and CallableStatement +final class StatementProxy extends DebuggingInvoker { + + StatementProxy(DebugLog log, Object target, Object con) { + super(log, target, con); + } + + @Override + protected Object postProcess(Object result, Object proxy) { + if (result instanceof Connection) { + return parent; + } + if (result instanceof ResultSet) { + return Debug.proxy(new ResultSetProxy(log, result, proxy)); + } + if (result instanceof ParameterMetaData) { + return Debug.proxy(new ParameterMetaDataProxy(log, result)); + } + if (result instanceof ResultSetMetaData) { + return Debug.proxy(new ResultSetMetaDataProxy(log, result)); + } + + return result; + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConfiguration.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConfiguration.java new file mode 100644 index 00000000000..60a6127294a --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConfiguration.java @@ -0,0 +1,189 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; +import org.elasticsearch.xpack.sql.client.shared.StringUtils; +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; +import org.elasticsearch.xpack.sql.client.shared.Version; + +import java.net.URI; +import java.sql.DriverPropertyInfo; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.TimeZone; + +import static org.elasticsearch.xpack.sql.client.shared.UriUtils.parseURI; +import static org.elasticsearch.xpack.sql.client.shared.UriUtils.removeQuery; + +// +// Supports the following syntax +// +// jdbc:es://[host|ip] +// jdbc:es://[host|ip]:port/(prefix) +// jdbc:es://[host|ip]:port/(prefix)(?options=value&) +// +// Additional properties can be specified either through the Properties object or in the URL. In case of duplicates, the URL wins. +// + +//TODO: beef this up for Security/SSL +public class JdbcConfiguration extends ConnectionConfiguration { + static final String URL_PREFIX = "jdbc:es://"; + public static URI DEFAULT_URI = URI.create("http://localhost:9200/"); + + + static final String DEBUG = "debug"; + static final String DEBUG_DEFAULT = "false"; + + static final String DEBUG_OUTPUT = "debug.output"; + // can be out/err/url + static final String DEBUG_OUTPUT_DEFAULT = "err"; + + public static final String TIME_ZONE = "timezone"; + // follow the JDBC spec and use the JVM default... 
+ // to avoid inconsistency, the default is picked up once at startup and reused across connections + // to cater to the principle of least surprise + // really, the way to move forward is to specify a calendar or the timezone manually + static final String TIME_ZONE_DEFAULT = TimeZone.getDefault().getID(); + + // options that don't change at runtime + private static final Set OPTION_NAMES = new LinkedHashSet<>(Arrays.asList(TIME_ZONE, DEBUG, DEBUG_OUTPUT)); + + static { + // trigger version initialization + // typically this should have already happened but in case the + // JdbcDriver/JdbcDataSource are not used and the impl. classes used directly + // this covers that case + Version.version(); + } + + // immutable properties + private final String originalUrl; + private final boolean debug; + private final String debugOut; + + // mutable ones + private TimeZone timeZone; + + public static JdbcConfiguration create(String u, Properties props) throws JdbcSQLException { + URI uri = parseUrl(u); + Properties urlProps = parseProperties(uri, u); + uri = removeQuery(uri, u, DEFAULT_URI); + + // override properties set in the URL with the ones specified programmatically + if (props != null) { + urlProps.putAll(props); + } + + try { + return new JdbcConfiguration(uri, u, urlProps); + } catch (JdbcSQLException e) { + throw e; + } catch (Exception ex) { + throw new JdbcSQLException(ex, ex.getMessage()); + } + } + + private static URI parseUrl(String u) throws JdbcSQLException { + String url = u; + String format = "jdbc:es://[http|https]?[host[:port]]*/[prefix]*[?[option=value]&]*"; + if (!canAccept(u)) { + throw new JdbcSQLException("Expected [" + URL_PREFIX + "] url, received [" + u + "]"); + } + + try { + return parseURI(removeJdbcPrefix(u), DEFAULT_URI); + } catch (IllegalArgumentException ex) { + throw new JdbcSQLException(ex, "Invalid URL [" + url + "], format should be [" + format + "]"); + } + } + + private static String removeJdbcPrefix(String connectionString) 
throws JdbcSQLException { + if (connectionString.startsWith(URL_PREFIX)) { + return connectionString.substring(URL_PREFIX.length()); + } else { + throw new JdbcSQLException("Expected [" + URL_PREFIX + "] url, received [" + connectionString + "]"); + } + } + + private static Properties parseProperties(URI uri, String u) throws JdbcSQLException { + Properties props = new Properties(); + try { + if (uri.getRawQuery() != null) { + // parse properties + List prms = StringUtils.tokenize(uri.getRawQuery(), "&"); + for (String param : prms) { + List args = StringUtils.tokenize(param, "="); + if (args.size() != 2) { + throw new JdbcSQLException("Invalid parameter [" + param + "], format needs to be key=value"); + } + // further validation happens in the constructor (since extra properties might be specified either way) + props.setProperty(args.get(0).trim(), args.get(1).trim()); + } + } + } catch (JdbcSQLException e) { + throw e; + } catch (Exception e) { + // Add the url to unexpected exceptions + throw new IllegalArgumentException("Failed to parse acceptable jdbc url [" + u + "]", e); + } + return props; + } + + // constructor is private to force the use of a factory in order to catch and convert any validation exception + // and also do input processing as oppose to handling this from the constructor (which is tricky or impossible) + private JdbcConfiguration(URI baseURI, String u, Properties props) throws JdbcSQLException { + super(baseURI, u, props); + + this.originalUrl = u; + + this.debug = parseValue(DEBUG, props.getProperty(DEBUG, DEBUG_DEFAULT), Boolean::parseBoolean); + this.debugOut = props.getProperty(DEBUG_OUTPUT, DEBUG_OUTPUT_DEFAULT); + + this.timeZone = parseValue(TIME_ZONE, props.getProperty(TIME_ZONE, TIME_ZONE_DEFAULT), TimeZone::getTimeZone); + } + + @Override + protected Collection extraOptions() { + return OPTION_NAMES; + } + + public boolean debug() { + return debug; + } + + public String debugOut() { + return debugOut; + } + + public TimeZone 
timeZone() { + return timeZone; + } + + public void timeZone(TimeZone timeZone) { + this.timeZone = timeZone; + } + + public static boolean canAccept(String url) { + return (StringUtils.hasText(url) && url.trim().startsWith(JdbcConfiguration.URL_PREFIX)); + } + + public DriverPropertyInfo[] driverPropertyInfo() { + List info = new ArrayList<>(); + for (String option : OPTION_NAMES) { + String value = null; + DriverPropertyInfo prop = new DriverPropertyInfo(option, value); + info.add(prop); + } + + return info.toArray(new DriverPropertyInfo[info.size()]); + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java new file mode 100644 index 00000000000..233211fcfde --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java @@ -0,0 +1,433 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.debug.Debug; +import org.elasticsearch.xpack.sql.jdbc.net.client.JdbcHttpClient; + +import java.sql.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.NClob; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLClientInfoException; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.Struct; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; + +/** + * Implementation of {@link Connection} for Elasticsearch. + */ +public class JdbcConnection implements Connection, JdbcWrapper { + + private final String url, userName; + final JdbcConfiguration cfg; + final JdbcHttpClient client; + + private boolean closed = false; + private String catalog; + private String schema; + + public JdbcConnection(JdbcConfiguration connectionInfo) throws SQLException { + cfg = connectionInfo; + client = new JdbcHttpClient(connectionInfo); + + url = connectionInfo.connectionString(); + userName = connectionInfo.authUser(); + } + + private void checkOpen() throws SQLException { + if (isClosed()) { + throw new SQLException("Connection is closed"); + } + } + + @Override + public Statement createStatement() throws SQLException { + checkOpen(); + return new JdbcStatement(this, cfg); + } + + @Override + public PreparedStatement prepareStatement(String sql) throws SQLException { + checkOpen(); + return new JdbcPreparedStatement(this, cfg, sql); + } + + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + throw new SQLFeatureNotSupportedException("Stored procedures not supported yet"); + } + + 
@Override + public String nativeSQL(String sql) throws SQLException { + checkOpen(); + return sql; + } + + @Override + public void setAutoCommit(boolean autoCommit) throws SQLException { + checkOpen(); + if (!autoCommit) { + new SQLFeatureNotSupportedException("Non auto-commit is not supported"); + } + } + + @Override + public boolean getAutoCommit() throws SQLException { + checkOpen(); + return true; + } + + @Override + public void commit() throws SQLException { + checkOpen(); + if (getAutoCommit()) { + throw new SQLException("Auto-commit is enabled"); + } + throw new SQLFeatureNotSupportedException("Commit/Rollback not supported"); + } + + @Override + public void rollback() throws SQLException { + checkOpen(); + if (getAutoCommit()) { + throw new SQLException("Auto-commit is enabled"); + } + throw new SQLFeatureNotSupportedException("Commit/Rollback not supported"); + } + + @Override + public void close() throws SQLException { + if (!isClosed()) { + closed = true; + Debug.release(cfg); + } + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + public DatabaseMetaData getMetaData() throws SQLException { + return new JdbcDatabaseMetaData(this); + } + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + if (!readOnly) { + throw new SQLFeatureNotSupportedException("Only read-only mode is supported"); + } + } + + @Override + public boolean isReadOnly() throws SQLException { + checkOpen(); + return true; + } + + @Override + public void setCatalog(String catalog) throws SQLException { + checkOpen(); + this.catalog = catalog; + } + + @Override + public String getCatalog() throws SQLException { + checkOpen(); + return catalog; + } + + @Override + public void setTransactionIsolation(int level) throws SQLException { + checkOpen(); + if (TRANSACTION_NONE != level) { + throw new SQLFeatureNotSupportedException("Transactions not supported"); + } + } + + @Override + public int getTransactionIsolation() throws SQLException 
{ + checkOpen(); + return TRANSACTION_NONE; + } + + @Override + public SQLWarning getWarnings() throws SQLException { + checkOpen(); + return null; + } + + @Override + public void clearWarnings() throws SQLException { + checkOpen(); + // no-op + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { + checkResultSet(resultSetType, resultSetConcurrency); + return createStatement(); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + checkResultSet(resultSetType, resultSetConcurrency); + return prepareStatement(sql); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + checkResultSet(resultSetType, resultSetConcurrency); + return prepareCall(sql); + } + + @Override + public Map> getTypeMap() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("typeMap not supported"); + } + + @Override + public void setTypeMap(Map> map) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("typeMap not supported"); + } + + @Override + public void setHoldability(int holdability) throws SQLException { + checkOpen(); + checkHoldability(holdability); + } + + @Override + public int getHoldability() throws SQLException { + checkOpen(); + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public Savepoint setSavepoint() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Savepoints not supported"); + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Savepoints not supported"); + } + + @Override + public void rollback(Savepoint savepoint) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Savepoints not supported"); + } + + @Override + 
public void releaseSavepoint(Savepoint savepoint) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Savepoints not supported"); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + checkOpen(); + checkHoldability(resultSetHoldability); + return createStatement(resultSetType, resultSetConcurrency); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + checkOpen(); + checkHoldability(resultSetHoldability); + return prepareStatement(sql, resultSetType, resultSetConcurrency); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + checkOpen(); + checkHoldability(resultSetHoldability); + return prepareCall(sql, resultSetType, resultSetConcurrency); + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + checkOpen(); + if (autoGeneratedKeys != Statement.NO_GENERATED_KEYS) { + throw new SQLFeatureNotSupportedException("Auto generated keys must be NO_GENERATED_KEYS"); + } + return prepareStatement(sql); + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Autogenerated key not supported"); + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Autogenerated key not supported"); + } + + @Override + public Clob createClob() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Clob not supported yet"); + } + + @Override + public Blob createBlob() throws SQLException { + checkOpen(); + throw new 
SQLFeatureNotSupportedException("Blob not supported yet"); + } + + @Override + public NClob createNClob() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("NClob not supported yet"); + } + + @Override + public SQLXML createSQLXML() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("SQLXML not supported yet"); + } + + @Override + public boolean isValid(int timeout) throws SQLException { + if (timeout < 0) { + throw new SQLException("Negative timeout"); + } + return !isClosed() && client.ping(TimeUnit.SECONDS.toMillis(timeout)); + } + + private void checkOpenClientInfo() throws SQLClientInfoException { + if (isClosed()) { + throw new SQLClientInfoException("Connection closed", null); + } + } + + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + checkOpenClientInfo(); + // no-op + throw new SQLClientInfoException("Unsupported operation", null); + } + + @Override + public void setClientInfo(Properties properties) throws SQLClientInfoException { + checkOpenClientInfo(); + // no-op + throw new SQLClientInfoException("Unsupported operation", null); + } + + @Override + public String getClientInfo(String name) throws SQLException { + checkOpenClientInfo(); + // we don't support client info - the docs indicate we should return null if properties are not supported + return null; + } + + @Override + public Properties getClientInfo() throws SQLException { + checkOpenClientInfo(); + // similar to getClientInfo - return an empty object instead of an exception + return new Properties(); + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Array not supported yet"); + } + + @Override + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Struct not supported yet"); + } 
+ + @Override + public void setSchema(String schema) throws SQLException { + checkOpen(); + this.schema = schema; + } + + @Override + public String getSchema() throws SQLException { + checkOpen(); + return schema; + } + + @Override + public void abort(Executor executor) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getNetworkTimeout() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + private void checkResultSet(int resultSetType, int resultSetConcurrency) throws SQLException { + if (ResultSet.TYPE_FORWARD_ONLY != resultSetType) { + throw new SQLFeatureNotSupportedException("ResultSet type can only be TYPE_FORWARD_ONLY"); + } + if (ResultSet.CONCUR_READ_ONLY != resultSetConcurrency) { + throw new SQLFeatureNotSupportedException("ResultSet concurrency can only be CONCUR_READ_ONLY"); + } + } + + private void checkHoldability(int resultSetHoldability) throws SQLException { + if (ResultSet.HOLD_CURSORS_OVER_COMMIT != resultSetHoldability) { + throw new SQLFeatureNotSupportedException("Holdability can only be HOLD_CURSORS_OVER_COMMIT"); + } + } + + String getURL() { + return url; + } + + String getUserName() { + return userName; + } + + // There's no checkOpen on these methods since they are used by + // DatabaseMetadata that can work on a closed connection as well + // in fact, this information is cached by the underlying client + // once retrieved + int esInfoMajorVersion() throws SQLException { + return client.serverInfo().majorVersion; + } + + int esInfoMinorVersion() throws SQLException { + return client.serverInfo().minorVersion; + } +} \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcDatabaseMetaData.java 
b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcDatabaseMetaData.java new file mode 100644 index 00000000000..fffda14927c --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcDatabaseMetaData.java @@ -0,0 +1,1266 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.client.shared.ObjectUtils; +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; +import org.elasticsearch.xpack.sql.jdbc.net.client.Cursor; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnInfo; +import org.elasticsearch.xpack.sql.client.shared.Version; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.JDBCType; +import java.sql.ResultSet; +import java.sql.RowIdLifetime; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcUtils.numericPrecisionRadix; + +/** + * Implementation of {@link DatabaseMetaData} for Elasticsearch. Draws inspiration + * from + * PostgreSQL. Virtual/synthetic tables are not supported so the client returns + * empty data instead of creating a query. 
+ */ +class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { + + private final JdbcConnection con; + + JdbcDatabaseMetaData(JdbcConnection con) { + this.con = con; + } + + @Override + public boolean allProceduresAreCallable() throws SQLException { + return true; + } + + @Override + public boolean allTablesAreSelectable() throws SQLException { + return true; + } + + @Override + public String getURL() throws SQLException { + return con.getURL(); + } + + @Override + public String getUserName() throws SQLException { + return con.getUserName(); + } + + @Override + public boolean isReadOnly() throws SQLException { + return true; + } + + @Override + public boolean nullsAreSortedHigh() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedLow() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedAtStart() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedAtEnd() throws SQLException { + return false; + } + + @Override + public String getDatabaseProductName() throws SQLException { + return "Elasticsearch"; + } + + @Override + public String getDatabaseProductVersion() throws SQLException { + return Version.version(); + } + + @Override + public String getDriverName() throws SQLException { + return "Elasticsearch JDBC Driver"; + } + + @Override + public String getDriverVersion() throws SQLException { + return Version.versionMajor() + "." 
+ Version.versionMinor(); + } + + @Override + public int getDriverMajorVersion() { + return Version.versionMajor(); + } + + @Override + public int getDriverMinorVersion() { + return Version.versionMinor(); + } + + @Override + public boolean usesLocalFiles() throws SQLException { + return true; + } + + @Override + public boolean usesLocalFilePerTable() throws SQLException { + return true; + } + + @Override + public boolean supportsMixedCaseIdentifiers() throws SQLException { + return true; + } + + @Override + public boolean storesUpperCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesLowerCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesMixedCaseIdentifiers() throws SQLException { + //TODO: is the javadoc accurate + return false; + } + + @Override + public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { + return true; + } + + @Override + public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public String getIdentifierQuoteString() throws SQLException { + return "\""; + } + + @Override + public String getSQLKeywords() throws SQLException { + // TODO: sync this with the grammar + return ""; + } + + @Override + public String getNumericFunctions() throws SQLException { + // TODO: sync this with the grammar + return ""; + } + + @Override + public String getStringFunctions() throws SQLException { + // TODO: sync this with the grammar + return ""; + } + + @Override + public String getSystemFunctions() throws SQLException { + // TODO: sync this with the grammar + return ""; + } + + @Override + public String getTimeDateFunctions() throws SQLException { + return ""; + } + + @Override + public String 
getSearchStringEscape() throws SQLException { + return "\\"; + } + + @Override + public String getExtraNameCharacters() throws SQLException { + return ""; + } + + @Override + public boolean supportsAlterTableWithAddColumn() throws SQLException { + return false; + } + + @Override + public boolean supportsAlterTableWithDropColumn() throws SQLException { + return false; + } + + @Override + public boolean supportsColumnAliasing() throws SQLException { + return true; + } + + @Override + public boolean nullPlusNonNullIsNull() throws SQLException { + return true; + } + + @Override + public boolean supportsConvert() throws SQLException { + //TODO: add Convert + return false; + } + + @Override + public boolean supportsConvert(int fromType, int toType) throws SQLException { + return false; + } + + @Override + public boolean supportsTableCorrelationNames() throws SQLException { + return true; + } + + @Override + public boolean supportsDifferentTableCorrelationNames() throws SQLException { + return false; + } + + @Override + public boolean supportsExpressionsInOrderBy() throws SQLException { + return false; + } + + @Override + public boolean supportsOrderByUnrelated() throws SQLException { + return true; + } + + @Override + public boolean supportsGroupBy() throws SQLException { + return true; + } + + @Override + public boolean supportsGroupByUnrelated() throws SQLException { + return true; + } + + @Override + public boolean supportsGroupByBeyondSelect() throws SQLException { + return true; + } + + @Override + public boolean supportsLikeEscapeClause() throws SQLException { + return true; + } + + @Override + public boolean supportsMultipleResultSets() throws SQLException { + return false; + } + + @Override + public boolean supportsMultipleTransactions() throws SQLException { + return false; + } + + @Override + public boolean supportsNonNullableColumns() throws SQLException { + return true; + } + + @Override + public boolean supportsMinimumSQLGrammar() throws SQLException { + 
return true; + } + + @Override + public boolean supportsCoreSQLGrammar() throws SQLException { + return false; + } + + @Override + public boolean supportsExtendedSQLGrammar() throws SQLException { + return false; + } + + @Override + public boolean supportsANSI92EntryLevelSQL() throws SQLException { + return true; + } + + @Override + public boolean supportsANSI92IntermediateSQL() throws SQLException { + return false; + } + + @Override + public boolean supportsANSI92FullSQL() throws SQLException { + return false; + } + + @Override + public boolean supportsIntegrityEnhancementFacility() throws SQLException { + return false; + } + + @Override + public boolean supportsOuterJoins() throws SQLException { + return true; + } + + @Override + public boolean supportsFullOuterJoins() throws SQLException { + return false; + } + + @Override + public boolean supportsLimitedOuterJoins() throws SQLException { + return true; + } + + @Override + public String getSchemaTerm() throws SQLException { + return "schema"; + } + + @Override + public String getProcedureTerm() throws SQLException { + return "procedure"; + } + + @Override + public String getCatalogTerm() throws SQLException { + return "clusterName"; + } + + @Override + public boolean isCatalogAtStart() throws SQLException { + return true; + } + + @Override + public String getCatalogSeparator() throws SQLException { + return "."; + } + + @Override + public boolean supportsSchemasInDataManipulation() throws SQLException { + return true; + } + + @Override + public boolean supportsSchemasInProcedureCalls() throws SQLException { + return true; + } + + @Override + public boolean supportsSchemasInTableDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsSchemasInIndexDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { + return true; + } + + @Override + public boolean 
supportsCatalogsInDataManipulation() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInProcedureCalls() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInTableDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInIndexDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsPositionedDelete() throws SQLException { + return false; + } + + @Override + public boolean supportsPositionedUpdate() throws SQLException { + return false; + } + + @Override + public boolean supportsSelectForUpdate() throws SQLException { + return false; + } + + @Override + public boolean supportsStoredProcedures() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInComparisons() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInExists() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInIns() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInQuantifieds() throws SQLException { + return false; + } + + @Override + public boolean supportsCorrelatedSubqueries() throws SQLException { + return false; + } + + @Override + public boolean supportsUnion() throws SQLException { + return false; + } + + @Override + public boolean supportsUnionAll() throws SQLException { + return true; + } + + @Override + public boolean supportsOpenCursorsAcrossCommit() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenCursorsAcrossRollback() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenStatementsAcrossCommit() throws SQLException { + return false; + } + + @Override + public boolean 
supportsOpenStatementsAcrossRollback() throws SQLException { + return false; + } + + @Override + public int getMaxBinaryLiteralLength() throws SQLException { + return 0; + } + + @Override + public int getMaxCharLiteralLength() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInGroupBy() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInIndex() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInOrderBy() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInSelect() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInTable() throws SQLException { + return 0; + } + + @Override + public int getMaxConnections() throws SQLException { + return 0; + } + + @Override + public int getMaxCursorNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxIndexLength() throws SQLException { + return 0; + } + + @Override + public int getMaxSchemaNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxProcedureNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxCatalogNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxRowSize() throws SQLException { + return 0; + } + + @Override + public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { + return true; + } + + @Override + public int getMaxStatementLength() throws SQLException { + return 0; + } + + @Override + public int getMaxStatements() throws SQLException { + return 0; + } + + @Override + public int getMaxTableNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxTablesInSelect() throws SQLException { + return 0; + } + + @Override + public int getMaxUserNameLength() throws SQLException { + return 0; + } + + @Override + public int 
getDefaultTransactionIsolation() throws SQLException { + return Connection.TRANSACTION_NONE; + } + + @Override + public boolean supportsTransactions() throws SQLException { + return false; + } + + @Override + public boolean supportsTransactionIsolationLevel(int level) throws SQLException { + return Connection.TRANSACTION_NONE == level; + } + + @Override + public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { + return false; + } + + @Override + public boolean supportsDataManipulationTransactionsOnly() throws SQLException { + return false; + } + + @Override + public boolean dataDefinitionCausesTransactionCommit() throws SQLException { + return false; + } + + @Override + public boolean dataDefinitionIgnoredInTransactions() throws SQLException { + return false; + } + + // https://www.postgresql.org/docs/9.0/static/infoschema-routines.html + @Override + public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException { + return emptySet(con.cfg, + "ROUTINES", + "PROCEDURE_CAT", + "PROCEDURE_SCHEM", + "PROCEDURE_NAME", + "NUM_INPUT_PARAMS", int.class, + "NUM_OUTPUT_PARAMS", int.class, + "NUM_RESULT_SETS", int.class, + "REMARKS", + "PROCEDURE_TYPE", short.class, + "SPECIFIC_NAME"); + } + + @Override + public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) + throws SQLException { + return emptySet(con.cfg, + "PARAMETERS", + "PROCEDURE_CAT", + "PROCEDURE_SCHEM", + "PROCEDURE_NAME", + "COLUMN_NAME", + "COLUMN_TYPE", short.class, + "DATA_TYPE", int.class, + "TYPE_NAME", + "PRECISION", int.class, + "LENGTH", int.class, + "SCALE", short.class, + "RADIX", short.class, + "NULLABLE", short.class, + "REMARKS", + "COLUMN_DEF", + "SQL_DATA_TYPE", int.class, + "SQL_DATETIME_SUB", int.class, + "CHAR_OCTET_LENGTH", int.class, + "ORDINAL_POSITION", int.class, + "IS_NULLABLE", + "SPECIFIC_NAME"); + } + + // return the 
cluster name as the catalog (database) + // helps with the various UIs + private String defaultCatalog() throws SQLException { + return con.client.serverInfo().cluster; + } + + private boolean isDefaultCatalog(String catalog) throws SQLException { + // null means catalog info is irrelevant + // % means return all catalogs + // "" means return those without a catalog + return catalog == null || catalog.equals("") || catalog.equals("%") || catalog.equals(defaultCatalog()); + } + + private boolean isDefaultSchema(String schema) { + // null means schema info is irrelevant + // % means return all schemas` + // "" means return those without a schema + return schema == null || schema.equals("") || schema.equals("%"); + } + + @Override + public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { + List info = columnInfo("TABLES", + "TABLE_CAT", //0 + "TABLE_SCHEM", //1 + "TABLE_NAME", //2 + "TABLE_TYPE", //3 + "REMARKS", //4 + "TYPE_CAT", //5 + "TYPE_SCHEM", //6 + "TYPE_NAME", //7 + "SELF_REFERENCING_COL_NAME", //8 + "REF_GENERATION"); //9 + + // schema and catalogs are not being used, if these are specified return an empty result set + if (!isDefaultCatalog(catalog) || !isDefaultSchema(schemaPattern)) { + return emptySet(con.cfg, info); + } + + String cat = defaultCatalog(); + List tables = con.client.metaInfoTables(tableNamePattern); + Object[][] data = new Object[tables.size()][]; + for (int i = 0; i < data.length; i++) { + data[i] = new Object[10]; + Object[] row = data[i]; + + row[0] = cat; + row[1] = ""; + row[2] = tables.get(i); + row[3] = "TABLE"; + row[4] = ""; + row[5] = null; + row[6] = null; + row[7] = null; + row[8] = null; + row[9] = null; + } + return memorySet(con.cfg, info, data); + } + + @Override + public ResultSet getSchemas() throws SQLException { + Object[][] data = { { "", defaultCatalog() } }; + return memorySet(con.cfg, columnInfo("SCHEMATA", + "TABLE_SCHEM", + "TABLE_CATALOG"), 
data); + } + + @Override + public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { + List info = columnInfo("SCHEMATA", + "TABLE_SCHEM", + "TABLE_CATALOG"); + if (!isDefaultCatalog(catalog) || !isDefaultSchema(schemaPattern)) { + return emptySet(con.cfg, info); + } + Object[][] data = { { "", defaultCatalog() } }; + return memorySet(con.cfg, info, data); + } + + @Override + public ResultSet getCatalogs() throws SQLException { + Object[][] data = { { defaultCatalog() } }; + return memorySet(con.cfg, columnInfo("CATALOGS", + "TABLE_CAT"), data); + } + + @Override + public ResultSet getTableTypes() throws SQLException { + Object[][] data = { { "TABLE" } }; + return memorySet(con.cfg, columnInfo("TABLE_TYPES", + "TABLE_TYPE"), data); + } + + @Override + public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) + throws SQLException { + List info = columnInfo("COLUMNS", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + "COLUMN_NAME", + "DATA_TYPE", int.class, + "TYPE_NAME", + "COLUMN_SIZE", int.class, + "BUFFER_LENGTH", void.class, + "DECIMAL_DIGITS", int.class, + "NUM_PREC_RADIX", int.class, + "NULLABLE", int.class, + "REMARKS", + "COLUMN_DEF", + "SQL_DATA_TYPE", int.class, + "SQL_DATETIME_SUB", int.class, + "CHAR_OCTET_LENGTH", int.class, + "ORDINAL_POSITION", int.class, + "IS_NULLABLE", + "SCOPE_CATALOG", + "SCOPE_SCHEMA", + "SCOPE_TABLE", + "SOURCE_DATA_TYPE", short.class, + "IS_AUTOINCREMENT", + "IS_GENERATEDCOLUMN"); + + + // schema and catalogs are not being used, if these are specified return an empty result set + if (!isDefaultCatalog(catalog) || !isDefaultSchema(schemaPattern)) { + return emptySet(con.cfg, info); + } + + String cat = defaultCatalog(); + // escaping is done on the server + List columns = con.client.metaInfoColumns(tableNamePattern, columnNamePattern); + Object[][] data = new Object[columns.size()][]; + for (int i = 0; i < data.length; i++) { + data[i] = 
new Object[24]; + Object[] row = data[i]; + MetaColumnInfo col = columns.get(i); + + row[ 0] = cat; + row[ 1] = ""; + row[ 2] = col.table; + row[ 3] = col.name; + row[ 4] = col.type.getVendorTypeNumber(); + row[ 5] = col.type.getName(); + row[ 6] = col.size; + row[ 7] = null; + row[ 8] = null; + row[ 9] = numericPrecisionRadix(col.type.getVendorTypeNumber()); + row[10] = columnNullable; + row[11] = null; + row[12] = null; + row[13] = null; + row[14] = null; + row[15] = null; + row[16] = col.position; + row[17] = "YES"; + row[18] = null; + row[19] = null; + row[20] = null; + row[21] = null; + row[22] = ""; + row[23] = ""; + } + return memorySet(con.cfg, info, data); + } + + @Override + public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException { + throw new SQLFeatureNotSupportedException("Privileges not supported"); + } + + @Override + public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { + throw new SQLFeatureNotSupportedException("Privileges not supported"); + } + + @Override + public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException { + throw new SQLFeatureNotSupportedException("Row identifiers not supported"); + } + + @Override + public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { + throw new SQLFeatureNotSupportedException("Version column not supported yet"); + } + + @Override + public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { + throw new SQLFeatureNotSupportedException("Primary keys not supported"); + } + + @Override + public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { + throw new SQLFeatureNotSupportedException("Imported keys not supported"); + } + + @Override + public ResultSet getExportedKeys(String 
catalog, String schema, String table) throws SQLException { + throw new SQLFeatureNotSupportedException("Exported keys not supported"); + } + + @Override + public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, String foreignCatalog, + String foreignSchema, String foreignTable) throws SQLException { + throw new SQLFeatureNotSupportedException("Cross reference not supported"); + } + + @Override + public ResultSet getTypeInfo() throws SQLException { + return emptySet(con.cfg, + "TYPE_INFO", + "TYPE_NAME", + "DATA_TYPE", int.class, + "PRECISION", int.class, + "LITERAL_PREFIX", + "LITERAL_SUFFIX", + "CREATE_PARAMS", + "NULLABLE", short.class, + "CASE_SENSITIVE", boolean.class, + "SEARCHABLE", short.class, + "UNSIGNED_ATTRIBUTE", boolean.class, + "FIXED_PREC_SCALE", boolean.class, + "AUTO_INCREMENT", boolean.class, + "LOCAL_TYPE_NAME", + "MINIMUM_SCALE", short.class, + "MAXIMUM_SCALE", short.class, + "SQL_DATA_TYPE", int.class, + "SQL_DATETIME_SUB", int.class, + "NUM_PREC_RADIX", int.class + ); + } + + @Override + public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { + throw new SQLFeatureNotSupportedException("Indicies not supported"); + } + + @Override + public boolean supportsResultSetType(int type) throws SQLException { + return ResultSet.TYPE_FORWARD_ONLY == type; + } + + @Override + public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { + return ResultSet.TYPE_FORWARD_ONLY == type && ResultSet.CONCUR_READ_ONLY == concurrency; + } + + @Override + public boolean ownUpdatesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean ownDeletesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean ownInsertsAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersUpdatesAreVisible(int type) throws 
SQLException { + return false; + } + + @Override + public boolean othersDeletesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersInsertsAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean updatesAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean deletesAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean insertsAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean supportsBatchUpdates() throws SQLException { + return false; + } + + @Override + public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException { + return emptySet(con.cfg, + "USER_DEFINED_TYPES", + "TYPE_CAT", + "TYPE_SCHEM", + "TYPE_NAME", + "CLASS_NAME", + "DATA_TYPE", int.class, + "REMARKS", + "BASE_TYPE", short.class); + } + + @Override + public Connection getConnection() throws SQLException { + return con; + } + + @Override + public boolean supportsSavepoints() throws SQLException { + return false; + } + + @Override + public boolean supportsNamedParameters() throws SQLException { + return true; + } + + @Override + public boolean supportsMultipleOpenResults() throws SQLException { + return false; + } + + @Override + public boolean supportsGetGeneratedKeys() throws SQLException { + return false; + } + + @Override + public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { + return emptySet(con.cfg, + "SUPER_TYPES", + "TYPE_CAT", + "TYPE_SCHEM", + "TYPE_NAME", + "SUPERTYPE_CAT", + "SUPERTYPE_SCHEM", + "SUPERTYPE_NAME", + "BASE_TYPE"); + } + + @Override + public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { + return emptySet(con.cfg, "SUPER_TABLES", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + 
"SUPERTABLE_NAME"); + } + + @Override + public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) + throws SQLException { + return emptySet(con.cfg, + "ATTRIBUTES", + "TYPE_CAT", + "TYPE_SCHEM", + "TYPE_NAME", + "ATTR_NAME", + "DATA_TYPE", int.class, + "ATTR_TYPE_NAME", + "ATTR_SIZE", int.class, + "DECIMAL_DIGITS", int.class, + "NUM_PREC_RADIX", int.class, + "NULLABLE", int.class, + "REMARKS", + "ATTR_DEF", + "SQL_DATA_TYPE", int.class, + "SQL_DATETIME_SUB", int.class, + "CHAR_OCTET_LENGTH", int.class, + "ORDINAL_POSITION", int.class, + "IS_NULLABLE", + "SCOPE_CATALOG", + "SCOPE_SCHEMA", + "SCOPE_TABLE", + "SOURCE_DATA_TYPE", short.class); + } + + @Override + public boolean supportsResultSetHoldability(int holdability) throws SQLException { + return ResultSet.HOLD_CURSORS_OVER_COMMIT == holdability; + } + + @Override + public int getResultSetHoldability() throws SQLException { + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public int getDatabaseMajorVersion() throws SQLException { + return con.esInfoMajorVersion(); + } + + @Override + public int getDatabaseMinorVersion() throws SQLException { + return con.esInfoMinorVersion(); + } + + @Override + public int getJDBCMajorVersion() throws SQLException { + return Version.jdbcMajorVersion(); + } + + @Override + public int getJDBCMinorVersion() throws SQLException { + return Version.jdbcMinorVersion(); + } + + @Override + public int getSQLStateType() throws SQLException { + return DatabaseMetaData.sqlStateSQL; + } + + @Override + public boolean locatorsUpdateCopy() throws SQLException { + return true; + } + + @Override + public boolean supportsStatementPooling() throws SQLException { + return false; + } + + @Override + public RowIdLifetime getRowIdLifetime() throws SQLException { + return RowIdLifetime.ROWID_UNSUPPORTED; + } + + @Override + public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { + return false; + } + + 
@Override + public boolean autoCommitFailureClosesAllResultSets() throws SQLException { + return false; + } + + @Override + public ResultSet getClientInfoProperties() throws SQLException { + throw new SQLException("Client info not implemented yet"); + } + + @Override + public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException { + return emptySet(con.cfg, + "FUNCTIONS", + "FUNCTION_CAT", + "FUNCTION_SCHEM", + "FUNCTION_NAME", + "REMARKS", + "FUNCTION_TYPE", short.class, + "SPECIFIC_NAME"); + } + + @Override + public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) + throws SQLException { + return emptySet(con.cfg, + "FUNCTION_COLUMNS", + "FUNCTION_CAT", + "FUNCTION_SCHEM", + "FUNCTION_NAME", + "COLUMN_NAME", + "DATA_TYPE", int.class, + "TYPE_NAME", + "PRECISION", int.class, + "LENGTH", int.class, + "SCALE", short.class, + "RADIX", short.class, + "NULLABLE", short.class, + "REMARKS", + "CHAR_OCTET_LENGTH", int.class, + "ORDINAL_POSITION", int.class, + "IS_NULLABLE", + "SPECIFIC_NAME"); + } + + @Override + public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) + throws SQLException { + return emptySet(con.cfg, + "PSEUDO_COLUMNS", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + "COLUMN_NAME", + "DATA_TYPE", int.class, + "COLUMN_SIZE", int.class, + "DECIMAL_DIGITS", int.class, + "NUM_PREC_RADIX", int.class, + "REMARKS", + "COLUMN_USAGE", + "IS_NULLABLE"); + } + + @Override + public boolean generatedKeyAlwaysReturned() throws SQLException { + return false; + } + + private static List columnInfo(String tableName, Object... 
cols) throws JdbcSQLException { + List columns = new ArrayList<>(); + + for (int i = 0; i < cols.length; i++) { + Object obj = cols[i]; + if (obj instanceof String) { + String name = obj.toString(); + JDBCType type = JDBCType.VARCHAR; + if (i + 1 < cols.length) { + // check if the next item it's a type + if (cols[i + 1] instanceof Class) { + type = JDBCType.valueOf(JdbcUtils.fromClass((Class) cols[i + 1])); + i++; + } + // it's not, use the default and move on + } + columns.add(new ColumnInfo(name, type, tableName, "INFORMATION_SCHEMA", "", "", 0)); + } + else { + throw new JdbcSQLException("Invalid metadata schema definition"); + } + } + return columns; + } + + private static ResultSet emptySet(JdbcConfiguration cfg, String tableName, Object... cols) throws JdbcSQLException { + return new JdbcResultSet(cfg, null, new InMemoryCursor(columnInfo(tableName, cols), null)); + } + + private static ResultSet emptySet(JdbcConfiguration cfg, List columns) { + return memorySet(cfg, columns, null); + } + + private static ResultSet memorySet(JdbcConfiguration cfg, List columns, Object[][] data) { + return new JdbcResultSet(cfg, null, new InMemoryCursor(columns, data)); + } + + static class InMemoryCursor implements Cursor { + + private final List columns; + private final Object[][] data; + + private int row = -1; + + InMemoryCursor(List info, Object[][] data) { + this.columns = info; + this.data = data; + } + + @Override + public List columns() { + return columns; + } + + @Override + public boolean next() { + if (!ObjectUtils.isEmpty(data) && row < data.length - 1) { + row++; + return true; + } + return false; + } + + @Override + public Object column(int column) { + return data[row][column]; + } + + @Override + public int batchSize() { + return data.length; + } + + @Override + public void close() throws SQLException { + // this cursor doesn't hold any resource - no need to clean up + } + } +} diff --git 
a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcDriver.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcDriver.java new file mode 100644 index 00000000000..cc19c518dba --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcDriver.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; +import org.elasticsearch.xpack.sql.jdbc.debug.Debug; +import org.elasticsearch.xpack.sql.client.shared.Version; + +import java.io.PrintWriter; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.DriverPropertyInfo; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +public class JdbcDriver implements java.sql.Driver { + + private static final JdbcDriver INSTANCE = new JdbcDriver(); + + static { + // invoke Version to perform classpath/jar sanity checks + Version.version(); + + try { + register(); + } catch (SQLException ex) { + // the SQLException is bogus as there's no source for it + // but we handle it just in case + PrintWriter writer = DriverManager.getLogWriter(); + if (writer != null) { + ex.printStackTrace(writer); + writer.flush(); + } + throw new ExceptionInInitializerError(ex); + } + } + + public static JdbcDriver register() throws SQLException { + // no closing callback + DriverManager.registerDriver(INSTANCE, INSTANCE::close); + return INSTANCE; + } + + public static void deregister() throws SQLException { + try { + DriverManager.deregisterDriver(INSTANCE); + } catch (SQLException ex) { + // the SQLException is bogus as 
there's no source for it + // but we handle it just in case + PrintWriter writer = DriverManager.getLogWriter(); + if (writer != null) { + ex.printStackTrace(writer); + writer.flush(); + } + throw ex; + } + } + + // + // Jdbc 4.0 + // + public Connection connect(String url, Properties props) throws SQLException { + if (url == null) { + throw new JdbcSQLException("Non-null url required"); + } + if (!acceptsURL(url)) { + return null; + } + + JdbcConfiguration cfg = initCfg(url, props); + JdbcConnection con = new JdbcConnection(cfg); + return cfg.debug() ? Debug.proxy(cfg, con, DriverManager.getLogWriter()) : con; + } + + private static JdbcConfiguration initCfg(String url, Properties props) throws JdbcSQLException { + JdbcConfiguration ci = JdbcConfiguration.create(url, props); + + // if there's a timeout set on the DriverManager, make sure to use it + if (DriverManager.getLoginTimeout() > 0) { + ci.connectTimeout(TimeUnit.SECONDS.toMillis(DriverManager.getLoginTimeout())); + } + return ci; + } + + @Override + public boolean acceptsURL(String url) throws SQLException { + return JdbcConfiguration.canAccept(url); + } + + @Override + public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { + if (!acceptsURL(url)) { + return new DriverPropertyInfo[0]; + } + return JdbcConfiguration.create(url, info).driverPropertyInfo(); + } + + @Override + public int getMajorVersion() { + return Version.versionMajor(); + } + + @Override + public int getMinorVersion() { + return Version.versionMinor(); + } + + @Override + public boolean jdbcCompliant() { + return false; + } + + // + // Jdbc 4.1 + // + + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + throw new SQLFeatureNotSupportedException(); + } + + /** + * Cleanup method invoked by the DriverManager when unregistering the driver. 
+ * Since this happens typically when the JDBC driver gets unloaded (from the classloader) + * cleaning all debug information is a good safety check. + */ + private void close() { + Debug.close(); + } +} \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcParameterMetaData.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcParameterMetaData.java new file mode 100644 index 00000000000..405601e57b7 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcParameterMetaData.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.jdbc.PreparedQuery.ParamInfo; + +import java.sql.ParameterMetaData; +import java.sql.SQLException; + +class JdbcParameterMetaData implements ParameterMetaData, JdbcWrapper { + + private final JdbcPreparedStatement ps; + + JdbcParameterMetaData(JdbcPreparedStatement ps) { + this.ps = ps; + } + + @Override + public int getParameterCount() throws SQLException { + ps.checkOpen(); + return ps.query.paramCount(); + } + + @Override + public int isNullable(int param) throws SQLException { + ps.checkOpen(); + return parameterNullableUnknown; + } + + @Override + public boolean isSigned(int param) throws SQLException { + return JdbcUtils.isSigned(paramInfo(param).type.getVendorTypeNumber().intValue()); + } + + @Override + public int getPrecision(int param) throws SQLException { + ps.checkOpen(); + return 0; + } + + @Override + public int getScale(int param) throws SQLException { + ps.checkOpen(); + return 0; + } + + @Override + public int getParameterType(int param) throws SQLException { + return paramInfo(param).type.getVendorTypeNumber().intValue(); + } 
+ + @Override + public String getParameterTypeName(int param) throws SQLException { + return paramInfo(param).type.name(); + } + + @Override + public String getParameterClassName(int param) throws SQLException { + return JdbcUtils.classOf(paramInfo(param).type.getVendorTypeNumber()).getName(); + } + + @Override + public int getParameterMode(int param) throws SQLException { + ps.checkOpen(); + return parameterModeUnknown; + } + + private ParamInfo paramInfo(int param) throws SQLException { + ps.checkOpen(); + return ps.query.getParam(param); + } +} \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatement.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatement.java new file mode 100644 index 00000000000..49a4b4fb43f --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatement.java @@ -0,0 +1,380 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.JDBCType; +import java.sql.NClob; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLXML; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.Calendar; + +class JdbcPreparedStatement extends JdbcStatement implements PreparedStatement { + final PreparedQuery query; + + JdbcPreparedStatement(JdbcConnection con, JdbcConfiguration info, String sql) throws SQLException { + super(con, info); + this.query = PreparedQuery.prepare(sql); + } + + @Override + public boolean execute() throws SQLException { + checkOpen(); + executeQuery(); + return true; + } + + @Override + public ResultSet executeQuery() throws SQLException { + checkOpen(); + initResultSet(query.assemble()); + return rs; + } + + @Override + public int executeUpdate() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + private void setParam(int parameterIndex, Object value, int type) throws SQLException { + checkOpen(); + + if (parameterIndex < 0 || parameterIndex > query.paramCount()) { + throw new SQLException("Invalid parameter [ " + parameterIndex + "; needs to be between 1 and [" + query.paramCount() + "]"); + } + + query.setParam(parameterIndex, value, JDBCType.valueOf(type)); + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + setParam(parameterIndex, "NULL", sqlType); + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + 
setParam(parameterIndex, Boolean.toString(x), Types.BOOLEAN); + } + + @Override + public void setByte(int parameterIndex, byte x) throws SQLException { + setParam(parameterIndex, Byte.toString(x), Types.TINYINT); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + setParam(parameterIndex, Short.toString(x), Types.SMALLINT); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + setParam(parameterIndex, Integer.toString(x), Types.INTEGER); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + setParam(parameterIndex, Long.toString(x), Types.BIGINT); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + setParam(parameterIndex, Float.toString(x), Types.REAL); + } + + @Override + public void setDouble(int parameterIndex, double x) throws SQLException { + setParam(parameterIndex, Double.toString(x), Types.DOUBLE); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { + throw new SQLFeatureNotSupportedException("BigDecimal not supported"); + } + + @Override + public void setString(int parameterIndex, String x) throws SQLException { + setParam(parameterIndex, PreparedQuery.escapeString(x), Types.VARCHAR); + } + + @Override + public void setBytes(int parameterIndex, byte[] x) throws SQLException { + throw new UnsupportedOperationException("Bytes not implemented yet"); + } + + @Override + public void setDate(int parameterIndex, Date x) throws SQLException { + throw new UnsupportedOperationException("Date/Time not implemented yet"); + } + + @Override + public void setTime(int parameterIndex, Time x) throws SQLException { + throw new UnsupportedOperationException("Date/Time not implemented yet"); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { + throw new UnsupportedOperationException("Date/Time not implemented yet"); + } + 
+ @Override + public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("AsciiStream not supported"); + } + + @Override + @Deprecated + public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("UnicodeStream not supported"); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("BinaryStream not supported"); + } + + @Override + public void clearParameters() throws SQLException { + checkOpen(); + query.clearParams(); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { + throw new UnsupportedOperationException("Object not implemented yet"); + } + + @Override + public void setObject(int parameterIndex, Object x) throws SQLException { + throw new SQLFeatureNotSupportedException("CharacterStream not supported"); + } + + @Override + public void addBatch() throws SQLException { + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("CharacterStream not supported"); + } + + @Override + public void setRef(int parameterIndex, Ref x) throws SQLException { + throw new SQLFeatureNotSupportedException("Ref not supported"); + } + + @Override + public void setBlob(int parameterIndex, Blob x) throws SQLException { + throw new SQLFeatureNotSupportedException("Blob not supported"); + } + + @Override + public void setClob(int parameterIndex, Clob x) throws SQLException { + throw new SQLFeatureNotSupportedException("Clob not supported"); + } + + @Override + public void setArray(int parameterIndex, Array x) throws SQLException { + throw new SQLFeatureNotSupportedException("Array 
not supported"); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return rs != null ? rs.getMetaData() : null; + } + + @Override + public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { + throw new UnsupportedOperationException("Dates not implemented yet"); + } + + @Override + public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { + throw new UnsupportedOperationException("Dates not implemented yet"); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { + throw new UnsupportedOperationException("Dates not implemented yet"); + } + + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { + setNull(parameterIndex, sqlType); + } + + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { + throw new SQLFeatureNotSupportedException("Datalink not supported"); + } + + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + return new JdbcParameterMetaData(this); + } + + @Override + public void setRowId(int parameterIndex, RowId x) throws SQLException { + throw new SQLFeatureNotSupportedException("RowId not supported"); + } + + @Override + public void setNString(int parameterIndex, String value) throws SQLException { + throw new SQLFeatureNotSupportedException("NString not supported"); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("NCharacterStream not supported"); + } + + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { + throw new SQLFeatureNotSupportedException("NClob not supported"); + } + + @Override + public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Clob not supported"); + } + + 
@Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Blob not supported"); + } + + @Override + public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("NClob not supported"); + } + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { + throw new SQLFeatureNotSupportedException("SQLXML not supported"); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { + throw new UnsupportedOperationException("Object not implemented yet"); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("AsciiStream not supported"); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("BinaryStream not supported"); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("CharacterStream not supported"); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException("AsciiStream not supported"); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException("BinaryStream not supported"); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("CharacterStream not supported"); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { + throw new 
SQLFeatureNotSupportedException("NCharacterStream not supported"); + } + + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Clob not supported"); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { + throw new SQLFeatureNotSupportedException("Blob not supported"); + } + + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("NClob not supported"); + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public int executeUpdate(String sql) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public boolean execute(String sql) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + throw new SQLException("Forbidden method on 
PreparedStatement"); + } + + @Override + public long executeLargeUpdate() throws SQLException { + throw new SQLFeatureNotSupportedException("Batching not supported"); + } +} \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java new file mode 100644 index 00000000000..e56c74193d7 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java @@ -0,0 +1,1140 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.net.client.Cursor; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; +import org.elasticsearch.xpack.sql.protocol.shared.Nullable; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.JDBCType; +import java.sql.NClob; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static java.lang.String.format; + +class JdbcResultSet implements ResultSet, JdbcWrapper { + + // temporary calendar instance (per connection) used for normalizing the date and time + // even though the cfg is already in UTC format, JDBC 3.0 
requires java.sql.Time to have its date + // removed (set to Jan 01 1970) and java.sql.Date to have its HH:mm:ss component removed + // instead of dealing with longs, a Calendar object is used instead + private final Calendar defaultCalendar; + + private final JdbcStatement statement; + private final Cursor cursor; + private final Map nameToIndex = new LinkedHashMap<>(); + + private boolean closed = false; + private boolean wasNull = false; + + private int rowNumber; + + JdbcResultSet(JdbcConfiguration cfg, @Nullable JdbcStatement statement, Cursor cursor) { + this.statement = statement; + this.cursor = cursor; + // statement can be null so we have to extract the timeZone from the non-nullable cfg + // TODO: should we consider the locale as well? + this.defaultCalendar = Calendar.getInstance(cfg.timeZone(), Locale.ROOT); + + List columns = cursor.columns(); + for (int i = 0; i < columns.size(); i++) { + nameToIndex.put(columns.get(i).name, Integer.valueOf(i)); + } + } + + private Object column(int columnIndex) throws SQLException { + checkOpen(); + if (columnIndex < 1 || columnIndex > cursor.columnSize()) { + throw new SQLException("Invalid column index [" + columnIndex + "]"); + } + Object object = null; + try { + object = cursor.column(columnIndex - 1); + } catch (IllegalArgumentException iae) { + throw new SQLException(iae.getMessage()); + } + wasNull = (object == null); + return object; + } + + private int column(String columnName) throws SQLException { + checkOpen(); + Integer index = nameToIndex.get(columnName); + if (index == null) { + throw new SQLException("Invalid column label [" + columnName + "]"); + } + return index.intValue(); + } + + void checkOpen() throws SQLException { + if (isClosed()) { + throw new SQLException("Closed result set"); + } + } + + @Override + public boolean next() throws SQLException { + checkOpen(); + if (cursor.next()) { + rowNumber++; + return true; + } + return false; + } + + @Override + public void close() throws SQLException 
{ + if (!closed) { + closed = true; + if (statement != null) { + statement.resultSetWasClosed(); + } + cursor.close(); + } + } + + @Override + public boolean wasNull() throws SQLException { + checkOpen(); + return wasNull; + } + + @Override + public String getString(int columnIndex) throws SQLException { + Object val = column(columnIndex); + return val != null ? val.toString() : null; + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + Object val = column(columnIndex); + return val != null ? (Boolean) val : false; + } + + @Override + public byte getByte(int columnIndex) throws SQLException { + Object val = column(columnIndex); + return val != null ? ((Number) val).byteValue() : 0; + } + + @Override + public short getShort(int columnIndex) throws SQLException { + Object val = column(columnIndex); + return val != null ? ((Number) val).shortValue() : 0; + } + + @Override + public int getInt(int columnIndex) throws SQLException { + Object val = column(columnIndex); + return val != null ? ((Number) val).intValue() : 0; + } + + @Override + public long getLong(int columnIndex) throws SQLException { + Object val = column(columnIndex); + return val != null ? ((Number) val).longValue() : 0; + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + Object val = column(columnIndex); + return val != null ? ((Number) val).floatValue() : 0; + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + Object val = column(columnIndex); + return val != null ? 
((Number) val).doubleValue() : 0; + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + return (byte[]) column(columnIndex); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + return getDate(columnIndex, null); + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + return getTime(columnIndex, null); + } + + @Override + public Timestamp getTimestamp(int columnIndex) throws SQLException { + return getTimestamp(columnIndex, null); + } + + @Override + public String getString(String columnLabel) throws SQLException { + return getString(column(columnLabel)); + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + return getBoolean(column(columnLabel)); + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + return getByte(column(columnLabel)); + } + + @Override + public short getShort(String columnLabel) throws SQLException { + return getShort(column(columnLabel)); + } + + @Override + public int getInt(String columnLabel) throws SQLException { + return getInt(column(columnLabel)); + } + + @Override + public long getLong(String columnLabel) throws SQLException { + return getLong(column(columnLabel)); + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + return getFloat(column(columnLabel)); + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + return getDouble(column(columnLabel)); + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + return getBytes(column(columnLabel)); + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + return getDate(column(columnLabel)); + } + + private Long dateTime(int columnIndex) throws SQLException { + Object val = column(columnIndex); + return val == null ? null : (Long) val; + } + + private Calendar safeCalendar(Calendar calendar) { + return calendar == null ? 
defaultCalendar : calendar; + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + return TypeConverter.convertDate(dateTime(columnIndex), safeCalendar(cal)); + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + return getDate(column(columnLabel), cal); + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + return TypeConverter.convertTime(dateTime(columnIndex), safeCalendar(cal)); + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + return getTime(column(columnLabel)); + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + return TypeConverter.convertTimestamp(dateTime(columnIndex), safeCalendar(cal)); + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + return getTimestamp(column(columnLabel)); + } + + @Override + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + return getTime(column(columnLabel), cal); + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + return getTimestamp(column(columnLabel), cal); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return new JdbcResultSetMetaData(this, cursor.columns()); + } + + @Override + public Object getObject(int columnIndex) throws SQLException { + return convert(columnIndex, null); + } + + @Override + public T getObject(int columnIndex, Class type) throws SQLException { + if (type == null) { + throw new SQLException("type is null"); + } + + return getObject(columnIndex, type); + } + + private T convert(int columnIndex, Class type) throws SQLException { + checkOpen(); + if (columnIndex < 1 || columnIndex > cursor.columnSize()) { + throw new SQLException("Invalid column index [" + columnIndex + "]"); + } + + Object val = column(columnIndex); + + if (val == null) { 
+ return null; + } + + if (type != null && type.isInstance(val)) { + return type.cast(val); + } + + JDBCType columnType = cursor.columns().get(columnIndex - 1).type; + + T t = TypeConverter.convert(val, columnType, type); + + if (t != null) { + return t; + } + throw new SQLException("Conversion from type [" + columnType + "] to [" + type.getName() + "] not supported"); + } + + @Override + public Object getObject(int columnIndex, Map> map) throws SQLException { + if (map == null || map.isEmpty()) { + return getObject(columnIndex); + } + throw new SQLFeatureNotSupportedException("getObject with non-empty Map not supported"); + } + + @Override + public Object getObject(String columnLabel) throws SQLException { + return getObject(column(columnLabel)); + } + + @Override + public T getObject(String columnLabel, Class type) throws SQLException { + return getObject(column(columnLabel), type); + } + + @Override + public Object getObject(String columnLabel, Map> map) throws SQLException { + return getObject(column(columnLabel), map); + } + + @Override + public int findColumn(String columnLabel) throws SQLException { + return column(columnLabel); + } + + @Override + public boolean isBeforeFirst() throws SQLException { + return rowNumber == 0; + } + + @Override + public boolean isAfterLast() throws SQLException { + throw new SQLFeatureNotSupportedException("isAfterLast not supported"); + } + + @Override + public boolean isFirst() throws SQLException { + return rowNumber == 1; + } + + @Override + public boolean isLast() throws SQLException { + throw new SQLFeatureNotSupportedException("isLast not supported"); + } + + @Override + public int getRow() throws SQLException { + return rowNumber; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + checkOpen(); + if (rows < 0) { + throw new SQLException("Rows is negative"); + } + if (rows != getFetchSize()) { + throw new SQLException("Fetch size cannot be changed"); + } + // ignore fetch size since scrolls 
cannot be changed in flight + } + + @Override + public int getFetchSize() throws SQLException { + /* + * Instead of returning the fetch size the user requested we make a + * stab at returning the fetch size that we actually used, returning + * the batch size of the current row. This allows us to assert these + * batch sizes in testing and lets us point users to something that + * they can use for debugging. + */ + checkOpen(); + return cursor.batchSize(); + } + + @Override + public Statement getStatement() throws SQLException { + checkOpen(); + return statement; + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + @Deprecated + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + throw new SQLFeatureNotSupportedException("BigDecimal not supported"); + } + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("AsciiStream not supported"); + } + + @Override + @Deprecated + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("UnicodeStream not supported"); + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("BinaryStream not supported"); + } + + @Override + @Deprecated + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + throw new SQLFeatureNotSupportedException("BigDecimal not supported"); + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("AsciiStream not supported"); + } + + @Override + @Deprecated + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("UnicodeStream not supported"); + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + throw 
new SQLFeatureNotSupportedException("BinaryStream not supported"); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + checkOpen(); + return null; + } + + @Override + public void clearWarnings() throws SQLException { + checkOpen(); + } + + @Override + public String getCursorName() throws SQLException { + throw new SQLFeatureNotSupportedException("Cursor name not supported"); + } + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("CharacterStream not supported"); + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("CharacterStream not supported"); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("BigDecimal not supported"); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("BigDecimal not supported"); + } + + @Override + public void beforeFirst() throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public void afterLast() throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public boolean first() throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public boolean last() throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public boolean absolute(int row) throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public boolean relative(int rows) throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public boolean previous() throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public int getType() throws 
SQLException { + checkOpen(); + return TYPE_FORWARD_ONLY; + } + + @Override + public int getConcurrency() throws SQLException { + checkOpen(); + return CONCUR_READ_ONLY; + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + checkOpen(); + if (direction != FETCH_FORWARD) { + throw new SQLException("Fetch direction must be FETCH_FORWARD"); + } + } + + @Override + public int getFetchDirection() throws SQLException { + checkOpen(); + return FETCH_FORWARD; + } + + @Override + public boolean rowUpdated() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public boolean rowInserted() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public boolean rowDeleted() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + throw new 
SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateObject(int 
columnIndex, Object x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateDate(String columnLabel, Date x) throws 
SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void insertRow() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateRow() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void deleteRow() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void cancelRowUpdates() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void moveToInsertRow() throws SQLException { + throw new 
SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void refreshRow() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void moveToCurrentRow() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public Ref getRef(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Ref not supported"); + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Blob not supported"); + } + + @Override + public Clob getClob(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Clob not supported"); + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Array not supported"); + } + + @Override + public Ref getRef(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Ref not supported"); + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Blob not supported"); + } + + @Override + public Clob getClob(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Clob not supported"); + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Array not supported"); + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("URL not supported"); + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("URL not supported"); + } + + @Override + public void updateRef(int columnIndex, Ref x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void 
updateRef(String columnLabel, Ref x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("RowId not supported"); + } + + @Override + public RowId getRowId(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("RowId not supported"); + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateRowId(String columnLabel, RowId x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public int getHoldability() throws SQLException { + checkOpen(); + return HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not 
supported"); + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("NClob not supported"); + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("NClob not supported"); + } + + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("SQLXML not supported"); + } + + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("SQLXML not supported"); + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public String getNString(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("NString not supported"); + } + + @Override + public String getNString(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("NString not supported"); + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("NCharacterStream not supported"); + } + + @Override + public Reader 
getNCharacterStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("NCharacterStream not supported"); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { + throw new 
SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not 
supported"); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public String toString() { + return format(Locale.ROOT, "%s:row %d", getClass().getSimpleName(), rowNumber); + } +} \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSetMetaData.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSetMetaData.java new file mode 100644 index 00000000000..6cdb4c8c723 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSetMetaData.java @@ -0,0 +1,161 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; + +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.util.List; +import java.util.Locale; + +import static java.lang.String.format; + +class JdbcResultSetMetaData implements ResultSetMetaData, JdbcWrapper { + + private final JdbcResultSet rs; + private final List columns; + + JdbcResultSetMetaData(JdbcResultSet rs, List columns) { + this.rs = rs; + this.columns = columns; + } + + @Override + public int getColumnCount() throws SQLException { + checkOpen(); + return columns.size(); + } + + @Override + public boolean isAutoIncrement(int column) throws SQLException { + column(column); + return false; + } + + @Override + public boolean isCaseSensitive(int column) throws SQLException { + column(column); + return true; + } + + @Override + public boolean isSearchable(int column) throws SQLException { + column(column); + return true; + } + + @Override + public boolean isCurrency(int column) throws SQLException { + column(column); + return false; + } + + @Override + public int isNullable(int column) throws SQLException { + column(column); + return columnNullableUnknown; + } + + @Override + public boolean isSigned(int column) throws SQLException { + return JdbcUtils.isSigned(getColumnType(column)); + } + + @Override + public int getColumnDisplaySize(int column) throws SQLException { + return column(column).displaySize(); + } + + @Override + public String getColumnLabel(int column) throws SQLException { + return column(column).label; + } + + @Override + public String getColumnName(int column) throws SQLException { + return column(column).name; + } + + @Override + public String getSchemaName(int column) throws SQLException { + return column(column).schema; + } + + @Override + public int getPrecision(int column) throws SQLException { + column(column); + return 0; + } + + @Override + public int getScale(int column) throws 
SQLException { + column(column); + return 0; + } + + @Override + public String getTableName(int column) throws SQLException { + return column(column).table; + } + + @Override + public String getCatalogName(int column) throws SQLException { + return column(column).catalog; + } + + @Override + public int getColumnType(int column) throws SQLException { + return column(column).type.getVendorTypeNumber(); + } + + @Override + public String getColumnTypeName(int column) throws SQLException { + return column(column).type.name(); + } + + @Override + public boolean isReadOnly(int column) throws SQLException { + column(column); + return true; + } + + @Override + public boolean isWritable(int column) throws SQLException { + column(column); + return false; + } + + @Override + public boolean isDefinitelyWritable(int column) throws SQLException { + column(column); + return false; + } + + @Override + public String getColumnClassName(int column) throws SQLException { + return JdbcUtils.classOf(column(column).type.getVendorTypeNumber()).getName(); + } + + private void checkOpen() throws SQLException { + if (rs != null) { + rs.checkOpen(); + } + } + + private ColumnInfo column(int column) throws SQLException { + checkOpen(); + if (column < 1 || column > columns.size()) { + throw new SQLException("Invalid column index [" + column + "]"); + } + return columns.get(column - 1); + } + + @Override + public String toString() { + return format(Locale.ROOT, "%s(%s)", getClass().getSimpleName(), columns); + } +} \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java new file mode 100644 index 00000000000..ae9a6621bee --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java @@ -0,0 +1,402 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.net.client.Cursor; +import org.elasticsearch.xpack.sql.jdbc.net.client.RequestMeta; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLWarning; +import java.sql.Statement; +import java.util.concurrent.TimeUnit; + +class JdbcStatement implements Statement, JdbcWrapper { + + final JdbcConnection con; + final JdbcConfiguration cfg; + + private boolean closed = false; + private boolean closeOnCompletion = false; + private boolean ignoreResultSetClose = false; + + protected JdbcResultSet rs; + final RequestMeta requestMeta = new RequestMeta(); + + JdbcStatement(JdbcConnection jdbcConnection, JdbcConfiguration info) { + this.con = jdbcConnection; + this.cfg = info; + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + if (!execute(sql)) { + throw new SQLException("Invalid sql query [" + sql + "]"); + } + return rs; + } + + @Override + public int executeUpdate(String sql) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Update not supported"); + } + + @Override + public void close() throws SQLException { + if (!closed) { + closed = true; + closeResultSet(); + } + } + + @Override + public int getMaxFieldSize() throws SQLException { + checkOpen(); + return 0; + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + checkOpen(); + if (max < 0) { + throw new SQLException("Field size must be positive"); + } + } + + @Override + public int getMaxRows() throws SQLException { + long result = getLargeMaxRows(); + if (result > Integer.MAX_VALUE) { + throw new SQLException("Max rows exceeds limit of " + Integer.MAX_VALUE); + } + 
return Math.toIntExact(result); + } + + + @Override + public long getLargeMaxRows() throws SQLException { + checkOpen(); + return 0; + } + + @Override + public void setMaxRows(int max) throws SQLException { + setLargeMaxRows(max); + } + + @Override + public void setLargeMaxRows(long max) throws SQLException { + checkOpen(); + if (max < 0) { + throw new SQLException("Field size must be positive"); + } + // ignore + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + checkOpen(); + // no-op - always escape + } + + @Override + public int getQueryTimeout() throws SQLException { + checkOpen(); + return (int) TimeUnit.MILLISECONDS.toSeconds(requestMeta.timeoutInMs()); + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + checkOpen(); + if (seconds < 0) { + throw new SQLException("Query timeout must be positive"); + } + requestMeta.timeout(TimeUnit.SECONDS.toMillis(seconds)); + } + + @Override + public void cancel() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Cancel not supported"); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + checkOpen(); + return null; + } + + @Override + public void clearWarnings() throws SQLException { + checkOpen(); + } + + @Override + public void setCursorName(String name) throws SQLException { + checkOpen(); + // no-op (doc is confusing - says no-op but also to throw an exception) + } + + @Override + public boolean execute(String sql) throws SQLException { + checkOpen(); + initResultSet(sql); + return true; + } + + // execute the query and handle the rs closing and initialization + protected void initResultSet(String sql) throws SQLException { + // close previous result set + closeResultSet(); + + Cursor cursor = con.client.query(sql, requestMeta); + rs = new JdbcResultSet(cfg, this, cursor); + } + + @Override + public ResultSet getResultSet() throws SQLException { + checkOpen(); + return rs; + } + + @Override + 
public int getUpdateCount() throws SQLException { + long count = getLargeUpdateCount(); + return count > Integer.MAX_VALUE ? Integer.MAX_VALUE : count < Integer.MIN_VALUE ? Integer.MIN_VALUE : (int) count; + } + + @Override + public long getLargeUpdateCount() throws SQLException { + checkOpen(); + return -1; + } + + @Override + public boolean getMoreResults() throws SQLException { + checkOpen(); + closeResultSet(); + return false; + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + checkOpen(); + if (ResultSet.FETCH_REVERSE != direction + || ResultSet.FETCH_FORWARD != direction + || ResultSet.FETCH_UNKNOWN != direction) { + throw new SQLException("Invalid direction specified"); + } + } + + @Override + public int getFetchDirection() throws SQLException { + checkOpen(); + return ResultSet.FETCH_FORWARD; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + checkOpen(); + if (rows < 0) { + throw new SQLException("Fetch size must be positive"); + } + requestMeta.fetchSize(rows); + } + + @Override + public int getFetchSize() throws SQLException { + checkOpen(); + int fetchSize = requestMeta.fetchSize(); + // the spec is somewhat unclear. It looks like there are 3 states: + // unset (in this case -1 which the user cannot set) - in this case, the default fetch size is returned + // 0 meaning the hint is disabled (the user has called setFetch) + // >0 means actual hint + + // tl;dr - if invalid, it means it was not set so return default - otherwise return the set value + return fetchSize < 0 ? 
cfg.pageSize() : fetchSize; + } + + @Override + public int getResultSetConcurrency() throws SQLException { + checkOpen(); + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public int getResultSetType() throws SQLException { + checkOpen(); + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public void addBatch(String sql) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public void clearBatch() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public int[] executeBatch() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public Connection getConnection() throws SQLException { + checkOpen(); + return con; + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + checkOpen(); + if (CLOSE_CURRENT_RESULT == current) { + closeResultSet(); + return false; + } + if (KEEP_CURRENT_RESULT == current || CLOSE_ALL_RESULTS == current) { + throw new SQLException("Invalid current parameter"); + } + + throw new SQLFeatureNotSupportedException("Multiple ResultSets not supported"); + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Generated keys not supported"); + } + + @Override + public long[] executeLargeBatch() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public long executeLargeUpdate(String sql) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public long 
executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public long executeLargeUpdate(String sql, String[] columnNames) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + return execute(sql); + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + return execute(sql); + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + return execute(sql); + } + + @Override + public int getResultSetHoldability() throws SQLException { + checkOpen(); + return ResultSet.CLOSE_CURSORS_AT_COMMIT; + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + checkOpen(); + // no-op + } + + @Override + public boolean isPoolable() throws SQLException { + checkOpen(); + return false; + } + + @Override + public void closeOnCompletion() throws SQLException { + checkOpen(); + closeOnCompletion = true; + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + checkOpen(); + return closeOnCompletion; + } + + protected final void checkOpen() throws SQLException { + 
if (isClosed()) { + throw new SQLException("Statement is closed"); + } + } + + protected final void closeResultSet() throws SQLException { + if (rs != null) { + ignoreResultSetClose = true; + try { + rs.close(); + } finally { + rs = null; + ignoreResultSetClose = false; + } + } + } + + final void resultSetWasClosed() throws SQLException { + if (closeOnCompletion && !ignoreResultSetClose) { + close(); + } + } +} \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcUtils.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcUtils.java new file mode 100644 index 00000000000..b27e0451d11 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcUtils.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; + +import java.math.BigDecimal; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.JDBCType; +import java.sql.Time; +import java.sql.Timestamp; + +import static java.sql.Types.BIGINT; +import static java.sql.Types.BINARY; +import static java.sql.Types.BIT; +import static java.sql.Types.BLOB; +import static java.sql.Types.BOOLEAN; +import static java.sql.Types.CHAR; +import static java.sql.Types.CLOB; +import static java.sql.Types.DATE; +import static java.sql.Types.DECIMAL; +import static java.sql.Types.DOUBLE; +import static java.sql.Types.FLOAT; +import static java.sql.Types.INTEGER; +import static java.sql.Types.LONGVARBINARY; +import static java.sql.Types.LONGVARCHAR; +import static java.sql.Types.NULL; +import static java.sql.Types.NUMERIC; +import static java.sql.Types.REAL; +import static java.sql.Types.SMALLINT; +import static java.sql.Types.TIME; +import static java.sql.Types.TIMESTAMP; +import static java.sql.Types.TIMESTAMP_WITH_TIMEZONE; +import static java.sql.Types.TINYINT; +import static java.sql.Types.VARBINARY; +import static java.sql.Types.VARCHAR; + +public abstract class JdbcUtils { + + public static Class asPrimitive(Class wrapperClass) { + if (Boolean.class == wrapperClass) { + return boolean.class; + } + if (Byte.class == wrapperClass) { + return byte.class; + } + if (Short.class == wrapperClass) { + return short.class; + } + if (Character.class == wrapperClass) { + return char.class; + } + if (Integer.class == wrapperClass) { + return int.class; + } + if (Long.class == wrapperClass) { + return long.class; + } + if (Double.class == wrapperClass) { + return double.class; + } + if (Float.class == wrapperClass) { + return float.class; + } + if (Void.class == wrapperClass) { + return void.class; + } + + return wrapperClass; + } + + public static int fromClass(Class clazz) throws JdbcSQLException { + 
if (clazz == null) { + return NULL; + } + if (clazz == String.class) { + return VARCHAR; + } + if (clazz == Boolean.class || clazz == boolean.class) { + return BOOLEAN; + } + if (clazz == Byte.class || clazz == byte.class) { + return TINYINT; + } + if (clazz == Short.class || clazz == short.class) { + return SMALLINT; + } + if (clazz == Integer.class || clazz == int.class) { + return INTEGER; + } + if (clazz == Long.class || clazz == long.class) { + return BIGINT; + } + if (clazz == Float.class || clazz == float.class) { + return REAL; + } + if (clazz == Double.class || clazz == double.class) { + return DOUBLE; + } + if (clazz == Void.class || clazz == void.class) { + return NULL; + } + if (clazz == byte[].class) { + return VARBINARY; + } + if (clazz == Date.class) { + return DATE; + } + if (clazz == Time.class) { + return TIME; + } + if (clazz == Timestamp.class) { + return TIMESTAMP; + } + if (clazz == Blob.class) { + return BLOB; + } + if (clazz == Clob.class) { + return CLOB; + } + if (clazz == BigDecimal.class) { + return DECIMAL; + } + + throw new JdbcSQLException("Unrecognized class [" + clazz + "]"); + } + + // see javax.sql.rowset.RowSetMetaDataImpl + // and https://db.apache.org/derby/docs/10.5/ref/rrefjdbc20377.html + public static Class classOf(int jdbcType) throws JdbcSQLException { + + switch (jdbcType) { + case NUMERIC: + case DECIMAL: + return BigDecimal.class; + case BOOLEAN: + case BIT: + return Boolean.class; + case TINYINT: + return Byte.class; + case SMALLINT: + return Short.class; + case INTEGER: + return Integer.class; + case BIGINT: + return Long.class; + case REAL: + return Float.class; + case FLOAT: + case DOUBLE: + return Double.class; + case BINARY: + case VARBINARY: + case LONGVARBINARY: + return byte[].class; + case CHAR: + case VARCHAR: + case LONGVARCHAR: + return String.class; + case DATE: + return Date.class; + case TIME: + return Time.class; + case TIMESTAMP: + return Timestamp.class; + case BLOB: + return Blob.class; + case CLOB: 
+ return Clob.class; + case TIMESTAMP_WITH_TIMEZONE: + return Long.class; + default: + throw new JdbcSQLException("Unsupported JDBC type " + jdbcType + ", " + type(jdbcType).getName() + ""); + } + } + + static boolean isSigned(int type) { + switch (type) { + case BIGINT: + case DECIMAL: + case DOUBLE: + case FLOAT: + case INTEGER: + case SMALLINT: + case REAL: + case NUMERIC: + return true; + default: + return false; + } + } + + static JDBCType type(int jdbcType) { + return JDBCType.valueOf(jdbcType); + } + + static Integer numericPrecisionRadix(int type) { + switch (type) { + case TINYINT: + case SMALLINT: + case INTEGER: + case BIGINT: + return 10; + case REAL: + case DOUBLE: + case FLOAT: + case DECIMAL: + case NUMERIC: + return 2; + default: + return null; + } + } +} \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcWrapper.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcWrapper.java new file mode 100644 index 00000000000..646a9593c78 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcWrapper.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import java.sql.SQLException; +import java.sql.Wrapper; + +interface JdbcWrapper extends Wrapper { + + @Override + default boolean isWrapperFor(Class iface) throws SQLException { + return iface != null && iface.isAssignableFrom(getClass()); + } + + @SuppressWarnings("unchecked") + @Override + default T unwrap(Class iface) throws SQLException { + if (isWrapperFor(iface)) { + return (T) this; + } + throw new SQLException(); + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java new file mode 100644 index 00000000000..e38dba645dc --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java @@ -0,0 +1,235 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; + +import java.sql.JDBCType; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.util.ArrayList; +import java.util.List; + +class PreparedQuery { + + static class ParamInfo { + JDBCType type; + Object value; + + ParamInfo(Object value, JDBCType type) { + this.value = value; + this.type = type; + } + } + + private final List fragments; + final ParamInfo[] params; + + PreparedQuery(List fragments) { + this.fragments = fragments; + this.params = new ParamInfo[fragments.size() - 1]; + clearParams(); + } + + ParamInfo getParam(int param) throws JdbcSQLException { + if (param < 1 || param > params.length) { + throw new JdbcSQLException("Invalid parameter index [" + param + "]"); + } + return params[param - 1]; + } + + void setParam(int param, Object value, JDBCType type) throws JdbcSQLException { + if (param < 1 || param > params.length) { + throw new JdbcSQLException("Invalid parameter index [" + param + "]"); + } + params[param - 1].value = value; + params[param - 1].type = type; + } + + int paramCount() { + return params.length; + } + + void clearParams() { + for (int i = 0; i < params.length; i++) { + params[i] = new ParamInfo(null, JDBCType.VARCHAR); + } + } + + String assemble() { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < fragments.size(); i++) { + sb.append(fragments.get(i)); + if (i < params.length) { + sb.append(params[i]); + } + } + + return sb.toString(); + } + + @Override + public String toString() { + return assemble(); + } + + // Find the ? 
parameters for binding + // Additionally, throw away all JDBC escaping + static PreparedQuery prepare(String sql) throws SQLException { + int l = sql.length(); + + List fragments = new ArrayList<>(); + StringBuilder current = new StringBuilder(); + + for (int i = 0; i < l; i++) { + char c = sql.charAt(i); + + switch (c) { + // JDBC escape syntax + // https://db.apache.org/derby/docs/10.5/ref/rrefjdbc1020262.html + case '{': + jdbcEscape(); + break; + case '\'': + i = string(i, sql, current, c); + break; + case '"': + i = string(i, sql, current, c); + break; + case '?': + fragments.add(current.toString()); + current.setLength(0); + i++; + break; + case '-': + if (i + 1 < l && sql.charAt(i + 1) == '-') { + i = lineComment(i, sql, current); + } + else { + current.append(c); + } + break; + case '/': + if (i + 1 < l && sql.charAt(i + 1) == '*') { + i = multiLineComment(i, sql, current); + } + else { + current.append(c); + } + break; + + default: + current.append(c); + break; + } + } + + fragments.add(current.toString()); + + return new PreparedQuery(fragments); + } + + private static void jdbcEscape() throws SQLException { + throw new SQLFeatureNotSupportedException("JDBC escaping not supported yet"); + } + + + private static int lineComment(int i, String sql, StringBuilder current) { + for (; i < sql.length(); i++) { + char c = sql.charAt(i); + if (c != '\n' && c != '\r') { + current.append(c); + } + else { + return i; + } + } + return i; + } + + private static int multiLineComment(int i, String sql, StringBuilder current) throws JdbcSQLException { + int block = 1; + + for (; i < sql.length() - 1; i++) { + char c = sql.charAt(i); + if (c == '/' && sql.charAt(i + 1) == '*') { + current.append(c); + current.append(sql.charAt(++i)); + block++; + } + else if (c == '*' && sql.charAt(i + 1) == '/') { + current.append(c); + current.append(sql.charAt(++i)); + block--; + } + else { + current.append(c); + } + if (block == 0) { + return i; + } + } + throw new 
JdbcSQLException("Cannot parse given sql; unclosed /* comment"); + } + + private static int string(int i, String sql, StringBuilder current, char q) throws JdbcSQLException { + current.append(sql.charAt(i++)); + for (; i < sql.length(); i++) { + char c = sql.charAt(i); + if (c == q) { + current.append(c); + // double quotes mean escaping + if (sql.charAt(i + 1) == q) { + current.append(sql.charAt(++i)); + } + else { + return i; + } + } + else { + current.append(c); + } + } + throw new JdbcSQLException("Cannot parse given sql; unclosed string"); + } + + static String escapeString(String s) { + if (s == null) { + return "NULL"; + } + + if (s.contains("'") ) { + s = escapeString(s, '\''); + } + if (s.contains("\"")) { + s = escapeString(s, '"'); + } + + // add quotes + return "'" + s + "'"; + } + + private static String escapeString(String s, char sq) { + StringBuilder sb = new StringBuilder(); + + // escape individual single quotes + for (int i = 0; i < s.length(); i++) { + char c = s.charAt(i); + + // needs escaping + if (c == sq) { + // check if it's already escaped + if (s.charAt(i + 1) == sq) { + i++; + } + sb.append(c); + sb.append(c); + } + } + + return sb.toString(); + } +} \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java new file mode 100644 index 00000000000..181d903fbf1 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java @@ -0,0 +1,424 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/**
 * Conversions between the wire representation of values (numbers, booleans,
 * epoch-millis longs for temporal types) and the Java types JDBC callers ask for.
 * All date/time math is done on UTC epoch millis.
 */
abstract class TypeConverter {

    // FIX: was 60 * 60 * 24 (seconds, not millis) even though every caller
    // feeds it epoch MILLIS - date/time truncation was off by a factor of 1000
    private static final long DAY_IN_MILLIS = 24L * 60 * 60 * 1000;

    /** Converts epoch millis to a {@link Date} with the time-of-day zeroed in the given calendar. */
    static Date convertDate(Long millis, Calendar cal) {
        return dateTimeConvert(millis, cal, c -> {
            c.set(HOUR_OF_DAY, 0);
            c.set(MINUTE, 0);
            c.set(SECOND, 0);
            c.set(MILLISECOND, 0);
            return new Date(c.getTimeInMillis());
        });
    }

    /** Converts epoch millis to a {@link Time} with the date pinned to the epoch day. */
    static Time convertTime(Long millis, Calendar cal) {
        return dateTimeConvert(millis, cal, c -> {
            c.set(ERA, GregorianCalendar.AD);
            c.set(YEAR, 1970);
            c.set(MONTH, 0);
            c.set(DAY_OF_MONTH, 1);
            return new Time(c.getTimeInMillis());
        });
    }

    /** Converts epoch millis to a {@link Timestamp} in the given calendar. */
    static Timestamp convertTimestamp(Long millis, Calendar cal) {
        return dateTimeConvert(millis, cal, c -> new Timestamp(c.getTimeInMillis()));
    }

    // applies creator on the calendar positioned at millis, restoring the
    // calendar's original time afterwards (the calendar is shared/reused)
    private static <T> T dateTimeConvert(Long millis, Calendar c, Function<Calendar, T> creator) {
        if (millis == null) {
            return null;
        }
        long initial = c.getTimeInMillis();
        try {
            c.setTimeInMillis(millis.longValue());
            return creator.apply(c);
        } finally {
            c.setTimeInMillis(initial);
        }
    }

    /**
     * Converts a wire value of the given column type to the requested Java type,
     * or to its "native" Java representation when type is null.
     * Returns null for unsupported combinations.
     */
    @SuppressWarnings("unchecked")
    static <T> T convert(Object val, JDBCType columnType, Class<T> type) throws SQLException {
        if (type == null) {
            return (T) asNative(val, columnType);
        }
        if (type == String.class) {
            return (T) asString(asNative(val, columnType));
        }
        if (type == Boolean.class) {
            return (T) asBoolean(val, columnType);
        }
        if (type == Byte.class) {
            return (T) asByte(val, columnType);
        }
        if (type == Short.class) {
            return (T) asShort(val, columnType);
        }
        if (type == Integer.class) {
            return (T) asInteger(val, columnType);
        }
        if (type == Long.class) {
            return (T) asLong(val, columnType);
        }
        if (type == Float.class) {
            return (T) asFloat(val, columnType);
        }
        if (type == Double.class) {
            return (T) asDouble(val, columnType);
        }
        if (type == Date.class) {
            return (T) asDate(val, columnType);
        }
        if (type == Time.class) {
            return (T) asTime(val, columnType);
        }
        if (type == Timestamp.class) {
            return (T) asTimestamp(val, columnType);
        }
        if (type == byte[].class) {
            return (T) asByteArray(val, columnType);
        }
        //
        // JDK 8 types
        //
        if (type == LocalDate.class) {
            return (T) asLocalDate(val, columnType);
        }
        if (type == LocalTime.class) {
            return (T) asLocalTime(val, columnType);
        }
        if (type == LocalDateTime.class) {
            return (T) asLocalDateTime(val, columnType);
        }
        if (type == OffsetTime.class) {
            return (T) asOffsetTime(val, columnType);
        }
        if (type == OffsetDateTime.class) {
            return (T) asOffsetDateTime(val, columnType);
        }
        return null;
    }

    // keep in check with JdbcUtils#columnType
    private static Object asNative(Object v, JDBCType columnType) {
        Object result = null;
        switch (columnType) {
            case BIT:
            case BOOLEAN:
            case TINYINT:
            case SMALLINT:
            case INTEGER:
            case BIGINT:
            case REAL:
            case FLOAT:
            case DOUBLE:
            case BINARY:
            case VARBINARY:
            case LONGVARBINARY:
            case CHAR:
            case VARCHAR:
            case LONGVARCHAR:
            case TIMESTAMP:
                result = v;
                break;
            // since the date is already in UTC_CALENDAR just do calendar math
            case DATE:
                result = new Date(utcMillisRemoveTime(((Long) v).longValue()));
                break;
            case TIME:
                result = new Time(utcMillisRemoveDate(((Long) v).longValue()));
                break;
            default:
                // unsupported type - fall through and return null
        }
        return result;
    }

    private static String asString(Object nativeValue) {
        return nativeValue == null ? null : String.valueOf(nativeValue);
    }

    private static Boolean asBoolean(Object val, JDBCType columnType) {
        switch (columnType) {
            case BIT:
            case BOOLEAN:
            case TINYINT:
            case SMALLINT:
            case INTEGER:
            case BIGINT:
            case REAL:
            case FLOAT:
            case DOUBLE:
                // FIX: was "== 0", i.e. inverted - SQL convention is zero -> false,
                // any non-zero numeric -> true
                return Boolean.valueOf(Integer.signum(((Number) val).intValue()) != 0);
            default:
                return null;
        }
    }

    private static Byte asByte(Object val, JDBCType columnType) throws SQLException {
        switch (columnType) {
            case BIT:
            case BOOLEAN:
                return Byte.valueOf(((Boolean) val).booleanValue() ? (byte) 1 : (byte) 0);
            case TINYINT:
            case SMALLINT:
            case INTEGER:
            case BIGINT:
                return safeToByte(((Number) val).longValue());
            case REAL:
            case FLOAT:
            case DOUBLE:
                return safeToByte(safeToLong(((Number) val).doubleValue()));
            default:
        }

        return null;
    }

    private static Short asShort(Object val, JDBCType columnType) throws SQLException {
        switch (columnType) {
            case BIT:
            case BOOLEAN:
                return Short.valueOf(((Boolean) val).booleanValue() ? (short) 1 : (short) 0);
            case TINYINT:
            case SMALLINT:
            case INTEGER:
            case BIGINT:
                return safeToShort(((Number) val).longValue());
            case REAL:
            case FLOAT:
            case DOUBLE:
                return safeToShort(safeToLong(((Number) val).doubleValue()));
            default:
        }

        return null;
    }

    private static Integer asInteger(Object val, JDBCType columnType) throws SQLException {
        switch (columnType) {
            case BIT:
            case BOOLEAN:
                return Integer.valueOf(((Boolean) val).booleanValue() ? 1 : 0);
            case TINYINT:
            case SMALLINT:
            case INTEGER:
            case BIGINT:
                return safeToInt(((Number) val).longValue());
            case REAL:
            case FLOAT:
            case DOUBLE:
                return safeToInt(safeToLong(((Number) val).doubleValue()));
            default:
        }

        return null;
    }

    private static Long asLong(Object val, JDBCType columnType) throws SQLException {
        switch (columnType) {
            case BIT:
            case BOOLEAN:
                return Long.valueOf(((Boolean) val).booleanValue() ? 1 : 0);
            case TINYINT:
            case SMALLINT:
            case INTEGER:
            case BIGINT:
                return Long.valueOf(((Number) val).longValue());
            case REAL:
            case FLOAT:
            case DOUBLE:
                return safeToLong(((Number) val).doubleValue());
            case DATE:
                return utcMillisRemoveTime(((Number) val).longValue());
            case TIME:
                return utcMillisRemoveDate(((Number) val).longValue());
            case TIMESTAMP:
            case TIMESTAMP_WITH_TIMEZONE:
                return ((Number) val).longValue();
            default:
        }

        return null;
    }

    private static Float asFloat(Object val, JDBCType columnType) throws SQLException {
        switch (columnType) {
            case BIT:
            case BOOLEAN:
                return Float.valueOf(((Boolean) val).booleanValue() ? 1 : 0);
            case TINYINT:
            case SMALLINT:
            case INTEGER:
            case BIGINT:
                return Float.valueOf((float) ((Number) val).longValue());
            case REAL:
            case FLOAT:
            case DOUBLE:
                // valueOf instead of the deprecated Float(double) constructor
                return Float.valueOf((float) ((Number) val).doubleValue());
            default:
        }

        return null;
    }

    private static Double asDouble(Object val, JDBCType columnType) throws SQLException {
        switch (columnType) {
            case BIT:
            case BOOLEAN:
                return Double.valueOf(((Boolean) val).booleanValue() ? 1 : 0);
            case TINYINT:
            case SMALLINT:
            case INTEGER:
            case BIGINT:
                return Double.valueOf((double) ((Number) val).longValue());
            case REAL:
            case FLOAT:
            case DOUBLE:
                // valueOf instead of the deprecated Double(double) constructor
                return Double.valueOf(((Number) val).doubleValue());
            default:
        }

        return null;
    }

    private static Date asDate(Object val, JDBCType columnType) throws SQLException {
        switch (columnType) {
            case TIME:
                // time has no date component
                return new Date(0);
            case DATE:
            case TIMESTAMP:
            case TIMESTAMP_WITH_TIMEZONE:
                return new Date(utcMillisRemoveTime(((Number) val).longValue()));
            default:
        }

        return null;
    }

    private static Time asTime(Object val, JDBCType columnType) throws SQLException {
        switch (columnType) {
            case DATE:
                // date has no time component
                return new Time(0);
            case TIME:
            case TIMESTAMP:
            case TIMESTAMP_WITH_TIMEZONE:
                return new Time(utcMillisRemoveDate(((Number) val).longValue()));
            default:
        }

        return null;
    }

    private static Timestamp asTimestamp(Object val, JDBCType columnType) throws SQLException {
        switch (columnType) {
            case DATE:
                return new Timestamp(utcMillisRemoveTime(((Number) val).longValue()));
            case TIME:
                return new Timestamp(utcMillisRemoveDate(((Number) val).longValue()));
            case TIMESTAMP:
            case TIMESTAMP_WITH_TIMEZONE:
                return new Timestamp(((Number) val).longValue());
            default:
        }

        return null;
    }

    private static byte[] asByteArray(Object val, JDBCType columnType) {
        throw new UnsupportedOperationException();
    }

    private static LocalDate asLocalDate(Object val, JDBCType columnType) {
        throw new UnsupportedOperationException();
    }

    private static LocalTime asLocalTime(Object val, JDBCType columnType) {
        throw new UnsupportedOperationException();
    }

    private static LocalDateTime asLocalDateTime(Object val, JDBCType columnType) {
        throw new UnsupportedOperationException();
    }

    private static OffsetTime asOffsetTime(Object val, JDBCType columnType) {
        throw new UnsupportedOperationException();
    }

    private static OffsetDateTime asOffsetDateTime(Object val, JDBCType columnType) {
        throw new UnsupportedOperationException();
    }

    // truncates epoch millis to midnight UTC of the same day
    // NOTE(review): plain % misbehaves for pre-epoch (negative) millis - confirm
    // whether pre-1970 dates need Math.floorMod semantics
    private static long utcMillisRemoveTime(long l) {
        return l - (l % DAY_IN_MILLIS);
    }

    // keeps only the time-of-day component of epoch millis
    private static long utcMillisRemoveDate(long l) {
        return l % DAY_IN_MILLIS;
    }

    private static byte safeToByte(long x) throws SQLException {
        if (x > Byte.MAX_VALUE || x < Byte.MIN_VALUE) {
            // FIX: was "%d" with a String argument, which threw
            // IllegalFormatConversionException instead of the intended SQLException
            throw new SQLException(format(Locale.ROOT, "Numeric %s out of range", Long.toString(x)));
        }
        return (byte) x;
    }

    private static short safeToShort(long x) throws SQLException {
        if (x > Short.MAX_VALUE || x < Short.MIN_VALUE) {
            throw new SQLException(format(Locale.ROOT, "Numeric %s out of range", Long.toString(x)));
        }
        return (short) x;
    }

    private static int safeToInt(long x) throws SQLException {
        if (x > Integer.MAX_VALUE || x < Integer.MIN_VALUE) {
            throw new SQLException(format(Locale.ROOT, "Numeric %s out of range", Long.toString(x)));
        }
        return (int) x;
    }

    private static long safeToLong(double x) throws SQLException {
        if (x > Long.MAX_VALUE || x < Long.MIN_VALUE) {
            throw new SQLException(format(Locale.ROOT, "Numeric %s out of range", Double.toString(x)));
        }
        return Math.round(x);
    }
}
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbcx; + +import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; +import org.elasticsearch.xpack.sql.jdbc.debug.Debug; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConnection; +import org.elasticsearch.xpack.sql.client.shared.Version; + +import java.io.PrintWriter; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.Wrapper; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import javax.sql.DataSource; + +public class JdbcDataSource implements DataSource, Wrapper { + + static { + Version.version(); + } + + private String url; + private PrintWriter writer; + private int loginTimeout; + private Properties props; + + public JdbcDataSource() {} + + @Override + public PrintWriter getLogWriter() throws SQLException { + return writer; + } + + @Override + public void setLogWriter(PrintWriter out) throws SQLException { + this.writer = out; + } + + @Override + public void setLoginTimeout(int seconds) throws SQLException { + if (seconds < 0) { + throw new SQLException("Negative timeout specified " + seconds); + } + loginTimeout = seconds; + } + + @Override + public int getLoginTimeout() throws SQLException { + return loginTimeout; + } + + @Override + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + throw new SQLFeatureNotSupportedException(); + } + + public String getUrl() { + return url; + } + + public void setUrl(String url) { + this.url = url; + } + + public Properties getProperties() { + Properties copy = new Properties(); + if (props != null) { + copy.putAll(props); + } + return copy; + } + + public void setProperties(Properties props) { + this.props = new Properties(); + this.props.putAll(props); + } + + @Override + public Connection getConnection() throws SQLException { + return 
doGetConnection(getProperties()); + } + + @Override + public Connection getConnection(String username, String password) throws SQLException { + Properties p = getProperties(); + p.setProperty(ConnectionConfiguration.AUTH_USER, username); + p.setProperty(ConnectionConfiguration.AUTH_PASS, password); + return doGetConnection(p); + } + + private Connection doGetConnection(Properties p) throws SQLException { + JdbcConfiguration cfg = JdbcConfiguration.create(url, p); + if (loginTimeout > 0) { + cfg.connectTimeout(TimeUnit.SECONDS.toMillis(loginTimeout)); + } + JdbcConnection con = new JdbcConnection(cfg); + // enable logging if needed + return cfg.debug() ? Debug.proxy(cfg, con, writer) : con; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface != null && iface.isAssignableFrom(getClass()); + } + + @SuppressWarnings("unchecked") + @Override + public T unwrap(Class iface) throws SQLException { + if (isWrapperFor(iface)) { + return (T) this; + } + throw new SQLException(); + } +} \ No newline at end of file diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/Cursor.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/Cursor.java new file mode 100644 index 00000000000..5549bc57d63 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/Cursor.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.client; + +import java.sql.SQLException; +import java.util.List; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; + +public interface Cursor { + + List columns(); + + default int columnSize() { + return columns().size(); + } + + boolean next() throws SQLException; + + Object column(int column); + + /** + * Number of rows that this cursor has pulled back from the + * server in the current batch. + */ + int batchSize(); + + void close() throws SQLException; +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/DefaultCursor.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/DefaultCursor.java new file mode 100644 index 00000000000..8e984e9be7b --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/DefaultCursor.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.client; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Page; + +import java.sql.SQLException; +import java.util.List; + +class DefaultCursor implements Cursor { + + private final JdbcHttpClient client; + private final RequestMeta meta; + + private final Page page; + private int row = -1; + private String cursor; + + DefaultCursor(JdbcHttpClient client, String cursor, Page page, RequestMeta meta) { + this.client = client; + this.meta = meta; + this.cursor = cursor; + this.page = page; + } + + @Override + public List columns() { + return page.columnInfo(); + } + + @Override + public boolean next() throws SQLException { + if (row < page.rows() - 1) { + row++; + return true; + } + else { + if (cursor.isEmpty() == false) { + cursor = client.nextPage(cursor, page, meta); + row = -1; + return next(); + } + return false; + } + } + + @Override + public Object column(int column) { + return page.entry(row, column); + } + + @Override + public int batchSize() { + return page.rows(); + } + + @Override + public void close() throws SQLException { + if (cursor.isEmpty() == false) { + client.queryClose(cursor); + } + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/HttpClient.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/HttpClient.java new file mode 100644 index 00000000000..74beeedc76a --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/HttpClient.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.client; + +import org.elasticsearch.xpack.sql.client.shared.ClientException; +import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection; +import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection.ResponseOrException; +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.Response; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.sql.SQLException; + +// http client +// handles nodes discovery, fail-over, errors, etc... +class HttpClient { + + private final JdbcConfiguration cfg; + + HttpClient(JdbcConfiguration connectionInfo) throws SQLException { + this.cfg = connectionInfo; + } + + void setNetworkTimeout(long millis) { + cfg.networkTimeout(millis); + } + + long getNetworkTimeout() { + return cfg.networkTimeout(); + } + + boolean head() throws JdbcSQLException { + try { + return AccessController.doPrivileged((PrivilegedAction) () -> + JreHttpUrlConnection.http("", "error_trace", cfg, JreHttpUrlConnection::head)); + } catch (ClientException ex) { + throw new JdbcSQLException(ex, "Cannot ping server"); + } + } + + Response post(Request request) throws SQLException { + try { + return AccessController.doPrivileged((PrivilegedAction>) () -> + JreHttpUrlConnection.http("_xpack/sql/jdbc", "error_trace", cfg, con -> + con.post( + out -> Proto.INSTANCE.writeRequest(request, out), + in -> Proto.INSTANCE.readResponse(request, in) + ) + ) + ).getResponseOrThrowException(); + } catch (ClientException ex) { + throw new JdbcSQLException(ex, "Transport failure"); + } + } + +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.client;

import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnInfo;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Page;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryCloseRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryCloseResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageResponse;
import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo;

import java.io.DataInput;
import java.io.IOException;
import java.sql.SQLException;
import java.time.Instant;
import java.util.List;

/**
 * JDBC-level wrapper around the low-level {@link HttpClient}: builds the
 * protocol requests (query, paging, metadata, info) and unwraps the typed
 * responses. One instance per connection; not thread-safe by design since a
 * JDBC connection is single-threaded.
 */
public class JdbcHttpClient {
    /**
     * Reader of a protocol response from a {@link DataInput}, allowed to fail
     * with either an I/O or a SQL error.
     */
    @FunctionalInterface
    interface DataInputFunction<R> {
        R apply(DataInput in) throws IOException, SQLException;
    }

    private final HttpClient http;
    private final JdbcConfiguration conCfg;
    // lazily fetched and then cached; see serverInfo()
    private InfoResponse serverInfo;

    public JdbcHttpClient(JdbcConfiguration conCfg) throws SQLException {
        http = new HttpClient(conCfg);
        this.conCfg = conCfg;
    }

    /**
     * Pings the server, temporarily lowering the network timeout to the
     * given value so the ping cannot hang for the full query timeout.
     */
    public boolean ping(long timeoutInMs) throws SQLException {
        long oldTimeout = http.getNetworkTimeout();
        try {
            // this works since the connection is single-threaded and its configuration not shared
            // with others connections
            http.setNetworkTimeout(timeoutInMs);
            return http.head();
        } finally {
            http.setNetworkTimeout(oldTimeout);
        }
    }

    /**
     * Runs the given SQL and returns a cursor over the first page of results.
     */
    public Cursor query(String sql, RequestMeta meta) throws SQLException {
        // a non-positive fetch size means "use the connection-wide page size"
        int fetch = meta.fetchSize() > 0 ? meta.fetchSize() : conCfg.pageSize();
        QueryInitRequest request = new QueryInitRequest(sql, fetch, conCfg.timeZone(), timeout(meta));
        QueryInitResponse response = (QueryInitResponse) http.post(request);
        return new DefaultCursor(this, response.cursor(), (Page) response.data, meta);
    }

    /**
     * Read the next page of results, updating the {@link Page} and returning
     * the scroll id to use to fetch the next page.
     */
    public String nextPage(String cursor, Page page, RequestMeta meta) throws SQLException {
        QueryPageRequest request = new QueryPageRequest(cursor, timeout(meta), page);
        return ((QueryPageResponse) http.post(request)).cursor();
    }

    /**
     * Closes the server-side cursor, freeing its resources early.
     */
    public boolean queryClose(String cursor) throws SQLException {
        QueryCloseRequest request = new QueryCloseRequest(cursor);
        return ((QueryCloseResponse) http.post(request)).succeeded();
    }

    /**
     * Server version/cluster information, fetched once and cached afterwards.
     */
    public InfoResponse serverInfo() throws SQLException {
        if (serverInfo == null) {
            serverInfo = fetchServerInfo();
        }
        return serverInfo;
    }

    private InfoResponse fetchServerInfo() throws SQLException {
        InfoRequest request = new InfoRequest();
        return (InfoResponse) http.post(request);
    }

    /**
     * Table names matching the given pattern (DatabaseMetaData support).
     */
    public List<String> metaInfoTables(String pattern) throws SQLException {
        MetaTableRequest request = new MetaTableRequest(pattern);
        return ((MetaTableResponse) http.post(request)).tables;
    }

    /**
     * Column metadata matching the given table/column patterns.
     */
    public List<MetaColumnInfo> metaInfoColumns(String tablePattern, String columnPattern) throws SQLException {
        MetaColumnRequest request = new MetaColumnRequest(tablePattern, columnPattern);
        return ((MetaColumnResponse) http.post(request)).columns;
    }

    public void setNetworkTimeout(long millis) {
        http.setNetworkTimeout(millis);
    }

    public long getNetworkTimeout() {
        return http.getNetworkTimeout();
    }

    /**
     * Builds the timeout information sent with every request: the client's
     * wall clock plus the effective request and page timeouts.
     */
    private TimeoutInfo timeout(RequestMeta meta) {
        // client time
        long clientTime = Instant.now().toEpochMilli();

        // timeout (in ms); 0 means "fall back to the connection-wide default"
        long timeout = meta.timeoutInMs();
        if (timeout == 0) {
            timeout = conCfg.queryTimeout();
        }
        return new TimeoutInfo(clientTime, timeout, conCfg.pageTimeout());
    }
}
-0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.client; + +public class RequestMeta { + + private int fetchSize; + private long timeoutInMs; + + public RequestMeta() { + this(-1, 0); + } + + public RequestMeta(int fetchSize, int timeout) { + this.fetchSize = fetchSize; + this.timeoutInMs = timeout; + } + + public RequestMeta timeout(long timeout) { + this.timeoutInMs = timeout; + return this; + } + + public RequestMeta fetchSize(int size) { + this.fetchSize = size; + return this; + } + + public int fetchSize() { + return fetchSize; + } + + public long timeoutInMs() { + return timeoutInMs; + } + +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/util/AccessibleDataOutputStream.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/util/AccessibleDataOutputStream.java new file mode 100644 index 00000000000..214133d8a00 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/util/AccessibleDataOutputStream.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.util; + +import java.io.DataOutputStream; +import java.io.OutputStream; + +public class AccessibleDataOutputStream extends DataOutputStream { + + public AccessibleDataOutputStream(OutputStream out) { + super(out); + } + + public OutputStream wrappedStream() { + return out; + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/util/ArrayUtils.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/util/ArrayUtils.java new file mode 100644 index 00000000000..73c9e011c78 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/util/ArrayUtils.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.util; + +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; + +import java.sql.SQLException; + +// taken from org.apache.lucene.util +abstract class ArrayUtils { + + static byte[] grow(byte[] array, int minSize) throws SQLException { + assert minSize >= 0 : "size must be positive (got " + minSize + "): likely integer overflow?"; + if (array.length < minSize) { + byte[] newArray = new byte[oversize(minSize, 1)]; + System.arraycopy(array, 0, newArray, 0, array.length); + return newArray; + } + else return array; + } + + static int oversize(int minTargetSize, int bytesPerElement) throws SQLException { + + if (minTargetSize < 0) { + // catch usage that accidentally overflows int + throw new JdbcSQLException("invalid array size [" + minTargetSize + "]"); + } + + if (minTargetSize == 0) { + // wait until at least one element is requested + return 0; + } + + // asymptotic exponential growth by 1/8th, favors + // spending a bit more CPU to not tie up too much wasted + // RAM: + int extra = minTargetSize >> 3; + + if (extra < 3) { + 
// for very small arrays, where constant overhead of + // realloc is presumably relatively high, we grow + // faster + extra = 3; + } + + int newSize = minTargetSize + extra; + + // add 7 to allow for worst case byte alignment addition below: + if (newSize + 7 < 0) { + // int overflowed -- return max allowed array size + return Integer.MAX_VALUE; + } + + if (Constants.JRE_IS_64BIT) { + // round up to 8 byte alignment in 64bit env + switch (bytesPerElement) { + case 4: + // round up to multiple of 2 + return (newSize + 1) & 0x7ffffffe; + case 2: + // round up to multiple of 4 + return (newSize + 3) & 0x7ffffffc; + case 1: + // round up to multiple of 8 + return (newSize + 7) & 0x7ffffff8; + case 8: + // no rounding + default: + // odd (invalid?) size + return newSize; + } + } + else { + // round up to 4 byte alignment in 64bit env + switch (bytesPerElement) { + case 2: + // round up to multiple of 2 + return (newSize + 1) & 0x7ffffffe; + case 1: + // round up to multiple of 4 + return (newSize + 3) & 0x7ffffffc; + case 4: + case 8: + // no rounding + default: + // odd (invalid?) size + return newSize; + } + } + } +} diff --git a/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/util/Constants.java b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/util/Constants.java new file mode 100644 index 00000000000..a7b937fb223 --- /dev/null +++ b/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/util/Constants.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.util; + +import java.util.StringTokenizer; + +//taken from Apache Lucene +public abstract class Constants { + + /** JVM vendor info. 
*/ + public static final String JVM_VENDOR = System.getProperty("java.vm.vendor"); + public static final String JVM_VERSION = System.getProperty("java.vm.version"); + public static final String JVM_NAME = System.getProperty("java.vm.name"); + public static final String JVM_SPEC_VERSION = System.getProperty("java.specification.version"); + + /** The value of System.getProperty("java.version"). **/ + public static final String JAVA_VERSION = System.getProperty("java.version"); + + public static final String OS_ARCH = System.getProperty("os.arch"); + public static final String OS_VERSION = System.getProperty("os.version"); + public static final String JAVA_VENDOR = System.getProperty("java.vendor"); + + private static final int JVM_MAJOR_VERSION; + private static final int JVM_MINOR_VERSION; + + /** True iff running on a 64bit JVM */ + public static final boolean JRE_IS_64BIT; + + static { + final StringTokenizer st = new StringTokenizer(JVM_SPEC_VERSION, "."); + JVM_MAJOR_VERSION = Integer.parseInt(st.nextToken()); + if (st.hasMoreTokens()) { + JVM_MINOR_VERSION = Integer.parseInt(st.nextToken()); + } + else { + JVM_MINOR_VERSION = 0; + } + boolean is64Bit = false; + final String x = System.getProperty("sun.arch.data.model"); + if (x != null) { + is64Bit = x.contains("64"); + } + else { + if (OS_ARCH != null && OS_ARCH.contains("64")) { + is64Bit = true; + } + else { + is64Bit = false; + } + } + JRE_IS_64BIT = is64Bit; + } + + public static final boolean JRE_IS_MINIMUM_JAVA7 = JVM_MAJOR_VERSION > 1 || (JVM_MAJOR_VERSION == 1 && JVM_MINOR_VERSION >= 7); + public static final boolean JRE_IS_MINIMUM_JAVA8 = JVM_MAJOR_VERSION > 1 || (JVM_MAJOR_VERSION == 1 && JVM_MINOR_VERSION >= 8); + public static final boolean JRE_IS_MINIMUM_JAVA9 = JVM_MAJOR_VERSION > 1 || (JVM_MAJOR_VERSION == 1 && JVM_MINOR_VERSION >= 9); + +} \ No newline at end of file diff --git a/sql/jdbc/src/main/resources/META-INF/services/java.sql.Driver 
// META-INF/services/java.sql.Driver (service registration):
//   org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver

/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc;

import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration;
import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver;

import java.security.AccessController;
import java.security.PrivilegedExceptionAction;
import java.sql.Driver;
import java.sql.DriverManager;
import java.sql.SQLException;

import static org.hamcrest.Matchers.is;

/**
 * Checks that the driver can be registered with and deregistered from
 * {@link DriverManager}, restoring whatever registration state existed
 * before the test ran.
 */
public class DriverManagerRegistrationTests extends ESTestCase {

    public void testRegistration() throws Exception {
        String url = "jdbc:es:localhost:9200/";
        Driver driver = null;
        try {
            // can happen (if the driver jar was not loaded)
            driver = DriverManager.getDriver(url);
        } catch (SQLException ex) {
            assertEquals("No suitable driver", ex.getMessage());
        }
        boolean set = driver != null;
        try {
            Driver d = JdbcDriver.register();
            if (driver != null) {
                assertEquals(driver, d);
            }
            AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
                // mimic DriverManager and unregister the driver
                JdbcDriver.deregister();
                return null;
            });

            SQLException ex = expectThrows(SQLException.class, () -> DriverManager.getDriver(url));
            assertEquals("No suitable driver", ex.getMessage());
        } finally {
            // leave the DriverManager in the state we found it
            if (set) {
                JdbcDriver.register();
            }
        }
    }
}

/**
 * Unit tests for JDBC url parsing in {@link JdbcConfiguration}: host/port
 * defaults, path suffixes, IPv6 literals, debug parameters and SSL.
 * (Originally JdbcConfigurationTests.java.)
 */
class JdbcConfigurationTests extends ESTestCase {

    private JdbcConfiguration ci(String url) throws SQLException {
        return JdbcConfiguration.create(url, null);
    }

    public void testJustThePrefix() throws Exception {
        Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es:"));
        assertEquals("Expected [jdbc:es://] url, received [jdbc:es:]", e.getMessage());
    }

    public void testJustTheHost() throws Exception {
        assertThat(ci("jdbc:es://localhost").baseUri().toString(), is("http://localhost:9200/"));
    }

    public void testHostAndPort() throws Exception {
        assertThat(ci("jdbc:es://localhost:1234").baseUri().toString(), is("http://localhost:1234/"));
    }

    public void testTrailingSlashForHost() throws Exception {
        assertThat(ci("jdbc:es://localhost:1234/").baseUri().toString(), is("http://localhost:1234/"));
    }

    public void testMultiPathSuffix() throws Exception {
        assertThat(ci("jdbc:es://a:1/foo/bar/tar").baseUri().toString(), is("http://a:1/foo/bar/tar"));
    }

    public void testV6Localhost() throws Exception {
        assertThat(ci("jdbc:es://[::1]:54161/foo/bar").baseUri().toString(), is("http://[::1]:54161/foo/bar"));
    }

    public void testDebug() throws Exception {
        JdbcConfiguration ci = ci("jdbc:es://a:1/?debug=true");
        assertThat(ci.baseUri().toString(), is("http://a:1/"));
        assertThat(ci.debug(), is(true));
        // debug output defaults to stderr
        assertThat(ci.debugOut(), is("err"));
    }

    public void testDebugOut() throws Exception {
        JdbcConfiguration ci = ci("jdbc:es://a:1/?debug=true&debug.output=jdbc.out");
        assertThat(ci.baseUri().toString(), is("http://a:1/"));
        assertThat(ci.debug(), is(true));
        assertThat(ci.debugOut(), is("jdbc.out"));
    }

    public void testTypeInParam() throws Exception {
        // unknown parameters produce a "did you mean" suggestion
        Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://a:1/foo/bar/tar?debug=true&debug.out=jdbc.out"));
        assertEquals("Unknown parameter [debug.out] ; did you mean [debug.output]", e.getMessage());
    }

    public void testDebugOutWithSuffix() throws Exception {
        JdbcConfiguration ci = ci("jdbc:es://a:1/foo/bar/tar?debug=true&debug.output=jdbc.out");
        assertThat(ci.baseUri().toString(), is("http://a:1/foo/bar/tar"));
        assertThat(ci.debug(), is(true));
        assertThat(ci.debugOut(), is("jdbc.out"));
    }

    public void testHttpWithSSLEnabled() throws Exception {
        JdbcConfiguration ci = ci("jdbc:es://test?ssl=true");
        assertThat(ci.baseUri().toString(), is("https://test:9200/"));
    }

    public void testHttpWithSSLDisabled() throws Exception {
        JdbcConfiguration ci = ci("jdbc:es://test?ssl=false");
        assertThat(ci.baseUri().toString(), is("http://test:9200/"));
    }

}
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.client.shared.Version; + +public class VersionTests extends ESTestCase { + public void testVersionIsCurrent() { + /* This test will only work properly in gradle because in gradle we run the tests + * using the jar. */ + assertEquals(org.elasticsearch.Version.CURRENT.toString(), Version.versionNumber()); + assertNotNull(Version.versionHash()); + assertEquals(org.elasticsearch.Version.CURRENT.major, Version.versionMajor()); + assertEquals(org.elasticsearch.Version.CURRENT.minor, Version.versionMinor()); + } +} diff --git a/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/package-info.java b/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/package-info.java new file mode 100644 index 00000000000..4c45324bb08 --- /dev/null +++ b/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/package-info.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Unit tests for the Elasticsearch JDBC client. 
+ */ +package org.elasticsearch.xpack.sql.jdbc; diff --git a/sql/jdbc/src/test/resources/plugin-security.policy b/sql/jdbc/src/test/resources/plugin-security.policy new file mode 100644 index 00000000000..5f16c1579b0 --- /dev/null +++ b/sql/jdbc/src/test/resources/plugin-security.policy @@ -0,0 +1,4 @@ +grant { + // Required for testing the Driver registration + permission java.sql.SQLPermission "deregisterDriver"; +}; diff --git a/sql/server/build.gradle b/sql/server/build.gradle new file mode 100644 index 00000000000..0e7b46dd1ad --- /dev/null +++ b/sql/server/build.gradle @@ -0,0 +1,93 @@ +description = 'The server components of SQL for Elasticsearch' + +dependencies { + compile project(':x-pack-elasticsearch:sql:jdbc-proto') + compile project(':x-pack-elasticsearch:sql:cli-proto') + compile project(':x-pack-elasticsearch:sql:shared-proto') + provided "org.elasticsearch.plugin:aggs-matrix-stats-client:${project.versions.elasticsearch}" + compile 'org.antlr:antlr4-runtime:4.5.3' + provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}" + +} + +dependencyLicenses { + mapping from: /jdbc-proto.*/, to: 'elasticsearch' + mapping from: /cli-proto.*/, to: 'elasticsearch' + mapping from: /shared-proto.*/, to: 'elasticsearch' + ignoreSha 'jdbc-proto' + ignoreSha 'cli-proto' + ignoreSha 'shared-proto' +} + +// TODO probably not a good thing to rely on. 
See https://github.com/elastic/x-pack-elasticsearch/issues/2871 +compileJava.options.compilerArgs << "-parameters" +compileTestJava.options.compilerArgs << "-parameters" + +/********************************************** + * SQL Parser regeneration * + **********************************************/ + +configurations { + regenerate +} + +dependencies { + regenerate 'org.antlr:antlr4:4.5.3' +} + +String grammarPath = 'src/main/antlr' +String outputPath = 'src/main/java/org/elasticsearch/xpack/sql/parser' + +task cleanGenerated(type: Delete) { + delete fileTree(grammarPath) { + include '*.tokens' + } + delete fileTree(outputPath) { + include 'SqlBase*.java' + } +} + +task regenParser(type: JavaExec) { + dependsOn cleanGenerated + main = 'org.antlr.v4.Tool' + classpath = configurations.regenerate + systemProperty 'file.encoding', 'UTF-8' + systemProperty 'user.language', 'en' + systemProperty 'user.country', 'US' + systemProperty 'user.variant', '' + args '-Werror', + '-package', 'org.elasticsearch.xpack.sql.parser', + '-listener', + '-visitor', + '-o', outputPath, + "${file(grammarPath)}/SqlBase.g4" +} + +task regen { + dependsOn regenParser + doLast { + // moves token files to grammar directory for use with IDE's + ant.move(file: "${outputPath}/SqlBase.tokens", toDir: grammarPath) + ant.move(file: "${outputPath}/SqlBaseLexer.tokens", toDir: grammarPath) + // make the generated classes package private + ant.replaceregexp(match: 'public ((interface|class) \\QSqlBase\\E\\w+)', + replace: '\\1', + encoding: 'UTF-8') { + fileset(dir: outputPath, includes: 'SqlBase*.java') + } + // nuke timestamps/filenames in generated files + ant.replaceregexp(match: '\\Q// Generated from \\E.*', + replace: '\\/\\/ ANTLR GENERATED CODE: DO NOT EDIT', + encoding: 'UTF-8') { + fileset(dir: outputPath, includes: 'SqlBase*.java') + } + // remove tabs in antlr generated files + ant.replaceregexp(match: '\t', flags: 'g', replace: ' ', encoding: 'UTF-8') { + fileset(dir: outputPath, includes: 
'SqlBase*.java') + } + // fix line endings + ant.fixcrlf(srcdir: outputPath, eol: 'lf') { + patternset(includes: 'SqlBase*.java') + } + } +} diff --git a/sql/server/licenses/antlr4-runtime-4.5.3.jar.sha1 b/sql/server/licenses/antlr4-runtime-4.5.3.jar.sha1 new file mode 100644 index 00000000000..535955b7d68 --- /dev/null +++ b/sql/server/licenses/antlr4-runtime-4.5.3.jar.sha1 @@ -0,0 +1 @@ +2609e36f18f7e8d593cc1cddfb2ac776dc96b8e0 \ No newline at end of file diff --git a/sql/server/licenses/antlr4-runtime-LICENSE.txt b/sql/server/licenses/antlr4-runtime-LICENSE.txt new file mode 100644 index 00000000000..95d0a2554f6 --- /dev/null +++ b/sql/server/licenses/antlr4-runtime-LICENSE.txt @@ -0,0 +1,26 @@ +[The "BSD license"] +Copyright (c) 2015 Terence Parr, Sam Harwell +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/sql/server/licenses/antlr4-runtime-NOTICE.txt b/sql/server/licenses/antlr4-runtime-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sql/server/licenses/elasticsearch-LICENSE.txt b/sql/server/licenses/elasticsearch-LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/sql/server/licenses/elasticsearch-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/sql/server/licenses/elasticsearch-NOTICE.txt b/sql/server/licenses/elasticsearch-NOTICE.txt new file mode 100644 index 00000000000..643a060cd05 --- /dev/null +++ b/sql/server/licenses/elasticsearch-NOTICE.txt @@ -0,0 +1,5 @@ +Elasticsearch +Copyright 2009-2017 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). diff --git a/sql/server/src/main/antlr/SqlBase.g4 b/sql/server/src/main/antlr/SqlBase.g4 new file mode 100644 index 00000000000..ea007623107 --- /dev/null +++ b/sql/server/src/main/antlr/SqlBase.g4 @@ -0,0 +1,431 @@ +/* + * ELASTICSEARCH CONFIDENTIAL + * __________________ + * + * [2014] Elasticsearch Incorporated. All Rights Reserved. + * + * NOTICE: All information contained herein is, and remains + * the property of Elasticsearch Incorporated and its suppliers, + * if any. The intellectual and technical concepts contained + * herein are proprietary to Elasticsearch Incorporated + * and its suppliers and may be covered by U.S. and Foreign Patents, + * patents in process, and are protected by trade secret or copyright law. + * Dissemination of this information or reproduction of this material + * is strictly forbidden unless prior written permission is obtained + * from Elasticsearch Incorporated. 
+ */ + +/** Fork from Presto Parser - significantly trimmed down and adjusted for ES */ +/** presto-parser/src/main/antlr4/com/facebook/presto/sql/parser/SqlBase.g4 grammar */ + +grammar SqlBase; + +tokens { + DELIMITER +} + +singleStatement + : statement EOF + ; + +singleExpression + : expression EOF + ; + +statement + : query #statementDefault + | EXPLAIN + ('(' + ( + PLAN type=(PARSED | ANALYZED | OPTIMIZED | MAPPED | EXECUTABLE | ALL) + | FORMAT format=(TEXT | GRAPHVIZ) + | VERIFY verify=booleanValue + )* + ')')? + statement #explain + | DEBUG + ('(' + ( + PLAN type=(ANALYZED | OPTIMIZED) + | FORMAT format=(TEXT | GRAPHVIZ) + )* + ')')? + statement #debug + | SHOW TABLES (LIKE? pattern=STRING)? #showTables + | SHOW COLUMNS (FROM | IN) tableIdentifier #showColumns + | (DESCRIBE | DESC) tableIdentifier #showColumns + | SHOW FUNCTIONS (LIKE? pattern=STRING)? #showFunctions + | SHOW SCHEMAS #showSchemas + ; + +query + : (WITH namedQuery (',' namedQuery)*)? queryNoWith + ; + +queryNoWith + : queryTerm + /** we could add sort by - sort per partition */ + (ORDER BY orderBy (',' orderBy)*)? + (LIMIT limit=(INTEGER_VALUE | ALL))? + ; + +queryTerm + : querySpecification #queryPrimaryDefault + | '(' queryNoWith ')' #subquery + ; + +orderBy + : expression ordering=(ASC | DESC)? + ; + +querySpecification + : SELECT setQuantifier? selectItem (',' selectItem)* + fromClause? + (WHERE where=booleanExpression)? + (GROUP BY groupBy)? + (HAVING having=booleanExpression)? + ; + +fromClause + : FROM relation (',' relation)* + ; + +groupBy + : setQuantifier? groupingElement (',' groupingElement)* + ; + +groupingElement + : groupingExpressions #singleGroupingSet + ; + +groupingExpressions + : '(' (expression (',' expression)*)? ')' + | expression + ; + +namedQuery + : name=identifier AS '(' queryNoWith ')' + ; + +setQuantifier + : DISTINCT + | ALL + ; + +selectItem + : expression (AS? identifier)? 
#selectExpression + ; + +relation + : relationPrimary joinRelation* + ; + +joinRelation + : (joinType) JOIN right=relationPrimary joinCriteria? + | NATURAL joinType JOIN right=relationPrimary + ; + +joinType + : INNER? + | LEFT OUTER? + | RIGHT OUTER? + | FULL OUTER? + ; + +joinCriteria + : ON booleanExpression + | USING '(' identifier (',' identifier)* ')' + ; + +relationPrimary + : tableIdentifier (AS? qualifiedName)? #tableName + | '(' queryNoWith ')' (AS? qualifiedName)? #aliasedQuery + | '(' relation ')' (AS? qualifiedName)? #aliasedRelation + ; + +expression + : booleanExpression + ; + +booleanExpression + : NOT booleanExpression #logicalNot + | EXISTS '(' query ')' #exists + | QUERY '(' queryString=STRING (',' options=STRING)* ')' #stringQuery + | MATCH '(' singleField=qualifiedName ',' queryString=STRING (',' options=STRING)* ')' #matchQuery + | MATCH '(' multiFields=STRING ',' queryString=STRING (',' options=STRING)* ')' #multiMatchQuery + | predicated #booleanDefault + | left=booleanExpression operator=AND right=booleanExpression #logicalBinary + | left=booleanExpression operator=OR right=booleanExpression #logicalBinary + ; + +// workaround for: +// https://github.com/antlr/antlr4/issues/780 +// https://github.com/antlr/antlr4/issues/781 +predicated + : valueExpression predicate? + ; + +// dedicated calls for each branch are not used to reuse the NOT handling across them +// instead the property kind is used to differentiate +predicate + : NOT? kind=BETWEEN lower=valueExpression AND upper=valueExpression + | NOT? kind=IN '(' expression (',' expression)* ')' + | NOT? kind=IN '(' query ')' + | NOT? kind=(LIKE | RLIKE) pattern=valueExpression + | IS NOT? 
kind=NULL + ; + +valueExpression + : primaryExpression #valueExpressionDefault + | operator=(MINUS | PLUS) valueExpression #arithmeticUnary + | left=valueExpression operator=(ASTERISK | SLASH | PERCENT) right=valueExpression #arithmeticBinary + | left=valueExpression operator=(PLUS | MINUS) right=valueExpression #arithmeticBinary + | left=valueExpression comparisonOperator right=valueExpression #comparison + ; + +primaryExpression + : CAST '(' expression AS dataType ')' #cast + | EXTRACT '(' field=identifier FROM valueExpression ')' #extract + | constant #constantDefault + | ASTERISK #star + | (qualifier=columnExpression '.')? ASTERISK #star + | identifier '(' (setQuantifier? expression (',' expression)*)? ')' #functionCall + | '(' query ')' #subqueryExpression + | columnExpression #columnReference + | base=columnExpression '.' fieldName=identifier #dereference + | '(' expression ')' #parenthesizedExpression + ; + +columnExpression + : ((alias=identifier | table=tableIdentifier) '.' )? name=identifier + ; + +constant + : NULL #nullLiteral + | identifier STRING #typeConstructor + | number #numericLiteral + | booleanValue #booleanLiteral + | STRING+ #stringLiteral + ; + +comparisonOperator + : EQ | NEQ | LT | LTE | GT | GTE + ; + +booleanValue + : TRUE | FALSE + ; + +dataType + : identifier #primitiveDataType + ; + +whenClause + : WHEN condition=expression THEN result=expression + ; + +qualifiedName + : identifier ('.' 
identifier)* + ; + +tableIdentifier + : index=identifier + ; + +identifier + : quoteIdentifier + | unquoteIdentifier + ; + +quoteIdentifier + : QUOTED_IDENTIFIER #quotedIdentifier + | BACKQUOTED_IDENTIFIER #backQuotedIdentifier + ; + +unquoteIdentifier + : IDENTIFIER #unquotedIdentifier + | nonReserved #unquotedIdentifier + | DIGIT_IDENTIFIER #digitIdentifier + ; + +number + : DECIMAL_VALUE #decimalLiteral + | INTEGER_VALUE #integerLiteral + ; + +// http://developer.mimer.se/validator/sql-reserved-words.tml +nonReserved + : ANALYZE | ANALYZED + | COLUMNS + | DEBUG + | EXECUTABLE | EXPLAIN + | FORMAT | FUNCTIONS | FROM + | GRAPHVIZ + | LOGICAL + | MAPPED + | OPTIMIZED + | PARSED | PHYSICAL | PLAN + | QUERY + | RESET | RLIKE + | SCHEMAS | SESSION | SETS | SHOW + | TABLES | TEXT | TYPE + | USE + | VERIFY + ; + +ALL: 'ALL'; +ANALYZE: 'ANALYZE'; +ANALYZED: 'ANALYZED'; +AND: 'AND'; +ANY: 'ANY'; +AS: 'AS'; +ASC: 'ASC'; +BETWEEN: 'BETWEEN'; +BY: 'BY'; +CAST: 'CAST'; +COLUMN: 'COLUMN'; +COLUMNS: 'COLUMNS'; +CROSS: 'CROSS'; +DEBUG: 'DEBUG'; +DESC: 'DESC'; +DESCRIBE: 'DESCRIBE'; +DISTINCT: 'DISTINCT'; +EXECUTABLE: 'EXECUTABLE'; +EXISTS: 'EXISTS'; +EXPLAIN: 'EXPLAIN'; +EXTRACT: 'EXTRACT'; +FALSE: 'FALSE'; +FOR: 'FOR'; +FORMAT: 'FORMAT'; +FROM: 'FROM'; +FULL: 'FULL'; +FUNCTIONS: 'FUNCTIONS'; +GRAPHVIZ: 'GRAPHVIZ'; +GROUP: 'GROUP'; +GROUPING: 'GROUPING'; +HAVING: 'HAVING'; +IN: 'IN'; +INNER: 'INNER'; +INTEGER: 'INTEGER'; +INTO: 'INTO'; +IS: 'IS'; +JOIN: 'JOIN'; +LAST: 'LAST'; +LEFT: 'LEFT'; +LIKE: 'LIKE'; +LIMIT: 'LIMIT'; +LOGICAL: 'LOGICAL'; +MAPPED: 'MAPPED'; +MATCH: 'MATCH'; +NATURAL: 'NATURAL'; +NO: 'NO'; +NOT: 'NOT'; +NULL: 'NULL'; +ON: 'ON'; +OPTIMIZED: 'OPTIMIZED'; +OPTION: 'OPTION'; +OR: 'OR'; +ORDER: 'ORDER'; +OUTER: 'OUTER'; +PARSED: 'PARSED'; +PHYSICAL: 'PHYSICAL'; +PLAN: 'PLAN'; +QUERY: 'QUERY'; +RESET: 'RESET'; +RIGHT: 'RIGHT'; +RLIKE: 'RLIKE'; +SCHEMAS: 'SCHEMAS'; +SELECT: 'SELECT'; +SESSION: 'SESSION'; +SET: 'SET'; +SETS: 'SETS'; +SHOW: 'SHOW'; +TABLE: 'TABLE'; 
+TABLES: 'TABLES'; +TEXT: 'TEXT'; +THEN: 'THEN'; +TO: 'TO'; +TRUE: 'TRUE'; +TYPE: 'TYPE'; +USE: 'USE'; +USING: 'USING'; +VERIFY: 'VERIFY'; +WHEN: 'WHEN'; +WHERE: 'WHERE'; +WITH: 'WITH'; + +EQ : '='; +NEQ : '<>' | '!=' | '<=>'; +LT : '<'; +LTE : '<='; +GT : '>'; +GTE : '>='; + +PLUS: '+'; +MINUS: '-'; +ASTERISK: '*'; +SLASH: '/'; +PERCENT: '%'; +CONCAT: '||'; + +STRING + : '\'' ( ~'\'' | '\'\'' )* '\'' + ; + +INTEGER_VALUE + : DIGIT+ + ; + +DECIMAL_VALUE + : DIGIT+ '.' DIGIT* + | '.' DIGIT+ + | DIGIT+ ('.' DIGIT*)? EXPONENT + | '.' DIGIT+ EXPONENT + ; + +IDENTIFIER + : (LETTER | '_') (LETTER | DIGIT | '_' | '@' | ':')* + ; + +DIGIT_IDENTIFIER + : DIGIT (LETTER | DIGIT | '_' | '@' | ':')+ + ; + +QUOTED_IDENTIFIER + : '"' ( ~'"' | '""' )* '"' + ; + +BACKQUOTED_IDENTIFIER + : '`' ( ~'`' | '``' )* '`' + ; + +fragment EXPONENT + : 'E' [+-]? DIGIT+ + ; + +fragment DIGIT + : [0-9] + ; + +fragment LETTER + : [A-Z] + ; + +SIMPLE_COMMENT + : '--' ~[\r\n]* '\r'? '\n'? -> channel(HIDDEN) + ; + +BRACKETED_COMMENT + : '/*' (BRACKETED_COMMENT|.)*? '*/' -> channel(HIDDEN) + ; + +WS + : [ \r\n\t]+ -> channel(HIDDEN) + ; + +// Catch-all for anything we can't recognize. +// We use this to be able to ignore and recover all the text +// when splitting statements with DelimiterLexer +UNRECOGNIZED + : . 
+ ; diff --git a/sql/server/src/main/antlr/SqlBase.tokens b/sql/server/src/main/antlr/SqlBase.tokens new file mode 100644 index 00000000000..734e7cc4143 --- /dev/null +++ b/sql/server/src/main/antlr/SqlBase.tokens @@ -0,0 +1,203 @@ +T__0=1 +T__1=2 +T__2=3 +T__3=4 +ALL=5 +ANALYZE=6 +ANALYZED=7 +AND=8 +ANY=9 +AS=10 +ASC=11 +BETWEEN=12 +BY=13 +CAST=14 +COLUMN=15 +COLUMNS=16 +CROSS=17 +DEBUG=18 +DESC=19 +DESCRIBE=20 +DISTINCT=21 +EXECUTABLE=22 +EXISTS=23 +EXPLAIN=24 +EXTRACT=25 +FALSE=26 +FOR=27 +FORMAT=28 +FROM=29 +FULL=30 +FUNCTIONS=31 +GRAPHVIZ=32 +GROUP=33 +GROUPING=34 +HAVING=35 +IN=36 +INNER=37 +INTEGER=38 +INTO=39 +IS=40 +JOIN=41 +LAST=42 +LEFT=43 +LIKE=44 +LIMIT=45 +LOGICAL=46 +MAPPED=47 +MATCH=48 +NATURAL=49 +NO=50 +NOT=51 +NULL=52 +ON=53 +OPTIMIZED=54 +OPTION=55 +OR=56 +ORDER=57 +OUTER=58 +PARSED=59 +PHYSICAL=60 +PLAN=61 +QUERY=62 +RESET=63 +RIGHT=64 +RLIKE=65 +SCHEMAS=66 +SELECT=67 +SESSION=68 +SET=69 +SETS=70 +SHOW=71 +TABLE=72 +TABLES=73 +TEXT=74 +THEN=75 +TO=76 +TRUE=77 +TYPE=78 +USE=79 +USING=80 +VERIFY=81 +WHEN=82 +WHERE=83 +WITH=84 +EQ=85 +NEQ=86 +LT=87 +LTE=88 +GT=89 +GTE=90 +PLUS=91 +MINUS=92 +ASTERISK=93 +SLASH=94 +PERCENT=95 +CONCAT=96 +STRING=97 +INTEGER_VALUE=98 +DECIMAL_VALUE=99 +IDENTIFIER=100 +DIGIT_IDENTIFIER=101 +QUOTED_IDENTIFIER=102 +BACKQUOTED_IDENTIFIER=103 +SIMPLE_COMMENT=104 +BRACKETED_COMMENT=105 +WS=106 +UNRECOGNIZED=107 +DELIMITER=108 +'('=1 +')'=2 +','=3 +'.'=4 +'ALL'=5 +'ANALYZE'=6 +'ANALYZED'=7 +'AND'=8 +'ANY'=9 +'AS'=10 +'ASC'=11 +'BETWEEN'=12 +'BY'=13 +'CAST'=14 +'COLUMN'=15 +'COLUMNS'=16 +'CROSS'=17 +'DEBUG'=18 +'DESC'=19 +'DESCRIBE'=20 +'DISTINCT'=21 +'EXECUTABLE'=22 +'EXISTS'=23 +'EXPLAIN'=24 +'EXTRACT'=25 +'FALSE'=26 +'FOR'=27 +'FORMAT'=28 +'FROM'=29 +'FULL'=30 +'FUNCTIONS'=31 +'GRAPHVIZ'=32 +'GROUP'=33 +'GROUPING'=34 +'HAVING'=35 +'IN'=36 +'INNER'=37 +'INTEGER'=38 +'INTO'=39 +'IS'=40 +'JOIN'=41 +'LAST'=42 +'LEFT'=43 +'LIKE'=44 +'LIMIT'=45 +'LOGICAL'=46 +'MAPPED'=47 +'MATCH'=48 +'NATURAL'=49 +'NO'=50 +'NOT'=51 +'NULL'=52 
+'ON'=53 +'OPTIMIZED'=54 +'OPTION'=55 +'OR'=56 +'ORDER'=57 +'OUTER'=58 +'PARSED'=59 +'PHYSICAL'=60 +'PLAN'=61 +'QUERY'=62 +'RESET'=63 +'RIGHT'=64 +'RLIKE'=65 +'SCHEMAS'=66 +'SELECT'=67 +'SESSION'=68 +'SET'=69 +'SETS'=70 +'SHOW'=71 +'TABLE'=72 +'TABLES'=73 +'TEXT'=74 +'THEN'=75 +'TO'=76 +'TRUE'=77 +'TYPE'=78 +'USE'=79 +'USING'=80 +'VERIFY'=81 +'WHEN'=82 +'WHERE'=83 +'WITH'=84 +'='=85 +'<'=87 +'<='=88 +'>'=89 +'>='=90 +'+'=91 +'-'=92 +'*'=93 +'/'=94 +'%'=95 +'||'=96 diff --git a/sql/server/src/main/antlr/SqlBaseLexer.tokens b/sql/server/src/main/antlr/SqlBaseLexer.tokens new file mode 100644 index 00000000000..19b1d8e6bb9 --- /dev/null +++ b/sql/server/src/main/antlr/SqlBaseLexer.tokens @@ -0,0 +1,202 @@ +T__0=1 +T__1=2 +T__2=3 +T__3=4 +ALL=5 +ANALYZE=6 +ANALYZED=7 +AND=8 +ANY=9 +AS=10 +ASC=11 +BETWEEN=12 +BY=13 +CAST=14 +COLUMN=15 +COLUMNS=16 +CROSS=17 +DEBUG=18 +DESC=19 +DESCRIBE=20 +DISTINCT=21 +EXECUTABLE=22 +EXISTS=23 +EXPLAIN=24 +EXTRACT=25 +FALSE=26 +FOR=27 +FORMAT=28 +FROM=29 +FULL=30 +FUNCTIONS=31 +GRAPHVIZ=32 +GROUP=33 +GROUPING=34 +HAVING=35 +IN=36 +INNER=37 +INTEGER=38 +INTO=39 +IS=40 +JOIN=41 +LAST=42 +LEFT=43 +LIKE=44 +LIMIT=45 +LOGICAL=46 +MAPPED=47 +MATCH=48 +NATURAL=49 +NO=50 +NOT=51 +NULL=52 +ON=53 +OPTIMIZED=54 +OPTION=55 +OR=56 +ORDER=57 +OUTER=58 +PARSED=59 +PHYSICAL=60 +PLAN=61 +QUERY=62 +RESET=63 +RIGHT=64 +RLIKE=65 +SCHEMAS=66 +SELECT=67 +SESSION=68 +SET=69 +SETS=70 +SHOW=71 +TABLE=72 +TABLES=73 +TEXT=74 +THEN=75 +TO=76 +TRUE=77 +TYPE=78 +USE=79 +USING=80 +VERIFY=81 +WHEN=82 +WHERE=83 +WITH=84 +EQ=85 +NEQ=86 +LT=87 +LTE=88 +GT=89 +GTE=90 +PLUS=91 +MINUS=92 +ASTERISK=93 +SLASH=94 +PERCENT=95 +CONCAT=96 +STRING=97 +INTEGER_VALUE=98 +DECIMAL_VALUE=99 +IDENTIFIER=100 +DIGIT_IDENTIFIER=101 +QUOTED_IDENTIFIER=102 +BACKQUOTED_IDENTIFIER=103 +SIMPLE_COMMENT=104 +BRACKETED_COMMENT=105 +WS=106 +UNRECOGNIZED=107 +'('=1 +')'=2 +','=3 +'.'=4 +'ALL'=5 +'ANALYZE'=6 +'ANALYZED'=7 +'AND'=8 +'ANY'=9 +'AS'=10 +'ASC'=11 +'BETWEEN'=12 +'BY'=13 +'CAST'=14 
+'COLUMN'=15 +'COLUMNS'=16 +'CROSS'=17 +'DEBUG'=18 +'DESC'=19 +'DESCRIBE'=20 +'DISTINCT'=21 +'EXECUTABLE'=22 +'EXISTS'=23 +'EXPLAIN'=24 +'EXTRACT'=25 +'FALSE'=26 +'FOR'=27 +'FORMAT'=28 +'FROM'=29 +'FULL'=30 +'FUNCTIONS'=31 +'GRAPHVIZ'=32 +'GROUP'=33 +'GROUPING'=34 +'HAVING'=35 +'IN'=36 +'INNER'=37 +'INTEGER'=38 +'INTO'=39 +'IS'=40 +'JOIN'=41 +'LAST'=42 +'LEFT'=43 +'LIKE'=44 +'LIMIT'=45 +'LOGICAL'=46 +'MAPPED'=47 +'MATCH'=48 +'NATURAL'=49 +'NO'=50 +'NOT'=51 +'NULL'=52 +'ON'=53 +'OPTIMIZED'=54 +'OPTION'=55 +'OR'=56 +'ORDER'=57 +'OUTER'=58 +'PARSED'=59 +'PHYSICAL'=60 +'PLAN'=61 +'QUERY'=62 +'RESET'=63 +'RIGHT'=64 +'RLIKE'=65 +'SCHEMAS'=66 +'SELECT'=67 +'SESSION'=68 +'SET'=69 +'SETS'=70 +'SHOW'=71 +'TABLE'=72 +'TABLES'=73 +'TEXT'=74 +'THEN'=75 +'TO'=76 +'TRUE'=77 +'TYPE'=78 +'USE'=79 +'USING'=80 +'VERIFY'=81 +'WHEN'=82 +'WHERE'=83 +'WITH'=84 +'='=85 +'<'=87 +'<='=88 +'>'=89 +'>='=90 +'+'=91 +'-'=92 +'*'=93 +'/'=94 +'%'=95 +'||'=96 diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/ClientSqlException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/ClientSqlException.java new file mode 100644 index 00000000000..accca2a6a45 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/ClientSqlException.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +public abstract class ClientSqlException extends SqlException { + + protected ClientSqlException(String message, Object... 
args) { + super(message, args); + } + + protected ClientSqlException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + + protected ClientSqlException(String message, Throwable cause) { + super(message, cause); + } + + protected ClientSqlException(Throwable cause, String message, Object... args) { + super(cause, message, args); + } + + protected ClientSqlException(Throwable cause) { + super(cause); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/ServerSqlException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/ServerSqlException.java new file mode 100644 index 00000000000..e8548ab7f93 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/ServerSqlException.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +public abstract class ServerSqlException extends SqlException { + + protected ServerSqlException(String message, Object... args) { + super(message, args); + } + + protected ServerSqlException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + + protected ServerSqlException(String message, Throwable cause) { + super(message, cause); + } + + protected ServerSqlException(Throwable cause, String message, Object... 
args) { + super(cause, message, args); + } + + protected ServerSqlException(Throwable cause) { + super(cause); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/SqlException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/SqlException.java new file mode 100644 index 00000000000..d405649a247 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/SqlException.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +import org.elasticsearch.ElasticsearchException; + +import java.util.Locale; + +import static java.lang.String.format; + +public abstract class SqlException extends ElasticsearchException { + public SqlException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + + public SqlException(String message, Throwable cause) { + super(message, cause); + } + + public SqlException(String message, Object... args) { + this(null, message, args); + } + + public SqlException(Throwable cause, String message, Object... args) { + super(format(Locale.ROOT, message, args), cause); + } + + public SqlException(Throwable cause) { + super(cause); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/SqlIllegalArgumentException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/SqlIllegalArgumentException.java new file mode 100644 index 00000000000..ac90374621d --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/SqlIllegalArgumentException.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +public class SqlIllegalArgumentException extends ServerSqlException { + public SqlIllegalArgumentException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + + public SqlIllegalArgumentException(String message, Throwable cause) { + super(message, cause); + } + + public SqlIllegalArgumentException(String message, Object... args) { + this(null, message, args); + } + + public SqlIllegalArgumentException(Throwable cause, String message, Object... args) { + super(cause, message, args); + } + + public SqlIllegalArgumentException(String message) { + super(message); + } + + public SqlIllegalArgumentException(Throwable cause) { + super(cause); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/AnalysisException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/AnalysisException.java new file mode 100644 index 00000000000..3decbed9cc3 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/AnalysisException.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.analysis; + +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.sql.ClientSqlException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.Node; + +import java.util.Locale; + +import static java.lang.String.format; + +public class AnalysisException extends ClientSqlException { + + private final int line; + private final int column; + + public AnalysisException(Node source, String message, Object... args) { + super(message, args); + + Location loc = Location.EMPTY; + if (source != null && source.location() != null) { + loc = source.location(); + } + this.line = loc.getLineNumber(); + this.column = loc.getColumnNumber(); + } + + public AnalysisException(Node source, String message, Throwable cause) { + super(message, cause); + + Location loc = Location.EMPTY; + if (source != null && source.location() != null) { + loc = source.location(); + } + this.line = loc.getLineNumber(); + this.column = loc.getColumnNumber(); + } + + public int getLineNumber() { + return line; + } + + public int getColumnNumber() { + return column; + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + + @Override + public String getMessage() { + return format(Locale.ROOT, "line %s:%s: %s", getLineNumber(), getColumnNumber(), super.getMessage()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java new file mode 100644 index 00000000000..43bcc52d674 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java @@ -0,0 +1,1016 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import org.elasticsearch.xpack.sql.analysis.AnalysisException; +import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier.Failure; +import org.elasticsearch.xpack.sql.analysis.index.GetIndexResult; +import org.elasticsearch.xpack.sql.capabilities.Resolvables; +import org.elasticsearch.xpack.sql.expression.Alias; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.AttributeSet; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.NestedFieldAttribute; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.expression.SubQueryExpression; +import org.elasticsearch.xpack.sql.expression.TypedAttribute; +import org.elasticsearch.xpack.sql.expression.UnresolvedAlias; +import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute; +import org.elasticsearch.xpack.sql.expression.UnresolvedStar; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.FunctionDefinition; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.expression.function.Functions; +import org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.ArithmeticFunction; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.plan.logical.Aggregate; +import org.elasticsearch.xpack.sql.plan.logical.EsRelation; +import org.elasticsearch.xpack.sql.plan.logical.Filter; +import 
org.elasticsearch.xpack.sql.plan.logical.Join; +import org.elasticsearch.xpack.sql.plan.logical.LocalRelation; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.plan.logical.Project; +import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias; +import org.elasticsearch.xpack.sql.plan.logical.UnaryPlan; +import org.elasticsearch.xpack.sql.plan.logical.UnresolvedRelation; +import org.elasticsearch.xpack.sql.plan.logical.With; +import org.elasticsearch.xpack.sql.rule.Rule; +import org.elasticsearch.xpack.sql.rule.RuleExecutor; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.type.CompoundDataType; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Stream; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toMap; +import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine; + +public class Analyzer extends RuleExecutor { + + private final FunctionRegistry functionRegistry; + + public Analyzer(FunctionRegistry functionRegistry) { + this.functionRegistry = functionRegistry; + } + + @Override + protected Iterable.Batch> batches() { + Batch substitution = new Batch("Substitution", + new CTESubstitution()); + Batch resolution = new Batch("Resolution", + new ResolveTable(), + new ResolveRefs(), + new 
ResolveOrdinalInOrderByAndGroupBy(), + new ResolveMissingRefs(), + new ResolveFunctions(), + new ResolveAliases(), + new ProjectedAggregations(), + new ResolveAggsInHavingAndOrderBy() + //new ImplicitCasting() + ); + // TODO: this might be removed since the deduplication happens already in ResolveFunctions + Batch deduplication = new Batch("Deduplication", + new PruneDuplicateFunctions()); + + return Arrays.asList(substitution, resolution); + } + + public LogicalPlan analyze(LogicalPlan plan) { + return analyze(plan, true); + } + + public LogicalPlan analyze(LogicalPlan plan, boolean verify) { + if (plan.analyzed()) { + return plan; + } + return verify ? verify(execute(plan)) : execute(plan); + } + + public ExecutionInfo debugAnalyze(LogicalPlan plan) { + return plan.analyzed() ? null : executeWithInfo(plan); + } + + public LogicalPlan verify(LogicalPlan plan) { + Collection failures = Verifier.verify(plan); + if (!failures.isEmpty()) { + throw new VerificationException(failures); + } + return plan; + } + + public Map, String> verifyFailures(LogicalPlan plan) { + Collection failures = Verifier.verify(plan); + return failures.stream().collect(toMap(Failure::source, Failure::message)); + } + + @SuppressWarnings("unchecked") + private static E resolveExpression(E expression, LogicalPlan plan, boolean lenient) { + return (E) expression.transformUp(e -> { + if (e instanceof UnresolvedAttribute) { + UnresolvedAttribute ua = (UnresolvedAttribute) e; + Attribute a = resolveAgainstList(ua, plan.output(), lenient); + return (a != null ? 
a : e); + } + return e; + }); + } + + // + // Shared methods around the analyzer rules + // + + private static Attribute resolveAgainstList(UnresolvedAttribute u, List attrList, boolean lenient) { + List matches = new ArrayList<>(); + + // use the qualifier if present + if (u.qualifier() != null) { + for (Attribute attribute : attrList) { + if (!attribute.synthetic()) { + if (Objects.equals(u.qualifiedName(), attribute.qualifiedName())) { + matches.add(attribute); + } + if (attribute instanceof NestedFieldAttribute) { + // since u might be unqualified but the parent shows up as a qualifier + if (Objects.equals(u.qualifiedName(), attribute.name())) { + matches.add(attribute.withLocation(u.location())); + } + } + } + } + } + + // if none is found, try to do a match just on the name (to filter out missing qualifiers) + if (matches.isEmpty()) { + for (Attribute attribute : attrList) { + if (!attribute.synthetic() && Objects.equals(u.name(), attribute.name())) { + matches.add(attribute.withLocation(u.location())); + } + } + } + + // none found + if (matches.isEmpty()) { + return null; + } + + if (matches.size() == 1) { + return matches.get(0); + } + + // too many references - should it be ignored? 
+ // TODO: move away from exceptions inside the analyzer + if (!lenient) { + throw new AnalysisException(u, "Reference %s is ambiguous, matches any of %s", u.nodeString(), matches); + } + + return null; + } + + private static boolean hasStar(List exprs) { + for (Expression expression : exprs) { + if (expression instanceof UnresolvedStar) { + return true; + } + } + return false; + } + + private static boolean containsAggregate(List list) { + return Expressions.anyMatch(list, Functions::isAggregate); + } + + private static boolean containsAggregate(Expression exp) { + return containsAggregate(singletonList(exp)); + } + + + private static class CTESubstitution extends AnalyzeRule { + + @Override + protected LogicalPlan rule(With plan) { + return substituteCTE(plan.child(), plan.subQueries()); + } + + private LogicalPlan substituteCTE(LogicalPlan p, Map subQueries) { + if (p instanceof UnresolvedRelation) { + UnresolvedRelation ur = (UnresolvedRelation) p; + SubQueryAlias subQueryAlias = subQueries.get(ur.table().index()); + if (subQueryAlias != null) { + if (ur.alias() != null) { + return new SubQueryAlias(ur.location(), subQueryAlias, ur.alias()); + } + return subQueryAlias; + } + return ur; + } + // inlined queries (SELECT 1 + 2) are already resolved + else if (p instanceof LocalRelation) { + return p; + } + + return p.transformExpressionsDown(e -> { + if (e instanceof SubQueryExpression) { + SubQueryExpression sq = (SubQueryExpression) e; + return sq.withQuery(substituteCTE(sq.query(), subQueries)); + } + return e; + }); + } + + @Override + protected boolean skipResolved() { + return false; + } + } + + private class ResolveTable extends AnalyzeRule { + @Override + protected LogicalPlan rule(UnresolvedRelation plan) { + TableIdentifier table = plan.table(); + GetIndexResult index = SqlSession.currentContext().getIndexResult; + if (index.isValid() == false) { + return plan.unresolvedMessage().equals(index.toString()) ? 
plan : new UnresolvedRelation(plan.location(), plan.table(), + plan.alias(), index.toString()); + } + assert index.matches(table.index()); + LogicalPlan logicalPlan = new EsRelation(plan.location(), index.get()); + SubQueryAlias sa = new SubQueryAlias(plan.location(), logicalPlan, table.index()); + + if (plan.alias() != null) { + sa = new SubQueryAlias(plan.location(), sa, plan.alias()); + } + + return sa; + } + } + + private static class ResolveRefs extends AnalyzeRule { + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + // if the children are not resolved, there's no way the node can be resolved + if (!plan.childrenResolved()) { + return plan; + } + + // okay, there's a chance so let's get started + + if (plan instanceof Project) { + Project p = (Project) plan; + if (hasStar(p.projections())) { + return new Project(p.location(), p.child(), expandProjections(p.projections(), p.child())); + } + } + else if (plan instanceof Aggregate) { + Aggregate a = (Aggregate) plan; + if (hasStar(a.aggregates())) { + return new Aggregate(a.location(), a.child(), a.groupings(), + expandProjections(a.aggregates(), a.child())); + } + // if the grouping is unresolved but the aggs are, use the latter to resolve the former + // solves the case of queries declaring an alias in SELECT and referring to it in GROUP BY + if (!a.expressionsResolved() && Resolvables.resolved(a.aggregates())) { + List groupings = a.groupings(); + List newGroupings = new ArrayList<>(); + List resolved = Expressions.asAttributes(a.aggregates()); + boolean changed = false; + for (int i = 0; i < groupings.size(); i++) { + Expression grouping = groupings.get(i); + if (grouping instanceof UnresolvedAttribute) { + Attribute maybeResolved = resolveAgainstList((UnresolvedAttribute) grouping, resolved, true); + if (maybeResolved != null) { + changed = true; + // use the matched expression (not its attribute) + grouping = a.aggregates().get(i); + } + } + newGroupings.add(grouping); + } + + return changed ? 
new Aggregate(a.location(), a.child(), newGroupings, a.aggregates()) : a; + } + } + + else if (plan instanceof Join) { + Join j = (Join) plan; + if (!j.duplicatesResolved()) { + LogicalPlan deduped = dedupRight(j.left(), j.right()); + return new Join(j.location(), j.left(), deduped, j.type(), j.condition()); + } + } + // try resolving the order expression (the children are resolved as this point) + else if (plan instanceof OrderBy) { + OrderBy o = (OrderBy) plan; + if (!o.resolved()) { + List resolvedOrder = o.order().stream() + .map(or -> resolveExpression(or, o.child(), true)) + .collect(toList()); + return new OrderBy(o.location(), o.child(), resolvedOrder); + } + } + + if (log.isTraceEnabled()) { + log.trace("Attempting to resolve {}", plan.nodeString()); + } + + return plan.transformExpressionsUp(e -> { + if (e instanceof UnresolvedAttribute) { + UnresolvedAttribute u = (UnresolvedAttribute) e; + NamedExpression named = resolveAgainstList(u, + plan.children().stream() + .flatMap(c -> c.output().stream()) + .collect(toList()), + false); + // if resolved, return it; otherwise keep it in place to be resolved later + if (named != null) { + // it's a compound type so convert it + if (named instanceof TypedAttribute && ((TypedAttribute) named).dataType() instanceof CompoundDataType) { + named = new UnresolvedStar(e.location(), + new UnresolvedAttribute(e.location(), u.name(), u.qualifier())); + } + + if (log.isTraceEnabled()) { + log.trace("Resolved {} to {}", u, named); + } + return named; + } + } + //TODO: likely have to expand * inside functions as well + return e; + }); + } + + private List expandProjections(List projections, LogicalPlan child) { + return projections.stream().flatMap(e -> { + // check if there's a qualifier + // no - means only top-level + // it is - return only that level + if (e instanceof UnresolvedStar) { + List output = child.output(); + UnresolvedStar us = (UnresolvedStar) e; + + Stream stream = output.stream(); + + if (us.qualifier() == 
null) { + stream = stream.filter(a -> !(a instanceof NestedFieldAttribute)); + } + + // if there's a qualifier, inspect that level + if (us.qualifier() != null) { + // qualifier is selected, need to resolve that first. + Attribute qualifier = resolveAgainstList(us.qualifier(), output, false); + stream = stream.filter(a -> (a instanceof NestedFieldAttribute) + && Objects.equals(a.qualifier(), qualifier.qualifier()) + && Objects.equals(((NestedFieldAttribute) a).parentPath(), qualifier.name())); + } + + return stream.filter(a -> !(a.dataType() instanceof CompoundDataType)); + } + else if (e instanceof UnresolvedAlias) { + UnresolvedAlias ua = (UnresolvedAlias) e; + if (ua.child() instanceof UnresolvedStar) { + return child.output().stream(); + } + return Stream.of(e); + } + return Stream.of(e); + }) + .map(NamedExpression.class::cast) + .collect(toList()); + } + + // generate a new (right) logical plan with different IDs for all conflicting attributes + private LogicalPlan dedupRight(LogicalPlan left, LogicalPlan right) { + AttributeSet conflicting = left.outputSet().intersect(right.outputSet()); + + if (log.isTraceEnabled()) { + log.trace("Trying to resolve conflicts {} between left {} and right {}", conflicting, left.nodeString(), right.nodeString()); + } + + throw new UnsupportedOperationException("don't know how to resolve conficting IDs yet"); + } + } + + // Allow ordinal positioning in order/sort by (quite useful when dealing with aggs) + // Note that ordering starts at 1 + private static class ResolveOrdinalInOrderByAndGroupBy extends AnalyzeRule { + + @Override + protected boolean skipResolved() { + return false; + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (!plan.childrenResolved()) { + return plan; + } + if (plan instanceof OrderBy) { + OrderBy orderBy = (OrderBy) plan; + boolean changed = false; + + List newOrder = new ArrayList<>(orderBy.order().size()); + List ordinalReference = orderBy.child().output(); + int max = 
ordinalReference.size(); + + for (Order order : orderBy.order()) { + Integer ordinal = findOrdinal(order.child()); + if (ordinal != null) { + changed = true; + if (ordinal > 0 && ordinal <= max) { + newOrder.add(new Order(order.location(), orderBy.child().output().get(ordinal - 1), order.direction())); + } + else { + throw new AnalysisException(order, "Invalid %d specified in OrderBy (valid range is [1, %d])", ordinal, max); + } + } + else { + newOrder.add(order); + } + } + + return changed ? new OrderBy(orderBy.location(), orderBy.child(), newOrder) : orderBy; + } + + if (plan instanceof Aggregate) { + Aggregate agg = (Aggregate) plan; + + if (!Resolvables.resolved(agg.aggregates())) { + return agg; + } + + boolean changed = false; + List newGroupings = new ArrayList<>(agg.groupings().size()); + List aggregates = agg.aggregates(); + int max = aggregates.size(); + + for (Expression exp : agg.groupings()) { + Integer ordinal = findOrdinal(exp); + if (ordinal != null) { + changed = true; + if (ordinal > 0 && ordinal <= max) { + NamedExpression reference = aggregates.get(ordinal - 1); + if (containsAggregate(reference)) { + throw new AnalysisException(exp, "Group ordinal %d refers to an aggregate function %s which is not compatible/allowed with GROUP BY", ordinal, reference.nodeName()); + } + newGroupings.add(reference); + } + else { + throw new AnalysisException(exp, "Invalid ordinal %d specified in Aggregate (valid range is [1, %d])", ordinal, max); + } + } + else { + newGroupings.add(exp); + } + } + + return changed ? 
new Aggregate(agg.location(), agg.child(), newGroupings, aggregates) : agg; + } + + return plan; + } + + private Integer findOrdinal(Expression expression) { + if (expression instanceof Literal) { + Literal l = (Literal) expression; + if (l.dataType().isInteger()) { + Object v = l.value(); + if (v instanceof Number) { + return Integer.valueOf(((Number) v).intValue()); + } + } + } + return null; + } + } + + // It is valid to filter (including HAVING) or sort by attributes not present in the SELECT clause. + // This rule pushed down the attributes for them to be resolved then projects them away. + // As such this rule is an extended version of ResolveRefs + private static class ResolveMissingRefs extends AnalyzeRule { + + private static class AggGroupingFailure { + final List expectedGrouping; + + private AggGroupingFailure(List expectedGrouping) { + this.expectedGrouping = expectedGrouping; + } + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + + if (plan instanceof OrderBy && !plan.resolved() && plan.childrenResolved()) { + OrderBy o = (OrderBy) plan; + List maybeResolved = o.order().stream() + .map(or -> tryResolveExpression(or, o.child())) + .collect(toList()); + + AttributeSet resolvedRefs = Expressions.references(maybeResolved.stream() + .filter(Expression::resolved) + .collect(toList())); + + + AttributeSet missing = resolvedRefs.substract(o.child().outputSet()); + + if (!missing.isEmpty()) { + // Add missing attributes but project them away afterwards + List failedAttrs = new ArrayList<>(); + LogicalPlan newChild = propagateMissing(o.child(), missing, failedAttrs); + + // resolution failed and the failed expressions might contain resolution information so copy it over + if (!failedAttrs.isEmpty()) { + List newOrders = new ArrayList<>(); + // transform the orders with the failed information + for (Order order : o.order()) { + Order transformed = (Order) order.transformUp(ua -> resolveMetadataToMessage(ua, failedAttrs, "order"), + 
UnresolvedAttribute.class); + newOrders.add(order.equals(transformed) ? order : transformed); + } + + return o.order().equals(newOrders) ? o : new OrderBy(o.location(), o.child(), newOrders); + } + + // everything worked + return new Project(o.location(), new OrderBy(o.location(), newChild, maybeResolved), o.child().output()); + } + + if (!maybeResolved.equals(o.order())) { + return new OrderBy(o.location(), o.child(), maybeResolved); + } + } + + if (plan instanceof Filter && !plan.resolved() && plan.childrenResolved()) { + Filter f = (Filter) plan; + Expression maybeResolved = tryResolveExpression(f.condition(), f.child()); + AttributeSet resolvedRefs = new AttributeSet(maybeResolved.references().stream() + .filter(Expression::resolved) + .collect(toList())); + + AttributeSet missing = resolvedRefs.substract(f.child().outputSet()); + + if (!missing.isEmpty()) { + // Again, add missing attributes and project them away + List failedAttrs = new ArrayList<>(); + LogicalPlan newChild = propagateMissing(f.child(), missing, failedAttrs); + + // resolution failed and the failed expressions might contain resolution information so copy it over + if (!failedAttrs.isEmpty()) { + // transform the orders with the failed information + Expression transformed = f.condition().transformUp(ua -> resolveMetadataToMessage(ua, failedAttrs, "filter"), + UnresolvedAttribute.class); + + return f.condition().equals(transformed) ? 
f : new Filter(f.location(), f.child(), transformed); + } + + return new Project(f.location(), new Filter(f.location(), newChild, maybeResolved), f.child().output()); + } + + if (!maybeResolved.equals(f.condition())) { + return new Filter(f.location(), f.child(), maybeResolved); + } + } + + return plan; + } + + static E tryResolveExpression(E exp, LogicalPlan plan) { + E resolved = resolveExpression(exp, plan, true); + if (!resolved.resolved()) { + // look at unary trees but ignore subqueries + if (plan.children().size() == 1 && !(plan instanceof SubQueryAlias)) { + return tryResolveExpression(resolved, plan.children().get(0)); + } + } + return resolved; + } + + + private static LogicalPlan propagateMissing(LogicalPlan plan, AttributeSet missing, List failed) { + // no more attributes, bail out + if (missing.isEmpty()) { + return plan; + } + + if (plan instanceof Project) { + Project p = (Project) plan; + AttributeSet diff = missing.substract(p.child().outputSet()); + return new Project(p.location(), propagateMissing(p.child(), diff, failed), combine(p.projections(), missing)); + } + + if (plan instanceof Aggregate) { + Aggregate a = (Aggregate) plan; + // missing attributes can only be grouping expressions + for (Attribute m : missing) { + // but we don't can't add an agg if the group is missing + if (!Expressions.anyMatch(a.groupings(), m::semanticEquals)) { + if (m instanceof Attribute) { + // pass failure information to help the verifier + m = new UnresolvedAttribute(m.location(), m.name(), m.qualifier(), null, null, + new AggGroupingFailure(Expressions.names(a.groupings()))); + } + failed.add(m); + } + } + // propagation failed, return original plan + if (!failed.isEmpty()) { + return plan; + } + return new Aggregate(a.location(), a.child(), a.groupings(), combine(a.aggregates(), missing)); + } + + // LeafPlans are tables and BinaryPlans are joins so pushing can only happen on unary + if (plan instanceof UnaryPlan) { + return 
plan.replaceChildren(singletonList(propagateMissing(((UnaryPlan) plan).child(), missing, failed))); + } + + failed.addAll(missing); + return plan; + } + + private static UnresolvedAttribute resolveMetadataToMessage(UnresolvedAttribute ua, List attrs, String actionName) { + for (Attribute attr : attrs) { + if (ua.resolutionMetadata() == null && attr.name().equals(ua.name())) { + if (attr instanceof UnresolvedAttribute) { + UnresolvedAttribute fua = (UnresolvedAttribute) attr; + Object metadata = fua.resolutionMetadata(); + if (metadata instanceof AggGroupingFailure) { + List names = ((AggGroupingFailure) metadata).expectedGrouping; + return ua.withUnresolvedMessage( + "Cannot " + actionName + " by non-grouped column [" + ua.qualifiedName() + "], expected " + names); + } + } + } + } + return ua; + }; + } + + // to avoid creating duplicate functions + // this rule does two iterations + // 1. collect all functions + // 2. search unresolved functions and first try resolving them from already 'seen' functions + private class ResolveFunctions extends AnalyzeRule { + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + Map> seen = new LinkedHashMap<>(); + // collect (and replace duplicates) + LogicalPlan p = plan.transformExpressionsUp(e -> collectResolvedAndReplace(e, seen)); + // resolve based on seen + return resolve(p, seen); + } + + private Expression collectResolvedAndReplace(Expression e, Map> seen) { + if (e instanceof Function && e.resolved()) { + Function f = (Function) e; + String fName = f.functionName(); + // the function is resolved and its name normalized already + List list = getList(seen, fName); + for (Function seenFunction : list) { + if (seenFunction != f && f.arguments().equals(seenFunction.arguments())) { + return seenFunction; + } + } + list.add(f); + } + + return e; + } + + protected LogicalPlan resolve(LogicalPlan plan, Map> seen) { + return plan.transformExpressionsUp(e -> { + if (e instanceof UnresolvedFunction) { + UnresolvedFunction 
uf = (UnresolvedFunction) e; + + if (uf.analyzed()) { + return uf; + } + + String name = uf.name(); + + if (hasStar(uf.arguments())) { + if (uf.distinct()) { + throw new AnalysisException(uf, "DISTINCT and wildcard/star are not compatible"); + } + // TODO: might be removed + // dedicated count optimization + if (name.toUpperCase(Locale.ROOT).equals("COUNT")) { + uf = new UnresolvedFunction(uf.location(), uf.name(), uf.distinct(), singletonList(Literal.of(uf.arguments().get(0).location(), Integer.valueOf(1)))); + } + } + + if (!uf.childrenResolved()) { + return uf; + } + + String normalizedName = functionRegistry.concreteFunctionName(name); + + List list = getList(seen, normalizedName); + // first try to resolve from seen functions + if (!list.isEmpty()) { + for (Function seenFunction : list) { + if (uf.arguments().equals(seenFunction.arguments())) { + return seenFunction; + } + } + } + + // not seen before, use the registry + if (!functionRegistry.functionExists(name)) { + + // try to find alternatives + Set names = new LinkedHashSet<>(); + for (FunctionDefinition def : functionRegistry.listFunctions()) { + names.add(def.name()); + names.addAll(def.aliases()); + } + + List matches = StringUtils.findSimilar(normalizedName, names); + String message = matches.isEmpty() ? uf.unresolvedMessage() : UnresolvedFunction.errorMessage(normalizedName, matches); + return new UnresolvedFunction(uf.location(), uf.name(), uf.distinct(), uf.children(), true, message); + } + // TODO: look into Generator for significant terms, etc.. 
+ Function f = functionRegistry.resolveFunction(uf, SqlSession.currentContext().configuration); + + list.add(f); + return f; + } + return e; + }); + } + + private List getList(Map> seen, String name) { + List list = seen.get(name); + if (list == null) { + list = new ArrayList<>(); + seen.put(name, list); + } + return list; + } + } + + private static class ResolveAliases extends AnalyzeRule { + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (plan instanceof Project) { + Project p = (Project) plan; + if (p.childrenResolved() && hasUnresolvedAliases(p.projections())) { + return new Project(p.location(), p.child(), assignAliases(p.projections())); + } + return p; + } + if (plan instanceof Aggregate) { + Aggregate a = (Aggregate) plan; + if (a.childrenResolved() && hasUnresolvedAliases(a.aggregates())) { + return new Aggregate(a.location(), a.child(), a.groupings(), assignAliases(a.aggregates())); + } + return a; + } + + return plan; + } + + private boolean hasUnresolvedAliases(List expressions) { + return (expressions != null && expressions.stream().anyMatch(e -> e instanceof UnresolvedAlias)); + } + + private List assignAliases(List exprs) { + List newExpr = new ArrayList<>(exprs.size()); + for (int i = 0; i < exprs.size(); i++) { + NamedExpression expr = exprs.get(i); + NamedExpression transformed = (NamedExpression) expr.transformUp(ua -> { + Expression child = ua.child(); + if (child instanceof NamedExpression) { + return child; + } + if (!child.resolved()) { + return ua; + } + if (child instanceof Cast) { + Cast c = (Cast) child; + if (c.field() instanceof NamedExpression) { + return new Alias(c.location(), ((NamedExpression) c.field()).name(), c); + } + } + //TODO: maybe add something closer to SQL + return new Alias(child.location(), child.toString(), child); + }, UnresolvedAlias.class); + newExpr.add(expr.equals(transformed) ? 
expr : transformed); + } + return newExpr; + } + } + + + // + // Replace a project with aggregation into an aggregation + // + private static class ProjectedAggregations extends AnalyzeRule { + + @Override + protected LogicalPlan rule(Project p) { + if (containsAggregate(p.projections())) { + return new Aggregate(p.location(), p.child(), emptyList(), p.projections()); + } + return p; + } + }; + + // + // Handle aggs in HAVING and ORDER BY clause. To help folding any aggs not found in Aggregation + // will be pushed down to the Aggregate and then projected. This also simplifies the Verifier's job. + // + private class ResolveAggsInHavingAndOrderBy extends AnalyzeRule { + + @Override + protected boolean skipResolved() { + return false; + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + // HAVING = Filter followed by an Agg + if (plan instanceof Filter) { + Filter f = (Filter) plan; + if (f.child() instanceof Aggregate && f.child().resolved()) { + Aggregate agg = (Aggregate) f.child(); + + Set missing = null; + Expression condition = f.condition(); + + // the condition might contain an agg (AVG(salary)) that could have been resolved + // (salary cannot be pushed down to Aggregate since there's no grouping and thus the function wasn't resolved either) + + // so try resolving the condition in one go through a 'dummy' aggregate + if (!condition.resolved()) { + // that's why try to resolve the condition + Aggregate tryResolvingCondition = new Aggregate(agg.location(), agg.child(), agg.groupings(), + singletonList(new Alias(f.location(), ".having", condition))); + + LogicalPlan conditionResolved = analyze(tryResolvingCondition, false); + + // if it got resolved + if (conditionResolved.resolved()) { + // replace the condition with the resolved one + condition = ((Alias) ((Aggregate) conditionResolved).aggregates().get(0)).child(); + } else { + // else bail out + return plan; + } + } + + missing = findMissingAggregate(agg, condition); + + if 
(!missing.isEmpty()) { + Aggregate newAgg = new Aggregate(agg.location(), agg.child(), agg.groupings(), combine(agg.aggregates(), missing)); + Filter newFilter = new Filter(f.location(), newAgg, condition); + // preserve old output + return new Project(f.location(), newFilter, f.output()); + } + } + return plan; + } + + return plan; + } + + private Set findMissingAggregate(Aggregate target, Expression from) { + Set missing = new LinkedHashSet<>(); + + for (Expression filterAgg : from.collect(Functions::isAggregate)) { + if (!Expressions.anyMatch(target.aggregates(), + a -> { + Attribute attr = Expressions.attribute(a); + return attr != null && attr.semanticEquals(Expressions.attribute(filterAgg)); + })) { + missing.add(Expressions.wrapAsNamed(filterAgg)); + } + } + + return missing; + } + } + + private class PruneDuplicateFunctions extends AnalyzeRule { + + @Override + protected boolean skipResolved() { + return false; + } + + @Override + public LogicalPlan rule(LogicalPlan plan) { + List seen = new ArrayList<>(); + LogicalPlan p = plan.transformExpressionsUp(e -> rule(e, seen)); + return p; + } + + private Expression rule(Expression e, List seen) { + if (e instanceof Function) { + Function f = (Function) e; + for (Function seenFunction : seen) { + if (seenFunction != f && functionsEquals(f, seenFunction)) { + return seenFunction; + } + } + seen.add(f); + } + + return e; + } + + private boolean functionsEquals(Function f, Function seenFunction) { + return f.name().equals(seenFunction.name()) && f.arguments().equals(seenFunction.arguments()); + } + } + + private class ImplicitCasting extends AnalyzeRule { + + @Override + protected boolean skipResolved() { + return false; + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + return plan.transformExpressionsDown(this::implicitCast); + } + + private Expression implicitCast(Expression e) { + if (!e.childrenResolved()) { + return e; + } + + Expression left = null, right = null; + + // BinaryOperations are 
ignored as they are pushed down to ES + // and casting (and thus Aliasing when folding) gets in the way + + if (e instanceof ArithmeticFunction) { + ArithmeticFunction f = (ArithmeticFunction) e; + left = f.left(); + right = f.right(); + } + + if (left != null) { + DataType l = left.dataType(); + DataType r = right.dataType(); + if (!l.same(r)) { + DataType common = DataTypeConversion.commonType(l, r); + if (common == null) { + return e; + } + left = l.same(common) ? left : new Cast(left.location(), left, common); + right = r.same(common) ? right : new Cast(right.location(), right, common); + return e.replaceChildren(Arrays.asList(left, right)); + } + } + + return e; + } + } + + abstract static class AnalyzeRule extends Rule { + + // transformUp (post-order) - that is first children and then the node + // but with a twist; only if the tree is not resolved or analyzed + @Override + public final LogicalPlan apply(LogicalPlan plan) { + return plan.transformUp(t -> t.analyzed() || (skipResolved() && t.resolved()) ? t : rule(t), typeToken()); + } + + @Override + protected abstract LogicalPlan rule(SubPlan plan); + + protected boolean skipResolved() { + return true; + } + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzer.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzer.java new file mode 100644 index 00000000000..17a5305a4f9 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzer.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.UnresolvedRelation; + +import java.util.ArrayList; +import java.util.List; + +import static java.util.Collections.emptyList; + +// Since the pre-analyzer only inspect (and does NOT transform) the tree +// it is not built as a rule executor. +// Further more it applies 'the rules' only once and needs to return some +// state back. +public class PreAnalyzer { + + public static class PreAnalysis { + public static final PreAnalysis EMPTY = new PreAnalysis(emptyList()); + + public final List indices; + + PreAnalysis(List indices) { + this.indices = indices; + } + } + + public PreAnalysis preAnalyze(LogicalPlan plan) { + if (plan.analyzed()) { + return PreAnalysis.EMPTY; + } + + return doPreAnalyze(plan); + } + + private PreAnalysis doPreAnalyze(LogicalPlan plan) { + List indices = new ArrayList<>(); + + plan.forEachUp(p -> indices.add(p.table().index()), UnresolvedRelation.class); + + // mark plan as preAnalyzed (if it were marked, there would be no analysis) + plan.forEachUp(LogicalPlan::setPreAnalyzed); + + return new PreAnalysis(indices); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerificationException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerificationException.java new file mode 100644 index 00000000000..5ee06f51060 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerificationException.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import java.util.Collection; +import java.util.Locale; +import java.util.stream.Collectors; + +import org.elasticsearch.xpack.sql.analysis.AnalysisException; +import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier.Failure; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import static java.lang.String.format; + +public class VerificationException extends AnalysisException { + + private final Collection failures; + + protected VerificationException(Collection sources) { + super(null, StringUtils.EMPTY); + failures = sources; + } + + @Override + public String getMessage() { + return failures.stream() + .map(f -> format(Locale.ROOT, "line %s:%s: %s", f.source().location().getLineNumber(), f.source().location().getColumnNumber(), f.message())) + .collect(Collectors.joining(StringUtils.NEW_LINE, "Found " + failures.size() + " problem(s)\n", StringUtils.EMPTY)); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java new file mode 100644 index 00000000000..992975288fc --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -0,0 +1,350 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import org.elasticsearch.xpack.sql.capabilities.Unresolvable; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.FunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.Functions; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.plan.logical.Aggregate; +import org.elasticsearch.xpack.sql.plan.logical.Filter; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +import static java.lang.String.format; + +abstract class Verifier { + + static class Failure { + private final Node source; + private final String message; + + Failure(Node source, String message) { + this.source = source; + this.message = message; + } + + Node source() { + return source; + } + + String message() { + return message; + } + + @Override + public int hashCode() { + return source.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Verifier.Failure other = (Verifier.Failure) obj; + return Objects.equals(source, other.source); + } + + @Override + public String toString() { + return message; + } + } 
+ + private static Failure fail(Node source, String message, Object... args) { + return new Failure(source, format(Locale.ROOT, message, args)); + } + + static Collection verify(LogicalPlan plan) { + Set failures = new LinkedHashSet<>(); + + // start bottom-up + plan.forEachUp(p -> { + + if (p.analyzed()) { + return; + } + + // if the children are unresolved, this node will also so counting it will only add noise + if (!p.childrenResolved()) { + return; + } + + Set localFailures = new LinkedHashSet<>(); + + // + // First handle usual suspects + // + + if (p instanceof Unresolvable) { + localFailures.add(fail(p, ((Unresolvable) p).unresolvedMessage())); + } + else { + // then take a look at the expressions + p.forEachExpressions(e -> { + // everything is fine, skip expression + if (e.resolved()) { + return; + } + + e.forEachUp(ae -> { + // we're only interested in the children + if (!ae.childrenResolved()) { + return; + } + // again the usual suspects + if (ae instanceof Unresolvable) { + // handle Attributes different to provide more context + if (ae instanceof UnresolvedAttribute) { + UnresolvedAttribute ua = (UnresolvedAttribute) ae; + boolean useQualifier = ua.qualifier() != null; + List potentialMatches = new ArrayList<>(); + for (Attribute a : p.intputSet()) { + potentialMatches.add(useQualifier ? 
a.qualifiedName() : a.name()); + } + + List matches = StringUtils.findSimilar(ua.qualifiedName(), potentialMatches); + if (!matches.isEmpty()) { + ae = new UnresolvedAttribute(ua.location(), ua.name(), ua.qualifier(), UnresolvedAttribute.errorMessage(ua.qualifiedName(), matches)); + } + } + + localFailures.add(fail(ae, ((Unresolvable) ae).unresolvedMessage())); + return; + } + // type resolution + if (ae.typeResolved().unresolved()) { + localFailures.add(fail(ae, ae.typeResolved().message())); + } + }); + }); + } + failures.addAll(localFailures); + }); + + // Concrete verifications + + // + // if there are no (major) unresolved failures, do more in-depth analysis + + if (failures.isEmpty()) { + Map resolvedFunctions = new LinkedHashMap<>(); + + // collect Function to better reason about encountered attributes + plan.forEachExpressionsDown(e -> { + if (e.resolved() && e instanceof Function) { + Function f = (Function) e; + resolvedFunctions.put(f.functionId(), f); + } + }); + + // for filtering out duplicated errors + final Set groupingFailures = new LinkedHashSet<>(); + + plan.forEachDown(p -> { + if (p.analyzed()) { + return; + } + + // if the children are unresolved, so will this node; counting it will only add noise + if (!p.childrenResolved()) { + return; + } + + Set localFailures = new LinkedHashSet<>(); + + if (!groupingFailures.contains(p)) { + checkGroupBy(p, localFailures, resolvedFunctions, groupingFailures); + } + // everything checks out + // mark the plan as analyzed + if (localFailures.isEmpty()) { + p.setAnalyzed(); + } + + failures.addAll(localFailures); + }); + } + + return failures; + } + + /** + * Check validity of Aggregate/GroupBy. + * This rule is needed for two reasons: + * 1. a user might specify an invalid aggregate (SELECT foo GROUP BY bar) + * 2. the order/having might contain a non-grouped attribute. 
This is typically caught by the Analyzer however if wrapped in a function (ABS()) it gets resolved + * (because the expression gets resolved little by little without being pushed down, without the Analyzer modifying anything. + */ + private static boolean checkGroupBy(LogicalPlan p, Set localFailures, Map resolvedFunctions, Set groupingFailures) { + return checkGroupByAgg(p, localFailures, groupingFailures, resolvedFunctions) + && checkGroupByOrder(p, localFailures, groupingFailures, resolvedFunctions) + && checkGroupByHaving(p, localFailures, groupingFailures, resolvedFunctions); + } + + // check whether an orderBy failed + private static boolean checkGroupByOrder(LogicalPlan p, Set localFailures, Set groupingFailures, Map functions) { + if (p instanceof OrderBy) { + OrderBy o = (OrderBy) p; + if (o.child() instanceof Aggregate) { + Aggregate a = (Aggregate) o.child(); + + Map> missing = new LinkedHashMap<>(); + o.order().forEach(oe -> oe.collectFirstChildren(c -> checkGroupMatch(c, oe, a.groupings(), missing, functions))); + + if (!missing.isEmpty()) { + String plural = missing.size() > 1 ? 
"s" : StringUtils.EMPTY; + // get the location of the first missing expression as the order by might be on a different line + localFailures.add( + fail(missing.values().iterator().next(), "Cannot order by non-grouped column" + plural + " %s, expected %s", + Expressions.names(missing.keySet()), + Expressions.names(a.groupings()))); + groupingFailures.add(a); + return false; + } + } + } + return true; + } + + + private static boolean checkGroupByHaving(LogicalPlan p, Set localFailures, Set groupingFailures, Map functions) { + if (p instanceof Filter) { + Filter f = (Filter) p; + if (f.child() instanceof Aggregate) { + Aggregate a = (Aggregate) f.child(); + + Map> missing = new LinkedHashMap<>(); + Expression condition = f.condition(); + condition.collectFirstChildren(c -> checkGroupMatch(c, condition, a.groupings(), missing, functions)); + + if (!missing.isEmpty()) { + String plural = missing.size() > 1 ? "s" : StringUtils.EMPTY; + localFailures.add(fail(condition, "Cannot filter by non-grouped column" + plural + " %s, expected %s", + Expressions.names(missing.keySet()), + Expressions.names(a.groupings()))); + groupingFailures.add(a); + return false; + } + } + } + return true; + } + // check whether plain columns specified in an agg are mentioned in the group-by + private static boolean checkGroupByAgg(LogicalPlan p, Set localFailures, Set groupingFailures, Map functions) { + if (p instanceof Aggregate) { + Aggregate a = (Aggregate) p; + + // The grouping can not be an aggregate function + a.groupings().forEach(e -> e.forEachUp(c -> { + if (Functions.isAggregate(c)) { + localFailures.add(fail(c, "Cannot use an aggregate [" + c.nodeName().toUpperCase(Locale.ROOT) + "] for grouping")); + } + })); + + if (!localFailures.isEmpty()) { + return false; + } + + // The agg can be: + // 1. plain column - in which case, there should be an equivalent in groupings + // 2. aggregate over non-grouped column + // 3. scalar function on top of 1 and/or 2. 
the function needs unfolding to make sure + // the 'source' is valid. + + // Note that grouping can be done by a function (GROUP BY YEAR(date)) which means date + // cannot be used as a plain column, only YEAR(date) or aggs(?) on top of it + + Map> missing = new LinkedHashMap<>(); + a.aggregates().forEach(ne -> + ne.collectFirstChildren(c -> checkGroupMatch(c, ne, a.groupings(), missing, functions))); + + if (!missing.isEmpty()) { + String plural = missing.size() > 1 ? "s" : StringUtils.EMPTY; + localFailures.add(fail(missing.values().iterator().next(), "Cannot use non-grouped column" + plural + " %s, expected %s", + Expressions.names(missing.keySet()), + Expressions.names(a.groupings()))); + return false; + } + } + + return true; + } + + private static boolean checkGroupMatch(Expression e, Node source, List groupings, Map> missing, Map functions) { + + // resolve FunctionAttribute to backing functions + if (e instanceof FunctionAttribute) { + FunctionAttribute fa = (FunctionAttribute) e; + Function function = functions.get(fa.functionId()); + // TODO: this should be handled by a different rule + if (function == null) { + return false; + } + e = function; + } + + // scalar functions can be a binary tree + // first test the function against the grouping + // and if that fails, start unpacking hoping to find matches + if (e instanceof ScalarFunction) { + ScalarFunction sf = (ScalarFunction) e; + // found group for the expression + if (Expressions.anyMatch(groupings, e::semanticEquals)) { + return true; + } + // unwrap function to find the base + for (Expression arg : sf.arguments()) { + arg.collectFirstChildren(c -> checkGroupMatch(c, source, groupings, missing, functions)); + } + + return true; + } + + // skip literals / foldable + if (e.foldable()) { + return true; + } + // skip aggs (allowed to refer to non-group columns) + // TODO: need to check whether it's possible to agg on a field used inside a scalar for grouping + if (Functions.isAggregate(e)) { + return 
true; + } + // left without leaves which have to match; if not there's a failure + + final Expression exp = e; + if (e.children().isEmpty()) { + if (!Expressions.anyMatch(groupings, c -> exp.semanticEquals(exp instanceof Attribute ? Expressions.attribute(c) : c))) { + missing.put(e, source); + } + return true; + } + return false; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/index/EsIndex.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/index/EsIndex.java new file mode 100644 index 00000000000..5c28dc1c92a --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/index/EsIndex.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.index; + +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Map; + +public class EsIndex { + + private final String name; + private final Map mapping; + + public EsIndex(String name, Map mapping) { + assert name != null; + assert mapping != null; + this.name = name; + this.mapping = mapping; + } + + public String name() { + return name; + } + + public Map mapping() { + return mapping; + } + + @Override + public String toString() { + return name; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/index/GetIndexResult.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/index/GetIndexResult.java new file mode 100644 index 00000000000..5b84030a34f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/index/GetIndexResult.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.index; + +import org.elasticsearch.common.Nullable; + +import java.util.Objects; + +public final class GetIndexResult { + public static GetIndexResult valid(EsIndex index) { + Objects.requireNonNull(index, "index must not be null if it was found"); + return new GetIndexResult(index, null); + } + public static GetIndexResult invalid(String invalid) { + Objects.requireNonNull(invalid, "invalid must not be null to signal that the index is invalid"); + return new GetIndexResult(null, invalid); + } + public static GetIndexResult notFound(String name) { + Objects.requireNonNull(name, "name must not be null"); + return invalid("Index '" + name + "' does not exist"); + } + + private final EsIndex index; + @Nullable + private final String invalid; + + private GetIndexResult(EsIndex index, @Nullable String invalid) { + this.index = index; + this.invalid = invalid; + } + + public boolean matches(String index) { + return isValid() && this.index.name().equals(index); + } + + /** + * Get the {@linkplain EsIndex} + * @throws MappingException if the index is invalid for use with sql + */ + public EsIndex get() { + if (invalid != null) { + throw new MappingException(invalid); + } + return index; + } + + /** + * Is the index valid for use with sql? Returns {@code false} if the + * index wasn't found. + */ + public boolean isValid() { + return invalid == null; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + GetIndexResult other = (GetIndexResult) obj; + return Objects.equals(index, other.index) + && Objects.equals(invalid, other.invalid); + } + + @Override + public int hashCode() { + return Objects.hash(index, invalid); + } + + @Override + public String toString() { + return invalid != null ? 
invalid : index.name(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java new file mode 100644 index 00000000000..6aa1ac6b602 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.index; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.Types; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Map; + +public class IndexResolver { + + private final Client client; + + public IndexResolver(Client client) { + this.client = client; + } + + /** + * Resolves a single index by name. 
+ */ + public void asIndex(final String index, ActionListener listener) { + GetIndexRequest getIndexRequest = createGetIndexRequest(index); + client.admin().indices().getIndex(getIndexRequest, ActionListener.wrap(getIndexResponse -> { + GetIndexResult result; + if (getIndexResponse.getMappings().size() > 1) { + result = GetIndexResult.invalid( + "[" + index + "] is an alias pointing to more than one index which is currently incompatible with sql"); + } else if (getIndexResponse.getMappings().size() == 1){ + ObjectObjectCursor> indexMappings = + getIndexResponse.getMappings().iterator().next(); + String concreteIndex = indexMappings.key; + /* + * here we don't support wildcards: we can either have an alias or an index. However names get resolved (through + * security or not) we need to preserve the original names as they will be used in the subsequent search request. + * With security enabled, if the user is authorized for an alias and not its corresponding concrete index, we have to + * make sure that the search is executed against the same alias name from the original command, rather than + * the resolved concrete index that we get back from the get index API + */ + result = buildGetIndexResult(concreteIndex, index, indexMappings.value); + } else { + result = GetIndexResult.notFound(index); + } + listener.onResponse(result); + }, listener::onFailure)); + } + + /** + * Discover (multiple) matching indices for a given name. + */ + public void asList(String index, ActionListener> listener) { + GetIndexRequest getIndexRequest = createGetIndexRequest(index); + client.admin().indices().getIndex(getIndexRequest, ActionListener.wrap(getIndexResponse -> { + ImmutableOpenMap> mappings = getIndexResponse.getMappings(); + List results = new ArrayList<>(mappings.size()); + for (ObjectObjectCursor> indexMappings : mappings) { + /* + * We support wildcard expressions here, and it's only for commands that only perform the get index call. 
+ * We can and simply have to use the concrete index name and show that to users. + * Get index against an alias with security enabled, where the user has only access to get mappings for the alias + * and not the concrete index: there is a well known information leak of the concrete index name in the response. + */ + String concreteIndex = indexMappings.key; + GetIndexResult getIndexResult = buildGetIndexResult(concreteIndex, concreteIndex, indexMappings.value); + if (getIndexResult.isValid()) { + results.add(getIndexResult.get()); + } + } + results.sort(Comparator.comparing(EsIndex::name)); + listener.onResponse(results); + }, listener::onFailure)); + } + + private static GetIndexRequest createGetIndexRequest(String index) { + return new GetIndexRequest() + .local(true) + .indices(index) + .features(Feature.MAPPINGS) + //lenient because we throw our own errors looking at the response e.g. if something was not resolved + //also because this way security doesn't throw authorization exceptions but rather honours ignore_unavailable + .indicesOptions(IndicesOptions.lenientExpandOpen()); + } + + private static GetIndexResult buildGetIndexResult(String concreteIndex, String indexOrAlias, + ImmutableOpenMap mappings) { + if (concreteIndex.startsWith(".")) { + //Indices that start with "." are considered internal and should not be available to SQL + return GetIndexResult.notFound(indexOrAlias); + } + + // Make sure that the index contains only a single type + MappingMetaData singleType = null; + List typeNames = null; + for (ObjectObjectCursor type : mappings) { + //Default mappings are ignored as they are applied to each type. Each type alone holds all of its fields. 
+ if ("_default_".equals(type.key)) { + continue; + } + if (singleType != null) { + // There are more than one types + if (typeNames == null) { + typeNames = new ArrayList<>(); + typeNames.add(singleType.type()); + } + typeNames.add(type.key); + } + singleType = type.value; + } + + if (singleType == null) { + return GetIndexResult.invalid("[" + indexOrAlias + "] doesn't have any types so it is incompatible with sql"); + } else if (typeNames != null) { + Collections.sort(typeNames); + return GetIndexResult.invalid( + "[" + indexOrAlias + "] contains more than one type " + typeNames + " so it is incompatible with sql"); + } else { + Map mapping = Types.fromEs(singleType.sourceAsMap()); + return GetIndexResult.valid(new EsIndex(indexOrAlias, mapping)); + } + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/index/MappingException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/index/MappingException.java new file mode 100644 index 00000000000..46387936a90 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/analysis/index/MappingException.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.index; + +import org.elasticsearch.xpack.sql.ClientSqlException; + +public class MappingException extends ClientSqlException { + + public MappingException(String message, Object... 
args) { + super(message, args); + } + + public MappingException(String message, Throwable ex) { + super(message, ex); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/capabilities/Resolvable.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/capabilities/Resolvable.java new file mode 100644 index 00000000000..a627f061655 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/capabilities/Resolvable.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.capabilities; + +public interface Resolvable { + + boolean resolved(); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/capabilities/Resolvables.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/capabilities/Resolvables.java new file mode 100644 index 00000000000..222ba7a87c0 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/capabilities/Resolvables.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.capabilities; + +public abstract class Resolvables { + + public static boolean resolved(Iterable resolvables) { + for (Resolvable resolvable : resolvables) { + if (!resolvable.resolved()) { + return false; + } + } + return true; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/capabilities/Unresolvable.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/capabilities/Unresolvable.java new file mode 100644 index 00000000000..9cd2e641697 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/capabilities/Unresolvable.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.capabilities; + + +public interface Unresolvable extends Resolvable { + + String UNRESOLVED_PREFIX = "?"; + + @Override + default boolean resolved() { + return false; + } + + String unresolvedMessage(); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java new file mode 100644 index 00000000000..d6c7543f6af --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.capabilities; + +import org.elasticsearch.xpack.sql.ServerSqlException; + +import java.util.Locale; + +import static java.lang.String.format; + +/** + * Thrown when we accidentally attempt to resolve something on on an unresolved entity. Throwing this + * is always a bug. + */ +public class UnresolvedException extends ServerSqlException { + public UnresolvedException(String action, Object target) { + super(format(Locale.ROOT, "Invalid call to %s on an unresolved object %s", action, target)); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/ExecutionException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/ExecutionException.java new file mode 100644 index 00000000000..511d6552fed --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/ExecutionException.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution; + +import org.elasticsearch.xpack.sql.SqlException; + +//TODO: beef up the exception or remove it +public class ExecutionException extends SqlException { + + public ExecutionException(String message, Object ...args) { + super(message, args); + } + + public ExecutionException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java new file mode 100644 index 00000000000..22549aa4de4 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.analyzer.PreAnalyzer; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.execution.search.SourceGenerator; +import org.elasticsearch.xpack.sql.expression.function.DefaultFunctionRegistry; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.optimizer.Optimizer; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.sql.planner.Planner; +import org.elasticsearch.xpack.sql.planner.PlanningException; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.RowSet; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; + +public class PlanExecutor { + private final Client client; + + private final FunctionRegistry functionRegistry; + + private final SqlParser parser; + private final IndexResolver indexResolver; + private final PreAnalyzer preAnalyzer; + private final Analyzer analyzer; + private final Optimizer optimizer; + private final Planner planner; + + public PlanExecutor(Client client, IndexResolver indexResolver) { + this.client = client; + this.indexResolver = indexResolver; + this.functionRegistry = new DefaultFunctionRegistry(); + + this.parser = new SqlParser(); + this.preAnalyzer = new PreAnalyzer(); + this.analyzer = new 
Analyzer(functionRegistry); + this.optimizer = new Optimizer(); + this.planner = new Planner(); + } + + private SqlSession newSession(Configuration cfg) { + return new SqlSession(cfg, client, functionRegistry, parser, indexResolver, preAnalyzer, analyzer, optimizer, planner); + } + + public void searchSource(String sql, Configuration settings, ActionListener listener) { + newSession(settings).sqlExecutable(sql, ActionListener.wrap(exec -> { + if (exec instanceof EsQueryExec) { + EsQueryExec e = (EsQueryExec) exec; + listener.onResponse(SourceGenerator.sourceBuilder(e.queryContainer(), settings.filter(), settings.pageSize())); + } else { + listener.onFailure(new PlanningException("Cannot generate a query DSL for %s", sql)); + } + }, listener::onFailure)); + } + + public void sql(Configuration cfg, String sql, ActionListener listener) { + newSession(cfg).sql(sql, listener); + } + + public void nextPage(Configuration cfg, Cursor cursor, ActionListener listener) { + cursor.nextPage(cfg, client, listener); + } + + public void cleanCursor(Configuration cfg, Cursor cursor, ActionListener listener) { + cursor.clear(cfg, client, listener); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/AbstractSearchHitRowSet.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/AbstractSearchHitRowSet.java new file mode 100644 index 00000000000..5ac37c2482f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/AbstractSearchHitRowSet.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; +import org.elasticsearch.xpack.sql.session.AbstractRowSet; +import org.elasticsearch.xpack.sql.session.Cursor; + +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; + +/** + * Extracts rows from an array of {@link SearchHit}. + */ +abstract class AbstractSearchHitRowSet extends AbstractRowSet { + private final SearchHit[] hits; + private final Cursor cursor; + private final String scrollId; + private final List extractors; + private final Set innerHits = new LinkedHashSet<>(); + private final String innerHit; + private final int limit; + + private final int size; + private final int[] indexPerLevel; + private int row = 0; + + AbstractSearchHitRowSet(List exts, SearchHit[] hits, int limitHits, String scrollId) { + this.hits = hits; + this.scrollId = scrollId; + this.extractors = exts; + + // Since the results might contain nested docs, the iteration is similar to that of Aggregation + // namely it discovers the nested docs and then, for iteration, increments the deepest level first + // and eventually carries that over to the top level + + String innerHit = null; + for (HitExtractor ex : exts) { + innerHit = ex.innerHitName(); + if (innerHit != null) { + innerHits.add(innerHit); + } + } + + int sz = hits.length; + + int maxDepth = 0; + if (!innerHits.isEmpty()) { + if (innerHits.size() > 1) { + throw new SqlIllegalArgumentException("Multi-nested docs not yet supported %s", innerHits); + } + maxDepth = 1; + + sz = 0; + for (int i = 0; i < hits.length; i++) { + SearchHit hit = hits[i]; + for (String ih : innerHits) { + SearchHits sh = hit.getInnerHits().get(ih); + if (sh != null) { + sz += sh.getHits().length; + } + } + } + } 
+ // overall limit + limit = limitHits; + // page size + size = limitHits < 0 ? sz : Math.min(sz, limitHits); + indexPerLevel = new int[maxDepth + 1]; + this.innerHit = innerHit; + + if (scrollId == null) { + /* SearchResponse can contain a null scroll when you start a + * scroll but all results fit in the first page. */ + cursor = Cursor.EMPTY; + } else { + // compute remaining limit + int remainingLimit = limit - size; + // if the computed limit is zero, or the size is zero it means either there's nothing left or the limit has been reached + if (size == 0 || remainingLimit == 0) { + cursor = Cursor.EMPTY; + } else { + cursor = new ScrollCursor(scrollId, extractors, remainingLimit); + } + } + } + + @Override + protected Object getColumn(int column) { + HitExtractor e = extractors.get(column); + int extractorLevel = e.innerHitName() == null ? 0 : 1; + + SearchHit hit = null; + SearchHit[] sh = hits; + for (int lvl = 0; lvl <= extractorLevel ; lvl++) { + // TODO: add support for multi-nested doc + if (hit != null) { + SearchHits innerHits = hit.getInnerHits().get(innerHit); + sh = innerHits == null ? SearchHits.EMPTY : innerHits.getHits(); + } + hit = sh[indexPerLevel[lvl]]; + } + + return e.get(hit); + } + + @Override + protected boolean doHasCurrent() { + return row < size; + } + + @Override + protected boolean doNext() { + if (row < size - 1) { + row++; + // increment last row + indexPerLevel[indexPerLevel.length - 1]++; + // then check size + SearchHit[] sh = hits; + for (int lvl = 0; lvl < indexPerLevel.length; lvl++) { + if (indexPerLevel[lvl] == sh.length) { + // reset the current branch + indexPerLevel[lvl] = 0; + // bump the parent - if it's too big it, the loop will restart again from that position + indexPerLevel[lvl - 1]++; + // restart the loop + lvl = 0; + sh = hits; + } + else { + SearchHit h = sh[indexPerLevel[lvl]]; + // TODO: improve this for multi-nested responses + String path = lvl == 0 ? 
innerHit : null; + if (path != null) { + SearchHits innerHits = h.getInnerHits().get(path); + sh = innerHits == null ? SearchHits.EMPTY : innerHits.getHits(); + } + } + } + + return true; + } + return false; + } + + @Override + protected void doReset() { + row = 0; + Arrays.fill(indexPerLevel, 0); + } + + @Override + public int size() { + return size; + } + + public String scrollId() { + return scrollId; + } + + @Override + public Cursor nextPageCursor() { + return cursor; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/AggValues.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/AggValues.java new file mode 100644 index 00000000000..39f29930378 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/AggValues.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search; + +import java.util.Arrays; +import java.util.List; + +/** + * Aggregations are returned by Elasticsearch in a tree structure where each nested level can have a different size. + * For example a group by a, b, c results in 3-level nested array where each level contains all the relevant values + * for its parent entry. + * Assuming there's a total of 2 A's, 3 B's and 5 C's, the values will be + * A-agg level = { A1, A2 } + * B-agg level = { { A1B1, A1B2, A1B3 }, { A2B1, A2B2, A2B3 } + * C-agg level = { { { A1B1C1, A1B1C2 ..}, { A1B2C1, etc... } } } and so on + * + * Further more the columns are added in the order in which they are requested (0, 1, 2) eliminating the need for keys as these are implicit (their position in the list). 
/**
 * Columnar view over values extracted from an Elasticsearch aggregation tree.
 * <p>
 * Aggregations are returned in a tree structure where each nested level can have
 * a different size. For example a group by a, b, c results in a 3-level nested
 * array where each level contains all the relevant values for its parent entry.
 * Columns are added in the order in which they were requested, eliminating the
 * need for keys as these are implicit (their position in the list).
 * <p>
 * To help with the iteration there are two dedicated counters:
 * {@code indexPerLevel}, which carries (increments) the counter for each nesting
 * level (indicated by the position inside the array) once the children reach
 * their max, and {@code row}, a flat cursor indicating the current row.
 */
class AggValues {
    // flat cursor over the logical rows
    private int row = 0;

    // one Object[] per requested column; group-by columns are nested arrays, one level per group
    private final List<Object[]> columns;
    // per-nesting-level counters; carry happens lazily in column()
    private int[] indexPerLevel;
    // number of rows exposed (possibly capped by the query limit)
    private int size;

    AggValues(List<Object[]> columns) {
        this.columns = columns;
    }

    /**
     * Prepares iteration. Must be called before {@link #column(int)}.
     *
     * @param maxDepth deepest nesting level present among the columns
     * @param limit    maximum number of rows to expose; non-positive means "no limit"
     */
    void init(int maxDepth, int limit) {
        int sz = computeSize(columns, maxDepth);
        size = limit > 0 ? Math.min(limit, sz) : sz;
        indexPerLevel = new int[maxDepth + 1];
    }

    private static int computeSize(List<Object[]> columns, int maxDepth) {
        // fix: no columns means no rows (previously columns.get(0) would throw)
        if (columns.isEmpty()) {
            return 0;
        }
        // look only at arrays with the right depth (the others might be
        // counters or other functions)
        // then return the parent array to compute the actual returned results
        Object[] leafArray = null;
        for (int i = 0; i < columns.size() && leafArray == null; i++) {
            Object[] col = columns.get(i);
            Object o = col;
            int level = 0;
            Object[] parent = null;
            // keep unwrapping until the desired level is reached
            while (o instanceof Object[]) {
                col = (Object[]) o;
                if (col.length > 0) {
                    if (level == maxDepth) {
                        leafArray = parent;
                        break;
                    } else {
                        parent = col;
                        level++;
                        o = col[0];
                    }
                } else {
                    o = null;
                }
            }
        }

        if (leafArray == null) {
            // no column reaches the full depth - fall back to the first column's length
            return columns.get(0).length;
        }

        // total rows = sum of the leaf array lengths under the parent
        int sz = 0;
        for (Object leaf : leafArray) {
            sz += ((Object[]) leaf).length;
        }
        return sz;
    }

    /**
     * Returns the value of the given column for the current row, unwrapping the
     * nested arrays level by level. The per-level carry is performed lazily here,
     * whenever a leaf counter is found to have run past its branch.
     */
    Object column(int column) {
        Object o = columns.get(column);

        for (int lvl = 0; o instanceof Object[]; lvl++) {
            Object[] arr = (Object[]) o;
            // the current branch is done
            if (indexPerLevel[lvl] == arr.length) {
                // reset the current branch
                indexPerLevel[lvl] = 0;
                // bump the parent - if it's too big, the loop will restart
                // again from that position
                // NOTE(review): assumes the overflow never happens at level 0 -
                // the row count from init() is meant to guarantee that; confirm
                indexPerLevel[lvl - 1]++;
                // restart the loop
                lvl = -1;
                o = columns.get(column);
            } else {
                o = arr[indexPerLevel[lvl]];
            }
        }
        return o;
    }

    int size() {
        return size;
    }

    void reset() {
        row = 0;
        Arrays.fill(indexPerLevel, 0);
    }

    boolean nextRow() {
        if (row < size - 1) {
            row++;
            // increment leaf counter - the size check is done lazily while retrieving the columns
            indexPerLevel[indexPerLevel.length - 1]++;
            return true;
        }
        return false;
    }

    boolean hasCurrentRow() {
        return row < size;
    }
}
+ */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.xpack.sql.session.AbstractRowSet; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.type.Schema; + +import java.util.List; +import java.util.function.Supplier; + +class AggsRowSet extends AbstractRowSet implements SchemaRowSet { + private final Schema schema; + private final AggValues agg; + private final List> columns; + private int row = 0; + + AggsRowSet(Schema schema, AggValues agg, List> columns) { + this.schema = schema; + this.agg = agg; + this.columns = columns; + } + + @Override + protected Object getColumn(int column) { + return columns.get(column).get(); + } + + @Override + protected boolean doHasCurrent() { + return row < size(); + } + + @Override + protected boolean doNext() { + return agg.nextRow(); + } + + @Override + protected void doReset() { + agg.reset(); + } + + @Override + public int size() { + return agg.size(); + } + + @Override + public Cursor nextPageCursor() { + return Cursor.EMPTY; + } + + @Override + public Schema schema() { + return schema; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/InitialSearchHitRowSet.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/InitialSearchHitRowSet.java new file mode 100644 index 00000000000..403aa29e40e --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/InitialSearchHitRowSet.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.type.Schema; + +import java.util.List; + +/** + * Initial results from a scroll search. Distinct from the following pages + * because it has a {@link Schema} available. See {@link ScrolledSearchHitRowSet} + * for the next pages. + */ +public class InitialSearchHitRowSet extends AbstractSearchHitRowSet implements SchemaRowSet { + private final Schema schema; + + public InitialSearchHitRowSet(Schema schema, List exts, SearchHit[] hits, int limitHits, String scrollId) { + super(exts, hits, limitHits, scrollId); + this.schema = schema; + } + + @Override + public Schema schema() { + return schema; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java new file mode 100644 index 00000000000..b75878997e6 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.execution.search; + + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.ClearScrollResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.RowSet; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public class ScrollCursor implements Cursor { + public static final String NAME = "s"; + + private final String scrollId; + private final List extractors; + private final int limit; + + public ScrollCursor(String scrollId, List extractors, int limit) { + this.scrollId = scrollId; + this.extractors = extractors; + this.limit = limit; + } + + public ScrollCursor(StreamInput in) throws IOException { + scrollId = in.readString(); + extractors = in.readNamedWriteableList(HitExtractor.class); + limit = in.readVInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(scrollId); + out.writeNamedWriteableList(extractors); + out.writeVInt(limit); + } + + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void nextPage(Configuration cfg, Client client, ActionListener listener) { + SearchScrollRequest request = new SearchScrollRequest(scrollId).scroll(cfg.pageTimeout()); + client.searchScroll(request, ActionListener.wrap((SearchResponse response) -> { + ScrolledSearchHitRowSet rowSet = new ScrolledSearchHitRowSet(extractors, response.getHits().getHits(), + limit, 
response.getScrollId()); + if (rowSet.nextPageCursor() == Cursor.EMPTY ) { + // we are finished with this cursor, let's clean it before continuing + clear(cfg, client, ActionListener.wrap(success -> listener.onResponse(rowSet), listener::onFailure)); + } else { + listener.onResponse(rowSet); + } + }, listener::onFailure)); + } + + @Override + public void clear(Configuration cfg, Client client, ActionListener listener) { + cleanCursor(client, scrollId, + ActionListener.wrap( + clearScrollResponse -> listener.onResponse(clearScrollResponse.isSucceeded()), + listener::onFailure)); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + ScrollCursor other = (ScrollCursor) obj; + return Objects.equals(scrollId, other.scrollId) + && Objects.equals(extractors, other.extractors) + && Objects.equals(limit, other.limit); + } + + @Override + public int hashCode() { + return Objects.hash(scrollId, extractors, limit); + } + + @Override + public String toString() { + return "cursor for scroll [" + scrollId + "]"; + } + + public static void cleanCursor(Client client, String scrollId, ActionListener listener) { + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.addScrollId(scrollId); + client.clearScroll(clearScrollRequest, listener); + } + +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrolledSearchHitRowSet.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrolledSearchHitRowSet.java new file mode 100644 index 00000000000..cac3b537b63 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrolledSearchHitRowSet.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; +import org.elasticsearch.xpack.sql.type.Schema; + +import java.util.List; + +/** + * "Next" page of results from a scroll search. Distinct from the first page + * because it no longer has the {@link Schema}. See {@link InitialSearchHitRowSet} + * for the initial results. + */ +public class ScrolledSearchHitRowSet extends AbstractSearchHitRowSet { + private final int columnCount; + + public ScrolledSearchHitRowSet(List exts, SearchHit[] hits, int limitHits, String scrollId) { + super(exts, hits, limitHits, scrollId); + this.columnCount = exts.size(); + } + + @Override + public int columnCount() { + return columnCount; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/Scroller.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/Scroller.java new file mode 100644 index 00000000000..978ceced06b --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/Scroller.java @@ -0,0 +1,349 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.execution.ExecutionException; +import org.elasticsearch.xpack.sql.execution.search.extractor.ComputingHitExtractor; +import org.elasticsearch.xpack.sql.execution.search.extractor.DocValueExtractor; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; +import org.elasticsearch.xpack.sql.execution.search.extractor.InnerHitExtractor; +import org.elasticsearch.xpack.sql.execution.search.extractor.SourceExtractor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.AggPathInput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.AggValueInput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.HitExtractorInput; +import 
org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ReferenceInput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.querydsl.agg.AggPath; +import org.elasticsearch.xpack.sql.querydsl.container.AggRef; +import org.elasticsearch.xpack.sql.querydsl.container.ColumnReference; +import org.elasticsearch.xpack.sql.querydsl.container.ComputedRef; +import org.elasticsearch.xpack.sql.querydsl.container.NestedFieldRef; +import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; +import org.elasticsearch.xpack.sql.querydsl.container.ScriptFieldRef; +import org.elasticsearch.xpack.sql.querydsl.container.SearchHitFieldRef; +import org.elasticsearch.xpack.sql.querydsl.container.TotalCountRef; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.type.Schema; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; +// TODO: add retry/back-off +public class Scroller { + + private final Logger log = Loggers.getLogger(getClass()); + + private final TimeValue keepAlive, timeout; + private final int size; + private final Client client; + @Nullable + private final QueryBuilder filter; + + public Scroller(Client client, Configuration cfg) { + this(client, cfg.requestTimeout(), cfg.pageTimeout(), cfg.filter(), cfg.pageSize()); + } + + public Scroller(Client client, TimeValue keepAlive, TimeValue timeout, QueryBuilder filter, int size) { + this.client = client; + this.keepAlive = keepAlive; + this.timeout = timeout; + this.filter = filter; + this.size = size; + } + + public void scroll(Schema schema, QueryContainer query, String index, 
ActionListener listener) { + // prepare the request + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(query, filter, size); + + if (log.isTraceEnabled()) { + log.trace("About to execute query {} on {}", StringUtils.toString(sourceBuilder), index); + } + + SearchRequest search = client.prepareSearch(index).setSource(sourceBuilder).request(); + search.scroll(keepAlive).source().timeout(timeout); + + boolean isAggsOnly = query.isAggsOnly(); + + ScrollerActionListener l; + if (isAggsOnly) { + l = new AggsScrollActionListener(listener, client, timeout, schema, query); + } else { + l = new HandshakeScrollActionListener(listener, client, timeout, schema, query); + } + client.search(search, l); + } + + // dedicated scroll used for aggs-only/group-by results + static class AggsScrollActionListener extends ScrollerActionListener { + + private final QueryContainer query; + + AggsScrollActionListener(ActionListener listener, Client client, TimeValue keepAlive, Schema schema, QueryContainer query) { + super(listener, client, keepAlive, schema); + this.query = query; + } + + @Override + protected void handleResponse(SearchResponse response, ActionListener listener) { + + final List extractedAggs = new ArrayList<>(); + AggValues aggValues = new AggValues(extractedAggs); + List> aggColumns = new ArrayList<>(query.columns().size()); + + // this method assumes the nested aggregation are all part of the same tree (the SQL group-by) + int maxDepth = -1; + + List cols = query.columns(); + for (int index = 0; index < cols.size(); index++) { + ColumnReference col = cols.get(index); + Supplier supplier = null; + + if (col instanceof ComputedRef) { + ComputedRef pRef = (ComputedRef) col; + + Processor processor = pRef.processor().transformUp(a -> { + Object[] value = extractAggValue(new AggRef(a.context()), response); + extractedAggs.add(value); + final int aggPosition = extractedAggs.size() - 1; + Supplier action = null; + if (a.action() != null) { + action = () -> 
a.action().process(aggValues.column(aggPosition)); + } + else { + action = () -> aggValues.column(aggPosition); + } + return new AggValueInput(a.expression(), action, a.innerKey()); + }, AggPathInput.class).asProcessor(); + // the input is provided through the value input above + supplier = () -> processor.process(null); + } + else { + extractedAggs.add(extractAggValue(col, response)); + final int aggPosition = extractedAggs.size() - 1; + supplier = () -> aggValues.column(aggPosition); + } + + aggColumns.add(supplier); + if (col.depth() > maxDepth) { + maxDepth = col.depth(); + } + } + + aggValues.init(maxDepth, query.limit()); + clearScroll(response.getScrollId(), ActionListener.wrap( + succeeded -> listener.onResponse(new AggsRowSet(schema, aggValues, aggColumns)), + listener::onFailure)); + } + + private Object[] extractAggValue(ColumnReference col, SearchResponse response) { + if (col == TotalCountRef.INSTANCE) { + return new Object[] { Long.valueOf(response.getHits().getTotalHits()) }; + } + else if (col instanceof AggRef) { + Object[] arr; + + String path = ((AggRef) col).path(); + // yup, this is instance equality to make sure we only check the path used by the code + if (path == TotalCountRef.PATH) { + arr = new Object[] { Long.valueOf(response.getHits().getTotalHits()) }; + } + else { + // workaround for elastic/elasticsearch/issues/23056 + boolean formattedKey = AggPath.isBucketValueFormatted(path); + if (formattedKey) { + path = AggPath.bucketValueWithoutFormat(path); + } + Object value = getAggProperty(response.getAggregations(), path); + + // // FIXME: this can be tabular in nature + // if (ref instanceof MappedAggRef) { + // Map map = (Map) value; + // Object extractedValue = map.get(((MappedAggRef) + // ref).fieldName()); + // } + + if (formattedKey) { + List buckets = ((MultiBucketsAggregation) value).getBuckets(); + arr = new Object[buckets.size()]; + for (int i = 0; i < buckets.size(); i++) { + arr[i] = buckets.get(i).getKeyAsString(); + } + } 
else { + arr = value instanceof Object[] ? (Object[]) value : new Object[] { value }; + } + } + + return arr; + } + throw new SqlIllegalArgumentException("Unexpected non-agg/grouped column specified; %s", col.getClass()); + } + + private static Object getAggProperty(Aggregations aggs, String path) { + List list = AggregationPath.parse(path).getPathElementsAsStringList(); + String aggName = list.get(0); + InternalAggregation agg = aggs.get(aggName); + if (agg == null) { + throw new ExecutionException("Cannot find an aggregation named %s", aggName); + } + return agg.getProperty(list.subList(1, list.size())); + } + } + + // initial scroll used for parsing search hits (handles possible aggs) + static class HandshakeScrollActionListener extends ScrollerActionListener { + private final QueryContainer query; + + HandshakeScrollActionListener(ActionListener listener, Client client, TimeValue keepAlive, + Schema schema, QueryContainer query) { + super(listener, client, keepAlive, schema); + this.query = query; + } + + @Override + public void onResponse(SearchResponse response) { + super.onResponse(response); + } + + @Override + protected void handleResponse(SearchResponse response, ActionListener listener) { + SearchHit[] hits = response.getHits().getHits(); + List exts = getExtractors(); + + // there are some results + if (hits.length > 0) { + String scrollId = response.getScrollId(); + + // if there's an id, try to setup next scroll + if (scrollId != null && + // is all the content already retrieved? 
+ (Boolean.TRUE.equals(response.isTerminatedEarly()) || response.getHits().getTotalHits() == hits.length + // or maybe the limit has been reached + || (hits.length >= query.limit() && query.limit() > -1))) { + // if so, clear the scroll + clearScroll(response.getScrollId(), ActionListener.wrap( + succeeded -> listener.onResponse(new InitialSearchHitRowSet(schema, exts, hits, query.limit(), null)), + listener::onFailure)); + } else { + listener.onResponse(new InitialSearchHitRowSet(schema, exts, hits, query.limit(), scrollId)); + } + } + // no hits + else { + clearScroll(response.getScrollId(), ActionListener.wrap(succeeded -> listener.onResponse(Rows.empty(schema)), + listener::onFailure)); + } + } + + private List getExtractors() { + // create response extractors for the first time + List refs = query.columns(); + + List exts = new ArrayList<>(refs.size()); + + for (ColumnReference ref : refs) { + exts.add(createExtractor(ref)); + } + return exts; + } + + private HitExtractor createExtractor(ColumnReference ref) { + if (ref instanceof SearchHitFieldRef) { + SearchHitFieldRef f = (SearchHitFieldRef) ref; + return f.useDocValue() ? 
new DocValueExtractor(f.name()) : new SourceExtractor(f.name()); + } + + if (ref instanceof NestedFieldRef) { + NestedFieldRef f = (NestedFieldRef) ref; + return new InnerHitExtractor(f.parent(), f.name(), f.useDocValue()); + } + + if (ref instanceof ScriptFieldRef) { + ScriptFieldRef f = (ScriptFieldRef) ref; + return new DocValueExtractor(f.name()); + } + + if (ref instanceof ComputedRef) { + ProcessorDefinition proc = ((ComputedRef) ref).processor(); + proc = proc.transformDown(l -> new HitExtractorInput(l.expression(), createExtractor(l.context())), ReferenceInput.class); + return new ComputingHitExtractor(proc.asProcessor()); + } + + throw new SqlIllegalArgumentException("Unexpected ValueReference %s", ref.getClass()); + } + } + + abstract static class ScrollerActionListener implements ActionListener { + + final ActionListener listener; + + final Client client; + final TimeValue keepAlive; + final Schema schema; + + ScrollerActionListener(ActionListener listener, Client client, TimeValue keepAlive, Schema schema) { + this.listener = listener; + + this.client = client; + this.keepAlive = keepAlive; + this.schema = schema; + } + + // TODO: need to handle rejections plus check failures (shard size, etc...) 
+ @Override + public void onResponse(final SearchResponse response) { + try { + ShardSearchFailure[] failure = response.getShardFailures(); + if (!CollectionUtils.isEmpty(failure)) { + onFailure(new ExecutionException(failure[0].reason(), failure[0].getCause())); + } + handleResponse(response, listener); + } catch (Exception ex) { + onFailure(ex); + } + } + + protected abstract void handleResponse(SearchResponse response, ActionListener listener); + + protected final void clearScroll(String scrollId, ActionListener listener) { + if (scrollId != null) { + client.prepareClearScroll().addScrollId(scrollId).execute( + ActionListener.wrap( + clearScrollResponse -> listener.onResponse(clearScrollResponse.isSucceeded()), + listener::onFailure)); + } else { + listener.onResponse(false); + } + } + + @Override + public final void onFailure(Exception ex) { + listener.onFailure(ex); + } + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java new file mode 100644 index 00000000000..54a701e5acc --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java @@ -0,0 +1,239 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.ConstantScoreQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.script.Script; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.StoredFieldsContext; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.NestedFieldAttribute; +import org.elasticsearch.xpack.sql.expression.RootFieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ReferenceInput; +import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByColumnAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupingAgg; +import org.elasticsearch.xpack.sql.querydsl.container.AttributeSort; +import org.elasticsearch.xpack.sql.querydsl.container.ColumnReference; +import org.elasticsearch.xpack.sql.querydsl.container.ComputedRef; +import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; +import org.elasticsearch.xpack.sql.querydsl.container.ScriptFieldRef; +import 
org.elasticsearch.xpack.sql.querydsl.container.ScriptSort; +import org.elasticsearch.xpack.sql.querydsl.container.SearchHitFieldRef; +import org.elasticsearch.xpack.sql.querydsl.container.Sort; +import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction; +import org.elasticsearch.xpack.sql.querydsl.query.NestedQuery; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.search.sort.SortBuilders.fieldSort; +import static org.elasticsearch.search.sort.SortBuilders.scriptSort; + +public abstract class SourceGenerator { + + private static final List NO_STORED_FIELD = singletonList(StoredFieldsContext._NONE_); + + public static SearchSourceBuilder sourceBuilder(QueryContainer container, QueryBuilder filter, Integer size) { + SearchSourceBuilder source = new SearchSourceBuilder(); + // add the source + if (container.query() != null) { + if (filter != null) { + source.query(new BoolQueryBuilder().must(container.query().asBuilder()).filter(filter)); + } else { + source.query(container.query().asBuilder()); + } + } else { + if (filter != null) { + source.query(new ConstantScoreQueryBuilder(filter)); + } + } + + // translate fields to source-fields or script fields + Set sourceFields = new LinkedHashSet<>(); + Set docFields = new LinkedHashSet<>(); + Map scriptFields = new LinkedHashMap<>(); + + for (ColumnReference ref : container.columns()) { + collectFields(ref, sourceFields, docFields, scriptFields); + } + + if (!sourceFields.isEmpty()) { + source.fetchSource(sourceFields.toArray(new String[sourceFields.size()]), null); + } + + for (String field : docFields) { + source.docValueField(field); + } + + for (Entry entry : scriptFields.entrySet()) { + source.scriptField(entry.getKey(), entry.getValue()); + } + + sorting(container, source); + 
+ // add the aggs + Aggs aggs = container.aggs(); + + // push limit onto group aggs + if (container.limit() > 0) { + List groups = new ArrayList<>(aggs.groups()); + if (groups.size() > 0) { + // get just the root agg + GroupingAgg mainAgg = groups.get(0); + if (mainAgg instanceof GroupByColumnAgg) { + groups.set(0, ((GroupByColumnAgg) mainAgg).withLimit(container.limit())); + aggs = aggs.with(groups); + } + } + } + + + for (AggregationBuilder builder : aggs.asAggBuilders()) { + source.aggregation(builder); + } + + // add the pipeline aggs + for (PipelineAggregationBuilder builder : aggs.asPipelineBuilders()) { + source.aggregation(builder); + } + + optimize(container, source); + + // set size + if (size != null) { + if (source.size() == -1) { + int sz = container.limit() > 0 ? Math.min(container.limit(), size) : size; + source.size(sz); + } + } + + return source; + } + + private static void collectFields(ColumnReference ref, Set sourceFields, Set docFields, Map scriptFields) { + if (ref instanceof ComputedRef) { + ProcessorDefinition proc = ((ComputedRef) ref).processor(); + proc.forEachUp(l -> collectFields(l.context(), sourceFields, docFields, scriptFields), ReferenceInput.class); + } + else if (ref instanceof SearchHitFieldRef) { + SearchHitFieldRef sh = (SearchHitFieldRef) ref; + Set collection = sh.useDocValue() ? 
docFields : sourceFields; + collection.add(sh.name()); + } + else if (ref instanceof ScriptFieldRef) { + ScriptFieldRef sfr = (ScriptFieldRef) ref; + scriptFields.put(sfr.name(), sfr.script().toPainless()); + } + } + + private static void sorting(QueryContainer container, SearchSourceBuilder source) { + if (container.sort() != null) { + + for (Sort sortable : container.sort()) { + SortBuilder sortBuilder = null; + + if (sortable instanceof AttributeSort) { + AttributeSort as = (AttributeSort) sortable; + Attribute attr = as.attribute(); + + // sorting only works on not-analyzed fields - look for a multi-field replacement + if (attr instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) attr; + attr = fa.isAnalyzed() ? fa.notAnalyzedAttribute() : attr; + } + + // top-level doc value + if (attr instanceof RootFieldAttribute) { + sortBuilder = fieldSort(((RootFieldAttribute) attr).name()); + } + if (attr instanceof NestedFieldAttribute) { + NestedFieldAttribute nfa = (NestedFieldAttribute) attr; + FieldSortBuilder fieldSort = fieldSort(nfa.name()); + + String nestedPath = nfa.parentPath(); + NestedSortBuilder newSort = new NestedSortBuilder(nestedPath); + NestedSortBuilder nestedSort = fieldSort.getNestedSort(); + + if (nestedSort == null) { + fieldSort.setNestedSort(newSort); + } else { + for (; nestedSort.getNestedSort() != null; nestedSort = nestedSort.getNestedSort()) { + } + nestedSort.setNestedSort(newSort); + } + + nestedSort = newSort; + + List nestedQuery = new ArrayList<>(1); + + // copy also the nested queries fr(if any) + if (container.query() != null) { + container.query().forEachDown(nq -> { + // found a match + if (nestedPath.equals(nq.path())) { + // get the child query - the nested wrapping and inner hits are not needed + nestedQuery.add(nq.child().asBuilder()); + } + }, NestedQuery.class); + } + + if (nestedQuery.size() > 0) { + if (nestedQuery.size() > 1) { + throw new SqlIllegalArgumentException("nested query should have been grouped 
in one place"); + } + nestedSort.setFilter(nestedQuery.get(0)); + } + + sortBuilder = fieldSort; + } + } + if (sortable instanceof ScriptSort) { + ScriptSort ss = (ScriptSort) sortable; + sortBuilder = scriptSort(ss.script().toPainless(), ss.script().outputType().isNumeric() ? ScriptSortType.NUMBER : ScriptSortType.STRING); + } + + if (sortBuilder != null) { + sortBuilder.order(sortable.direction() == Direction.ASC ? SortOrder.ASC : SortOrder.DESC); + source.sort(sortBuilder); + } + } + } + else { + // if no sorting is specified, use the _doc one + source.sort("_doc"); + } + } + + private static void optimize(QueryContainer query, SearchSourceBuilder source) { + // if only aggs are needed, don't retrieve any docs + if (query.isAggsOnly()) { + source.size(0); + // disable source fetching (only doc values are used) + source.fetchSource(FetchSourceContext.DO_NOT_FETCH_SOURCE); + source.storedFields(NO_STORED_FIELD); + } + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingHitExtractor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingHitExtractor.java new file mode 100644 index 00000000000..c64aa6368f5 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingHitExtractor.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.execution.search.extractor;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.HitExtractorProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor;

import java.io.IOException;
import java.util.Objects;

/**
 * A {@link HitExtractor} backed by a {@link Processor}. Unlike
 * {@link HitExtractorProcessor} - which sits as a leaf inside a
 * {@link Processor} tree and can parse the {@link SearchHit} directly -
 * this class is used while scrolling, passing each hit through the
 * wrapped processor.
 *
 * In the future, the processor might be used across the board for all columns
 * to reduce API complexity (and keep the {@link HitExtractor} only as an
 * internal implementation detail).
 */
public class ComputingHitExtractor implements HitExtractor {
    static final String NAME = "p";

    private final Processor processor;

    public ComputingHitExtractor(Processor processor) {
        this.processor = processor;
    }

    ComputingHitExtractor(StreamInput in) throws IOException {
        // delegate to the main constructor after reading the wrapped processor
        this(in.readNamedWriteable(Processor.class));
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeNamedWriteable(processor);
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    public Processor processor() {
        return processor;
    }

    @Override
    public Object get(SearchHit hit) {
        return processor.process(hit);
    }

    @Override
    public String innerHitName() {
        // no inner hit is required by this extractor
        return null;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return processor.equals(((ComputingHitExtractor) obj).processor);
    }

    @Override
    public int hashCode() {
        return Objects.hash(processor);
    }

    @Override
    public String toString() {
        return processor.toString();
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.execution.search.extractor;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.SearchHit;

import java.io.IOException;
import java.util.Objects;

/**
 * Returns the same constant for every search hit against which it is run.
 */
public class ConstantExtractor implements HitExtractor {
    static final String NAME = "c";

    private final Object constant;

    public ConstantExtractor(Object constant) {
        this.constant = constant;
    }

    ConstantExtractor(StreamInput in) throws IOException {
        // the constant travels as a generic value
        this(in.readGenericValue());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeGenericValue(constant);
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public Object get(SearchHit hit) {
        // the hit is deliberately ignored
        return constant;
    }

    @Override
    public String innerHitName() {
        return null;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return Objects.equals(constant, ((ConstantExtractor) obj).constant);
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(constant);
    }

    @Override
    public String toString() {
        return "^" + constant;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.execution.search.extractor;

import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.SearchHit;
import org.joda.time.ReadableInstant;

import java.io.IOException;

/**
 * Extracts field values from {@link SearchHit#field(String)} (doc values).
 */
public class DocValueExtractor implements HitExtractor {
    static final String NAME = "f";

    private final String fieldName;

    public DocValueExtractor(String name) {
        this.fieldName = name;
    }

    DocValueExtractor(StreamInput in) throws IOException {
        fieldName = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(fieldName);
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    /**
     * Returns the first doc value of the field, normalizing Joda dates to
     * epoch millis, or {@code null} when the hit does not carry the field.
     */
    @Override
    public Object get(SearchHit hit) {
        // TODO we should think about what to do with multi-valued fields.
        // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/2874
        DocumentField field = hit.field(fieldName);
        if (field == null) {
            return null;
        }
        Object value = field.getValue();
        // instanceof is null-safe, so no separate null check is required
        if (value instanceof ReadableInstant) {
            return ((ReadableInstant) value).getMillis();
        }
        return value;
    }

    @Override
    public String innerHitName() {
        return null;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        DocValueExtractor other = (DocValueExtractor) obj;
        return fieldName.equals(other.fieldName);
    }

    @Override
    public int hashCode() {
        return fieldName.hashCode();
    }

    @Override
    public String toString() {
        /* % kind of looks like two 0s with a column separator between
         * them so it makes me think of columnar storage which doc
         * values are. */
        return "%" + fieldName;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.execution.search.extractor;

import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.xpack.sql.expression.function.scalar.Processors;

import java.util.ArrayList;
import java.util.List;

/**
 * Registry helper for the serialization of {@link HitExtractor} implementations.
 */
public abstract class HitExtractors {

    /**
     * All of the named writeables needed to deserialize the instances of
     * {@linkplain HitExtractor}, including the {@link Processors} entries
     * required by {@link ComputingHitExtractor}.
     */
    public static List<Entry> getNamedWriteables() {
        List<Entry> entries = new ArrayList<>();
        entries.add(new Entry(HitExtractor.class, ConstantExtractor.NAME, ConstantExtractor::new));
        entries.add(new Entry(HitExtractor.class, DocValueExtractor.NAME, DocValueExtractor::new));
        entries.add(new Entry(HitExtractor.class, InnerHitExtractor.NAME, InnerHitExtractor::new));
        entries.add(new Entry(HitExtractor.class, SourceExtractor.NAME, SourceExtractor::new));
        entries.add(new Entry(HitExtractor.class, ComputingHitExtractor.NAME, ComputingHitExtractor::new));
        entries.addAll(Processors.getNamedWriteables());
        return entries;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.execution.search.extractor;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.xpack.sql.execution.ExecutionException;

import java.io.IOException;
import java.util.Map;
import java.util.Objects;

/**
 * Extracts a field from an inner hit, either as a doc value or by walking
 * the (possibly dotted) field path through the hit's source map.
 */
public class InnerHitExtractor implements HitExtractor {
    static final String NAME = "i";

    private final String hitName;
    private final String fieldName;
    private final boolean useDocValue;
    // pre-split path used for source traversal; empty when doc values are used
    private final String[] tree;

    public InnerHitExtractor(String hitName, String name, boolean useDocValue) {
        this.hitName = hitName;
        this.fieldName = name;
        this.useDocValue = useDocValue;
        this.tree = useDocValue ? Strings.EMPTY_ARRAY : Strings.tokenizeToStringArray(name, ".");
    }

    InnerHitExtractor(StreamInput in) throws IOException {
        // stream order mirrors writeTo: hitName, fieldName, useDocValue
        this(in.readString(), in.readString(), in.readBoolean());
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(hitName);
        out.writeString(fieldName);
        out.writeBoolean(useDocValue);
    }

    /**
     * Extracts the value, returning {@code null} when the field (or any
     * intermediate component of a dotted path) is absent from the source.
     *
     * @throws ExecutionException if an intermediate path component exists
     *         but is not an object
     */
    @SuppressWarnings("unchecked")
    @Override
    public Object get(SearchHit hit) {
        if (useDocValue) {
            DocumentField field = hit.field(fieldName);
            return field != null ? field.getValue() : null;
        }
        Map<String, Object> source = hit.getSourceAsMap();
        if (source == null) {
            return null;
        }
        Object value = source;
        for (String node : tree) {
            if (value == null) {
                // a component of the path is missing - the field is absent.
                // (Previously the lookup fell through to the stale top-level
                // map and could wrongly return an unrelated sibling field.)
                return null;
            }
            if (value instanceof Map == false) {
                throw new ExecutionException("Cannot extract value %s from source", fieldName);
            }
            value = ((Map<String, Object>) value).get(node);
        }
        return value;
    }

    @Override
    public String innerHitName() {
        return hitName;
    }

    String fieldName() {
        return fieldName;
    }

    public String hitName() {
        return hitName;
    }

    @Override
    public String toString() {
        return fieldName + "@" + hitName;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        InnerHitExtractor other = (InnerHitExtractor) obj;
        return fieldName.equals(other.fieldName)
                && hitName.equals(other.hitName)
                && useDocValue == other.useDocValue;
    }

    @Override
    public int hashCode() {
        return Objects.hash(hitName, fieldName, useDocValue);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.execution.search.extractor;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.SearchHit;

import java.io.IOException;
import java.util.Map;

/**
 * Looks up a single top-level field in the hit's _source map.
 */
public class SourceExtractor implements HitExtractor {
    public static final String NAME = "s";

    private final String fieldName;

    public SourceExtractor(String name) {
        this.fieldName = name;
    }

    SourceExtractor(StreamInput in) throws IOException {
        this(in.readString());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(fieldName);
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public Object get(SearchHit hit) {
        // TODO I think this will not work with dotted field names (objects or actual dots in the names)
        // confusingly, I think this is actually handled by InnerHitExtractor. This needs investigating or renaming
        // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/2874
        Map<String, Object> source = hit.getSourceAsMap();
        if (source == null) {
            return null;
        }
        return source.get(fieldName);
    }

    @Override
    public String innerHitName() {
        return null;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return fieldName.equals(((SourceExtractor) obj).fieldName);
    }

    @Override
    public int hashCode() {
        return fieldName.hashCode();
    }

    @Override
    public String toString() {
        /* # is sometimes known as the "hash" sign which reminds
         * me of a hash table lookup. */
        return "#" + fieldName;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression;

import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataType;

import static java.util.Collections.singletonList;

/**
 * A named wrapper around another expression (SQL {@code expr AS name}).
 * Nullability and data type are those of the wrapped child.
 */
public class Alias extends NamedExpression {

    private final Expression child;
    private final String qualifier;

    // attribute creation is deferred and cached - the class is immutable
    // so only one instance is ever needed
    private Attribute lazyAttribute;

    public Alias(Location location, String name, Expression child) {
        this(location, name, null, child, null);
    }

    public Alias(Location location, String name, String qualifier, Expression child) {
        this(location, name, qualifier, child, null);
    }

    public Alias(Location location, String name, String qualifier, Expression child, ExpressionId id) {
        this(location, name, qualifier, child, id, false);
    }

    public Alias(Location location, String name, String qualifier, Expression child, ExpressionId id, boolean synthetic) {
        super(location, name, singletonList(child), id, synthetic);
        this.child = child;
        this.qualifier = qualifier;
    }

    public Expression child() {
        return child;
    }

    public String qualifier() {
        return qualifier;
    }

    @Override
    public boolean nullable() {
        return child.nullable();
    }

    @Override
    public DataType dataType() {
        return child.dataType();
    }

    @Override
    public Attribute toAttribute() {
        Attribute attribute = lazyAttribute;
        if (attribute == null) {
            attribute = createAttribute();
            lazyAttribute = attribute;
        }
        return attribute;
    }

    private Attribute createAttribute() {
        if (resolved() == false) {
            return new UnresolvedAttribute(location(), name(), qualifier);
        }

        // reuse the child's attribute when it has one, otherwise create a fresh top-level one
        Attribute attr = Expressions.attribute(child);
        if (attr != null) {
            return attr.clone(location(), name(), child.dataType(), qualifier, child.nullable(), id(), synthetic());
        }
        return new RootFieldAttribute(location(), name(), child.dataType(), qualifier, child.nullable(), id(), synthetic());
    }

    @Override
    public String toString() {
        return child + " AS " + name() + "#" + id();
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression;

import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataType;

import java.util.Objects;

import static java.util.Collections.emptyList;

/**
 * A leaf named expression identified by its {@link ExpressionId}; semantic
 * equality is based solely on that id.
 */
public abstract class Attribute extends NamedExpression {

    // empty - such as a top level attribute in a SELECT clause
    // present - table name or a table name alias
    private final String qualifier;

    // can the attribute be null - typically used in JOINs
    private final boolean nullable;

    public Attribute(Location location, String name, String qualifier, ExpressionId id) {
        this(location, name, qualifier, true, id);
    }

    public Attribute(Location location, String name, String qualifier, boolean nullable, ExpressionId id) {
        this(location, name, qualifier, nullable, id, false);
    }

    public Attribute(Location location, String name, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) {
        super(location, name, emptyList(), id, synthetic);
        this.qualifier = qualifier;
        this.nullable = nullable;
    }

    public String qualifier() {
        return qualifier;
    }

    public String qualifiedName() {
        return qualifier == null ? name() : qualifier + "." + name();
    }

    public boolean nullable() {
        return nullable;
    }

    @Override
    public AttributeSet references() {
        return new AttributeSet(this);
    }

    // the with* methods return this when nothing changes, otherwise a modified clone

    public Attribute withLocation(Location location) {
        return Objects.equals(location(), location) ? this
                : clone(location, name(), dataType(), qualifier(), nullable(), id(), synthetic());
    }

    public Attribute withQualifier(String qualifier) {
        return Objects.equals(qualifier(), qualifier) ? this
                : clone(location(), name(), dataType(), qualifier, nullable(), id(), synthetic());
    }

    public Attribute withName(String name) {
        return Objects.equals(name(), name) ? this
                : clone(location(), name, dataType(), qualifier(), nullable(), id(), synthetic());
    }

    public Attribute withNullability(boolean nullable) {
        // primitive comparison - avoids the boxing done by Objects.equals
        return nullable() == nullable ? this
                : clone(location(), name(), dataType(), qualifier(), nullable, id(), synthetic());
    }

    public Attribute withId(ExpressionId id) {
        return Objects.equals(id(), id) ? this
                : clone(location(), name(), dataType(), qualifier(), nullable(), id, synthetic());
    }

    protected abstract Attribute clone(Location location, String name, DataType dataType, String qualifier,
            boolean nullable, ExpressionId id, boolean synthetic);

    @Override
    public Attribute toAttribute() {
        return this;
    }

    @Override
    public int semanticHash() {
        return id().hashCode();
    }

    @Override
    public boolean semanticEquals(Expression other) {
        // identity is defined purely by the expression id
        return other instanceof Attribute && id().equals(((Attribute) other).id());
    }

    @Override
    public String toString() {
        return name() + "{" + label() + "}" + "#" + id();
    }

    /** Short tag identifying the attribute kind, used by {@link #toString()}. */
    protected abstract String label();
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression;

import java.util.AbstractSet;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.stream.Stream;

import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;
import static java.util.Collections.unmodifiableCollection;
import static java.util.Collections.unmodifiableSet;

/**
 * Map keyed by {@link Attribute} that uses semantic equality
 * ({@link Attribute#semanticEquals(Expression)} / {@link Attribute#semanticHash()})
 * instead of regular equals/hashCode, by wrapping every key in an
 * {@link AttributeWrapper}.
 */
public class AttributeMap<E> {

    /** Adapts an {@link Attribute} to semantic equality for use as a map key. */
    static class AttributeWrapper {

        private final Attribute attr;

        AttributeWrapper(Attribute attr) {
            this.attr = attr;
        }

        @Override
        public int hashCode() {
            return attr.semanticHash();
        }

        @Override
        public boolean equals(Object obj) {
            if (obj instanceof AttributeWrapper) {
                AttributeWrapper aw = (AttributeWrapper) obj;
                return attr.semanticEquals(aw.attr);
            }
            return false;
        }

        @Override
        public String toString() {
            return attr.toString();
        }
    }

    /**
     * Set that does unwrapping of keys inside the keySet and iterator.
     */
    private abstract static class UnwrappingSet<W, U> extends AbstractSet<U> {
        private final Set<W> set;

        UnwrappingSet(Set<W> originalSet) {
            set = unmodifiableSet(originalSet);
        }

        @Override
        public Iterator<U> iterator() {
            return new Iterator<U>() {
                final Iterator<W> i = set.iterator();

                @Override
                public boolean hasNext() {
                    return i.hasNext();
                }

                @Override
                public U next() {
                    return unwrap(i.next());
                }
            };
        }

        protected abstract U unwrap(W next);

        @Override
        public Stream<U> stream() {
            return set.stream().map(this::unwrap);
        }

        @Override
        public Stream<U> parallelStream() {
            return set.parallelStream().map(this::unwrap);
        }

        @Override
        public int size() {
            return set.size();
        }

        @Override
        public boolean equals(Object o) {
            // NOTE(review): delegates to the wrapped (wrapper-element) set, so
            // comparing against a set of unwrapped elements is asymmetric -
            // confirm intended usage
            return set.equals(o);
        }

        @Override
        public int hashCode() {
            return set.hashCode();
        }

        @Override
        public Object[] toArray() {
            // NOTE(review): the cast assumes the elements are AttributeWrapper,
            // which only holds for the keySet view - verify for entrySet
            Object[] array = set.toArray();
            for (int i = 0; i < array.length; i++) {
                array[i] = ((AttributeWrapper) array[i]).attr;
            }
            return array;
        }

        @Override
        @SuppressWarnings("unchecked")
        public <A> A[] toArray(A[] a) {
            // collection is immutable so use that to our advantage
            if (a.length < size()) {
                a = (A[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), size());
            }
            int i = 0;
            Object[] result = a;
            for (U u : this) {
                result[i++] = u;
            }
            // array larger than size, mark the ending element as null
            if (a.length > size()) {
                a[size()] = null;
            }
            return a;
        }

        @Override
        public String toString() {
            return set.toString();
        }
    }

    private final Map<AttributeWrapper, E> delegate;
    // lazily created unwrapping views
    private Set<Attribute> keySet = null;
    private Collection<E> values = null;
    private Set<Entry<Attribute, E>> entrySet = null;

    public AttributeMap() {
        delegate = new LinkedHashMap<>();
    }

    public AttributeMap(Map<Attribute, E> attr) {
        if (attr.isEmpty()) {
            delegate = emptyMap();
        }
        else {
            delegate = new LinkedHashMap<>(attr.size());

            for (Entry<Attribute, E> entry : attr.entrySet()) {
                delegate.put(new AttributeWrapper(entry.getKey()), entry.getValue());
            }
        }
    }

    public AttributeMap(Attribute key, E value) {
        delegate = singletonMap(new AttributeWrapper(key), value);
    }

    void add(Attribute key, E value) {
        delegate.put(new AttributeWrapper(key), value);
    }

    // a set from a collection of sets without (too much) copying
    void addAll(AttributeMap<E> other) {
        delegate.putAll(other.delegate);
    }

    /**
     * Entries of this map whose key is not present in {@code other}.
     * (Method name keeps its original spelling for API compatibility.)
     */
    public AttributeMap<E> substract(AttributeMap<E> other) {
        AttributeMap<E> diff = new AttributeMap<>();
        for (Entry<AttributeWrapper, E> entry : this.delegate.entrySet()) {
            if (other.delegate.containsKey(entry.getKey()) == false) {
                diff.delegate.put(entry.getKey(), entry.getValue());
            }
        }

        return diff;
    }

    /** Entries whose key is present in both maps; values are taken from the smaller map. */
    public AttributeMap<E> intersect(AttributeMap<E> other) {
        AttributeMap<E> smaller = (other.size() > size() ? this : other);
        AttributeMap<E> larger = (smaller == this ? other : this);

        AttributeMap<E> intersect = new AttributeMap<>();
        for (Entry<AttributeWrapper, E> entry : smaller.delegate.entrySet()) {
            if (larger.delegate.containsKey(entry.getKey())) {
                intersect.delegate.put(entry.getKey(), entry.getValue());
            }
        }

        return intersect;
    }

    /** True when every key of this map is also a key of {@code other}. */
    public boolean subsetOf(AttributeMap<E> other) {
        if (this.size() > other.size()) {
            return false;
        }
        for (AttributeWrapper aw : delegate.keySet()) {
            if (other.delegate.containsKey(aw) == false) {
                return false;
            }
        }

        return true;
    }

    /** Names of all keys, in insertion order. */
    public Set<String> attributeNames() {
        Set<String> s = new LinkedHashSet<>(size());

        for (AttributeWrapper aw : delegate.keySet()) {
            s.add(aw.attr.name());
        }
        return s;
    }

    public int size() {
        return delegate.size();
    }

    public boolean isEmpty() {
        return delegate.isEmpty();
    }

    public boolean containsKey(Object key) {
        if (key instanceof NamedExpression) {
            return delegate.keySet().contains(new AttributeWrapper(((NamedExpression) key).toAttribute()));
        }
        return false;
    }

    public boolean containsValue(Object value) {
        return delegate.values().contains(value);
    }

    public E get(Object key) {
        if (key instanceof NamedExpression) {
            return delegate.get(new AttributeWrapper(((NamedExpression) key).toAttribute()));
        }
        return null;
    }

    public E getOrDefault(Object key, E defaultValue) {
        E e;
        // mirror Map.getOrDefault: fall back only when the key is truly absent,
        // not when it is mapped to null
        return (((e = get(key)) != null) || containsKey(key))
                ? e
                : defaultValue;
    }

    public Set<Attribute> keySet() {
        if (keySet == null) {
            keySet = new UnwrappingSet<AttributeWrapper, Attribute>(delegate.keySet()) {
                @Override
                protected Attribute unwrap(AttributeWrapper next) {
                    return next.attr;
                }
            };
        }
        return keySet;
    }

    public Collection<E> values() {
        if (values == null) {
            values = unmodifiableCollection(delegate.values());
        }
        return values;
    }

    public Set<Entry<Attribute, E>> entrySet() {
        if (entrySet == null) {
            entrySet = new UnwrappingSet<Entry<AttributeWrapper, E>, Entry<Attribute, E>>(delegate.entrySet()) {
                @Override
                protected Entry<Attribute, E> unwrap(final Entry<AttributeWrapper, E> next) {
                    return new Entry<Attribute, E>() {
                        @Override
                        public Attribute getKey() {
                            return next.getKey().attr;
                        }

                        @Override
                        public E getValue() {
                            return next.getValue();
                        }

                        @Override
                        public E setValue(E value) {
                            // the map views are read-only
                            throw new UnsupportedOperationException();
                        }
                    };
                }
            };
        }
        return entrySet;
    }

    public void forEach(BiConsumer<? super Attribute, ? super E> action) {
        delegate.forEach((k, v) -> action.accept(k.attr, v));
    }

    @Override
    public int hashCode() {
        return delegate.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj instanceof AttributeMap) {
            obj = ((AttributeMap<?>) obj).delegate;
        }
        return delegate.equals(obj);
    }

    @Override
    public String toString() {
        return delegate.toString();
    }
}
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import java.util.Collection; +import java.util.Iterator; +import java.util.Set; +import java.util.Spliterator; +import java.util.function.Consumer; +import java.util.function.Predicate; +import java.util.stream.Stream; + +import static java.util.Collections.emptyMap; + +public class AttributeSet implements Set { + + private static final AttributeMap EMPTY_DELEGATE = new AttributeMap<>(emptyMap()); + + public static final AttributeSet EMPTY = new AttributeSet(EMPTY_DELEGATE); + + // use the same name as in HashSet + private static final Object PRESENT = new Object(); + + private final AttributeMap delegate; + + public AttributeSet() { + delegate = new AttributeMap<>(); + } + + public AttributeSet(Attribute attr) { + delegate = new AttributeMap(attr, PRESENT); + } + + public AttributeSet(Collection attr) { + if (attr.isEmpty()) { + delegate = EMPTY_DELEGATE; + } + else { + delegate = new AttributeMap(); + + for (Attribute a : attr) { + delegate.add(a, PRESENT); + } + } + } + + private AttributeSet(AttributeMap delegate) { + this.delegate = delegate; + } + + // package protected - should be called through Expressions to cheaply create + // a set from a collection of sets without too much copying + void addAll(AttributeSet other) { + delegate.addAll(other.delegate); + } + + public AttributeSet substract(AttributeSet other) { + return new AttributeSet(delegate.substract(other.delegate)); + } + + public AttributeSet intersect(AttributeSet other) { + return new AttributeSet(delegate.intersect(other.delegate)); + } + + public boolean subsetOf(AttributeSet other) { + return delegate.subsetOf(other.delegate); + } + + public Set names() { + return delegate.attributeNames(); + } + + public void forEach(Consumer action) { + delegate.forEach((k, v) -> 
action.accept(k));
+    }
+
+    public int size() {
+        return delegate.size();
+    }
+
+    public boolean isEmpty() {
+        return delegate.isEmpty();
+    }
+
+    public boolean contains(Object o) {
+        return delegate.containsKey(o);
+    }
+
+    public boolean containsAll(Collection<?> c) {
+        for (Object o : c) {
+            if (!delegate.containsKey(o)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    public Iterator<Attribute> iterator() {
+        return delegate.keySet().iterator();
+    }
+
+    public Object[] toArray() {
+        return delegate.keySet().toArray();
+    }
+
+    public <T> T[] toArray(T[] a) {
+        return delegate.keySet().toArray(a);
+    }
+
+    public boolean add(Attribute e) {
+        throw new UnsupportedOperationException();
+    }
+
+    public boolean remove(Object o) {
+        throw new UnsupportedOperationException();
+    }
+
+    public boolean addAll(Collection<? extends Attribute> c) {
+        throw new UnsupportedOperationException();
+    }
+
+    public boolean retainAll(Collection<?> c) {
+        throw new UnsupportedOperationException();
+    }
+
+    public boolean removeAll(Collection<?> c) {
+        throw new UnsupportedOperationException();
+    }
+
+    public void clear() {
+        throw new UnsupportedOperationException();
+    }
+
+    public Spliterator<Attribute> spliterator() {
+        throw new UnsupportedOperationException();
+    }
+
+    public boolean removeIf(Predicate<? super Attribute> filter) {
+        throw new UnsupportedOperationException();
+    }
+
+    public Stream<Attribute> stream() {
+        return delegate.keySet().stream();
+    }
+
+    public Stream<Attribute> parallelStream() {
+        return delegate.keySet().parallelStream();
+    }
+
+    public boolean equals(Object o) {
+        return delegate.equals(o instanceof AttributeSet ? ((AttributeSet) o).delegate : o); // fixed: unwrap the other set's delegate, otherwise two equal AttributeSets never compare equal
+    }
+
+    public int hashCode() {
+        return delegate.hashCode();
+    }
+    @Override
+    public String toString() {
+        return delegate.keySet().toString();
+    }
+}
\ No newline at end of file
diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryExpression.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryExpression.java
new file mode 100644
index 00000000000..0b654dd26d4
--- /dev/null
+++
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryExpression.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.expression;
+
+import org.elasticsearch.xpack.sql.tree.Location;
+
+import java.util.Arrays;
+import java.util.Objects;
+
+public abstract class BinaryExpression extends Expression {
+
+    private final Expression left, right;
+
+    protected BinaryExpression(Location location, Expression left, Expression right) {
+        super(location, Arrays.asList(left, right));
+        this.left = left;
+        this.right = right;
+    }
+
+    public Expression left() {
+        return left;
+    }
+
+    public Expression right() {
+        return right;
+    }
+
+    @Override
+    public boolean foldable() {
+        return left.foldable() && right.foldable();
+    }
+
+    @Override
+    public boolean nullable() {
+        return left.nullable() || right.nullable(); // fixed: was `left.nullable() || left.nullable()` which ignored the right child
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(left, right);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!super.equals(obj)) {
+            return false;
+        }
+
+        BinaryExpression other = (BinaryExpression) obj;
+        return Objects.equals(left, other.left)
+                && Objects.equals(right, other.right);
+    }
+
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        sb.append(left());
+        sb.append(" ");
+        sb.append(symbol());
+        sb.append(" ");
+        sb.append(right());
+        return sb.toString();
+    }
+
+    public abstract String symbol();
+
+    public abstract BinaryExpression swapLeftAndRight();
+}
\ No newline at end of file
diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryLogic.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryLogic.java
new file mode 100644
index 00000000000..edef746e41c
--- /dev/null
+++
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryLogic.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +public abstract class BinaryLogic extends BinaryOperator { + + protected BinaryLogic(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + public DataType dataType() { + return DataTypes.BOOLEAN; + } + + @Override + protected TypeResolution resolveInputType(DataType inputType) { + return DataTypes.BOOLEAN.equals(inputType) ? TypeResolution.TYPE_RESOLVED : new TypeResolution( + "'%s' requires type %s not %s", symbol(), DataTypes.BOOLEAN.sqlName(), inputType.sqlName()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryOperator.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryOperator.java new file mode 100644 index 00000000000..8390a86c4ad --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryOperator.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +//Binary expression that requires both input expressions to have the same type +//Compatible types should be handled by the analyzer (by using the narrowest type) +public abstract class BinaryOperator extends BinaryExpression { + + public interface Negateable { + BinaryExpression negate(); + } + + protected BinaryOperator(Location location, Expression left, Expression right) { + super(location, left, right); + } + + protected abstract TypeResolution resolveInputType(DataType inputType); + + @Override + protected TypeResolution resolveType() { + if (!childrenResolved()) { + return new TypeResolution("Unresolved children"); + } + DataType l = left().dataType(); + DataType r = right().dataType(); + + TypeResolution resolution = resolveInputType(l); + + if (resolution == TypeResolution.TYPE_RESOLVED) { + return resolveInputType(r); + } + return resolution; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Exists.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Exists.java new file mode 100644 index 00000000000..a158661e560 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Exists.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +public class Exists extends SubQueryExpression { + + public Exists(Location location, LogicalPlan query) { + this(location, query, null); + } + + public Exists(Location location, LogicalPlan query, ExpressionId id) { + super(location, query, id); + } + + @Override + protected SubQueryExpression clone(LogicalPlan newQuery) { + return new Exists(location(), newQuery); + } + + @Override + public DataType dataType() { + return DataTypes.BOOLEAN; + } + + @Override + public boolean nullable() { + return false; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Expression.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Expression.java new file mode 100644 index 00000000000..4797e50d411 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Expression.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.capabilities.Resolvable; +import org.elasticsearch.xpack.sql.capabilities.Resolvables; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.tree.NodeUtils; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.List; +import java.util.Locale; + +import static java.lang.String.format; + +public abstract class Expression extends Node implements Resolvable { + + public static class TypeResolution { + private final boolean failed; + private final String message; + + public static final TypeResolution TYPE_RESOLVED = new TypeResolution(false, StringUtils.EMPTY); + + public TypeResolution(String message, Object... args) { + this(true, format(Locale.ROOT, message, args)); + } + + private TypeResolution(boolean unresolved, String message) { + this.failed = unresolved; + this.message = message; + } + + public boolean unresolved() { + return failed; + } + + public boolean resolved() { + return !failed; + } + + public String message() { + return message; + } + } + + private TypeResolution lazyTypeResolution = null; + private Boolean lazyChildrenResolved = null; + private Expression lazyCanonical = null; + + public Expression(Location location, List children) { + super(location, children); + } + + // whether the expression can be evaluated statically (folded) or not + public boolean foldable() { + return false; + } + + public Object fold() { + throw new SqlIllegalArgumentException("Should not fold expression"); + } + + public abstract boolean nullable(); + + // the references/inputs/leaves of the expression tree + public AttributeSet references() { + return Expressions.references(children()); + } + + public boolean childrenResolved() { + if (lazyChildrenResolved == null) { + 
lazyChildrenResolved = Boolean.valueOf(Resolvables.resolved(children())); + } + return lazyChildrenResolved; + } + + public final TypeResolution typeResolved() { + if (lazyTypeResolution == null) { + lazyTypeResolution = resolveType(); + } + return lazyTypeResolution; + } + + protected TypeResolution resolveType() { + return TypeResolution.TYPE_RESOLVED; + } + + public final Expression canonical() { + if (lazyCanonical == null) { + lazyCanonical = canonicalize(); + } + return lazyCanonical; + } + + protected Expression canonicalize() { + return this; + } + + public boolean semanticEquals(Expression other) { + return canonical().equals(other.canonical()); + } + + public int semanticHash() { + return canonical().hashCode(); + } + + @Override + public boolean resolved() { + return childrenResolved() && typeResolved().resolved(); + } + + public abstract DataType dataType(); + + @Override + public abstract int hashCode(); + + @Override + public String toString() { + return nodeName() + "[" + NodeUtils.propertiesToString(this, false) + "]"; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionId.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionId.java new file mode 100644 index 00000000000..0191e63e430 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionId.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.sql.expression;
+
+import java.util.Objects;
+
+public class ExpressionId {
+
+    private final int id; // per-JVM counter value, assigned by ExpressionIdGenerator
+    private final String jvmId; // disambiguates ids produced by different JVMs
+
+    ExpressionId(int id, String jvmId) { // package-private: obtain instances via ExpressionIdGenerator.newId()
+        this.id = id;
+        this.jvmId = jvmId;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id, jvmId);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+
+        ExpressionId other = (ExpressionId) obj;
+        return id == other.id
+                && Objects.equals(jvmId, other.jvmId);
+    }
+
+    @Override
+    public String toString() {
+        return String.valueOf(id); // jvmId deliberately left out to keep plan dumps readable
+        //#+ jvmId;
+    }
+}
diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionIdGenerator.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionIdGenerator.java
new file mode 100644
index 00000000000..957898a84dc
--- /dev/null
+++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionIdGenerator.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.expression; + +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; + +//TODO: this class is thread-safe but used across multiple sessions might cause the id to roll over and potentially generate an already assigned id +// making this session scope would simplify things +// (which also begs the question on whether thread-safety is needed than) + +// TODO: hook this into SqlSession#SessionContext +public class ExpressionIdGenerator { + + private static final AtomicInteger GLOBAL_ID = new AtomicInteger(); + private static final String JVM_ID = "@" + UUID.randomUUID().toString(); + + public static final ExpressionId EMPTY = new ExpressionId(-1, "@"); + + public static ExpressionId newId() { + return new ExpressionId(GLOBAL_ID.getAndIncrement(), JVM_ID); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionSet.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionSet.java new file mode 100644 index 00000000000..3adea47c6b8 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionSet.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.sql.expression;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+
+import static java.util.Collections.emptyList;
+
+/**
+ * @param <E> expression type
+ */
+public class ExpressionSet<E extends Expression> implements Set<E> {
+
+    @SuppressWarnings("rawtypes")
+    public static final ExpressionSet EMPTY = new ExpressionSet<>(emptyList());
+
+    @SuppressWarnings("unchecked")
+    public static <T extends Expression> ExpressionSet<T> emptySet() {
+        return (ExpressionSet<T>) EMPTY;
+    }
+
+    // canonical to actual/original association
+    private final Map<Expression, E> map = new LinkedHashMap<>();
+
+    public ExpressionSet() {
+        super();
+    }
+
+    public ExpressionSet(Collection<? extends E> c) {
+        addAll(c);
+    }
+
+    // Returns the equivalent expression (if already exists in the set) or null if none is found
+    public E get(Expression e) {
+        return map.get(e.canonical());
+    }
+
+    @Override
+    public int size() {
+        return map.size();
+    }
+
+    @Override
+    public boolean isEmpty() {
+        return map.isEmpty();
+    }
+
+    @Override
+    public boolean contains(Object o) {
+        if (o instanceof Expression) {
+            return map.containsKey(((Expression) o).canonical());
+        }
+        return false;
+    }
+
+    @Override
+    public boolean containsAll(Collection<?> c) {
+        for (Object o : c) {
+            if (!contains(o)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public Iterator<E> iterator() {
+        return map.values().iterator();
+    }
+
+    @Override
+    public boolean add(E e) {
+        return map.putIfAbsent(e.canonical(), e) == null;
+    }
+
+    @Override
+    public boolean addAll(Collection<? extends E> c) {
+        boolean result = false; // fixed: Collection#addAll must report "set changed"; `result &= add(o)` returned false whenever ANY element pre-existed
+        for (E o : c) {
+            result |= add(o);
+        }
+        return result;
+    }
+
+    @Override
+    public boolean retainAll(Collection<?> c) {
+        boolean modified = false;
+
+        Iterator<Expression> keys = map.keySet().iterator();
+
+        while (keys.hasNext()) {
+            Expression key = keys.next();
+            boolean found = false;
+            for (Object o : c) {
+                if (o instanceof Expression) {
+                    o = ((Expression) o).canonical();
+                }
+
if (key.equals(o)) {
+                    found = true;
+                    break;
+                }
+            }
+            if (!found) {
+                keys.remove(); modified = true; // fixed: `modified` was never set, so retainAll always reported "unchanged" even after removals
+            }
+        }
+        return modified;
+    }
+
+    @Override
+    public boolean remove(Object o) {
+        if (o instanceof Expression) {
+            return map.remove(((Expression) o).canonical()) != null;
+        }
+        return false;
+    }
+
+    @Override
+    public boolean removeAll(Collection<?> c) {
+        boolean modified = false;
+        for (Object o : c) {
+            modified |= remove(o);
+        }
+        return modified;
+    }
+
+    @Override
+    public void clear() {
+        map.clear();
+    }
+
+    @Override
+    public Object[] toArray() {
+        return map.values().toArray();
+    }
+
+    @Override
+    public <T> T[] toArray(T[] a) {
+        return map.values().toArray(a);
+    }
+
+    @Override
+    public String toString() {
+        return map.toString();
+    }
+}
\ No newline at end of file
diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java
new file mode 100644
index 00000000000..cfd4a49bb40
--- /dev/null
+++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.expression;
+
+import org.elasticsearch.xpack.sql.expression.Expression.TypeResolution;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.function.Predicate;
+
+import static java.util.Collections.emptyList;
+import static java.util.stream.Collectors.toList;
+
+public abstract class Expressions {
+
+    public static List<NamedExpression> asNamed(List<? extends Expression> exp) {
+        return exp.stream()
+                .map(NamedExpression.class::cast)
+                .collect(toList());
+    }
+
+    public static NamedExpression wrapAsNamed(Expression exp) {
+        return exp instanceof NamedExpression ?
(NamedExpression) exp : new Alias(exp.location(), exp.nodeName(), exp); + } + + public static List asAttributes(List named) { + if (named.isEmpty()) { + return emptyList(); + } + List list = new ArrayList<>(named.size()); + for (NamedExpression exp : named) { + list.add(exp.toAttribute()); + } + return list; + } + + public static boolean anyMatch(List exps, Predicate predicate) { + for (Expression exp : exps) { + if (exp.anyMatch(predicate)) { + return true; + } + } + return false; + } + + public static boolean nullable(List exps) { + for (Expression exp : exps) { + if (!exp.nullable()) { + return false; + } + } + return true; + } + + public static AttributeSet references(List exps) { + if (exps.isEmpty()) { + return AttributeSet.EMPTY; + } + + AttributeSet set = new AttributeSet(); + for (Expression exp : exps) { + set.addAll(exp.references()); + } + return set; + } + + public static String name(Expression e) { + return e instanceof NamedExpression ? ((NamedExpression) e).name() : e.nodeName(); + } + + public static List names(Collection e) { + List names = new ArrayList<>(e.size()); + for (Expression ex : e) { + names.add(name(ex)); + } + + return names; + } + + public static Attribute attribute(Expression e) { + if (e instanceof NamedExpression) { + return ((NamedExpression) e).toAttribute(); + } + if (e != null && e.foldable()) { + return new LiteralAttribute(Literal.of(e)); + } + return null; + } + + public static TypeResolution typeMustBe(Expression e, Predicate predicate, String message) { + return predicate.test(e) ? TypeResolution.TYPE_RESOLVED : new TypeResolution(message); + } + + public static TypeResolution typeMustBeNumeric(Expression e) { + return e.dataType().isNumeric()? 
TypeResolution.TYPE_RESOLVED : new TypeResolution( + "Argument required to be numeric ('%s' of type '%s')", Expressions.name(e), e.dataType().esName()); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java new file mode 100644 index 00000000000..d6783f6f423 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import java.util.Map; +import java.util.Map.Entry; + +import org.elasticsearch.xpack.sql.analysis.index.MappingException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.TextType; + +public abstract class FieldAttribute extends TypedAttribute { + + FieldAttribute(Location location, String name, DataType dataType) { + this(location, name, dataType, null, true, null, false); + } + + FieldAttribute(Location location, String name, DataType dataType, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) { + super(location, name, dataType, qualifier, nullable, id, synthetic); + } + + public boolean isAnalyzed() { + return dataType() instanceof TextType; + } + + public FieldAttribute notAnalyzedAttribute() { + if (isAnalyzed()) { + Map docValueFields = ((TextType) dataType()).docValueFields(); + if (docValueFields.size() == 1) { + Entry entry = docValueFields.entrySet().iterator().next(); + return with(entry.getKey(), entry.getValue()); + } + if (docValueFields.isEmpty()) { + throw new MappingException("No docValue multi-field defined 
for %s", name()); + } + if (docValueFields.size() > 1) { + DataType dataType = docValueFields.get("keyword"); + if (dataType != null && dataType.hasDocValues()) { + return with("keyword", dataType); + } + throw new MappingException("Default 'keyword' not available as multi-fields and multiple options available for %s", name()); + } + } + return this; + } + + protected FieldAttribute with(String subFieldName, DataType type) { + return (FieldAttribute) clone(location(), name() + "." + subFieldName, type, qualifier(), nullable(), id(), synthetic()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Foldables.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Foldables.java new file mode 100644 index 00000000000..6f8cdaa9dae --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Foldables.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.sql.expression;
+
+import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
+import org.elasticsearch.xpack.sql.type.DataType;
+import org.elasticsearch.xpack.sql.type.DataTypeConversion;
+import org.elasticsearch.xpack.sql.type.DataTypes;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public abstract class Foldables { // static helpers: extract constant values from foldable expressions
+
+    @SuppressWarnings("unchecked")
+    public static <T> T valueOf(Expression e, DataType to) { // folds e, then converts the result to type `to`; throws when e is not foldable
+        if (e.foldable()) {
+            return (T) DataTypeConversion.conversionFor(e.dataType(), to).convert(e.fold());
+        }
+        throw new SqlIllegalArgumentException("Cannot determine value for %s", e);
+    }
+
+    public static Object valueOf(Expression e) { // folds e without any type conversion
+        if (e.foldable()) {
+            return e.fold();
+        }
+        throw new SqlIllegalArgumentException("Cannot determine value for %s", e);
+    }
+
+    public static String stringValueOf(Expression e) {
+        return valueOf(e, DataTypes.KEYWORD);
+    }
+
+    public static Integer intValueOf(Expression e) {
+        return valueOf(e, DataTypes.INTEGER);
+    }
+
+    public static Long longValueOf(Expression e) {
+        return valueOf(e, DataTypes.LONG);
+    }
+
+    public static double doubleValueOf(Expression e) {
+        return valueOf(e, DataTypes.DOUBLE);
+    }
+
+    public static <T> List<T> valuesOf(List<Expression> list, DataType to) { // folds every element of list to type `to`, preserving order
+        List<T> l = new ArrayList<>(list.size());
+        for (Expression e : list) {
+            l.add(valueOf(e, to));
+        }
+        return l;
+    }
+
+    public static List<Double> doubleValuesOf(List<Expression> list) {
+        return valuesOf(list, DataTypes.DOUBLE);
+    }
+}
diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/LeafExpression.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/LeafExpression.java
new file mode 100644
index 00000000000..3f8978725de
--- /dev/null
+++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/LeafExpression.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements.
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.emptyList; + +public abstract class LeafExpression extends Expression { + + protected LeafExpression(Location location) { + super(location, emptyList()); + } + + public AttributeSet references() { + return AttributeSet.EMPTY; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java new file mode 100644 index 00000000000..aa36caa8997 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression;

import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.type.DataTypeConversion;
import org.elasticsearch.xpack.sql.type.DataTypes;

import java.util.Objects;

/**
 * A constant value in the expression tree. The given value is eagerly
 * converted to the declared {@link DataType} at construction time.
 */
public class Literal extends LeafExpression {

    public static final Literal TRUE = Literal.of(Location.EMPTY, Boolean.TRUE);
    public static final Literal FALSE = Literal.of(Location.EMPTY, Boolean.FALSE);

    private final Object value;
    private final DataType dataType;

    public Literal(Location location, Object value, DataType dataType) {
        super(location);
        this.dataType = dataType;
        // normalize the value to the declared type up-front
        this.value = DataTypeConversion.convert(value, dataType);
    }

    public Object value() {
        return value;
    }

    @Override
    public boolean foldable() {
        // a literal is trivially foldable - it folds to itself
        return true;
    }

    @Override
    public boolean nullable() {
        return value == null;
    }

    @Override
    public DataType dataType() {
        return dataType;
    }

    @Override
    public boolean resolved() {
        return true;
    }

    @Override
    public Object fold() {
        return value;
    }

    @Override
    public int hashCode() {
        return Objects.hash(value, dataType);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        Literal other = (Literal) obj;
        return Objects.equals(value, other.value) && Objects.equals(dataType, other.dataType);
    }

    @Override
    public String toString() {
        return Objects.toString(value);
    }

    /**
     * Wraps the given value as a {@link Literal}, inferring its {@link DataType};
     * returns the argument unchanged when it already is a literal.
     */
    public static Literal of(Location loc, Object value) {
        if (value instanceof Literal) {
            return (Literal) value;
        }
        return new Literal(loc, value, DataTypes.fromJava(value));
    }

    /**
     * Folds the given expression into a {@link Literal}.
     *
     * @throws SqlIllegalArgumentException if the expression is not foldable
     */
    public static Literal of(Expression foldable) {
        if (foldable instanceof Literal) {
            return (Literal) foldable;
        }
        if (foldable.foldable() == false) {
            throw new SqlIllegalArgumentException("Foldable expression required for Literal creation; received unfoldable " + foldable);
        }
        return new Literal(foldable.location(), foldable.fold(), foldable.dataType());
    }
}
+ return new LiteralAttribute(location, name, qualifier, nullable, id, synthetic, dataType, literal); + } + + public ProcessorDefinition asProcessorDefinition() { + return new ConstantInput(literal, literal.value()); + } + + @Override + protected String label() { + return "c"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/NamedExpression.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/NamedExpression.java new file mode 100644 index 00000000000..d80e8bfbd51 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/NamedExpression.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; +import java.util.Objects; + +public abstract class NamedExpression extends Expression { + + private final String name; + private final ExpressionId id; + private final boolean synthetic; + + public NamedExpression(Location location, String name, List children, ExpressionId id) { + this(location, name, children, id, false); + } + + public NamedExpression(Location location, String name, List children, ExpressionId id, boolean synthetic) { + super(location, children); + this.name = name; + this.id = (id == null ? 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression;

import org.elasticsearch.xpack.sql.tree.Location;

import java.util.List;
import java.util.Objects;

/**
 * An {@link Expression} that carries a name and a unique {@link ExpressionId},
 * so it can be turned into an {@link Attribute} and referenced elsewhere in a plan.
 */
public abstract class NamedExpression extends Expression {

    private final String name;
    private final ExpressionId id;
    // true for expressions introduced by the planner rather than by the user query
    private final boolean synthetic;

    // NOTE(review): generics restored on the children list (the collapsed source had the
    // raw type `List`); Expression's constructor takes a List<Expression> - confirm.
    public NamedExpression(Location location, String name, List<Expression> children, ExpressionId id) {
        this(location, name, children, id, false);
    }

    public NamedExpression(Location location, String name, List<Expression> children, ExpressionId id, boolean synthetic) {
        super(location, children);
        this.name = name;
        // generate a fresh id when the caller does not supply one
        this.id = (id == null ? ExpressionIdGenerator.newId() : id);
        this.synthetic = synthetic;
    }

    public String name() {
        return name;
    }

    public ExpressionId id() {
        return id;
    }

    public boolean synthetic() {
        return synthetic;
    }

    /** Converts this expression into an attribute that can reference it. */
    public abstract Attribute toAttribute();

    @Override
    public int hashCode() {
        return Objects.hash(id, name, synthetic);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        NamedExpression other = (NamedExpression) obj;
        // primitive comparison instead of boxing through Objects.equals
        return synthetic == other.synthetic
                && Objects.equals(id, other.id)
                && Objects.equals(name(), other.name())
                && Objects.equals(children(), other.children());
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression;

import java.util.List;

import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.util.StringUtils;

import static java.util.Collections.emptyList;

/**
 * A {@link FieldAttribute} for a field living inside one or more nested objects;
 * keeps track of the chain of parent field names leading to it.
 */
public class NestedFieldAttribute extends FieldAttribute {

    // generics restored: parents are the dot-separated parent field names
    // (they are joined with StringUtils.concatWithDot below)
    private final List<String> parents;
    // pre-computed dotted path of the parents, used when building queries
    private final String parentPath;

    public NestedFieldAttribute(Location location, String name, DataType dataType, List<String> parents) {
        this(location, name, dataType, null, true, null, false, parents);
    }

    public NestedFieldAttribute(Location location, String name, DataType dataType, String qualifier, boolean nullable,
            ExpressionId id, boolean synthetic, List<String> parents) {
        super(location, name, dataType, qualifier, nullable, id, synthetic);
        this.parents = parents == null || parents.isEmpty() ? emptyList() : parents;
        this.parentPath = StringUtils.concatWithDot(parents);
    }

    public List<String> parents() {
        return parents;
    }

    public String parentPath() {
        return parentPath;
    }

    @Override
    protected Expression canonicalize() {
        // canonical form erases the name, qualifier and parents but keeps the id
        return new NestedFieldAttribute(location(), "", dataType(), null, true, id(), false, emptyList());
    }

    @Override
    protected Attribute clone(Location location, String name, DataType dataType, String qualifier, boolean nullable,
            ExpressionId id, boolean synthetic) {
        return new NestedFieldAttribute(location, name, dataType, qualifier, nullable, id, synthetic, parents);
    }

    @Override
    public String toString() {
        if (parents.isEmpty() == false) {
            // display nesting as a > b > field instead of dots
            return name().replace('.', '>') + "#" + id();
        }
        return super.toString();
    }

    @Override
    protected String label() {
        // "n" for nested
        return "n";
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression;

// Marker interface for expressions that do not support null values
// and thus can be eliminated when a null is encountered.
public interface NullIntolerant {

}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression;

import java.util.Objects;

import org.elasticsearch.xpack.sql.tree.Location;

/**
 * Wraps an expression with a sort direction (as used by ORDER BY).
 */
public class Order extends UnaryExpression {

    public enum OrderDirection {
        ASC, DESC
    }

    private final OrderDirection direction;

    public Order(Location location, Expression child, OrderDirection direction) {
        super(location, child);
        this.direction = direction;
    }

    public OrderDirection direction() {
        return direction;
    }

    @Override
    public int hashCode() {
        return Objects.hash(child(), direction);
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (other == null || getClass() != other.getClass()) {
            return false;
        }
        Order o = (Order) other;
        return Objects.equals(direction, o.direction) && Objects.equals(child(), o.child());
    }
}
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +public class RootFieldAttribute extends FieldAttribute { + + public RootFieldAttribute(Location location, String name, DataType dataType) { + this(location, name, dataType, null, true, null, false); + } + + public RootFieldAttribute(Location location, String name, DataType dataType, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) { + super(location, name, dataType, qualifier, nullable, id, synthetic); + } + + @Override + protected Expression canonicalize() { + return new RootFieldAttribute(location(), "", dataType(), null, true, id(), false); + } + + @Override + protected Attribute clone(Location location, String name, DataType dataType, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) { + return new RootFieldAttribute(location, name, dataType, qualifier, nullable, id, synthetic); + } + + @Override + protected String label() { + return "r"; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/ScalarSubquery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/ScalarSubquery.java new file mode 100644 index 00000000000..c28068396f4 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/ScalarSubquery.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +public class ScalarSubquery extends SubQueryExpression { + + public ScalarSubquery(Location location, LogicalPlan query) { + this(location, query, null); + } + + public ScalarSubquery(Location location, LogicalPlan query, ExpressionId id) { + super(location, query, id); + } + + @Override + protected ScalarSubquery clone(LogicalPlan newQuery) { + return new ScalarSubquery(location(), newQuery); + } + + @Override + public DataType dataType() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean nullable() { + return true; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/SubQueryExpression.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/SubQueryExpression.java new file mode 100644 index 00000000000..90297088c0f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/SubQueryExpression.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression;

import java.util.Collections;
import java.util.Objects;

import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.tree.Location;

/**
 * Base class for expressions wrapping a subquery ({@link LogicalPlan}).
 */
public abstract class SubQueryExpression extends Expression {

    private final LogicalPlan query;
    private final ExpressionId id;

    public SubQueryExpression(Location location, LogicalPlan query) {
        this(location, query, null);
    }

    public SubQueryExpression(Location location, LogicalPlan query, ExpressionId id) {
        super(location, Collections.emptyList());
        this.query = query;
        // generate a fresh id when the caller does not supply one
        this.id = (id == null ? ExpressionIdGenerator.newId() : id);
    }

    public LogicalPlan query() {
        return query;
    }

    public ExpressionId id() {
        return id;
    }

    @Override
    public boolean resolved() {
        // a subquery expression is never resolved on its own; the analyzer replaces it
        return false;
    }

    /** Returns a copy holding the new query, or {@code this} when the query is unchanged. */
    public SubQueryExpression withQuery(LogicalPlan newQuery) {
        return (Objects.equals(query, newQuery) ? this : clone(newQuery));
    }

    protected abstract SubQueryExpression clone(LogicalPlan newQuery);

    @Override
    public int hashCode() {
        return Objects.hash(query());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        // BUG FIX: the original cast to the subclass Exists, which throws
        // ClassCastException for any other subclass (e.g. ScalarSubquery);
        // the getClass() check above only guarantees a SubQueryExpression.
        SubQueryExpression other = (SubQueryExpression) obj;
        return Objects.equals(query(), other.query());
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression;

import java.util.Objects;

import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataType;

/**
 * An {@link Attribute} with a known {@link DataType}.
 */
public abstract class TypedAttribute extends Attribute {

    private final DataType dataType;

    protected TypedAttribute(Location location, String name, DataType dataType) {
        this(location, name, dataType, null, true, null, false);
    }

    protected TypedAttribute(Location location, String name, DataType dataType, String qualifier, boolean nullable,
            ExpressionId id, boolean synthetic) {
        super(location, name, qualifier, nullable, id, synthetic);
        this.dataType = dataType;
    }

    @Override
    public DataType dataType() {
        return dataType;
    }

    @Override
    public int hashCode() {
        // FIX: equals was overridden without hashCode, breaking the
        // equals/hashCode contract; hash the same fields equals compares
        return Objects.hash(name(), id(), nullable(), dataType, qualifier(), synthetic());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        TypedAttribute other = (TypedAttribute) obj;
        return Objects.equals(name(), other.name())
                && Objects.equals(id(), other.id())
                && Objects.equals(nullable(), other.nullable())
                && Objects.equals(dataType(), other.dataType())
                && Objects.equals(qualifier(), other.qualifier())
                && Objects.equals(synthetic(), other.synthetic());
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression;

import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataType;

import java.util.Objects;

import static java.util.Collections.singletonList;

/**
 * An {@link Expression} with exactly one child; nullability, resolution and
 * data type are all delegated to the child.
 */
public abstract class UnaryExpression extends Expression {

    private final Expression child;

    protected UnaryExpression(Location location, Expression child) {
        super(location, singletonList(child));
        this.child = child;
    }

    public Expression child() {
        return child;
    }

    @Override
    public boolean nullable() {
        return child.nullable();
    }

    @Override
    public boolean resolved() {
        return child.resolved();
    }

    @Override
    public DataType dataType() {
        return child.dataType();
    }

    @Override
    public int hashCode() {
        return Objects.hash(child);
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (other == null || getClass() != other.getClass()) {
            return false;
        }
        return Objects.equals(child, ((UnaryExpression) other).child);
    }
}
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.capabilities.UnresolvedException; + +import java.util.Objects; + +import static java.util.Collections.singletonList; + +public class UnresolvedAlias extends UnresolvedNamedExpression { + + private final Expression child; + + public UnresolvedAlias(Expression child) { + super(child.location(), singletonList(child)); + this.child = child; + } + + public Expression child() { + return child; + } + + @Override + public String unresolvedMessage() { + return "Unknown alias [" + name() + "]"; + } + + @Override + public boolean nullable() { + throw new UnresolvedException("nullable", this); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnresolvedAlias other = (UnresolvedAlias) obj; + return Objects.equals(child, other.child); + } + + @Override + public String toString() { + return child + " AS ?"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttribute.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttribute.java new file mode 100644 index 00000000000..2c1de5cb67c --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttribute.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.capabilities.Unresolvable; +import org.elasticsearch.xpack.sql.capabilities.UnresolvedException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.CollectionUtils; + +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +import static java.lang.String.format; + +public class UnresolvedAttribute extends Attribute implements Unresolvable { + + private final String unresolvedMsg; + private final Object resolutionMetadata; + + public UnresolvedAttribute(Location location, String name) { + this(location, name, null); + } + + public UnresolvedAttribute(Location location, String name, String qualifier) { + this(location, name, qualifier, null); + } + + public UnresolvedAttribute(Location location, String name, String qualifier, String unresolvedMessage) { + this(location, name, qualifier, null, unresolvedMessage, null); + } + + public UnresolvedAttribute(Location location, String name, String qualifier, ExpressionId id, String unresolvedMessage, Object resolutionMetadata) { + super(location, name, qualifier, id); + this.unresolvedMsg = unresolvedMessage == null ? 
errorMessage(qualifiedName(), null) : unresolvedMessage; + this.resolutionMetadata = resolutionMetadata; + } + + + public Object resolutionMetadata() { + return resolutionMetadata; + } + + @Override + public boolean resolved() { + return false; + } + + @Override + protected Attribute clone(Location location, String name, DataType dataType, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) { + return this; + } + + public UnresolvedAttribute withUnresolvedMessage(String unresolvedMsg) { + return new UnresolvedAttribute(location(), name(), qualifier(), id(), unresolvedMsg, resolutionMetadata()); + } + + @Override + public DataType dataType() { + throw new UnresolvedException("dataType", this); + } + + @Override + public String nodeString() { + return format(Locale.ROOT, "unknown column '%s'", name()); + } + + @Override + public String toString() { + return UNRESOLVED_PREFIX + qualifiedName(); + } + + @Override + protected String label() { + return UNRESOLVED_PREFIX; + } + + @Override + public String unresolvedMessage() { + return unresolvedMsg; + } + + public static String errorMessage(String name, List potentialMatches) { + String msg = "Unknown column [" + name + "]"; + if (!CollectionUtils.isEmpty(potentialMatches)) { + msg += ", did you mean " + (potentialMatches.size() == 1 ? 
"[" + potentialMatches.get(0) + "]": "any of " + potentialMatches.toString()) + "?"; + } + return msg; + } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj)) { + UnresolvedAttribute ua = (UnresolvedAttribute) obj; + return Objects.equals(resolutionMetadata, ua.resolutionMetadata) && Objects.equals(unresolvedMsg, ua.unresolvedMsg); + } + return false; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedNamedExpression.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedNamedExpression.java new file mode 100644 index 00000000000..1c56f87bb3e --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedNamedExpression.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.capabilities.Unresolvable; +import org.elasticsearch.xpack.sql.capabilities.UnresolvedException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.List; + +abstract class UnresolvedNamedExpression extends NamedExpression implements Unresolvable { + + UnresolvedNamedExpression(Location location, List children) { + super(location, "", children, ExpressionIdGenerator.EMPTY); + } + + @Override + public boolean resolved() { + return false; + } + + @Override + public String name() { + throw new UnresolvedException("name", this); + } + + @Override + public ExpressionId id() { + throw new UnresolvedException("id", this); + } + + @Override + public DataType dataType() { + throw new UnresolvedException("data type", this); + } + + @Override + public Attribute toAttribute() { + throw new UnresolvedException("attribute", this); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedStar.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedStar.java new file mode 100644 index 00000000000..ee9b87b650f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedStar.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.capabilities.UnresolvedException; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Objects; + +import static java.util.Collections.emptyList; + +public class UnresolvedStar extends UnresolvedNamedExpression { + + // typically used for nested fields + private final UnresolvedAttribute qualifier; + + public UnresolvedStar(Location location, UnresolvedAttribute qualifier) { + super(location, emptyList()); + this.qualifier = qualifier; + } + + @Override + public boolean nullable() { + throw new UnresolvedException("nullable", this); + } + + public UnresolvedAttribute qualifier() { + return qualifier; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnresolvedStar other = (UnresolvedStar) obj; + return Objects.equals(qualifier, other.qualifier); + } + + private String message() { + return (qualifier() != null ? qualifier() + "." : "") + "*"; + } + + @Override + public String unresolvedMessage() { + return "Cannot determine columns for " + message(); + } + + @Override + public String toString() { + return UNRESOLVED_PREFIX + message(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/AbstractFunctionRegistry.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/AbstractFunctionRegistry.java new file mode 100644 index 00000000000..e5c8b11f109 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/AbstractFunctionRegistry.java @@ -0,0 +1,182 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function;

import org.elasticsearch.common.Strings;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.expression.function.aware.DistinctAware;
import org.elasticsearch.xpack.sql.expression.function.aware.TimeZoneAware;
import org.elasticsearch.xpack.sql.parser.ParsingException;
import org.elasticsearch.xpack.sql.session.Configuration;
import org.elasticsearch.xpack.sql.tree.Node;
import org.elasticsearch.xpack.sql.tree.NodeUtils;
import org.elasticsearch.xpack.sql.tree.NodeUtils.NodeInfo;
import org.elasticsearch.xpack.sql.util.Check;
import org.elasticsearch.xpack.sql.util.StringUtils;

import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

import static java.util.Collections.emptyList;
import static java.util.stream.Collectors.toList;

/**
 * Base registry mapping normalized function names (and aliases) to their
 * definitions, and instantiating functions reflectively on resolution.
 * Generics restored throughout (the collapsed source had raw types).
 */
abstract class AbstractFunctionRegistry implements FunctionRegistry {

    protected final Map<String, FunctionDefinition> defs = new LinkedHashMap<>();

    // NOTE(review): this instance initializer calls the abstract functions()/aliases()
    // before the subclass constructor body runs - subclasses must back them with
    // constants, not constructor-initialized state.
    {
        for (Class<? extends Function> f : functions()) {
            FunctionDefinition def = def(f, aliases());
            defs.put(def.name(), def);
            for (String alias : def.aliases()) {
                // FIX: check and insert must use the same (normalized) key;
                // the original checked the raw alias but inserted the normalized one
                String normalized = normalize(alias);
                Check.isTrue(defs.containsKey(normalized) == false, "Alias %s already exists", alias);
                defs.put(normalized, def);
            }
        }
    }

    //TODO: change this to some type of auto discovery or auto creation of the discovery (annotation or the like)
    protected abstract Collection<Class<? extends Function>> functions();

    protected abstract Map<String, String> aliases();

    @Override
    public Function resolveFunction(UnresolvedFunction ur, Configuration cfg) {
        FunctionDefinition def = defs.get(normalize(ur.name()));
        if (def == null) {
            throw new SqlIllegalArgumentException("Cannot find function %s; this should have been caught during analysis", ur.name());
        }
        return createInstance(def.clazz(), ur, cfg);
    }

    @Override
    public String concreteFunctionName(String alias) {
        String normalized = normalize(alias);
        return aliases().getOrDefault(normalized, normalized);
    }

    @Override
    public boolean functionExists(String name) {
        return defs.containsKey(normalize(name));
    }

    @Override
    public Collection<FunctionDefinition> listFunctions() {
        return defs.entrySet().stream()
                .map(e -> new FunctionDefinition(e.getKey(), emptyList(), e.getValue().clazz()))
                .collect(toList());
    }

    @Override
    public Collection<FunctionDefinition> listFunctions(String pattern) {
        // a blank pattern matches everything
        Pattern p = Strings.hasText(pattern) ? StringUtils.likeRegex(normalize(pattern)) : null;
        return defs.entrySet().stream()
                .filter(e -> p == null || p.matcher(e.getKey()).matches())
                .map(e -> new FunctionDefinition(e.getKey(), emptyList(), e.getValue().clazz()))
                .collect(toList());
    }

    // Builds a definition for the function class, collecting the aliases pointing at it.
    private static FunctionDefinition def(Class<? extends Function> function, Map<String, String> aliases) {
        String primaryName = normalize(function.getSimpleName());
        List<String> al = aliases.entrySet().stream()
                .filter(e -> primaryName.equals(e.getValue()))
                .map(Map.Entry::getKey)
                .collect(toList());
        return new FunctionDefinition(primaryName, al, function);
    }

    protected static String normalize(String name) {
        // translate CamelCase to camel_case
        return StringUtils.camelCaseToUnderscore(name);
    }

    //
    // Instantiates a function through reflection. The constructor is expected to take
    // (Location, args...) plus a trailing boolean for DistinctAware and/or a time zone
    // for TimeZoneAware implementations.
    //
    @SuppressWarnings("rawtypes")
    private static Function createInstance(Class<? extends Function> clazz, UnresolvedFunction ur, Configuration cfg) {
        NodeInfo info = NodeUtils.info((Class) clazz);
        Class[] pTypes = info.ctr.getParameterTypes();

        boolean distinctAware = DistinctAware.class.isAssignableFrom(clazz);
        boolean timezoneAware = TimeZoneAware.class.isAssignableFrom(clazz);

        // constructor types - location - distinct? - timezone?
        int expectedParamCount = pTypes.length - (1 + (distinctAware ? 1 : 0) + (timezoneAware ? 1 : 0));

        // check constructor signature
        if (ur.children().size() != expectedParamCount) {
            List<String> expected = new ArrayList<>();
            // FIX: off-by-one - argument types occupy indices 1..expectedParamCount
            // (index 0 is Location); the original loop dropped the last one from
            // the error message
            for (int i = 1; i <= expectedParamCount; i++) {
                expected.add(pTypes[i].getSimpleName());
            }
            throw new ParsingException(ur.location(),
                    "Invalid number of arguments given to function [%s], expected %d argument(s):%s but received %d:%s",
                    ur.name(), expected.size(), expected.toString(), ur.children().size(), ur.children());
        }

        // validate distinct usage
        if (!distinctAware && ur.distinct()) {
            throw new ParsingException(ur.location(), "Function [%s] does not support DISTINCT yet it was specified", ur.name());
        }

        // assemble the constructor arguments in declaration order
        try {
            List<Object> args = new ArrayList<>();

            // always add location first
            args.add(ur.location());

            // then the actual function arguments
            args.addAll(ur.children());

            if (distinctAware) {
                args.add(ur.distinct());
            }
            if (timezoneAware) {
                args.add(cfg.timeZone());
            }

            return (Function) info.ctr.newInstance(args.toArray());
        } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException ex) {
            throw new SqlIllegalArgumentException(ex, "Cannot create instance of function %s", ur.name());
        }
    }
}
+ */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Avg; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Kurtosis; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Max; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Mean; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Min; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Percentile; +import org.elasticsearch.xpack.sql.expression.function.aggregate.PercentileRank; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Skewness; +import org.elasticsearch.xpack.sql.expression.function.aggregate.StddevPop; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Sum; +import org.elasticsearch.xpack.sql.expression.function.aggregate.SumOfSquares; +import org.elasticsearch.xpack.sql.expression.function.aggregate.VarPop; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfMonth; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfWeek; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfYear; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.HourOfDay; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MinuteOfDay; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MinuteOfHour; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MonthOfYear; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.SecondOfMinute; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Year; +import 
org.elasticsearch.xpack.sql.expression.function.scalar.math.ACos; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.ASin; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.ATan; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Abs; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Cbrt; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Ceil; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Cos; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Cosh; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Degrees; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.E; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Exp; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Expm1; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Floor; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Log; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Log10; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Pi; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Radians; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Round; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Sin; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Sinh; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Sqrt; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Tan; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; +import java.util.TreeMap; + +import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine; + +public class DefaultFunctionRegistry extends AbstractFunctionRegistry { + + private static final Collection> FUNCTIONS = combine(agg(), 
scalar()); + + private static final Map ALIASES; + static { + Map aliases = new TreeMap<>(); + aliases.put("DAY", "DAY_OF_MONTH"); + aliases.put("DOM", "DAY_OF_MONTH"); + aliases.put("DOW", "DAY_OF_WEEK"); + aliases.put("DOY", "DAY_OF_YEAR"); + aliases.put("HOUR", "HOUR_OF_DAY"); + aliases.put("MINUTE", "MINUTE_OF_HOUR"); + aliases.put("MONTH", "MONTH_OF_YEAR"); + aliases.put("SECOND", "SECOND_OF_MINUTE"); + ALIASES = unmodifiableMap(aliases); + } + + @Override + protected Collection> functions() { + return FUNCTIONS; + } + + @Override + protected Map aliases() { + return ALIASES; + } + + private static Collection> agg() { + return Arrays.asList( + Avg.class, + Count.class, + Max.class, + Min.class, + Sum.class, + // statistics + Mean.class, + StddevPop.class, + VarPop.class, + SumOfSquares.class, + Skewness.class, + Kurtosis.class, + Percentile.class, + PercentileRank.class + // TODO: add multi arg functions like Covariance, Correlate + + ); + } + + private static Collection> scalar() { + return combine(dateTimeFunctions(), + mathFunctions()); + } + + private static Collection> dateTimeFunctions() { + return Arrays.asList( + DayOfMonth.class, + DayOfWeek.class, + DayOfYear.class, + HourOfDay.class, + MinuteOfDay.class, + MinuteOfHour.class, + SecondOfMinute.class, + MonthOfYear.class, + Year.class + ); + } + + private static Collection> mathFunctions() { + return Arrays.asList( + Abs.class, + ACos.class, + ASin.class, + ATan.class, + Cbrt.class, + Ceil.class, + Cos.class, + Cosh.class, + Degrees.class, + E.class, + Exp.class, + Expm1.class, + Floor.class, + Log.class, + Log10.class, + Pi.class, + Radians.class, + Round.class, + Sin.class, + Sinh.class, + Sqrt.class, + Tan.class + ); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/Function.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/Function.java new file mode 100644 index 00000000000..ea7eb8ff61e --- 
/dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/Function.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.List; +import java.util.StringJoiner; + +public abstract class Function extends NamedExpression { + + private final String functionName, name; + + protected Function(Location location, List children) { + this(location, children, null, false); + } + + // TODO: Functions supporting distinct should add a dedicated constructor Location, List, boolean + protected Function(Location location, List children, ExpressionId id, boolean synthetic) { + // cannot detect name yet so override the name + super(location, null, children, id, synthetic); + functionName = StringUtils.camelCaseToUnderscore(getClass().getSimpleName()); + name = functionName() + functionArgs(); + } + + public final List arguments() { + return children(); + } + + @Override + public String name() { + return name; + } + + @Override + public boolean nullable() { + return false; + } + + @Override + public String toString() { + return name() + "#" + id(); + } + + public String functionName() { + return functionName; + } + + // TODO: ExpressionId might be converted into an Int which could make the String an int as well + public String functionId() { + return id().toString(); + } + + protected String functionArgs() { + StringJoiner 
sj = new StringJoiner(",", "(", ")"); + for (Expression child : children()) { + String val = child instanceof NamedExpression && child.resolved() ? Expressions.name(child) : child.toString(); + sj.add(val); + } + return sj.toString(); + } + + public boolean functionEquals(Function f) { + return f != null && getClass() == f.getClass() && arguments().equals(f.arguments()); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionAttribute.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionAttribute.java new file mode 100644 index 00000000000..6cbbd461554 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionAttribute.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.TypedAttribute; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Objects; + +public abstract class FunctionAttribute extends TypedAttribute { + + private final String functionId; + + protected FunctionAttribute(Location location, String name, DataType dataType, String qualifier, boolean nullable, ExpressionId id, + boolean synthetic, String functionId) { + super(location, name, dataType, qualifier, nullable, id, synthetic); + this.functionId = functionId; + } + + public String functionId() { + return functionId; + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(functionId, ((FunctionAttribute) obj).functionId()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionDefinition.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionDefinition.java new file mode 100644 index 00000000000..e7dc8c21628 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionDefinition.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function; + +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +import static java.lang.String.format; + +public class FunctionDefinition { + + private final String name; + private final List aliases; + private final Class clazz; + private final FunctionType type; + + FunctionDefinition(String name, List aliases, Class clazz) { + this.name = name; + this.aliases = aliases; + this.clazz = clazz; + this.type = FunctionType.of(clazz); + } + + public String name() { + return name; + } + + public List aliases() { + return aliases; + } + + public FunctionType type() { + return type; + } + + Class clazz() { + return clazz; + } + + @Override + public int hashCode() { + return Objects.hash(clazz); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + FunctionDefinition other = (FunctionDefinition) obj; + return Objects.equals(clazz, other.clazz) && + Objects.equals(name, other.name) && + Objects.equals(aliases, aliases); + } + + @Override + public String toString() { + return format(Locale.ROOT, "%s(%s)", name, aliases.isEmpty() ? "" : aliases.size() == 1 ? aliases.get(0) : aliases ); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java new file mode 100644 index 00000000000..23ec34f2864 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.session.Configuration; + +import java.util.Collection; + +public interface FunctionRegistry { + + Function resolveFunction(UnresolvedFunction ur, Configuration settings); + + String concreteFunctionName(String alias); + + boolean functionExists(String name); + + Collection listFunctions(); + + Collection listFunctions(String pattern); + +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionType.java new file mode 100644 index 00000000000..471fc374a92 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionType.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; + +public enum FunctionType { + + AGGREGATE (AggregateFunction.class), + SCALAR(ScalarFunction.class); + + private final Class baseClass; + + FunctionType(Class base) { + this.baseClass = base; + } + + public static FunctionType of(Class clazz) { + for (FunctionType type : values()) { + if (type.baseClass.isAssignableFrom(clazz)) { + return type; + } + } + throw new SqlIllegalArgumentException("Cannot identify the function type for %s", clazz); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/Functions.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/Functions.java new file mode 100644 index 00000000000..585af56eff0 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/Functions.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; + +public abstract class Functions { + + public static boolean isAggregate(Expression e) { + return e instanceof AggregateFunction; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunction.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunction.java new file mode 100644 index 00000000000..f717aece370 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunction.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.capabilities.Unresolvable; +import org.elasticsearch.xpack.sql.capabilities.UnresolvedException; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.CollectionUtils; + +import java.util.List; + +public class UnresolvedFunction extends Function implements Unresolvable { + + private final String name; + private final boolean distinct; + private final String unresolvedMsg; + // flag to indicate analysis has been applied and there's no point in doing it again + // this is an optimization to prevent searching for a better unresolved message over and over again + private final boolean analyzed; + + public UnresolvedFunction(Location location, String name, boolean distinct, List children) { + this(location, name, distinct, children, false, null); + } + + /** + * Constructor used for specifying a more descriptive message (typically 'did you mean') instead of the default one. + */ + public UnresolvedFunction(Location location, String name, boolean distinct, List children, boolean analyzed, String unresolvedMessage) { + super(location, children); + this.name = name; + this.distinct = distinct; + this.analyzed = analyzed; + this.unresolvedMsg = unresolvedMessage == null ? 
errorMessage(name, null) : unresolvedMessage; + } + + @Override + public boolean resolved() { + return false; + } + + @Override + public String name() { + return name; + } + + @Override + public String functionName() { + return name; + } + + public boolean distinct() { + return distinct; + } + + public boolean analyzed() { + return analyzed; + } + + @Override + public DataType dataType() { + throw new UnresolvedException("dataType", this); + } + + @Override + public boolean nullable() { + throw new UnresolvedException("nullable", this); + } + + @Override + public Attribute toAttribute() { + throw new UnresolvedException("attribute", this); + } + + @Override + public String unresolvedMessage() { + return unresolvedMsg; + } + + @Override + public String toString() { + return UNRESOLVED_PREFIX + functionName() + functionArgs(); + } + + public static String errorMessage(String name, List potentialMatches) { + String msg = "Unknown function [" + name + "]"; + if (!CollectionUtils.isEmpty(potentialMatches)) { + msg += ", did you mean " + + (potentialMatches.size() == 1 ? "[" + potentialMatches.get(0) + "]" : "any of " + potentialMatches.toString()) + "?"; + } + return msg; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java new file mode 100644 index 00000000000..bf82e5f11b0 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.util.CollectionUtils; + +import java.util.List; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; + +public abstract class AggregateFunction extends Function { + + private final Expression field; + private final List parameters; + + private AggregateFunctionAttribute lazyAttribute; + + AggregateFunction(Location location, Expression field) { + this(location, field, emptyList()); + } + + AggregateFunction(Location location, Expression field, List parameters) { + super(location, CollectionUtils.combine(singletonList(field), parameters)); + this.field = field; + this.parameters = parameters; + } + + public Expression field() { + return field; + } + + public List parameters() { + return parameters; + } + + @Override + public AggregateFunctionAttribute toAttribute() { + if (lazyAttribute == null) { + // this is highly correlated with QueryFolder$FoldAggregate#addFunction (regarding the function name within the querydsl) + lazyAttribute = new AggregateFunctionAttribute(location(), name(), dataType(), id(), functionId(), null); + } + return lazyAttribute; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunctionAttribute.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunctionAttribute.java new file mode 100644 index 00000000000..d1db6230f05 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunctionAttribute.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.function.FunctionAttribute; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Objects; + +public class AggregateFunctionAttribute extends FunctionAttribute { + + private final String propertyPath; + + AggregateFunctionAttribute(Location location, String name, DataType dataType, ExpressionId id, String functionId, String propertyPath) { + this(location, name, dataType, null, false, id, false, functionId, propertyPath); + } + + AggregateFunctionAttribute(Location location, String name, DataType dataType, String qualifier, boolean nullable, ExpressionId id, boolean synthetic, String functionId, String propertyPath) { + super(location, name, dataType, qualifier, nullable, id, synthetic, functionId); + this.propertyPath = propertyPath; + } + + public String propertyPath() { + return propertyPath; + } + + @Override + protected Expression canonicalize() { + return new AggregateFunctionAttribute(location(), "", dataType(), null, true, id(), false, "", null); + } + + @Override + protected Attribute clone(Location location, String name, DataType dataType, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) { + // this is highly correlated with QueryFolder$FoldAggregate#addFunction (regarding the function name within the querydsl) + // that is the functionId is actually derived from the expression id to easily track it across contexts + return new AggregateFunctionAttribute(location, name, dataType, qualifier, nullable, id, synthetic, functionId(), propertyPath); + } + + public 
AggregateFunctionAttribute withFunctionId(String functionId, String propertyPath) { + return new AggregateFunctionAttribute(location(), name(), dataType(), qualifier(), nullable(), id(), synthetic(), functionId, propertyPath); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(propertyPath(), ((AggregateFunctionAttribute) obj).propertyPath()); + } + + @Override + protected String label() { + return "a->" + functionId(); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java new file mode 100644 index 00000000000..8c63dcdebf2 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +public class Avg extends NumericAggregate implements EnclosedAgg { + + public Avg(Location location, Expression field) { + super(location, field); + } + + @Override + public String innerName() { + return "avg"; + } + + @Override + public DataType dataType() { + return field().dataType(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/CompoundNumericAggregate.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/CompoundNumericAggregate.java new file mode 100644 index 00000000000..3ff2ae0c44e --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/CompoundNumericAggregate.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; + +// marker type for compound aggregates, that is aggregate that provide multiple values (like Stats or Matrix) +// and thus cannot be used directly in SQL and are mainly for internal use +public abstract class CompoundNumericAggregate extends NumericAggregate { + + CompoundNumericAggregate(Location location, Expression field, List arguments) { + super(location, field, arguments); + } + + CompoundNumericAggregate(Location location, Expression field) { + super(location, field); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java new file mode 100644 index 00000000000..93df442a8da --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.function.aware.DistinctAware; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +public class Count extends AggregateFunction implements DistinctAware { + + private final boolean distinct; + + public Count(Location location, Expression field, boolean distinct) { + super(location, field); + this.distinct = distinct; + } + + public boolean distinct() { + return distinct; + } + + @Override + public DataType dataType() { + return DataTypes.LONG; + } + + @Override + public String functionId() { + String functionId = id().toString(); + // if count works against a given expression, use its id (to identify the group) + if (field() instanceof NamedExpression) { + functionId = ((NamedExpression) field()).id().toString(); + } + return functionId; + } + + @Override + public AggregateFunctionAttribute toAttribute() { + return new AggregateFunctionAttribute(location(), name(), dataType(), id(), functionId(), "_count"); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/EnclosedAgg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/EnclosedAgg.java new file mode 100644 index 00000000000..146cc68ba14 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/EnclosedAgg.java @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +// Agg 'enclosed' by another agg. Used for agg that return multiple embedded aggs (like MatrixStats) +public interface EnclosedAgg { + + String innerName(); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/ExtendedStats.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/ExtendedStats.java new file mode 100644 index 00000000000..aded773608a --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/ExtendedStats.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class ExtendedStats extends CompoundNumericAggregate { + + public ExtendedStats(Location location, Expression field) { + super(location, field); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/ExtendedStatsEnclosed.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/ExtendedStatsEnclosed.java new file mode 100644 index 00000000000..3e51fa4ef1a --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/ExtendedStatsEnclosed.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +public interface ExtendedStatsEnclosed extends StatsEnclosed, EnclosedAgg { + +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/InnerAggregate.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/InnerAggregate.java new file mode 100644 index 00000000000..04a5d83de65 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/InnerAggregate.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.querydsl.agg.AggPath; +import org.elasticsearch.xpack.sql.type.DataType; + +public class InnerAggregate extends AggregateFunction { + + private final AggregateFunction inner; + private final CompoundNumericAggregate outer; + private final String innerId; + // used when the result needs to be extracted from a map (like in MatrixAggs or Percentiles) + private final Expression innerKey; + + public InnerAggregate(AggregateFunction inner, CompoundNumericAggregate outer) { + this(inner, outer, null); + } + + public InnerAggregate(AggregateFunction inner, CompoundNumericAggregate outer, Expression innerKey) { + super(inner.location(), outer.field(), outer.arguments()); + this.inner = inner; + this.outer = outer; + this.innerId = ((EnclosedAgg) inner).innerName(); + this.innerKey = innerKey; + } + + public AggregateFunction inner() { + return inner; + } + + public CompoundNumericAggregate outer() { + return outer; + } + + public String innerId() 
{ + return innerId; + } + + public Expression innerKey() { + return innerKey; + } + + @Override + public DataType dataType() { + return inner.dataType(); + } + + @Override + public String functionId() { + return outer.id().toString(); + } + + @Override + public AggregateFunctionAttribute toAttribute() { + // this is highly correlated with QueryFolder$FoldAggregate#addFunction (regarding the function name within the querydsl) + return new AggregateFunctionAttribute(location(), name(), dataType(), outer.id(), functionId(), AggPath.metricValue(functionId(), innerId)); + } + + @Override + public boolean functionEquals(Function f) { + if (super.equals(f)) { + InnerAggregate other = (InnerAggregate) f; + return inner.equals(other.inner) && outer.equals(other.outer); + } + return false; + } + + @Override + public String name() { + return "(" + inner.functionName() + "#" + inner.id() + "/" + outer.toString() + ")"; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Kurtosis.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Kurtosis.java new file mode 100644 index 00000000000..ef7c280324a --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Kurtosis.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Kurtosis extends NumericAggregate implements MatrixStatsEnclosed { + + public Kurtosis(Location location, Expression field) { + super(location, field); + } + + @Override + public String innerName() { + return "kurtosis"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/MatrixStats.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/MatrixStats.java new file mode 100644 index 00000000000..9f5c5750c99 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/MatrixStats.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class MatrixStats extends CompoundNumericAggregate { + + public MatrixStats(Location location, Expression field) { + super(location, field); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/MatrixStatsEnclosed.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/MatrixStatsEnclosed.java new file mode 100644 index 00000000000..67d60ae1a64 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/MatrixStatsEnclosed.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +public interface MatrixStatsEnclosed extends EnclosedAgg { + +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java new file mode 100644 index 00000000000..011087e35f7 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +public class Max extends NumericAggregate implements EnclosedAgg { + + public Max(Location location, Expression field) { + super(location, field); + } + + @Override + public DataType dataType() { + return field().dataType(); + } + + @Override + public String innerName() { + return "max"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Mean.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Mean.java new file mode 100644 index 00000000000..2c1755fe546 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Mean.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +public class Mean extends NumericAggregate implements MatrixStatsEnclosed { + + public Mean(Location location, Expression field) { + super(location, field); + } + + @Override + public DataType dataType() { + return DataTypes.DOUBLE; + } + + @Override + public String innerName() { + return "means"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java new file mode 100644 index 00000000000..2e098da9a07 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +public class Min extends NumericAggregate implements EnclosedAgg { + + public Min(Location location, Expression field) { + super(location, field); + } + + @Override + public DataType dataType() { + return field().dataType(); + } + + @Override + public String innerName() { + return "min"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java new file mode 100644 index 00000000000..09306c35a07 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.List; + +class NumericAggregate extends AggregateFunction { + + NumericAggregate(Location location, Expression field, List parameters) { + super(location, field, parameters); + } + + NumericAggregate(Location location, Expression field) { + super(location, field); + } + + @Override + protected TypeResolution resolveType() { + return Expressions.typeMustBeNumeric(field()); + } + + @Override + public DataType dataType() { + return DataTypes.DOUBLE; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java new file mode 100644 index 00000000000..07083bf2d3e --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Foldables; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import static java.util.Collections.singletonList; + +public class Percentile extends NumericAggregate implements EnclosedAgg { + + private final Expression percent; + + public Percentile(Location location, Expression field, Expression percent) { + super(location, field, singletonList(percent)); + this.percent = percent; + } + + @Override + protected TypeResolution resolveType() { + TypeResolution resolution = super.resolveType(); + + if (TypeResolution.TYPE_RESOLVED.equals(resolution)) { + resolution = Expressions.typeMustBeNumeric(percent()); + } + + return resolution; + } + + public Expression percent() { + return percent; + } + + @Override + public DataType dataType() { + return DataTypes.DOUBLE; + } + + @Override + public String innerName() { + return "[" + Double.toString(Foldables.doubleValueOf(percent)) + "]"; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java new file mode 100644 index 00000000000..ccdd8333a02 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Foldables; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import static java.util.Collections.singletonList; + +public class PercentileRank extends AggregateFunction implements EnclosedAgg { + + private final Expression value; + + public PercentileRank(Location location, Expression field, Expression value) { + super(location, field, singletonList(value)); + this.value = value; + } + + @Override + protected TypeResolution resolveType() { + TypeResolution resolution = super.resolveType(); + + if (TypeResolution.TYPE_RESOLVED.equals(resolution)) { + resolution = Expressions.typeMustBeNumeric(value); + } + + return resolution; + } + + public Expression value() { + return value; + } + + @Override + public DataType dataType() { + return DataTypes.DOUBLE; + } + + @Override + public String innerName() { + return "[" + Double.toString(Foldables.doubleValueOf(value)) + "]"; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRanks.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRanks.java new file mode 100644 index 00000000000..651a517be17 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRanks.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; + +public class PercentileRanks extends CompoundNumericAggregate { + + private final List values; + + public PercentileRanks(Location location, Expression field, List values) { + super(location, field, values); + this.values = values; + } + + public List values() { + return values; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentiles.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentiles.java new file mode 100644 index 00000000000..876fe7fa1d6 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentiles.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; + +public class Percentiles extends CompoundNumericAggregate { + + private final List percents; + + public Percentiles(Location location, Expression field, List percents) { + super(location, field, percents); + this.percents = percents; + } + + public List percents() { + return percents; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Skewness.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Skewness.java new file mode 100644 index 00000000000..605812a82dc --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Skewness.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Skewness extends NumericAggregate implements MatrixStatsEnclosed { + + public Skewness(Location location, Expression field) { + super(location, field); + } + + @Override + public String innerName() { + return "skewness"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Stats.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Stats.java new file mode 100644 index 00000000000..87f1f70fc5d --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Stats.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Stats extends CompoundNumericAggregate { + + public Stats(Location location, Expression field) { + super(location, field); + } + + public static boolean isTypeCompatible(Expression e) { + return e instanceof Min || e instanceof Max || e instanceof Avg || e instanceof Sum; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/StatsEnclosed.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/StatsEnclosed.java new file mode 100644 index 00000000000..e68de8d6028 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/StatsEnclosed.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +public interface StatsEnclosed { + +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/StddevPop.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/StddevPop.java new file mode 100644 index 00000000000..a9eee0b6dae --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/StddevPop.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class StddevPop extends NumericAggregate implements ExtendedStatsEnclosed { + + public StddevPop(Location location, Expression field) { + super(location, field); + } + + @Override + public String innerName() { + return "std_deviation"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Sum.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Sum.java new file mode 100644 index 00000000000..3b6becd0236 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Sum.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +public class Sum extends NumericAggregate implements EnclosedAgg { + + public Sum(Location location, Expression field) { + super(location, field); + } + + @Override + public DataType dataType() { + return field().dataType(); + } + + @Override + public String innerName() { + return "sum"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/SumOfSquares.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/SumOfSquares.java new file mode 100644 index 00000000000..ecf0bacc840 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/SumOfSquares.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class SumOfSquares extends NumericAggregate implements ExtendedStatsEnclosed { + + public SumOfSquares(Location location, Expression field) { + super(location, field); + } + + @Override + public String innerName() { + return "sum_of_squares"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/VarPop.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/VarPop.java new file mode 100644 index 00000000000..546e2df4bf1 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/VarPop.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class VarPop extends NumericAggregate implements ExtendedStatsEnclosed { + + public VarPop(Location location, Expression field) { + super(location, field); + } + + @Override + public String innerName() { + return "variance"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aware/DistinctAware.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aware/DistinctAware.java new file mode 100644 index 00000000000..17043ea8027 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aware/DistinctAware.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aware; + +public interface DistinctAware { + +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aware/TimeZoneAware.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aware/TimeZoneAware.java new file mode 100644 index 00000000000..9cf7641d17e --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/aware/TimeZoneAware.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aware; + +public interface TimeZoneAware { + +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/BinaryScalarFunction.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/BinaryScalarFunction.java new file mode 100644 index 00000000000..9395bd336a6 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/BinaryScalarFunction.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Arrays; + +public abstract class BinaryScalarFunction extends ScalarFunction { + + private final Expression left, right; + + protected BinaryScalarFunction(Location location, Expression left, Expression right) { + super(location, Arrays.asList(left, right)); + this.left = left; + this.right = right; + } + + public Expression left() { + return left; + } + + public Expression right() { + return right; + } + + @Override + public boolean foldable() { + return left.foldable() && right.foldable(); + } + + @Override + public ScriptTemplate asScript() { + ScriptTemplate leftScript = asScript(left()); + ScriptTemplate rightScript = asScript(right()); + + return asScriptFrom(leftScript, rightScript); + } + + protected abstract ScriptTemplate asScriptFrom(ScriptTemplate leftScript, ScriptTemplate rightScript); +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java new file mode 100644 index 00000000000..57a1b9e1ece --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; + +import java.util.Objects; + +public class Cast extends UnaryScalarFunction { + + private final DataType dataType; + + public Cast(Location location, Expression field, DataType dataType) { + super(location, field); + this.dataType = dataType; + } + + public DataType from() { + return field().dataType(); + } + + public DataType to() { + return dataType; + } + + @Override + public DataType dataType() { + return dataType; + } + + @Override + public boolean foldable() { + return field().foldable(); + } + + @Override + public Object fold() { + return DataTypeConversion.convert(field().fold(), dataType); + } + + @Override + public boolean nullable() { + return field().nullable() || DataTypeConversion.nullable(from()); + } + + @Override + protected TypeResolution resolveType() { + return DataTypeConversion.canConvert(from(), to()) ? 
+ TypeResolution.TYPE_RESOLVED : + new TypeResolution("Cannot cast %s to %s", from(), to()); + } + + @Override + protected ScriptTemplate asScriptFrom(ScalarFunctionAttribute scalar) { + return scalar.script(); + } + + @Override + protected ScriptTemplate asScriptFrom(FieldAttribute field) { + return new ScriptTemplate(field.name(), Params.EMPTY, field.dataType()); + } + + @Override + protected ProcessorDefinition makeProcessorDefinition() { + return new UnaryProcessorDefinition(this, ProcessorDefinitions.toProcessorDefinition(field()), new CastProcessor(DataTypeConversion.conversionFor(from(), to()))); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(dataType, ((Cast) obj).dataType()); + } + + @Override + public String toString() { + return functionName() + "(" + field().toString() + " AS " + to().sqlName() + ")#" + id(); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessor.java new file mode 100644 index 00000000000..f5fe541fb46 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessor.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.type.DataTypeConversion.Conversion; + +import java.io.IOException; +import java.util.Objects; + +public class CastProcessor implements Processor { + + public static final String NAME = "ca"; + + private final Conversion conversion; + + public CastProcessor(Conversion conversion) { + this.conversion = conversion; + } + + public CastProcessor(StreamInput in) throws IOException { + conversion = in.readEnum(Conversion.class); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(conversion); + } + + @Override + public Object process(Object input) { + return conversion.convert(input); + } + + Conversion converter() { + return conversion; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + CastProcessor other = (CastProcessor) obj; + return Objects.equals(conversion, other.conversion); + } + + @Override + public int hashCode() { + return Objects.hash(conversion); + } + + @Override + public String toString() { + return conversion.name(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java new file mode 100644 index 00000000000..4534792ce4b --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.UnaryArithmeticProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ConstantProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.HitExtractorProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.MatrixFieldProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.util.ArrayList; +import java.util.List; + +public abstract class Processors { + + /** + * All of the named writeables needed to deserialize the instances of + * {@linkplain Processors}. 
+ */ + public static List getNamedWriteables() { + List entries = new ArrayList<>(); + // base + entries.add(new Entry(Processor.class, ConstantProcessor.NAME, ConstantProcessor::new)); + entries.add(new Entry(Processor.class, HitExtractorProcessor.NAME, HitExtractorProcessor::new)); + entries.add(new Entry(Processor.class, CastProcessor.NAME, CastProcessor::new)); + entries.add(new Entry(Processor.class, ChainingProcessor.NAME, ChainingProcessor::new)); + entries.add(new Entry(Processor.class, MatrixFieldProcessor.NAME, MatrixFieldProcessor::new)); + + // arithmetic + entries.add(new Entry(Processor.class, BinaryArithmeticProcessor.NAME, BinaryArithmeticProcessor::new)); + entries.add(new Entry(Processor.class, UnaryArithmeticProcessor.NAME, UnaryArithmeticProcessor::new)); + // datetime + entries.add(new Entry(Processor.class, DateTimeProcessor.NAME, DateTimeProcessor::new)); + // math + entries.add(new Entry(Processor.class, MathProcessor.NAME, MathProcessor::new)); + return entries; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java new file mode 100644 index 00000000000..b6f4b1dad36 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar;

import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.Expressions;
import org.elasticsearch.xpack.sql.expression.FieldAttribute;
import org.elasticsearch.xpack.sql.expression.function.Function;
import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition;
import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params;
import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate;
import org.elasticsearch.xpack.sql.tree.Location;

import java.util.List;

import static java.util.Collections.emptyList;
import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder;
import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate;

/**
 * Base class for per-row ("scalar") functions. A scalar function can be
 * evaluated in two ways: as a Painless script pushed to Elasticsearch (via
 * {@link #asScript()}) or as a client-side {@link ProcessorDefinition} tree
 * (via {@link #asProcessorDefinition()}).
 */
public abstract class ScalarFunction extends Function {

    // Both caches are built lazily on first access.
    // NOTE(review): not thread-safe — presumably planning is single-threaded; confirm.
    private ScalarFunctionAttribute lazyAttribute = null;
    private ProcessorDefinition lazyProcessor = null;


    // Constructor for functions that take no arguments.
    protected ScalarFunction(Location location) {
        super(location, emptyList());
    }

    protected ScalarFunction(Location location, List<Expression> fields) {
        super(location, fields);
    }

    /**
     * Attribute representing this function in the query plan; carries the
     * script, the optional order-by expression and the processor tree.
     */
    @Override
    public ScalarFunctionAttribute toAttribute() {
        if (lazyAttribute == null) {
            lazyAttribute = new ScalarFunctionAttribute(location(), name(), dataType(), id(), functionId(), asScript(), orderBy(),
                    asProcessorDefinition());
        }
        return lazyAttribute;
    }

    /** Painless script template evaluating this function server-side. */
    public abstract ScriptTemplate asScript();

    // utility methods for creating the actual scripts

    /**
     * Builds the script for one argument expression, dispatching on what the
     * expression actually is: a foldable constant, another scalar function,
     * an aggregate, or (the fall-back) a plain field.
     */
    protected ScriptTemplate asScript(Expression exp) {
        if (exp.foldable()) {
            return asScriptFromFoldable(exp);
        }

        Attribute attr = Expressions.attribute(exp);
        if (attr != null) {
            if (attr instanceof ScalarFunctionAttribute) {
                return asScriptFrom((ScalarFunctionAttribute) attr);
            }
            if (attr instanceof AggregateFunctionAttribute) {
                return asScriptFrom((AggregateFunctionAttribute) attr);
            }
            // fall-back to a field attribute
            return asScriptFrom((FieldAttribute) attr);
        }
        throw new SqlIllegalArgumentException("Cannot evaluate script for expression %s", exp);
    }

    // Wraps the nested scalar function's script inside this function's template.
    protected ScriptTemplate asScriptFrom(ScalarFunctionAttribute scalar) {
        ScriptTemplate nested = scalar.script();
        Params p = paramsBuilder().script(nested.params()).build();
        return new ScriptTemplate(formatScript(nested.template()), p, dataType());
    }

    // Constant argument: fold it now and pass the value as a script parameter.
    protected ScriptTemplate asScriptFrom(Expression foldable) {
        return new ScriptTemplate(formatScript("{}"),
                paramsBuilder().variable(foldable.fold()).build(),
                foldable.dataType());
    }

    // Field argument: read it from the document via doc-values.
    protected ScriptTemplate asScriptFrom(FieldAttribute field) {
        return new ScriptTemplate(formatScript("doc[{}].value"),
                paramsBuilder().variable(field.name()).build(),
                field.dataType());
    }

    // Aggregate argument: reference the aggregation result as a parameter.
    protected ScriptTemplate asScriptFrom(AggregateFunctionAttribute aggregate) {
        return new ScriptTemplate(formatScript("{}"),
                paramsBuilder().agg(aggregate).build(),
                aggregate.dataType());
    }

    // Hook allowing subclasses to wrap the argument template (e.g. Neg prefixes "-").
    protected String formatScript(String scriptTemplate) {
        return formatTemplate(scriptTemplate);
    }

    /** Client-side processor tree for this function, built lazily. */
    public ProcessorDefinition asProcessorDefinition() {
        if (lazyProcessor == null) {
            lazyProcessor = makeProcessorDefinition();
        }
        return lazyProcessor;
    }

    protected abstract ProcessorDefinition makeProcessorDefinition();

    // used if the function is monotonic and thus does not have to be computed for ordering purposes
    // null means the script needs to be used; expression the field/expression to be used instead
    public Expression orderBy() {
        return null;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar;

import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.ExpressionId;
import org.elasticsearch.xpack.sql.expression.function.FunctionAttribute;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition;
import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataType;

import java.util.Objects;

/**
 * {@link Attribute} produced by a {@link ScalarFunction}: in addition to the
 * usual attribute identity it carries the Painless script, an optional
 * order-by expression and the client-side processor tree.
 */
public class ScalarFunctionAttribute extends FunctionAttribute {

    // script evaluating this attribute server-side
    private final ScriptTemplate script;
    // expression usable for ordering instead of the script (monotonic functions);
    // null means ordering must go through the script itself
    private final Expression orderBy;
    // client-side processor tree computing this attribute's value
    private final ProcessorDefinition processorDef;

    // Convenience constructor: no qualifier, nullable, not synthetic.
    ScalarFunctionAttribute(Location location, String name, DataType dataType, ExpressionId id, String functionId, ScriptTemplate script,
            Expression orderBy, ProcessorDefinition processorDef) {
        this(location, name, dataType, null, true, id, false, functionId, script, orderBy, processorDef);
    }

    ScalarFunctionAttribute(Location location, String name, DataType dataType, String qualifier, boolean nullable, ExpressionId id,
            boolean synthetic, String functionId, ScriptTemplate script, Expression orderBy, ProcessorDefinition processorDef) {
        super(location, name, dataType, qualifier, nullable, id, synthetic, functionId);
        this.script = script;
        this.orderBy = orderBy;
        this.processorDef = processorDef;
    }

    public ScriptTemplate script() {
        return script;
    }

    public Expression orderBy() {
        return orderBy;
    }

    public ProcessorDefinition processorDef() {
        return processorDef;
    }

    // Canonical form drops the display name so semantically-equal attributes compare equal.
    @Override
    protected Expression canonicalize() {
        return new ScalarFunctionAttribute(location(), "", dataType(), null, true, id(), false, functionId(), script, orderBy,
                processorDef);
    }

    @Override
    protected Attribute clone(Location location, String name, DataType dataType, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) {
        return new ScalarFunctionAttribute(location, name, dataType, qualifier, nullable, id, synthetic, functionId(), script, orderBy,
                processorDef);
    }

    // Refines the parent's equality with orderBy; hashCode is inherited, which
    // is still contract-safe because this equals is strictly narrower.
    @Override
    public boolean equals(Object obj) {
        return super.equals(obj) && Objects.equals(orderBy, ((ScalarFunctionAttribute) obj).orderBy());
    }

    @Override
    protected String label() {
        return "s->" + functionId();
    }
}
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.singletonList; + +public abstract class UnaryScalarFunction extends ScalarFunction { + + private final Expression field; + + protected UnaryScalarFunction(Location location) { + super(location); + this.field = null; + } + + protected UnaryScalarFunction(Location location, Expression field) { + super(location, singletonList(field)); + this.field = field; + } + + public Expression field() { + return field; + } + + @Override + public boolean foldable() { + return field.foldable(); + } + + @Override + public ScriptTemplate asScript() { + return asScript(field); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Add.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Add.java new file mode 100644 index 00000000000..7df251780b2 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Add.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Add extends ArithmeticFunction { + + public Add(Location location, Expression left, Expression right) { + super(location, left, right, BinaryArithmeticOperation.ADD); + } + + @Override + public Number fold() { + return Arithmetics.add((Number) left().fold(), (Number) right().fold()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java new file mode 100644 index 00000000000..c68b2527314 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.function.scalar.BinaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Locale; + +import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; + +public abstract class ArithmeticFunction extends BinaryScalarFunction { + + private BinaryArithmeticOperation operation; + + ArithmeticFunction(Location location, Expression left, Expression right, BinaryArithmeticOperation operation) { + super(location, left, right); + this.operation = operation; + } + + public BinaryArithmeticOperation operation() { + return operation; + } + + @Override + public DataType dataType() { + // left or right have to be compatible so either one works + return left().dataType(); + } + + @Override + protected TypeResolution resolveType() { + if (!childrenResolved()) { + return new TypeResolution("Unresolved children"); + } + DataType l = left().dataType(); + DataType r = right().dataType(); + + TypeResolution resolution = resolveInputType(l); + + if (resolution == TypeResolution.TYPE_RESOLVED) { + return resolveInputType(r); + } + return resolution; + } + + protected TypeResolution resolveInputType(DataType inputType) { + return inputType.isNumeric() ? 
TypeResolution.TYPE_RESOLVED + : new TypeResolution("'%s' requires a numeric type, not %s", operation, inputType.sqlName()); + } + + @Override + protected ScriptTemplate asScriptFrom(ScriptTemplate leftScript, ScriptTemplate rightScript) { + String op = operation.symbol(); + // escape % + if (operation == BinaryArithmeticOperation.MOD) { + op = "%" + op; + } + return new ScriptTemplate(format(Locale.ROOT, "(%s) %s (%s)", leftScript.template(), op, rightScript.template()), + paramsBuilder() + .script(leftScript.params()).script(rightScript.params()) + .build(), dataType()); + } + + protected final BinaryArithmeticProcessorDefinition makeProcessorDefinition() { + return new BinaryArithmeticProcessorDefinition(this, ProcessorDefinitions.toProcessorDefinition(left()), ProcessorDefinitions.toProcessorDefinition(right()), operation); + } + + @Override + public String name() { + return toString(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("("); + sb.append(left()); + if (!(left() instanceof Literal)) { + sb.insert(1, "("); + sb.append(")"); + } + sb.append(" "); + sb.append(operation); + sb.append(" "); + int pos = sb.length(); + sb.append(right()); + if (!(right() instanceof Literal)) { + sb.insert(pos, "("); + sb.append(")"); + } + sb.append(")#"); + sb.append(functionId()); + return sb.toString(); + } + + protected boolean useParanthesis() { + return !(left() instanceof Literal) || !(right() instanceof Literal); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Arithmetics.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Arithmetics.java new file mode 100644 index 00000000000..2bae1935e4f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Arithmetics.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. 
/**
 * Arithmetic operations using the type widening rules of the JLS 5.6.2, namely
 * widen to double or float or long or int, in this order.
 * Integral add/sub/mul/negate are overflow-checked (via {@code Math.*Exact})
 * and throw {@link ArithmeticException} on overflow.
 */
abstract class Arithmetics {

    static Number add(Number l, Number r) {
        if (l instanceof Double || r instanceof Double) {
            return Double.valueOf(l.doubleValue() + r.doubleValue());
        }
        if (l instanceof Float || r instanceof Float) {
            return Float.valueOf(l.floatValue() + r.floatValue());
        }
        if (l instanceof Long || r instanceof Long) {
            return Long.valueOf(Math.addExact(l.longValue(), r.longValue()));
        }

        return Integer.valueOf(Math.addExact(l.intValue(), r.intValue()));
    }

    static Number sub(Number l, Number r) {
        if (l instanceof Double || r instanceof Double) {
            return Double.valueOf(l.doubleValue() - r.doubleValue());
        }
        if (l instanceof Float || r instanceof Float) {
            return Float.valueOf(l.floatValue() - r.floatValue());
        }
        if (l instanceof Long || r instanceof Long) {
            return Long.valueOf(Math.subtractExact(l.longValue(), r.longValue()));
        }

        return Integer.valueOf(Math.subtractExact(l.intValue(), r.intValue()));
    }

    static Number mul(Number l, Number r) {
        if (l instanceof Double || r instanceof Double) {
            return Double.valueOf(l.doubleValue() * r.doubleValue());
        }
        if (l instanceof Float || r instanceof Float) {
            return Float.valueOf(l.floatValue() * r.floatValue());
        }
        if (l instanceof Long || r instanceof Long) {
            return Long.valueOf(Math.multiplyExact(l.longValue(), r.longValue()));
        }

        return Integer.valueOf(Math.multiplyExact(l.intValue(), r.intValue()));
    }

    static Number div(Number l, Number r) {
        if (l instanceof Double || r instanceof Double) {
            return l.doubleValue() / r.doubleValue();
        }
        if (l instanceof Float || r instanceof Float) {
            return l.floatValue() / r.floatValue();
        }
        if (l instanceof Long || r instanceof Long) {
            return l.longValue() / r.longValue();
        }

        return l.intValue() / r.intValue();
    }

    static Number mod(Number l, Number r) {
        // FIX: the Long check used to come first, silently truncating a mixed
        // long/double (or long/float) operand; check floating point first to
        // follow the same widening order as every other operation here.
        if (l instanceof Double || r instanceof Double) {
            return Double.valueOf(l.doubleValue() % r.doubleValue());
        }
        if (l instanceof Float || r instanceof Float) {
            return Float.valueOf(l.floatValue() % r.floatValue());
        }
        if (l instanceof Long || r instanceof Long) {
            // NOTE(review): floorMod follows the divisor's sign while % (used
            // for the floating point cases above) follows the dividend's —
            // results differ for negative operands; confirm intended SQL semantics.
            return Long.valueOf(Math.floorMod(l.longValue(), r.longValue()));
        }

        return Math.floorMod(l.intValue(), r.intValue());
    }

    static Number negate(Number n) {
        // FIX: removed bogus overflow checks against Double.MIN_VALUE /
        // Float.MIN_VALUE — those constants are the smallest POSITIVE values,
        // not the most negative, and floating point negation can never
        // overflow anyway (it only flips the sign bit).
        if (n instanceof Double) {
            return Double.valueOf(-n.doubleValue());
        }
        if (n instanceof Float) {
            return Float.valueOf(-n.floatValue());
        }
        if (n instanceof Long) {
            return Long.valueOf(Math.negateExact(n.longValue()));
        }

        return Integer.valueOf(Math.negateExact(n.intValue()));
    }
}
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.BinaryProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; +import java.util.function.BiFunction; + +import static java.lang.String.format; + +public class BinaryArithmeticProcessor extends BinaryProcessor { + + public enum BinaryArithmeticOperation { + + ADD(Arithmetics::add, "+"), + SUB(Arithmetics::sub, "-"), + MUL(Arithmetics::mul, "*"), + DIV(Arithmetics::div, "/"), + MOD(Arithmetics::mod, "%"); + + private final BiFunction process; + private final String symbol; + + BinaryArithmeticOperation(BiFunction process, String symbol) { + this.process = process; + this.symbol = symbol; + } + + public String symbol() { + return symbol; + } + + public final Number apply(Number left, Number right) { + return process.apply(left, right); + } + + @Override + public String toString() { + return symbol; + } + } + + public static final String NAME = "ab"; + + private final BinaryArithmeticOperation operation; + + public BinaryArithmeticProcessor(Processor left, Processor right, BinaryArithmeticOperation operation) { + super(left, right); + this.operation = operation; + } + + public BinaryArithmeticProcessor(StreamInput in) throws IOException { + super(in); + operation = in.readEnum(BinaryArithmeticOperation.class); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected void doWrite(StreamOutput out) throws IOException { + out.writeEnum(operation); + } + + @Override + protected Object doProcess(Object left, Object 
right) { + return operation.apply((Number) left, (Number) right); + } + + @Override + public int hashCode() { + return operation.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + BinaryArithmeticProcessor other = (BinaryArithmeticProcessor) obj; + return Objects.equals(operation, other.operation) + && Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } + + @Override + public String toString() { + return format(Locale.ROOT, "(%s %s %s)", left(), operation, right()); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessorDefinition.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessorDefinition.java new file mode 100644 index 00000000000..e0d60fe6ef5 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessorDefinition.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.BinaryProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; + +import java.util.Objects; + +public class BinaryArithmeticProcessorDefinition extends BinaryProcessorDefinition { + + private final BinaryArithmeticOperation operation; + + public BinaryArithmeticProcessorDefinition(Expression expression, ProcessorDefinition left, ProcessorDefinition right, BinaryArithmeticOperation operation) { + super(expression, left, right); + this.operation = operation; + } + + public BinaryArithmeticOperation operation() { + return operation; + } + + @Override + public BinaryArithmeticProcessor asProcessor() { + return new BinaryArithmeticProcessor(left().asProcessor(), right().asProcessor(), operation); + } + + @Override + public int hashCode() { + return Objects.hash(left(), right(), operation); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + BinaryArithmeticProcessorDefinition other = (BinaryArithmeticProcessorDefinition) obj; + return Objects.equals(operation, other.operation) + && Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Div.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Div.java new file mode 100644 index 00000000000..d4b084483a8 --- /dev/null +++ 
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Div.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; + +public class Div extends ArithmeticFunction { + + public Div(Location location, Expression left, Expression right) { + super(location, left, right, BinaryArithmeticOperation.DIV); + } + + @Override + public Object fold() { + return Arithmetics.div((Number) left().fold(), (Number) right().fold()); + } + + @Override + public DataType dataType() { + return DataTypeConversion.commonType(left().dataType(), right().dataType()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Mod.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Mod.java new file mode 100644 index 00000000000..59ac868bae2 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Mod.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Mod extends ArithmeticFunction { + + public Mod(Location location, Expression left, Expression right) { + super(location, left, right, BinaryArithmeticOperation.MOD); + } + + @Override + public Object fold() { + return Arithmetics.mod((Number) left().fold(), (Number) right().fold()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Mul.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Mul.java new file mode 100644 index 00000000000..3120fe11384 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Mul.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Mul extends ArithmeticFunction { + + public Mul(Location location, Expression left, Expression right) { + super(location, left, right, BinaryArithmeticOperation.MUL); + } + + @Override + public Object fold() { + return Arithmetics.mul((Number) left().fold(), (Number) right().fold()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Neg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Neg.java new file mode 100644 index 00000000000..e8535061771 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Neg.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.UnaryArithmeticProcessor.UnaryArithmeticOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +public class Neg extends UnaryScalarFunction { + + public Neg(Location location, Expression field) { + super(location, field); + } + + @Override + protected TypeResolution resolveType() { + return Expressions.typeMustBeNumeric(field()); + } + + @Override + public Object fold() { + return Arithmetics.negate((Number) field().fold()); + } + + @Override + public DataType dataType() { + return field().dataType(); + } + + @Override + protected String formatScript(String template) { + // Painless supports negating (and hopefully its corner cases) + return super.formatScript("-" + template); + } + + @Override + protected ProcessorDefinition makeProcessorDefinition() { + return new UnaryProcessorDefinition(this, ProcessorDefinitions.toProcessorDefinition(field()), new UnaryArithmeticProcessor(UnaryArithmeticOperation.NEGATE)); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Sub.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Sub.java new file mode 100644 index 00000000000..64f08c4d452 --- /dev/null +++ 
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Sub.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Sub extends ArithmeticFunction { + + public Sub(Location location, Expression left, Expression right) { + super(location, left, right, BinaryArithmeticOperation.SUB); + } + + @Override + public Object fold() { + return Arithmetics.sub((Number) left().fold(), (Number) right().fold()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/UnaryArithmeticProcessor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/UnaryArithmeticProcessor.java new file mode 100644 index 00000000000..cc60a8c5004 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/UnaryArithmeticProcessor.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor;

import java.io.IOException;
import java.util.Objects;
import java.util.function.Function;

/**
 * {@link Processor} applying a unary arithmetic operation to a {@link Number}.
 * Null input propagates as null (SQL NULL semantics), matching the behavior of
 * the datetime processors.
 */
public class UnaryArithmeticProcessor implements Processor {

    public enum UnaryArithmeticOperation {

        NEGATE(Arithmetics::negate, "-");

        private final Function<Number, Number> process;
        // symbol is stored per constant instead of being hard-coded in symbol(),
        // so adding new operations cannot silently report the wrong symbol
        private final String symbol;

        UnaryArithmeticOperation(Function<Number, Number> process, String symbol) {
            this.process = process;
            this.symbol = symbol;
        }

        public final Number apply(Number number) {
            return process.apply(number);
        }

        public String symbol() {
            return symbol;
        }
    }

    public static final String NAME = "au";

    private final UnaryArithmeticOperation operation;

    public UnaryArithmeticProcessor(UnaryArithmeticOperation operation) {
        this.operation = operation;
    }

    public UnaryArithmeticProcessor(StreamInput in) throws IOException {
        operation = in.readEnum(UnaryArithmeticOperation.class);
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeEnum(operation);
    }

    @Override
    public Object process(Object input) {
        if (input == null) {
            // SQL NULL propagates through arithmetic
            return null;
        }
        if (input instanceof Number) {
            return operation.apply((Number) input);
        }
        throw new SqlIllegalArgumentException("A number is required; received %s", input);
    }

    // equals/hashCode added for consistency with DateTimeProcessor so that
    // processor trees can be compared/deduplicated
    @Override
    public int hashCode() {
        return Objects.hashCode(operation);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        return operation == ((UnaryArithmeticProcessor) obj).operation;
    }

    @Override
    public String toString() {
        return operation.symbol() + super.toString();
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.Expressions;
import org.elasticsearch.xpack.sql.expression.FieldAttribute;
import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute;
import org.elasticsearch.xpack.sql.expression.function.aware.TimeZoneAware;
import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition;
import org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder;
import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.type.DataTypes;
import org.joda.time.DateTimeZone;

import java.time.temporal.ChronoField;

import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder;
import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate;

/**
 * Base class for functions extracting a single datetime component (year, month, ...)
 * from a date expression, in a given time zone. Subclasses supply the matching
 * {@link DateTimeExtractor} (runtime path), {@link ChronoField} (painless-script path)
 * and a date-time format string (range/pushdown path).
 */
public abstract class DateTimeFunction extends UnaryScalarFunction implements TimeZoneAware {

    private final DateTimeZone timeZone;
    // display name including the time zone, computed once in the constructor
    private final String name;

    DateTimeFunction(Location location, Expression field, DateTimeZone timeZone) {
        super(location, field);
        this.timeZone = timeZone;

        StringBuilder sb = new StringBuilder(super.name());
        // add timezone as last argument
        sb.insert(sb.length() - 1, " [" + timeZone.getID() + "]");

        this.name = sb.toString();
    }

    public DateTimeZone timeZone() {
        return timeZone;
    }

    @Override
    public boolean foldable() {
        // foldable exactly when the wrapped expression is
        return field().foldable();
    }

    @Override
    protected TypeResolution resolveType() {
        // only DATE-typed expressions are accepted
        return field().dataType().same(DataTypes.DATE) ?
                    TypeResolution.TYPE_RESOLVED :
                    new TypeResolution("Function '%s' cannot be applied on a non-date expression ('%s' of type '%s')", functionName(), Expressions.name(field()), field().dataType().esName());
    }

    @Override
    protected ScriptTemplate asScriptFrom(FieldAttribute field) {
        ParamsBuilder params = paramsBuilder();

        String template = null;
        if (DateTimeZone.UTC.equals(timeZone)) {
            // fast path: the doc value is already UTC, call the field's getter directly,
            // e.g. doc['f'].value.getMonthOfYear()
            // TODO: it would be nice to be able to externalize the extract function and reuse the script across all extractors
            template = formatTemplate("doc[{}].value.get" + extractFunction() + "()");
            params.variable(field.name());
        } else {
            // TODO ewwww
            /* This uses the Java 8 time API because Painless doesn't whitelist creation of new
             * Joda classes. */

            // ideally JodaTime should be used since that's internally used and there are subtle differences between that and the JDK API
            // all variables are externalized to reuse the script across invocations
            // the actual script is ZonedDateTime.ofInstant(Instant.ofEpochMilli(<field>.value.millis), ZoneId.of(<tz>)).get(ChronoField.valueOf(<chrono field>))

            template = formatTemplate("ZonedDateTime.ofInstant(Instant.ofEpochMilli(doc[{}].value.millis), ZoneId.of({})).get(ChronoField.valueOf({}))");
            params.variable(field.name())
                  .variable(timeZone.getID())
                  .variable(chronoField().name());
        }

        return new ScriptTemplate(template, params.build(), dataType());
    }

    @Override
    protected ScriptTemplate asScriptFrom(AggregateFunctionAttribute aggregate) {
        // datetime extraction on top of an aggregate is not supported
        throw new UnsupportedOperationException();
    }

    /**
     * Name of the Joda getter invoked in the UTC script path; by convention the
     * subclass's simple name (e.g. {@code MonthOfYear} -> {@code getMonthOfYear()}).
     */
    protected String extractFunction() {
        return getClass().getSimpleName();
    }

    /**
     * Used for generating the painless script version of this function when the time zone is not UTC
     */
    protected abstract ChronoField chronoField();

    @Override
    protected final ProcessorDefinition makeProcessorDefinition() {
        // runtime (non-script) evaluation path
        return new UnaryProcessorDefinition(this, ProcessorDefinitions.toProcessorDefinition(field()), new DateTimeProcessor(extractor(), timeZone));
    }

    protected abstract DateTimeExtractor extractor();

    @Override
    public DataType dataType() {
        // every extractor yields an integer component
        return DataTypes.INTEGER;
    }

    // used for applying ranges
    public abstract String dateTimeFormat();

    // add tz along the rest of the params
    @Override
    public String name() {
        return name;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.aware.TimeZoneAware;
import org.elasticsearch.xpack.sql.tree.Location;
import org.joda.time.DateTimeZone;

/**
 * DateTimeFunctions that can be mapped as histogram. This means the dates order is maintained
 * Unfortunately this means only YEAR works since everything else changes the order
 */
public abstract class DateTimeHistogramFunction extends DateTimeFunction implements TimeZoneAware {

    DateTimeHistogramFunction(Location location, Expression field, DateTimeZone timeZone) {
        super(location, field, timeZone);
    }

    /**
     * used for aggregation (date histogram) - the interval string understood by ES
     */
    public abstract String interval();
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor;
import org.joda.time.DateTime;
import org.joda.time.DateTimeFieldType;
import org.joda.time.DateTimeZone;
import org.joda.time.ReadableDateTime;

import java.io.IOException;
import java.util.Objects;

/**
 * {@link Processor} extracting a single datetime component (year, month, ...)
 * from a date value, adjusting for the configured time zone when it is not UTC.
 * Null input propagates as null (SQL NULL semantics).
 */
public class DateTimeProcessor implements Processor {

    /**
     * The supported components, each mapped onto the corresponding Joda field.
     */
    public enum DateTimeExtractor {
        DAY_OF_MONTH(DateTimeFieldType.dayOfMonth()),
        DAY_OF_WEEK(DateTimeFieldType.dayOfWeek()),
        DAY_OF_YEAR(DateTimeFieldType.dayOfYear()),
        HOUR_OF_DAY(DateTimeFieldType.hourOfDay()),
        MINUTE_OF_DAY(DateTimeFieldType.minuteOfDay()),
        MINUTE_OF_HOUR(DateTimeFieldType.minuteOfHour()),
        MONTH_OF_YEAR(DateTimeFieldType.monthOfYear()),
        SECOND_OF_MINUTE(DateTimeFieldType.secondOfMinute()),
        WEEK_OF_YEAR(DateTimeFieldType.weekOfWeekyear()),
        YEAR(DateTimeFieldType.year());

        private final DateTimeFieldType field;

        DateTimeExtractor(DateTimeFieldType field) {
            this.field = field;
        }

        public int extract(ReadableDateTime dt) {
            return dt.get(field);
        }
    }

    public static final String NAME = "dt";

    private final DateTimeExtractor extractor;
    private final DateTimeZone timeZone;

    public DateTimeProcessor(DateTimeExtractor extractor, DateTimeZone timeZone) {
        this.extractor = extractor;
        this.timeZone = timeZone;
    }

    public DateTimeProcessor(StreamInput in) throws IOException {
        extractor = in.readEnum(DateTimeExtractor.class);
        timeZone = DateTimeZone.forID(in.readString());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeEnum(extractor);
        out.writeString(timeZone.getID());
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    DateTimeExtractor extractor() {
        return extractor;
    }

    @Override
    public Object process(Object l) {
        if (l == null) {
            return null;
        }

        ReadableDateTime dt;
        if (l instanceof Long) {
            // most dates are returned as epoch millis
            dt = new DateTime(((Long) l).longValue(), DateTimeZone.UTC);
        } else if (l instanceof ReadableDateTime) {
            dt = (ReadableDateTime) l;
        } else {
            // explicit error instead of a blind ClassCastException,
            // consistent with UnaryArithmeticProcessor
            throw new SqlIllegalArgumentException("A date/time is required; received %s", l);
        }
        if (!DateTimeZone.UTC.equals(timeZone)) {
            dt = dt.toDateTime().withZone(timeZone);
        }
        return extractor.extract(dt);
    }

    @Override
    public int hashCode() {
        return Objects.hash(extractor, timeZone);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        DateTimeProcessor other = (DateTimeProcessor) obj;
        return Objects.equals(extractor, other.extractor)
                && Objects.equals(timeZone, other.timeZone);
    }

    @Override
    public String toString() {
        return extractor.toString();
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.tree.Location;
import org.joda.time.DateTimeZone;

import java.time.temporal.ChronoField;

/**
 * Extracts the day of the month (1-31) from a date.
 */
public class DayOfMonth extends DateTimeFunction {

    public DayOfMonth(Location location, Expression field, DateTimeZone timeZone) {
        super(location, field, timeZone);
    }

    @Override
    protected DateTimeExtractor extractor() {
        return DateTimeExtractor.DAY_OF_MONTH;
    }

    @Override
    protected ChronoField chronoField() {
        return ChronoField.DAY_OF_MONTH;
    }

    @Override
    public String dateTimeFormat() {
        return "d";
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.tree.Location;
import org.joda.time.DateTimeZone;

import java.time.temporal.ChronoField;

/**
 * Extracts the day of the week from a date.
 */
public class DayOfWeek extends DateTimeFunction {

    public DayOfWeek(Location location, Expression field, DateTimeZone timeZone) {
        super(location, field, timeZone);
    }

    @Override
    protected DateTimeExtractor extractor() {
        return DateTimeExtractor.DAY_OF_WEEK;
    }

    @Override
    protected ChronoField chronoField() {
        return ChronoField.DAY_OF_WEEK;
    }

    @Override
    public String dateTimeFormat() {
        return "e";
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.tree.Location;
import org.joda.time.DateTimeZone;

import java.time.temporal.ChronoField;

/**
 * Extracts the day of the year (1-366) from a date.
 */
public class DayOfYear extends DateTimeFunction {

    public DayOfYear(Location location, Expression field, DateTimeZone timeZone) {
        super(location, field, timeZone);
    }

    @Override
    protected DateTimeExtractor extractor() {
        return DateTimeExtractor.DAY_OF_YEAR;
    }

    @Override
    protected ChronoField chronoField() {
        return ChronoField.DAY_OF_YEAR;
    }

    @Override
    public String dateTimeFormat() {
        return "D";
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.session.SqlSession;
import org.elasticsearch.xpack.sql.tree.Location;
import org.joda.time.DateTimeZone;

/**
 * Maps the field names accepted by SQL's {@code EXTRACT(<field> FROM <date>)}
 * onto the concrete {@link DateTimeFunction} implementations. Several constants
 * are aliases (e.g. DAY/DAY_OF_MONTH/DOM) and delegate to the canonical one.
 */
public enum Extract {

    YEAR {
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return new Year(source, argument, timeZone);
        }
    },
    MONTH {
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return new MonthOfYear(source, argument, timeZone);
        }
    },
    WEEK {
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return new WeekOfWeekYear(source, argument, timeZone);
        }
    },
    DAY {
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return new DayOfMonth(source, argument, timeZone);
        }
    },
    DAY_OF_MONTH {
        // alias of DAY
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return DAY.toFunction(source, argument, timeZone);
        }
    },
    DOM {
        // alias of DAY
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return DAY.toFunction(source, argument, timeZone);
        }
    },
    DAY_OF_WEEK {
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return new DayOfWeek(source, argument, timeZone);
        }
    },
    DOW {
        // alias of DAY_OF_WEEK
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return DAY_OF_WEEK.toFunction(source, argument, timeZone);
        }
    },
    DAY_OF_YEAR {
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return new DayOfYear(source, argument, timeZone);
        }
    },
    DOY {
        // alias of DAY_OF_YEAR
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return DAY_OF_YEAR.toFunction(source, argument, timeZone);
        }
    },
    HOUR {
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return new HourOfDay(source, argument, timeZone);
        }
    },
    MINUTE {
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return new MinuteOfHour(source, argument, timeZone);
        }
    },
    MINUTE_OF_HOUR {
        // alias of MINUTE
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return MINUTE.toFunction(source, argument, timeZone);
        }
    },
    MINUTE_OF_DAY {
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return new MinuteOfDay(source, argument, timeZone);
        }
    },
    SECOND {
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return new SecondOfMinute(source, argument, timeZone);
        }
    },
    SECOND_OF_MINUTE {
        // alias of SECOND
        @Override
        public DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone) {
            return SECOND.toFunction(source, argument, timeZone);
        }
    };

    /**
     * Convenience overload using the time zone of the current SQL session.
     */
    public DateTimeFunction toFunction(Location source, Expression argument) {
        return toFunction(source, argument, SqlSession.currentContext().configuration.timeZone());
    }

    public abstract DateTimeFunction toFunction(Location source, Expression argument, DateTimeZone timeZone);
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.tree.Location;
import org.joda.time.DateTimeZone;

import java.time.temporal.ChronoField;

/**
 * Extracts the hour of the day (0-23) from a date.
 */
public class HourOfDay extends DateTimeFunction {

    public HourOfDay(Location location, Expression field, DateTimeZone timeZone) {
        super(location, field, timeZone);
    }

    @Override
    protected DateTimeExtractor extractor() {
        return DateTimeExtractor.HOUR_OF_DAY;
    }

    @Override
    protected ChronoField chronoField() {
        return ChronoField.HOUR_OF_DAY;
    }

    @Override
    public String dateTimeFormat() {
        // NOTE(review): sibling functions return single-letter pattern strings
        // ("d", "m", "s", ...) while this returns the word "hour" -- confirm
        // the consumer of dateTimeFormat() accepts both forms
        return "hour";
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.tree.Location;
import org.joda.time.DateTimeZone;

import java.time.temporal.ChronoField;

/**
 * Extracts the minute of the day (0-1439) from a date.
 */
public class MinuteOfDay extends DateTimeFunction {

    public MinuteOfDay(Location location, Expression field, DateTimeZone timeZone) {
        super(location, field, timeZone);
    }

    @Override
    public String dateTimeFormat() {
        // there is no date-time format pattern for "minute of day", so this
        // function cannot be pushed down as a formatted range
        throw new UnsupportedOperationException("no format for minuteOfDay");
    }

    @Override
    protected ChronoField chronoField() {
        return ChronoField.MINUTE_OF_DAY;
    }

    @Override
    protected DateTimeExtractor extractor() {
        return DateTimeExtractor.MINUTE_OF_DAY;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.tree.Location;
import org.joda.time.DateTimeZone;

import java.time.temporal.ChronoField;

/**
 * Extracts the minute of the hour (0-59) from a date.
 */
public class MinuteOfHour extends DateTimeFunction {

    public MinuteOfHour(Location location, Expression field, DateTimeZone timeZone) {
        super(location, field, timeZone);
    }

    @Override
    protected DateTimeExtractor extractor() {
        return DateTimeExtractor.MINUTE_OF_HOUR;
    }

    @Override
    protected ChronoField chronoField() {
        return ChronoField.MINUTE_OF_HOUR;
    }

    @Override
    public String dateTimeFormat() {
        return "m";
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.tree.Location;
import org.joda.time.DateTimeZone;

import java.time.temporal.ChronoField;

/**
 * Extracts the month of the year (1-12) from a date.
 */
public class MonthOfYear extends DateTimeFunction {

    public MonthOfYear(Location location, Expression field, DateTimeZone timeZone) {
        super(location, field, timeZone);
    }

    @Override
    protected DateTimeExtractor extractor() {
        return DateTimeExtractor.MONTH_OF_YEAR;
    }

    @Override
    protected ChronoField chronoField() {
        return ChronoField.MONTH_OF_YEAR;
    }

    @Override
    public String dateTimeFormat() {
        return "M";
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.tree.Location;
import org.joda.time.DateTimeZone;

import java.time.temporal.ChronoField;

/**
 * Extracts the second of the minute (0-59) from a date.
 */
public class SecondOfMinute extends DateTimeFunction {

    public SecondOfMinute(Location location, Expression field, DateTimeZone timeZone) {
        super(location, field, timeZone);
    }

    @Override
    protected DateTimeExtractor extractor() {
        return DateTimeExtractor.SECOND_OF_MINUTE;
    }

    @Override
    protected ChronoField chronoField() {
        return ChronoField.SECOND_OF_MINUTE;
    }

    @Override
    public String dateTimeFormat() {
        return "s";
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.tree.Location;
import org.joda.time.DateTimeZone;

import java.time.temporal.ChronoField;

/**
 * Extracts the week of the week-based year from a date.
 */
public class WeekOfWeekYear extends DateTimeFunction {
    public WeekOfWeekYear(Location location, Expression field, DateTimeZone timeZone) {
        super(location, field, timeZone);
    }

    @Override
    public String dateTimeFormat() {
        return "w";
    }

    @Override
    protected ChronoField chronoField() {
        // NOTE(review): ALIGNED_WEEK_OF_YEAR (weeks counted from Jan 1st) is not
        // the same field as Joda's weekOfWeekyear (ISO-8601 week) used by the
        // runtime extractor below, so the script and processor paths can disagree
        // near year boundaries -- confirm which semantics are intended
        return ChronoField.ALIGNED_WEEK_OF_YEAR;
    }

    @Override
    protected DateTimeExtractor extractor() {
        return DateTimeExtractor.WEEK_OF_YEAR;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.tree.Location;
import org.joda.time.DateTimeZone;

import java.time.temporal.ChronoField;

/**
 * Extracts the year of a datetime value; can also be pushed down as a
 * date-histogram aggregation (see {@link #interval()}).
 */
public class Year extends DateTimeHistogramFunction {

    public Year(Location location, Expression field, DateTimeZone timeZone) {
        super(location, field, timeZone);
    }

    /** Joda-style pattern for the year field. */
    @Override
    public String dateTimeFormat() {
        return "year";
    }

    /** Order by the underlying field expression rather than the extracted year. */
    @Override
    public Expression orderBy() {
        return field();
    }

    @Override
    protected ChronoField chronoField() {
        return ChronoField.YEAR;
    }

    @Override
    protected DateTimeExtractor extractor() {
        return DateTimeExtractor.YEAR;
    }

    /** Date-histogram interval used when this function is translated to an agg. */
    @Override
    public String interval() {
        return "year";
    }
}
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class ACos extends MathFunction { + public ACos(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.ACOS; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ASin.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ASin.java new file mode 100644 index 00000000000..a082f17ffb2 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ASin.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class ASin extends MathFunction { + public ASin(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.ASIN; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ATan.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ATan.java new file mode 100644 index 00000000000..3b8c8253b2a --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ATan.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class ATan extends MathFunction { + public ATan(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.ATAN; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Abs.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Abs.java new file mode 100644 index 00000000000..60ae94e7f64 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Abs.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +public class Abs extends MathFunction { + public Abs(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.ABS; + } + + @Override + public DataType dataType() { + return field().dataType(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cbrt.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cbrt.java new file mode 100644 index 00000000000..e3a8f920323 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cbrt.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Cbrt extends MathFunction { + public Cbrt(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.CBRT; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Ceil.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Ceil.java new file mode 100644 index 00000000000..e12ff38431f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Ceil.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; + +public class Ceil extends MathFunction { + public Ceil(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.CEIL; + } + + @Override + public DataType dataType() { + return DataTypeConversion.asInteger(field().dataType()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cos.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cos.java new file mode 100644 index 00000000000..086f389797c --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cos.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Cos extends MathFunction { + public Cos(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.COS; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cosh.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cosh.java new file mode 100644 index 00000000000..5c957df9fb7 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cosh.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Cosh extends MathFunction { + public Cosh(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.COSH; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Degrees.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Degrees.java new file mode 100644 index 00000000000..f4feb49dcd7 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Degrees.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Degrees extends MathFunction { + public Degrees(Location location, Expression field) { + super(location, field); + } + + @Override + protected String mathFunction() { + return "toDegrees"; + } + + @Override + protected MathOperation operation() { + return MathOperation.DEGREES; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java new file mode 100644 index 00000000000..356dc57b058 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + + +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.elasticsearch.xpack.sql.util.StringUtils; + +public class E extends MathFunction { + + private static final ScriptTemplate TEMPLATE = new ScriptTemplate("Math.E", Params.EMPTY, DataTypes.DOUBLE); + + public E(Location location) { + super(location, new Literal(location, Math.E, DataTypes.DOUBLE)); + } + + @Override + public Object fold() { + return Math.E; + } + + @Override + protected String functionArgs() { + return StringUtils.EMPTY; + } + + @Override + public ScriptTemplate asScript() { + return TEMPLATE; + } + + @Override + protected MathOperation operation() { + return MathOperation.E; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Exp.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Exp.java new file mode 100644 index 00000000000..a9dacc62436 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Exp.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Exp extends MathFunction { + public Exp(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.EXP; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Expm1.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Expm1.java new file mode 100644 index 00000000000..dce2797c3d1 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Expm1.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Expm1 extends MathFunction { + public Expm1(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.EXPM1; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Floor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Floor.java new file mode 100644 index 00000000000..eb0c52e16e1 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Floor.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; + +public class Floor extends MathFunction { + public Floor(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.FLOOR; + } + + @Override + public DataType dataType() { + return DataTypeConversion.asInteger(field().dataType()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Log.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Log.java new file mode 100644 index 00000000000..970d94ad169 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Log.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Log extends MathFunction { + public Log(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.LOG; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Log10.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Log10.java new file mode 100644 index 00000000000..3656a1ab651 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Log10.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Log10 extends MathFunction { + public Log10(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.LOG10; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java new file mode 100644 index 00000000000..3c9b3ec4c2a --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.math;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction;
import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.type.DataTypes;

import java.util.Locale;

/**
 * Base class for unary scalar math functions. Subclasses only need to pick a
 * {@link MathOperation}; script generation wraps the argument in the matching
 * {@code Math.<fn>(...)} call and runtime evaluation goes through a
 * {@link MathProcessor}.
 */
public abstract class MathFunction extends UnaryScalarFunction {

    protected MathFunction(Location location) {
        super(location);
    }

    protected MathFunction(Location location, Expression field) {
        super(location, field);
    }

    /** Foldable exactly when the wrapped expression is foldable. */
    @Override
    public boolean foldable() {
        return field().foldable();
    }

    @Override
    protected String formatScript(String template) {
        return super.formatScript(String.format(Locale.ROOT, "Math.%s(%s)", mathFunction(), template));
    }

    /**
     * Name of the {@code java.lang.Math} method used in generated scripts.
     * Defaults to the lower-cased class name; override when they differ
     * (e.g. DEGREES -&gt; toDegrees).
     */
    protected String mathFunction() {
        return getClass().getSimpleName().toLowerCase(Locale.ROOT);
    }

    /** Math functions produce doubles unless a subclass overrides. */
    @Override
    public DataType dataType() {
        return DataTypes.DOUBLE;
    }

    @Override
    protected final ProcessorDefinition makeProcessorDefinition() {
        return new UnaryProcessorDefinition(this, ProcessorDefinitions.toProcessorDefinition(field()), new MathProcessor(operation()));
    }

    /** The runtime operation backing this function. */
    protected abstract MathOperation operation();
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.math;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor;

import java.io.IOException;
import java.util.function.DoubleFunction;
import java.util.function.Function;

/**
 * Runtime evaluator for unary math functions. Wraps a {@link MathOperation}
 * and applies it to a single (numeric) input value.
 */
public class MathProcessor implements Processor {

    /**
     * The supported math operations. Each constant carries the function used
     * to evaluate it; most delegate directly to {@code java.lang.Math}.
     */
    public enum MathOperation {
        ABS((Object l) -> {
            if (l instanceof Float) {
                return Math.abs(((Float) l).floatValue());
            }
            if (l instanceof Double) {
                return Math.abs(((Double) l).doubleValue());
            }
            long lo = ((Number) l).longValue();
            // Math.abs(Long.MIN_VALUE) overflows back to MIN_VALUE; deliberately
            // saturate to Long.MAX_VALUE instead of returning a negative result.
            return lo >= 0 ? lo : lo == Long.MIN_VALUE ? Long.MAX_VALUE : -lo;
        }),

        ACOS(Math::acos),
        ASIN(Math::asin),
        ATAN(Math::atan),
        CBRT(Math::cbrt),
        CEIL(Math::ceil),
        COS(Math::cos),
        COSH(Math::cosh),
        DEGREES(Math::toDegrees),
        E((Object l) -> Math.E),
        EXP(Math::exp),
        EXPM1(Math::expm1),
        FLOOR(Math::floor),
        LOG(Math::log),
        LOG10(Math::log10),
        PI((Object l) -> Math.PI),
        RADIANS(Math::toRadians),
        ROUND((DoubleFunction<Object>) Math::round),
        SIN(Math::sin),
        SINH(Math::sinh),
        SQRT(Math::sqrt),
        TAN(Math::tan);

        // Typed instead of raw Function/DoubleFunction so unchecked warnings
        // are avoided and intent is explicit.
        private final Function<Object, Object> apply;

        MathOperation(Function<Object, Object> apply) {
            this.apply = apply;
        }

        /** Convenience overload: adapts a double-based function by unboxing the input. */
        MathOperation(DoubleFunction<Object> apply) {
            this.apply = (Object l) -> apply.apply(((Number) l).doubleValue());
        }

        public final Object apply(Object l) {
            return apply.apply(l);
        }
    }

    /** Writeable name used for stream (de)serialization. */
    public static final String NAME = "m";

    private final MathOperation processor;

    public MathProcessor(MathOperation processor) {
        this.processor = processor;
    }

    /** Reads the operation back from the wire. */
    public MathProcessor(StreamInput in) throws IOException {
        processor = in.readEnum(MathOperation.class);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeEnum(processor);
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public Object process(Object input) {
        return processor.apply(input);
    }

    MathOperation processor() {
        return processor;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        MathProcessor other = (MathProcessor) obj;
        return processor == other.processor;
    }

    @Override
    public int hashCode() {
        return processor.hashCode();
    }

    @Override
    public String toString() {
        return processor.toString();
    }
}
file mode 100644 index 00000000000..424c305c0f2 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Pi.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + + +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.elasticsearch.xpack.sql.util.StringUtils; + +public class Pi extends MathFunction { + + private static final ScriptTemplate TEMPLATE = new ScriptTemplate("Math.PI", Params.EMPTY, DataTypes.DOUBLE); + + public Pi(Location location) { + super(location, new Literal(location, Math.PI, DataTypes.DOUBLE)); + } + + @Override + public Object fold() { + return Math.PI; + } + + @Override + protected String functionArgs() { + return StringUtils.EMPTY; + } + + @Override + public ScriptTemplate asScript() { + return TEMPLATE; + } + + @Override + protected MathOperation operation() { + return MathOperation.PI; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Radians.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Radians.java new file mode 100644 index 00000000000..a94efc916c4 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Radians.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Radians extends MathFunction { + public Radians(Location location, Expression field) { + super(location, field); + } + + @Override + protected String mathFunction() { + return "toRadians"; + } + + @Override + protected MathOperation operation() { + return MathOperation.RADIANS; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java new file mode 100644 index 00000000000..43d2e20bec0 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; + +public class Round extends MathFunction { + public Round(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.ROUND; + } + + @Override + public DataType dataType() { + return DataTypeConversion.asInteger(field().dataType()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sin.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sin.java new file mode 100644 index 00000000000..ccae5f6b7bf --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sin.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Sin extends MathFunction { + public Sin(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.SIN; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sinh.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sinh.java new file mode 100644 index 00000000000..07994951a77 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sinh.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Sinh extends MathFunction { + public Sinh(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.SINH; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sqrt.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sqrt.java new file mode 100644 index 00000000000..2fc79b2718f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sqrt.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Sqrt extends MathFunction { + public Sqrt(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.SQRT; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Tan.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Tan.java new file mode 100644 index 00000000000..5680052c41a --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Tan.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Tan extends MathFunction { + public Tan(Location location, Expression field) { + super(location, field); + } + + @Override + protected MathOperation operation() { + return MathOperation.TAN; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggNameInput.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggNameInput.java new file mode 100644 index 00000000000..a45d848e0e7 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggNameInput.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.expression.Expression; + +public class AggNameInput extends NonExecutableInput { + + public AggNameInput(Expression expression, String context) { + super(expression, context); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggPathInput.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggPathInput.java new file mode 100644 index 00000000000..326fb1a2962 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggPathInput.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.util.Objects; + +public class AggPathInput extends NonExecutableInput { + + private final String innerKey; + // used in case the agg itself is not returned in a suitable format (like date aggs) + private final Processor action; + + public AggPathInput(Expression expression, String context) { + this(expression, context, null, null); + } + + public AggPathInput(Expression expression, String context, String innerKey) { + this(expression, context, innerKey, null); + } + + public AggPathInput(Expression expression, String context, String innerKey, Processor action) { + super(expression, context); + this.innerKey = innerKey; + this.action = action; + } + + public String innerKey() { + return innerKey; + } + + public Processor action() { + return action; + } + + @Override + public boolean resolved() { + return true; + } + + @Override + public int hashCode() { + return Objects.hash(context(), innerKey, action); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + AggPathInput other = (AggPathInput) obj; + return Objects.equals(context(), other.context()) + && Objects.equals(innerKey, other.innerKey) + && Objects.equals(action, other.action); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggValueInput.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggValueInput.java new file mode 100644 index 00000000000..e151668a37a --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggValueInput.java @@ -0,0 +1,55 @@ +/* 
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.MatrixFieldProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.SuppliedProcessor; + +import java.util.Objects; +import java.util.function.Supplier; + +public class AggValueInput extends LeafInput> { + + private final String innerKey; + private final Processor matrixProcessor; + + public AggValueInput(Expression expression, Supplier context, String innerKey) { + super(expression, context); + this.innerKey = innerKey; + this.matrixProcessor = innerKey != null ? new MatrixFieldProcessor(innerKey) : null; + } + + public String innerKey() { + return innerKey; + } + + @Override + public Processor asProcessor() { + return new SuppliedProcessor(() -> matrixProcessor != null ? 
matrixProcessor.process(context().get()) : context().get()); + } + + @Override + public int hashCode() { + return Objects.hash(context(), innerKey); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + AggValueInput other = (AggValueInput) obj; + return Objects.equals(context(), other.context()) + && Objects.equals(innerKey, other.innerKey); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AttributeInput.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AttributeInput.java new file mode 100644 index 00000000000..a01b54a1048 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AttributeInput.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; + +public class AttributeInput extends NonExecutableInput { + + public AttributeInput(Expression expression, Attribute context) { + super(expression, context); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/BinaryProcessorDefinition.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/BinaryProcessorDefinition.java new file mode 100644 index 00000000000..ef2616d02d6 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/BinaryProcessorDefinition.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.expression.Expression; + +import java.util.Arrays; + +public abstract class BinaryProcessorDefinition extends ProcessorDefinition { + + private final ProcessorDefinition left, right; + + public BinaryProcessorDefinition(Expression expression, ProcessorDefinition left, ProcessorDefinition right) { + super(expression, Arrays.asList(left, right)); + this.left = left; + this.right = right; + } + + public ProcessorDefinition left() { + return left; + } + + public ProcessorDefinition right() { + return right; + } + + @Override + public boolean resolved() { + return left().resolved() && right().resolved(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ConstantInput.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ConstantInput.java new file mode 100644 index 00000000000..e6c7a5e4494 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ConstantInput.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ConstantProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +public class ConstantInput extends LeafInput { + + public ConstantInput(Expression expression, Object context) { + super(expression, context); + } + + @Override + public Processor asProcessor() { + return new ConstantProcessor(context()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/HitExtractorInput.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/HitExtractorInput.java new file mode 100644 index 00000000000..5b4dedf3f50 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/HitExtractorInput.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.HitExtractorProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +public class HitExtractorInput extends LeafInput { + + public HitExtractorInput(Expression expression, HitExtractor context) { + super(expression, context); + } + + @Override + public Processor asProcessor() { + return new HitExtractorProcessor(context()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/LeafInput.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/LeafInput.java new file mode 100644 index 00000000000..d3e479a1929 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/LeafInput.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.expression.Expression; + +import java.util.Objects; + +import static java.util.Collections.emptyList; + +public abstract class LeafInput<T> extends ProcessorDefinition { + + private T context; + + public LeafInput(Expression expression, T context) { + super(expression, emptyList()); + this.context = context; + } + + public T context() { + return context; + } + + @Override + public boolean resolved() { + return true; + } + + @Override + public int hashCode() { + return Objects.hash(expression(), context); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + LeafInput<?> other = (LeafInput<?>) obj; + return Objects.equals(context(), other.context()) + && Objects.equals(expression(), other.expression()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/NonExecutableInput.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/NonExecutableInput.java new file mode 100644 index 00000000000..0655b70018e --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/NonExecutableInput.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +public class NonExecutableInput extends LeafInput { + + NonExecutableInput(Expression expression, T context) { + super(expression, context); + } + + @Override + public boolean resolved() { + return false; + } + + @Override + public Processor asProcessor() { + throw new SqlIllegalArgumentException("Unresolved input - needs resolving first"); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ProcessorDefinition.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ProcessorDefinition.java new file mode 100644 index 00000000000..a8652ea9f10 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ProcessorDefinition.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.tree.Node; + +import java.util.List; + +public abstract class ProcessorDefinition extends Node { + + private final Expression expression; + + public ProcessorDefinition(Expression expression, List children) { + super(children); + this.expression = expression; + } + + public Expression expression() { + return expression; + } + + public abstract boolean resolved(); + + public abstract Processor asProcessor(); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ProcessorDefinitions.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ProcessorDefinitions.java new file mode 100644 index 00000000000..c633ae3a61d --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ProcessorDefinitions.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; + +public abstract class ProcessorDefinitions { + + public static ProcessorDefinition toProcessorDefinition(Expression ex) { + if (ex.foldable()) { + return new ConstantInput(ex, ex.fold()); + } + if (ex instanceof ScalarFunction) { + return ((ScalarFunction) ex).asProcessorDefinition(); + } + if (ex instanceof AggregateFunction) { + // unresolved AggNameInput (should always get replaced by the folder) + return new AggNameInput(ex, ((AggregateFunction) ex).name()); + } + if (ex instanceof NamedExpression) { + return new AttributeInput(ex, ((NamedExpression) ex).toAttribute()); + } + throw new SqlIllegalArgumentException("Cannot extract processor from %s", ex); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ReferenceInput.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ReferenceInput.java new file mode 100644 index 00000000000..da66b1281de --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ReferenceInput.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.querydsl.container.ColumnReference; + +public class ReferenceInput extends NonExecutableInput { + + public ReferenceInput(Expression expression, ColumnReference context) { + super(expression, context); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/UnaryProcessorDefinition.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/UnaryProcessorDefinition.java new file mode 100644 index 00000000000..e38e5f383b3 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/UnaryProcessorDefinition.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.util.Objects; + +import static java.util.Collections.singletonList; + +public class UnaryProcessorDefinition extends ProcessorDefinition { + + private final ProcessorDefinition child; + private final Processor action; + + public UnaryProcessorDefinition(Expression expression, ProcessorDefinition child, Processor action) { + super(expression, singletonList(child)); + this.child = child; + this.action = action; + } + + public ProcessorDefinition child() { + return child; + } + + public Processor action() { + return action; + } + + @Override + public boolean resolved() { + return child.resolved(); + } + + @Override + public Processor asProcessor() { + return new ChainingProcessor(child.asProcessor(), action); + } + + @Override + public int hashCode() { + return Objects.hash(expression(), child, action); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnaryProcessorDefinition other = (UnaryProcessorDefinition) obj; + return Objects.equals(action, other.action) + && Objects.equals(child, other.child) + && Objects.equals(expression(), other.expression()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/BinaryProcessor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/BinaryProcessor.java new file mode 100644 index 00000000000..81795923915 --- /dev/null +++ 
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/BinaryProcessor.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public abstract class BinaryProcessor implements Processor { + + private final Processor left, right; + + public BinaryProcessor(Processor left, Processor right) { + this.left = left; + this.right = right; + } + + protected BinaryProcessor(StreamInput in) throws IOException { + left = in.readNamedWriteable(Processor.class); + right = in.readNamedWriteable(Processor.class); + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(left); + out.writeNamedWriteable(right); + doWrite(out); + } + + protected abstract void doWrite(StreamOutput out) throws IOException; + + @Override + public Object process(Object input) { + return doProcess(left.process(input), right.process(input)); + } + + protected Processor left() { + return left; + } + + protected Processor right() { + return right; + } + + protected abstract Object doProcess(Object left, Object right); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ChainingProcessor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ChainingProcessor.java new file mode 100644 index 00000000000..9be7de637e3 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ChainingProcessor.java @@ -0,0 
+1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Objects; + +/** + * A {@linkplain Processor} that composes the results of two + * {@linkplain Processor}s. + */ +public class ChainingProcessor extends UnaryProcessor { + public static final String NAME = "."; + + private final Processor processor; + + public ChainingProcessor(Processor first, Processor second) { + super(first); + this.processor = second; + } + + public ChainingProcessor(StreamInput in) throws IOException { + super(in); + processor = in.readNamedWriteable(Processor.class); + } + + @Override + protected void doWrite(StreamOutput out) throws IOException { + out.writeNamedWriteable(processor); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected Object doProcess(Object input) { + return processor.process(input); + } + + Processor first() { + return child(); + } + + Processor second() { + return processor; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), processor); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(processor, ((ChainingProcessor) obj).processor); + } + + @Override + public String toString() { + return processor + "(" + super.toString() + ")"; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ConstantProcessor.java 
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ConstantProcessor.java new file mode 100644 index 00000000000..cc419f3c7b7 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ConstantProcessor.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Objects; + +public class ConstantProcessor implements Processor { + + public static final String NAME = "c"; + + private final Object constant; + + public ConstantProcessor(Object value) { + this.constant = value; + } + + public ConstantProcessor(StreamInput in) throws IOException { + constant = in.readGenericValue(); + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeGenericValue(constant); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object process(Object input) { + return constant; + } + + @Override + public int hashCode() { + return Objects.hashCode(constant); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ConstantProcessor other = (ConstantProcessor) obj; + return Objects.equals(constant, other.constant); + } + + @Override + public String toString() { + return "^" + constant; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/HitExtractorProcessor.java 
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/HitExtractorProcessor.java new file mode 100644 index 00000000000..00b778b2396 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/HitExtractorProcessor.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; + +import java.io.IOException; +import java.util.Objects; + +/** + * Processor wrapping a HitExtractor essentially being a source/leaf of a + * Processor tree. 
+ */ +public class HitExtractorProcessor implements Processor { + + public static final String NAME = "h"; + + private final HitExtractor extractor; + + public HitExtractorProcessor(HitExtractor extractor) { + this.extractor = extractor; + } + + public HitExtractorProcessor(StreamInput in) throws IOException { + extractor = in.readNamedWriteable(HitExtractor.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(extractor); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object process(Object input) { + if (!(input instanceof SearchHit)) { + throw new SqlIllegalArgumentException("Expected a SearchHit but received %s", input); + } + return extractor.get((SearchHit) input); + } + + @Override + public int hashCode() { + return Objects.hash(extractor); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + HitExtractorProcessor other = (HitExtractorProcessor) obj; + return Objects.equals(extractor, other.extractor); + } + + @Override + public String toString() { + return extractor.toString(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/MatrixFieldProcessor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/MatrixFieldProcessor.java new file mode 100644 index 00000000000..7036ca79fb4 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/MatrixFieldProcessor.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class MatrixFieldProcessor implements Processor { + public static final String NAME = "mat"; + + private final String key; + + public MatrixFieldProcessor(String key) { + this.key = key; + } + + public MatrixFieldProcessor(StreamInput in) throws IOException { + key = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(key); + } + + @Override + public String getWriteableName() { + return NAME; + } + + String key() { + return key; + } + + @Override + public Object process(Object r) { + return r instanceof Map ? ((Map) r).get(key) : r; + } + + @Override + public int hashCode() { + return Objects.hashCode(key); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + MatrixFieldProcessor other = (MatrixFieldProcessor) obj; + return Objects.equals(key, other.key); + } + + public String toString() { + return "[" + key + "]"; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/Processor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/Processor.java new file mode 100644 index 00000000000..f77568368b4 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/Processor.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime;

import org.elasticsearch.common.io.stream.NamedWriteable;

/**
 * A runtime value transformation applied while building query results.
 * Extends {@link NamedWriteable} so concrete processors can be serialized
 * across nodes by name.
 */
public interface Processor extends NamedWriteable {

    // Transforms the given input and returns the result; accepts/returns Object
    // since processors are chained over heterogeneous extracted values.
    Object process(Object input);
}
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import java.io.IOException; +import java.util.function.Supplier; + +public class SuppliedProcessor implements Processor { + + private final Supplier action; + + public SuppliedProcessor(Supplier action) { + this.action = action; + } + + @Override + public String getWriteableName() { + throw new SqlIllegalArgumentException("transient"); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new SqlIllegalArgumentException("transient"); + } + + @Override + public Object process(Object input) { + return action.get(); + } + +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/UnaryProcessor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/UnaryProcessor.java new file mode 100644 index 00000000000..613e2632283 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/UnaryProcessor.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Objects; + +public abstract class UnaryProcessor implements Processor { + + private final Processor child; + + public UnaryProcessor(Processor child) { + this.child = child; + } + + protected UnaryProcessor(StreamInput in) throws IOException { + child = in.readNamedWriteable(Processor.class); + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(child); + doWrite(out); + } + + protected abstract void doWrite(StreamOutput out) throws IOException; + + @Override + public final Object process(Object input) { + return doProcess(child.process(input)); + } + + public Processor child() { + return child; + } + + protected abstract Object doProcess(Object input); + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnaryProcessor other = (UnaryProcessor) obj; + return Objects.equals(child, other.child); + } + + @Override + public int hashCode() { + return Objects.hashCode(child); + } + + @Override + public String toString() { + return Objects.toString(child); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Agg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Agg.java new file mode 100644 index 00000000000..3b75b7f98b5 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Agg.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; + +class Agg extends Param { + + Agg(AggregateFunctionAttribute aggRef) { + super(aggRef); + } + + String aggName() { + return value().functionId(); + } + + public String aggProperty() { + return value().propertyPath(); + } + + @Override + public String prefix() { + return "a"; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Param.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Param.java new file mode 100644 index 00000000000..ff2e3322ae0 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Param.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +import java.util.Locale; + +abstract class Param { + private final T value; + + Param(T value) { + this.value = value; + } + + abstract String prefix(); + + T value() { + return value; + } + + @Override + public String toString() { + return String.format(Locale.ROOT, "{%s=%s}", prefix(), value); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Params.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Params.java new file mode 100644 index 00000000000..1b35aa0bea8 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Params.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyList; + +/** + * Parameters for a script + * + * This class mainly exists to handle the different aggregation cases. + * While aggs can appear in scripts like regular parameters, they are not passed + * as parameters but rather as bucket_path. + * However in some cases (like count), it's not the agg path that is relevant but rather + * its property (_count). + * As the agg name still needs to be remembered to properly associate the script with. + * + * Hence why this class supports aggRef (which always returns the agg names) and aggPaths + * (which returns the agg property if it exists or the agg name/reference). 
+ * + * Also the parameter names support late binding/evaluation since the agg reference (like function id) + * can be changed during the optimization phase (for example min agg -> stats.min). + */ +public class Params { + + public static final Params EMPTY = new Params(emptyList()); + + private final List> params; + + Params(List> params) { + // flatten params + this.params = flatten(params); + } + + // return vars and aggs in the declared order for binding them to the script + List asCodeNames() { + if (params.isEmpty()) { + return emptyList(); + } + + List names = new ArrayList<>(params.size()); + int aggs = 0, vars = 0; + + for (Param p : params) { + names.add(p.prefix() + (p instanceof Agg ? aggs++ : vars++)); + } + + return names; + } + + // return only the vars (as parameter for a script) + // agg refs are returned separately to be provided as bucket_paths + Map asParams() { + Map map = new LinkedHashMap<>(params.size()); + + int count = 0; + + for (Param p : params) { + if (p instanceof Var) { + map.put(p.prefix() + count++, p.value()); + } + } + + return map; + } + + // return agg refs in a format suitable for bucket_paths + Map asAggPaths() { + Map map = new LinkedHashMap<>(); + + int aggs = 0; + + for (Param p : params) { + if (p instanceof Agg) { + Agg a = (Agg) p; + String s = a.aggProperty() != null ? 
a.aggProperty() : a.aggName(); + map.put(p.prefix() + aggs++, s); + } + } + + return map; + } + + // return the agg refs + List asAggRefs() { + List refs = new ArrayList<>(); + + for (Param p : params) { + if (p instanceof Agg) { + refs.add(((Agg) p).aggName()); + } + } + + return refs; + } + + + private static List> flatten(List> params) { + List> flatten = emptyList(); + + if (!params.isEmpty()) { + flatten = new ArrayList<>(); + for (Param p : params) { + if (p instanceof Script) { + flatten.addAll(flatten(((Script) p).value().params)); + } + else if (p instanceof Agg) { + flatten.add(p); + } + else if (p instanceof Var) { + flatten.add(p); + } + else { + throw new SqlIllegalArgumentException("Unsupported field %s", p); + } + } + } + return flatten; + } + + @Override + public String toString() { + return params.toString(); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/ParamsBuilder.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/ParamsBuilder.java new file mode 100644 index 00000000000..8f99f29b9c1 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/ParamsBuilder.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; + +import java.util.ArrayList; +import java.util.List; + +public class ParamsBuilder { + + private final List> params = new ArrayList<>(); + + public static ParamsBuilder paramsBuilder() { + return new ParamsBuilder(); + } + + public ParamsBuilder variable(Object value) { + params.add(new Var(value)); + return this; + } + + public ParamsBuilder agg(AggregateFunctionAttribute agg) { + params.add(new Agg(agg)); + return this; + } + + public ParamsBuilder script(Params ps) { + params.add(new Script(ps)); + return this; + } + + public Params build() { + return new Params(new ArrayList<>(params)); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Script.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Script.java new file mode 100644 index 00000000000..ceabac9c499 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Script.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +class Script extends Param { + + Script(Params value) { + super(value); + } + + @Override + public String prefix() { + return "s"; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/ScriptTemplate.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/ScriptTemplate.java new file mode 100644 index 00000000000..7da07333844 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/ScriptTemplate.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +import static java.lang.String.format; + +public class ScriptTemplate { + + public static final ScriptTemplate EMPTY = new ScriptTemplate(StringUtils.EMPTY); + + private final String template; + private final Params params; + // used for sorting based on scripts + private final DataType outputType; + + public ScriptTemplate(String template) { + this(template, Params.EMPTY, DataTypes.KEYWORD); + } + + public ScriptTemplate(String template, Params params, DataType outputType) { + this.template = template; + this.params = params; + this.outputType = outputType; + } + + public String template() { + return template; + } + + public Params params() { + 
return params; + } + + public List aggRefs() { + return params.asAggRefs(); + } + + public Map aggPaths() { + return params.asAggPaths(); + } + + public DataType outputType() { + return outputType; + } + + public Script toPainless() { + return new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, bindTemplate(), params.asParams()); + } + + private String bindTemplate() { + List binding = params.asCodeNames(); + return binding.isEmpty() ? template : format(Locale.ROOT, template, binding.toArray()); + } + + @Override + public int hashCode() { + return Objects.hash(template, params, outputType); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ScriptTemplate other = (ScriptTemplate) obj; + return Objects.equals(template, other.template) + && Objects.equals(params, other.params) + && Objects.equals(outputType, other.outputType); + } + + @Override + public String toString() { + return bindTemplate(); + } + + public static String formatTemplate(String template) { + return template.replace("{}", "params.%s"); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Var.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Var.java new file mode 100644 index 00000000000..96bda8eabe6 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Var.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +class Var extends Param { + + Var(Object value) { + super(value); + } + + @Override + public String prefix() { + return "v"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/And.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/And.java new file mode 100644 index 00000000000..39a076ce491 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/And.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryLogic; +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class And extends BinaryLogic implements Negateable { + + public And(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + public Or negate() { + return new Or(location(), new Not(location(), left()), new Not(location(), right())); + } + + @Override + public And swapLeftAndRight() { + return new And(location(), right(), left()); + } + + @Override + public String symbol() { + return "&&"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryComparison.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryComparison.java new file mode 100644 index 00000000000..25aa003c7d2 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryComparison.java @@ -0,0 +1,35 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +// marker class to indicate operations that rely on values +public abstract class BinaryComparison extends BinaryOperator { + + public BinaryComparison(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + protected TypeResolution resolveInputType(DataType inputType) { + return TypeResolution.TYPE_RESOLVED; + } + + @Override + protected Expression canonicalize() { + return left().hashCode() > right().hashCode() ? swapLeftAndRight() : this; + } + + @Override + public DataType dataType() { + return DataTypes.BOOLEAN; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Equals.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Equals.java new file mode 100644 index 00000000000..51529fd6917 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Equals.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Equals extends BinaryComparison { + + public Equals(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + public Equals swapLeftAndRight() { + return new Equals(location(), right(), left()); + } + + @Override + public String symbol() { + return "="; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/GreaterThan.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/GreaterThan.java new file mode 100644 index 00000000000..ffc1c89e6d4 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/GreaterThan.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class GreaterThan extends BinaryComparison implements Negateable { + + public GreaterThan(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + public LessThan swapLeftAndRight() { + return new LessThan(location(), right(), left()); + } + + @Override + public LessThanOrEqual negate() { + return new LessThanOrEqual(location(), left(), right()); + } + + @Override + public String symbol() { + return ">"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/GreaterThanOrEqual.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/GreaterThanOrEqual.java new file mode 100644 index 00000000000..475afe66310 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/GreaterThanOrEqual.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class GreaterThanOrEqual extends BinaryComparison implements Negateable { + + public GreaterThanOrEqual(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + public LessThanOrEqual swapLeftAndRight() { + return new LessThanOrEqual(location(), right(), left()); + } + + @Override + public LessThan negate() { + return new LessThan(location(), left(), right()); + } + + @Override + public String symbol() { + return ">="; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java new file mode 100644 index 00000000000..5c961583aa5 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.elasticsearch.xpack.sql.util.CollectionUtils; + +public class In extends Expression { + + private final Expression value; + private final List list; + private final boolean nullable, foldable; + + public In(Location location, Expression value, List list) { + super(location, CollectionUtils.combine(list, value)); + this.value = value; + this.list = list; + + this.nullable = children().stream().anyMatch(Expression::nullable); + this.foldable = children().stream().allMatch(Expression::foldable); + } + + public Expression value() { + return value; + } + + public List list() { + return list; + } + + @Override + public DataType dataType() { + return DataTypes.BOOLEAN; + } + + @Override + public boolean nullable() { + return nullable; + } + + @Override + public boolean foldable() { + return foldable; + } + + @Override + public int hashCode() { + return Objects.hash(value, list); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (!this.equals(obj) || getClass() != obj.getClass()) { + return false; + } + + In other = (In) obj; + return Objects.equals(value, other.value) + && Objects.equals(list, other.list); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java new file mode 100644 index 00000000000..15918c17a1b --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.UnaryExpression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +public class IsNotNull extends UnaryExpression { + + public IsNotNull(Location location, Expression child) { + super(location, child); + } + + @Override + public boolean nullable() { + return false; + } + + @Override + public DataType dataType() { + return DataTypes.BOOLEAN; + } + + @Override + public String toString() { + return child().toString() + " IS NOT NULL"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/LessThan.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/LessThan.java new file mode 100644 index 00000000000..05286ff9016 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/LessThan.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class LessThan extends BinaryComparison implements Negateable { + + public LessThan(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + public GreaterThan swapLeftAndRight() { + return new GreaterThan(location(), right(), left()); + } + + @Override + public GreaterThanOrEqual negate() { + return new GreaterThanOrEqual(location(), left(), right()); + } + + @Override + public String symbol() { + return "<"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/LessThanOrEqual.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/LessThanOrEqual.java new file mode 100644 index 00000000000..dc61e0362e9 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/LessThanOrEqual.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class LessThanOrEqual extends BinaryComparison implements Negateable { + + public LessThanOrEqual(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + public GreaterThanOrEqual swapLeftAndRight() { + return new GreaterThanOrEqual(location(), right(), left()); + } + + @Override + public GreaterThan negate() { + return new GreaterThan(location(), left(), right()); + } + + @Override + public String symbol() { + return "<="; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Not.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Not.java new file mode 100644 index 00000000000..2b8470e86d3 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Not.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.UnaryExpression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +public class Not extends UnaryExpression { + + public Not(Location location, Expression child) { + super(location, child); + } + + @Override + protected Expression canonicalize() { + Expression canonicalChild = child().canonical(); + if (canonicalChild instanceof Negateable) { + return ((Negateable) canonicalChild).negate(); + } + return this; + } + + @Override + public DataType dataType() { + return DataTypes.BOOLEAN; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Or.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Or.java new file mode 100644 index 00000000000..1d45d186e62 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Or.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryLogic; +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Or extends BinaryLogic implements Negateable { + + public Or(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + public Or swapLeftAndRight() { + return new Or(location(), right(), left()); + } + + @Override + public And negate() { + return new And(location(), new Not(location(), left()), new Not(location(), right())); + } + + @Override + public String symbol() { + return "||"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java new file mode 100644 index 00000000000..7439f6def14 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public abstract class Predicates { + + public static List splitAnd(Expression exp) { + if (exp instanceof And) { + And and = (And) exp; + List list = new ArrayList<>(); + list.addAll(splitAnd(and.left())); + list.addAll(splitAnd(and.right())); + return list; + } + return Collections.singletonList(exp); + } + + public static List splitOr(Expression exp) { + if (exp instanceof Or) { + Or or = (Or) exp; + List list = new ArrayList<>(); + list.addAll(splitOr(or.left())); + list.addAll(splitOr(or.right())); + return list; + } + return Collections.singletonList(exp); + } + + public static Expression combineOr(List exps) { + return exps.stream().reduce((l, r) -> new Or(l.location(), l, r)).orElse(null); + } + + public static Expression combineAnd(List exps) { + return exps.stream().reduce((l, r) -> new And(l.location(), l, r)).orElse(null); + } + + public static List inCommon(List l, List r) { + List common = new ArrayList<>(Math.min(l.size(), r.size())); + for (Expression lExp : l) { + for (Expression rExp : r) { + if (lExp.semanticEquals(rExp)) { + common.add(lExp); + } + } + } + return common.isEmpty() ? Collections.emptyList() : common; + } + + public static List subtract(List from, List r) { + List diff = new ArrayList<>(Math.min(from.size(), r.size())); + for (Expression lExp : from) { + for (Expression rExp : r) { + if (!lExp.semanticEquals(rExp)) { + diff.add(lExp); + } + } + } + return diff.isEmpty() ? 
Collections.emptyList() : diff; + } + + + public static boolean canEvaluate(Expression exp, LogicalPlan plan) { + return exp.references().subsetOf(plan.outputSet()); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java new file mode 100644 index 00000000000..080d547de13 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.Arrays; +import java.util.Objects; + +// BETWEEN or range - is a mix of gt(e) AND lt(e) +public class Range extends Expression { + + private final Expression value, lower, upper; + private final boolean includeLower, includeUpper; + + public Range(Location location, Expression value, Expression lower, boolean includeLower, Expression upper, boolean includeUpper) { + super(location, Arrays.asList(value, lower, upper)); + + this.value = value; + this.lower = lower; + this.upper = upper; + this.includeLower = includeLower; + this.includeUpper = includeUpper; + } + + public Expression value() { + return value; + } + + public Expression lower() { + return lower; + } + + public Expression upper() { + return upper; + } + + public boolean includeLower() { + return includeLower; + } + + public boolean includeUpper() { + return includeUpper; + } + + @Override + public boolean foldable() { + 
return value.foldable() && lower.foldable() && upper.foldable(); + } + + @Override + public boolean nullable() { + return value.nullable() && lower.nullable() && upper.nullable(); + } + + @Override + public DataType dataType() { + return DataTypes.BOOLEAN; + } + + @Override + public int hashCode() { + return Objects.hash(includeLower, includeUpper, value, lower, upper); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Range other = (Range) obj; + return Objects.equals(includeLower, other.includeLower) + && Objects.equals(includeUpper, other.includeUpper) + && Objects.equals(value, other.value) + && Objects.equals(lower, other.lower) + && Objects.equals(upper, other.upper); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(lower); + sb.append(includeLower ? " <= " : " < "); + sb.append(value); + sb.append(includeUpper ? " <= " : " < "); + sb.append(upper); + return sb.toString(); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextPredicate.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextPredicate.java new file mode 100644 index 00000000000..05cfbce4c2f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextPredicate.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate.fulltext; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class FullTextPredicate extends Expression { + + public enum Operator { + AND, + OR; + + public org.elasticsearch.index.query.Operator toEs() { + return org.elasticsearch.index.query.Operator.fromString(name()); + } + } + + private final String query; + private final String options; + private final Map optionMap; + // common properties + private final String analyzer; + + FullTextPredicate(Location location, String query, String options, List children) { + super(location, children); + this.query = query; + this.options = options; + // inferred + this.optionMap = FullTextUtils.parseSettings(options, location); + this.analyzer = optionMap.get("analyzer"); + } + + public String query() { + return query; + } + + public String options() { + return options; + } + + public Map optionMap() { + return optionMap; + } + + public String analyzer() { + return analyzer; + } + + @Override + public boolean nullable() { + return false; + } + + @Override + public DataType dataType() { + return DataTypes.BOOLEAN; + } + + @Override + public int hashCode() { + return Objects.hash(query, options); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + FullTextPredicate other = (FullTextPredicate) obj; + return Objects.equals(query, other.query) + && Objects.equals(options, other.options); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtils.java 
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtils.java new file mode 100644 index 00000000000..f332f3e0582 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtils.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate.fulltext; + +import java.util.LinkedHashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Set; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.FullTextPredicate.Operator; +import org.elasticsearch.xpack.sql.parser.ParsingException; +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.emptyMap; + +abstract class FullTextUtils { + + private static final String DELIMITER = ";"; + + static Map parseSettings(String options, Location location) { + if (!Strings.hasText(options)) { + return emptyMap(); + } + String[] list = Strings.delimitedListToStringArray(options, DELIMITER); + Map op = new LinkedHashMap(list.length); + + for (String entry : list) { + String[] split = splitInTwo(entry, "="); + if (split == null) { + throw new ParsingException(location, "Cannot parse entry %s in options %s", entry, options); + } + + String previous = op.put(split[0], split[1]); + if (previous != null) { + throw new ParsingException(location, "Duplicate option %s detected in options %s", entry, options); + } + + } + return op; + } + + static Map parseFields(Map options, Location location) { + return parseFields(options.get("fields"), location); + } + + static Map parseFields(String fieldString, Location location) { + if (!Strings.hasText(fieldString)) { + return emptyMap(); + } + 
Set fieldNames = Strings.commaDelimitedListToSet(fieldString); + + Float defaultBoost = Float.valueOf(1.0f); + Map fields = new LinkedHashMap<>(); + + for (String fieldName : fieldNames) { + if (fieldName.contains("^")) { + String[] split = splitInTwo(fieldName, "^"); + if (split == null) { + fields.put(fieldName, defaultBoost); + } + else { + try { + fields.put(split[0], Float.parseFloat(split[1])); + } catch (NumberFormatException nfe) { + throw new ParsingException(location, "Cannot parse boosting for %s", fieldName); + } + } + } + else { + fields.put(fieldName, defaultBoost); + } + } + + return fields; + } + + private static String[] splitInTwo(String string, String delimiter) { + String[] split = Strings.split(string, delimiter); + if (split == null || split.length != 2) { + return null; + } + return split; + } + + static FullTextPredicate.Operator operator(Map options, String key) { + String value = options.get(key); + return value != null ? Operator.valueOf(value.toUpperCase(Locale.ROOT)) : null; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/MatchQueryPredicate.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/MatchQueryPredicate.java new file mode 100644 index 00000000000..fd9df9e3ca7 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/MatchQueryPredicate.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate.fulltext; + +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.singletonList; + +public class MatchQueryPredicate extends FullTextPredicate { + + private final Expression field; + private final Operator operator; + + public MatchQueryPredicate(Location location, Expression field, String query, String options) { + super(location, query, options, singletonList(field)); + this.field = field; + + this.operator = FullTextUtils.operator(optionMap(), "operator"); + } + + public Expression field() { + return field; + } + + public Operator operator() { + return operator; + } + + @Override + public int hashCode() { + return Objects.hash(field, super.hashCode()); + } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj)) { + MatchQueryPredicate other = (MatchQueryPredicate) obj; + return Objects.equals(field, other.field); + } + return false; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/MultiMatchQueryPredicate.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/MultiMatchQueryPredicate.java new file mode 100644 index 00000000000..5ac182a8311 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/MultiMatchQueryPredicate.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate.fulltext; + +import java.util.Map; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.emptyList; + +public class MultiMatchQueryPredicate extends FullTextPredicate { + + private final String fieldString; + private final Map fields; + private final Operator operator; + + public MultiMatchQueryPredicate(Location location, String fieldString, String query, String options) { + super(location, query, options, emptyList()); + this.fieldString = fieldString; + + // inferred + this.fields = FullTextUtils.parseFields(fieldString, location); + this.operator = FullTextUtils.operator(optionMap(), "operator"); + } + + public String fieldString() { + return fieldString; + } + + public Map fields() { + return fields; + } + + public Operator operator() { + return operator; + } + + @Override + public int hashCode() { + return Objects.hash(fieldString, super.hashCode()); + } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj)) { + MultiMatchQueryPredicate other = (MultiMatchQueryPredicate) obj; + return Objects.equals(fieldString, other.fieldString); + } + return false; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/StringQueryPredicate.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/StringQueryPredicate.java new file mode 100644 index 00000000000..0b94eb0d250 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/StringQueryPredicate.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate.fulltext; + +import java.util.Map; + +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.emptyList; + +public class StringQueryPredicate extends FullTextPredicate { + + private final Map fields; + private final Operator defaultOperator; + + public StringQueryPredicate(Location location, String query, String options) { + super(location, query, options, emptyList()); + + // inferred + this.fields = FullTextUtils.parseFields(optionMap(), location); + this.defaultOperator = FullTextUtils.operator(optionMap(), "default_operator"); + } + + public Map fields() { + return fields; + } + + public Operator defaultOperator() { + return defaultOperator; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/regex/Like.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/regex/Like.java new file mode 100644 index 00000000000..77ec997e055 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/regex/Like.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.regex; + +import org.elasticsearch.xpack.sql.expression.BinaryExpression; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +public class Like extends BinaryExpression { + + public Like(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + public Like swapLeftAndRight() { + return new Like(location(), right(), left()); + } + + @Override + public DataType dataType() { + return DataTypes.BOOLEAN; + } + + @Override + public String symbol() { + return "LIKE"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/regex/RLike.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/regex/RLike.java new file mode 100644 index 00000000000..acdaf636a99 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/expression/regex/RLike.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.regex; + +import org.elasticsearch.xpack.sql.expression.BinaryExpression; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +public class RLike extends BinaryExpression { + + public RLike(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + public RLike swapLeftAndRight() { + return new RLike(location(), right(), left()); + } + + @Override + public DataType dataType() { + return DataTypes.BOOLEAN; + } + + @Override + public String symbol() { + return "RLIKE"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java new file mode 100644 index 00000000000..f179a2c8d30 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java @@ -0,0 +1,1403 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.optimizer; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Alias; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.AttributeMap; +import org.elasticsearch.xpack.sql.expression.AttributeSet; +import org.elasticsearch.xpack.sql.expression.BinaryExpression; +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.ExpressionSet; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.NestedFieldAttribute; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.FunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.aggregate.ExtendedStats; +import org.elasticsearch.xpack.sql.expression.function.aggregate.ExtendedStatsEnclosed; +import org.elasticsearch.xpack.sql.expression.function.aggregate.InnerAggregate; +import org.elasticsearch.xpack.sql.expression.function.aggregate.MatrixStats; +import org.elasticsearch.xpack.sql.expression.function.aggregate.MatrixStatsEnclosed; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Percentile; +import org.elasticsearch.xpack.sql.expression.function.aggregate.PercentileRank; +import org.elasticsearch.xpack.sql.expression.function.aggregate.PercentileRanks; +import 
org.elasticsearch.xpack.sql.expression.function.aggregate.Percentiles; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Stats; +import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.predicate.And; +import org.elasticsearch.xpack.sql.expression.predicate.BinaryComparison; +import org.elasticsearch.xpack.sql.expression.predicate.Equals; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThan; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.LessThan; +import org.elasticsearch.xpack.sql.expression.predicate.LessThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.Not; +import org.elasticsearch.xpack.sql.expression.predicate.Or; +import org.elasticsearch.xpack.sql.expression.predicate.Range; +import org.elasticsearch.xpack.sql.plan.logical.Aggregate; +import org.elasticsearch.xpack.sql.plan.logical.Filter; +import org.elasticsearch.xpack.sql.plan.logical.Limit; +import org.elasticsearch.xpack.sql.plan.logical.LocalRelation; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.plan.logical.Project; +import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias; +import org.elasticsearch.xpack.sql.rule.Rule; +import org.elasticsearch.xpack.sql.rule.RuleExecutor; +import org.elasticsearch.xpack.sql.session.EmptyExecutable; +import org.elasticsearch.xpack.sql.session.SingletonExecutable; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import 
java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +import static java.util.stream.Collectors.toList; +import static org.elasticsearch.xpack.sql.expression.Literal.FALSE; +import static org.elasticsearch.xpack.sql.expression.Literal.TRUE; +import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.combineAnd; +import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.combineOr; +import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.inCommon; +import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.splitAnd; +import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.splitOr; +import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.subtract; +import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine; + + +public class Optimizer extends RuleExecutor { + + public ExecutionInfo debugOptimize(LogicalPlan verified) { + return verified.optimized() ? null : executeWithInfo(verified); + } + + public LogicalPlan optimize(LogicalPlan verified) { + return verified.optimized() ? 
verified : execute(verified); + } + + @Override + protected Iterable.Batch> batches() { + Batch resolution = new Batch("Finish Analysis", + new PruneSubqueryAliases(), + CleanAliases.INSTANCE + ); + + Batch aggregate = new Batch("Aggregation", + new PruneDuplicatesInGroupBy(), + new ReplaceDuplicateAggsWithReferences(), + new ReplaceAggsWithMatrixStats(), + new ReplaceAggsWithExtendedStats(), + new ReplaceAggsWithStats(), + new PromoteStatsToExtendedStats(), + new ReplaceAggsWithPercentiles(), + new ReplceAggsWithPercentileRanks() + ); + + Batch operators = new Batch("Operator Optimization", + // combining + new CombineProjections(), + // folding + new ReplaceFoldableAttributes(), + new ConstantFolding(), + // boolean + new BooleanSimplification(), + new BinaryComparisonSimplification(), + new BooleanLiteralsOnTheRight(), + new CombineComparisonsIntoRange(), + // prune/elimination + new PruneFilters(), + new PruneOrderBy(), + new PruneOrderByNestedFields(), + new PruneCast(), + new PruneDuplicateFunctions() + ); + + Batch local = new Batch("Skip Elasticsearch", + new SkipQueryOnLimitZero(), + new SkipQueryIfFoldingProjection() + ); + //new BalanceBooleanTrees()); + Batch label = new Batch("Set as Optimized", Limiter.ONCE, + new SetAsOptimized()); + + return Arrays.asList(resolution, aggregate, operators, local, label); + } + + + static class PruneSubqueryAliases extends OptimizerRule { + + PruneSubqueryAliases() { + super(TransformDirection.UP); + } + + @Override + protected LogicalPlan rule(SubQueryAlias alias) { + return alias.child(); + } + } + + static class CleanAliases extends OptimizerRule { + + private static final CleanAliases INSTANCE = new CleanAliases(); + + CleanAliases() { + super(TransformDirection.UP); + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (plan instanceof Project) { + Project p = (Project) plan; + return new Project(p.location(), p.child(), cleanExpressions(p.projections())); + } + + if (plan instanceof Aggregate) 
{ + Aggregate a = (Aggregate) plan; + // clean group expressions + List cleanedGroups = a.groupings().stream().map(CleanAliases::trimAliases).collect(toList()); + return new Aggregate(a.location(), a.child(), cleanedGroups, cleanExpressions(a.aggregates())); + } + + return plan.transformExpressionsOnly(e -> { + if (e instanceof Alias) { + return ((Alias) e).child(); + } + return e; + }); + } + + private List cleanExpressions(List args) { + return args.stream().map(CleanAliases::trimNonTopLevelAliases).map(NamedExpression.class::cast) + .collect(toList()); + } + + static Expression trimNonTopLevelAliases(Expression e) { + if (e instanceof Alias) { + Alias a = (Alias) e; + return new Alias(a.location(), a.name(), a.qualifier(), trimAliases(a.child()), a.id()); + } + return trimAliases(e); + } + + private static Expression trimAliases(Expression e) { + return e.transformDown(Alias::child, Alias.class); + } + } + + static class PruneDuplicatesInGroupBy extends OptimizerRule { + + @Override + protected LogicalPlan rule(Aggregate agg) { + List groupings = agg.groupings(); + if (groupings.isEmpty()) { + return agg; + } + ExpressionSet unique = new ExpressionSet<>(groupings); + if (unique.size() != groupings.size()) { + return new Aggregate(agg.location(), agg.child(), new ArrayList<>(unique), agg.aggregates()); + } + return agg; + } + } + + static class ReplaceDuplicateAggsWithReferences extends OptimizerRule { + + @Override + protected LogicalPlan rule(Aggregate agg) { + List aggs = agg.aggregates(); + + Map unique = new HashMap<>(); + Map reverse = new HashMap<>(); + + // find duplicates by looking at the function and canonical form + for (NamedExpression ne : aggs) { + if (ne instanceof Alias) { + Alias a = (Alias) ne; + unique.putIfAbsent(a.child(), a); + reverse.putIfAbsent(ne, a.child()); + } + else { + unique.putIfAbsent(ne.canonical(), ne); + reverse.putIfAbsent(ne, ne.canonical()); + } + } + + if (unique.size() != aggs.size()) { + List newAggs = new 
ArrayList<>(aggs.size()); + for (NamedExpression ne : aggs) { + newAggs.add(unique.get(reverse.get(ne))); + } + return new Aggregate(agg.location(), agg.child(), agg.groupings(), newAggs); + } + + return agg; + } + } + + static class ReplaceAggsWithMatrixStats extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan p) { + Map seen = new LinkedHashMap<>(); + Map promotedFunctionIds = new LinkedHashMap<>(); + + p = p.transformExpressionsUp(e -> rule(e, seen, promotedFunctionIds)); + + // nothing found + if (seen.isEmpty()) { + return p; + } + + return ReplaceAggsWithStats.updateAggAttributes(p, promotedFunctionIds); + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + + protected Expression rule(Expression e, Map seen, Map promotedIds) { + if (e instanceof MatrixStatsEnclosed) { + AggregateFunction f = (AggregateFunction) e; + + Expression argument = f.field(); + MatrixStats matrixStats = seen.get(argument); + + if (matrixStats == null) { + matrixStats = new MatrixStats(f.location(), argument); + seen.put(argument, matrixStats); + } + + InnerAggregate ia = new InnerAggregate(f, matrixStats, f.field()); + promotedIds.putIfAbsent(f.functionId(), ia.toAttribute()); + return ia; + } + + return e; + } + } + + static class ReplaceAggsWithExtendedStats extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan p) { + Map promotedFunctionIds = new LinkedHashMap<>(); + Map seen = new LinkedHashMap<>(); + p = p.transformExpressionsUp(e -> rule(e, seen, promotedFunctionIds)); + + // nothing found + if (seen.isEmpty()) { + return p; + } + + // update old agg attributes + return ReplaceAggsWithStats.updateAggAttributes(p, promotedFunctionIds); + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + + protected Expression rule(Expression e, Map seen, Map promotedIds) { + if (e instanceof ExtendedStatsEnclosed) { + AggregateFunction f = (AggregateFunction) e; + + Expression argument = f.field(); + 
ExtendedStats extendedStats = seen.get(argument); + + if (extendedStats == null) { + extendedStats = new ExtendedStats(f.location(), argument); + seen.put(argument, extendedStats); + } + + InnerAggregate ia = new InnerAggregate(f, extendedStats); + promotedIds.putIfAbsent(f.functionId(), ia.toAttribute()); + return ia; + } + + return e; + } + } + + static class ReplaceAggsWithStats extends Rule { + + private static class Match { + final Stats stats; + int count = 1; + final Set> functionTypes = new LinkedHashSet<>(); + + Match(Stats stats) { + this.stats = stats; + } + + @Override + public String toString() { + return stats.toString(); + } + } + + @Override + public LogicalPlan apply(LogicalPlan p) { + Map potentialPromotions = new LinkedHashMap<>(); + + p.forEachExpressionsUp(e -> collect(e, potentialPromotions)); + + // no promotions found - skip + if (potentialPromotions.isEmpty()) { + return p; + } + + // start promotion + + // old functionId to new function attribute + Map promotedFunctionIds = new LinkedHashMap<>(); + + // 1. promote aggs to InnerAggs + p = p.transformExpressionsUp(e -> promote(e, potentialPromotions, promotedFunctionIds)); + + // 2. 
update the old agg attrs to the promoted agg functions + return updateAggAttributes(p, promotedFunctionIds); + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + + private Expression collect(Expression e, Map seen) { + if (Stats.isTypeCompatible(e)) { + AggregateFunction f = (AggregateFunction) e; + + Expression argument = f.field(); + Match match = seen.get(argument); + + if (match == null) { + match = new Match(new Stats(f.location(), argument)); + match.functionTypes.add(f.getClass()); + seen.put(argument, match); + } + else { + if (match.functionTypes.add(f.getClass())) { + match.count++; + } + } + } + + return e; + } + + private static Expression promote(Expression e, Map seen, Map attrs) { + if (Stats.isTypeCompatible(e)) { + AggregateFunction f = (AggregateFunction) e; + + Expression argument = f.field(); + Match counter = seen.get(argument); + + // if the stat has at least two different functions for it, promote it as stat + if (counter != null && counter.count > 1) { + InnerAggregate innerAgg = new InnerAggregate(f, counter.stats); + attrs.putIfAbsent(f.functionId(), innerAgg.toAttribute()); + return innerAgg; + } + } + return e; + } + + static LogicalPlan updateAggAttributes(LogicalPlan p, Map promotedFunctionIds) { + // 1. update old agg function attributes + p = p.transformExpressionsUp(e -> updateAggFunctionAttrs(e, promotedFunctionIds)); + + // 2. update all scalar function consumers of the promoted aggs + // since they contain the old ids in scrips and processorDefinitions that need regenerating + + // 2a. collect ScalarFunctions that unwrapped refer to any of the updated aggregates + // 2b. 
replace any of the old ScalarFunction attributes + + final Set newAggIds = new LinkedHashSet<>(promotedFunctionIds.size()); + + for (AggregateFunctionAttribute afa : promotedFunctionIds.values()) { + newAggIds.add(afa.functionId()); + } + + final Map updatedScalarAttrs = new LinkedHashMap<>(); + final Map updatedScalarAliases = new LinkedHashMap<>(); + + p = p.transformExpressionsUp(e -> { + + // replace scalar attributes of the old replaced functions + if (e instanceof ScalarFunctionAttribute) { + ScalarFunctionAttribute sfa = (ScalarFunctionAttribute) e; + // check aliases + sfa = updatedScalarAttrs.getOrDefault(sfa.functionId(), sfa); + // check scalars + sfa = updatedScalarAliases.getOrDefault(sfa.id(), sfa); + return sfa; + } + + // unwrap aliases as they 'hide' functions under their own attributes + if (e instanceof Alias) { + Attribute att = Expressions.attribute(e); + if (att instanceof ScalarFunctionAttribute) { + ScalarFunctionAttribute sfa = (ScalarFunctionAttribute) att; + // the underlying function has been updated + // thus record the alias as well + if (updatedScalarAttrs.containsKey(sfa.functionId())) { + updatedScalarAliases.put(sfa.id(), sfa); + } + } + } + + else if (e instanceof ScalarFunction) { + ScalarFunction sf = (ScalarFunction) e; + + // if it's a unseen function check if the function children/arguments refers to any of the promoted aggs + if (!updatedScalarAttrs.containsKey(sf.functionId()) && e.anyMatch(c -> { + Attribute a = Expressions.attribute(c); + if (a instanceof FunctionAttribute) { + return newAggIds.contains(((FunctionAttribute) a).functionId()); + } + return false; + })) { + // if so, record its attribute + updatedScalarAttrs.put(sf.functionId(), sf.toAttribute()); + } + } + + return e; + }); + + return p; + } + + + private static Expression updateAggFunctionAttrs(Expression e, Map promotedIds) { + if (e instanceof AggregateFunctionAttribute) { + AggregateFunctionAttribute ae = (AggregateFunctionAttribute) e; + 
AggregateFunctionAttribute promoted = promotedIds.get(ae.functionId()); + if (promoted != null) { + return ae.withFunctionId(promoted.functionId(), promoted.propertyPath()); + } + } + return e; + } + } + + static class PromoteStatsToExtendedStats extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan p) { + Map seen = new LinkedHashMap<>(); + + // count the extended stats + p.forEachExpressionsUp(e -> count(e, seen)); + // then if there's a match, replace the stat inside the InnerAgg + return p.transformExpressionsUp(e -> promote(e, seen)); + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + + private void count(Expression e, Map seen) { + if (e instanceof InnerAggregate) { + InnerAggregate ia = (InnerAggregate) e; + if (ia.outer() instanceof ExtendedStats) { + ExtendedStats extStats = (ExtendedStats) ia.outer(); + seen.putIfAbsent(extStats.field(), extStats); + } + } + } + + protected Expression promote(Expression e, Map seen) { + if (e instanceof InnerAggregate) { + InnerAggregate ia = (InnerAggregate) e; + if (ia.outer() instanceof Stats) { + Stats stats = (Stats) ia.outer(); + ExtendedStats ext = seen.get(stats.field()); + if (ext != null && stats.field().equals(ext.field())) { + return new InnerAggregate(ia.inner(), ext); + } + } + } + + return e; + } + } + + static class ReplaceAggsWithPercentiles extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan p) { + // percentile per field/expression + Map> percentsPerField = new LinkedHashMap<>(); + + // count gather the percents for each field + p.forEachExpressionsUp(e -> count(e, percentsPerField)); + + Map percentilesPerField = new LinkedHashMap<>(); + // create a Percentile agg for each field (and its associated percents) + percentsPerField.forEach((k, v) -> { + percentilesPerField.put(k, new Percentiles(v.iterator().next().location(), k, new ArrayList<>(v))); + }); + + // now replace the agg with pointer to the main ones + Map promotedFunctionIds = new 
LinkedHashMap<>(); + p = p.transformExpressionsUp(e -> rule(e, percentilesPerField, promotedFunctionIds)); + // finally update all the function references as well + return p.transformExpressionsDown(e -> ReplaceAggsWithStats.updateAggFunctionAttrs(e, promotedFunctionIds)); + } + + private void count(Expression e, Map> percentsPerField) { + if (e instanceof Percentile) { + Percentile p = (Percentile) e; + Expression field = p.field(); + Set percentiles = percentsPerField.get(field); + + if (percentiles == null) { + percentiles = new LinkedHashSet<>(); + percentsPerField.put(field, percentiles); + } + + percentiles.add(p.percent()); + } + } + + protected Expression rule(Expression e, Map percentilesPerField, Map promotedIds) { + if (e instanceof Percentile) { + Percentile p = (Percentile) e; + Percentiles percentiles = percentilesPerField.get(p.field()); + + InnerAggregate ia = new InnerAggregate(p, percentiles); + promotedIds.putIfAbsent(p.functionId(), ia.toAttribute()); + return ia; + } + + return e; + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + } + + static class ReplceAggsWithPercentileRanks extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan p) { + // percentile per field/expression + Map> valuesPerField = new LinkedHashMap<>(); + + // count gather the percents for each field + p.forEachExpressionsUp(e -> count(e, valuesPerField)); + + Map ranksPerField = new LinkedHashMap<>(); + // create a PercentileRanks agg for each field (and its associated values) + valuesPerField.forEach((k, v) -> { + ranksPerField.put(k, new PercentileRanks(v.iterator().next().location(), k, new ArrayList<>(v))); + }); + + // now replace the agg with pointer to the main ones + Map promotedFunctionIds = new LinkedHashMap<>(); + p = p.transformExpressionsUp(e -> rule(e, ranksPerField, promotedFunctionIds)); + // finally update all the function references as well + return p.transformExpressionsDown(e -> 
ReplaceAggsWithStats.updateAggFunctionAttrs(e, promotedFunctionIds)); + } + + private void count(Expression e, Map> ranksPerField) { + if (e instanceof PercentileRank) { + PercentileRank p = (PercentileRank) e; + Expression field = p.field(); + Set percentiles = ranksPerField.get(field); + + if (percentiles == null) { + percentiles = new LinkedHashSet<>(); + ranksPerField.put(field, percentiles); + } + + percentiles.add(p.value()); + } + } + + protected Expression rule(Expression e, Map ranksPerField, Map promotedIds) { + if (e instanceof PercentileRank) { + PercentileRank p = (PercentileRank) e; + PercentileRanks ranks = ranksPerField.get(p.field()); + + InnerAggregate ia = new InnerAggregate(p, ranks); + promotedIds.putIfAbsent(p.functionId(), ia.toAttribute()); + return ia; + } + + return e; + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + } + + static class PruneFilters extends OptimizerRule { + + @Override + protected LogicalPlan rule(Filter filter) { + if (filter.condition() instanceof Literal) { + if (TRUE.equals(filter.condition())) { + return filter.child(); + } + // TODO: add comparison with null as well + if (FALSE.equals(filter.condition())) { + return new LocalRelation(filter.location(), new EmptyExecutable(filter.output())); + } + } + + return filter; + } + } + + static class ReplaceAliasesInHaving extends OptimizerRule { + + @Override + protected LogicalPlan rule(Filter filter) { + if (filter.child() instanceof Aggregate) { + Expression cond = filter.condition(); + // resolve attributes to their actual + Expression newCondition = cond.transformDown(a -> { + + return a; + }, AggregateFunctionAttribute.class); + + if (newCondition != cond) { + return new Filter(filter.location(), filter.child(), newCondition); + } + } + return filter; + } + } + + static class PruneOrderByNestedFields extends OptimizerRule { + + @Override + protected LogicalPlan rule(Project project) { + // check whether OrderBy relies on nested fields 
which are not used higher up + if (project.child() instanceof OrderBy) { + OrderBy ob = (OrderBy) project.child(); + + // count the direct parents + Map nestedOrders = new LinkedHashMap<>(); + + for (Order order : ob.order()) { + Attribute attr = ((NamedExpression) order.child()).toAttribute(); + if (attr instanceof NestedFieldAttribute) { + nestedOrders.put(((NestedFieldAttribute) attr).parentPath(), order); + } + } + + // no nested fields in sort + if (nestedOrders.isEmpty()) { + return project; + } + + // count the nested parents (if any) inside the parents + List nestedTopFields = new ArrayList<>(); + + for (Attribute attr : project.output()) { + if (attr instanceof NestedFieldAttribute) { + nestedTopFields.add(((NestedFieldAttribute) attr).parentPath()); + } + } + + List orders = new ArrayList<>(ob.order()); + // projection has no nested field references, remove any nested orders + if (nestedTopFields.isEmpty()) { + orders.removeAll(nestedOrders.values()); + } + else { + // remove orders that are not ancestors of the nested projections + for (Entry entry : nestedOrders.entrySet()) { + String parent = entry.getKey(); + boolean shouldKeep = false; + for (String topParent : nestedTopFields) { + if (topParent.startsWith(parent)) { + shouldKeep = true; + break; + } + } + if (!shouldKeep) { + orders.remove(entry.getValue()); + } + } + } + + // no orders left, eliminate it all-together + if (orders.isEmpty()) { + return new Project(project.location(), ob.child(), project.projections()); + } + + if (orders.size() != ob.order().size()) { + OrderBy newOrder = new OrderBy(ob.location(), ob.child(), orders); + return new Project(project.location(), newOrder, project.projections()); + } + } + return project; + } + } + + static class PruneOrderBy extends OptimizerRule { + + @Override + protected LogicalPlan rule(OrderBy ob) { + List order = ob.order(); + + // remove constants + List nonConstant = order.stream().filter(o -> !o.child().foldable()).collect(toList()); + + if 
(nonConstant.isEmpty()) { + return ob.child(); + } + + // if the sort points to an agg, consider it only if there's grouping + if (ob.child() instanceof Aggregate) { + Aggregate a = (Aggregate) ob.child(); + + if (a.groupings().isEmpty()) { + AttributeSet aggsAttr = new AttributeSet(Expressions.asAttributes(a.aggregates())); + + List nonAgg = nonConstant.stream().filter(o -> { + if (o.child() instanceof NamedExpression) { + return !aggsAttr.contains(((NamedExpression) o.child()).toAttribute()); + } + return true; + }).collect(toList()); + + return nonAgg.isEmpty() ? ob.child() : new OrderBy(ob.location(), ob.child(), nonAgg); + } + } + return ob; + } + } + + static class CombineLimits extends OptimizerRule { + + @Override + protected LogicalPlan rule(Limit limit) { + if (limit.child() instanceof Limit) { + throw new UnsupportedOperationException("not implemented yet"); + } + throw new UnsupportedOperationException("not implemented yet"); + } + } + + // NB: it is important to start replacing casts from the bottom to properly replace aliases + static class PruneCast extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan plan) { + return rule(plan); + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + final Map replacedCast = new LinkedHashMap<>(); + + // first eliminate casts inside Aliases + LogicalPlan transformed = plan.transformExpressionsUp(e -> { + // cast wrapped in an alias + if (e instanceof Alias) { + Alias as = (Alias) e; + if (as.child() instanceof Cast) { + Cast c = (Cast) as.child(); + + if (c.from().same(c.to())) { + Alias newAs = new Alias(as.location(), as.name(), as.qualifier(), c.field(), as.id(), as.synthetic()); + replacedCast.put(as.toAttribute(), newAs.toAttribute()); + return newAs; + } + } + return e; + } + return e; + }); + + // then handle stand-alone casts (mixed together the cast rule will kick in before the alias) + transformed = transformed.transformExpressionsUp(e -> { + if (e instanceof Cast) { + Cast c = 
(Cast) e; + + if (c.from().same(c.to())) { + Expression argument = c.field(); + if (argument instanceof NamedExpression) { + replacedCast.put(c.toAttribute(), ((NamedExpression) argument).toAttribute()); + } + + return argument; + } + } + return e; + }); + + + // replace attributes from previous removed Casts + if (!replacedCast.isEmpty()) { + return transformed.transformUp(p -> { + List newProjections = new ArrayList<>(); + + boolean changed = false; + for (NamedExpression ne : p.projections()) { + Attribute found = replacedCast.get(ne.toAttribute()); + if (found != null) { + changed = true; + newProjections.add(found); + } + else { + newProjections.add(ne.toAttribute()); + } + } + + return changed ? new Project(p.location(), p.child(), newProjections) : p; + + }, Project.class); + } + return transformed; + } + } + + static class PruneDuplicateFunctions extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan p) { + List seen = new ArrayList<>(); + return p.transformExpressionsUp(e -> rule(e, seen)); + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + + protected Expression rule(Expression exp, List seen) { + Expression e = exp; + if (e instanceof Function) { + Function f = (Function) e; + for (Function seenFunction : seen) { + if (seenFunction != f && f.functionEquals(seenFunction)) { + return seenFunction; + } + } + } + + return exp; + } + } + + static class CombineProjections extends OptimizerRule { + + CombineProjections() { + super(TransformDirection.UP); + } + + @Override + protected LogicalPlan rule(Project project) { + LogicalPlan child = project.child(); + if (child instanceof Project) { + Project p = (Project) child; + // eliminate lower project but first replace the aliases in the upper one + return new Project(p.location(), p.child(), combineProjections(project.projections(), p.projections())); + } + if (child instanceof Aggregate) { + Aggregate a = (Aggregate) child; + return new Aggregate(a.location(), 
a.child(), a.groupings(), combineProjections(project.projections(), a.aggregates())); + } + + return project; + } + + // normally only the upper projections should survive but since the lower list might have aliases definitions + // that might be reused by the upper one, these need to be replaced. + // for example an alias defined in the lower list might be referred in the upper - without replacing it the alias becomes invalid + private List combineProjections(List upper, List lower) { + // collect aliases in the lower list + Map map = new LinkedHashMap<>(); + for (NamedExpression ne : lower) { + if (ne instanceof Alias) { + Alias a = (Alias) ne; + map.put(a.toAttribute(), a); + } + } + + AttributeMap aliases = new AttributeMap<>(map); + List replaced = new ArrayList<>(); + + // replace any matching attribute with a lower alias (if there's a match) + // but clean-up non-top aliases at the end + for (NamedExpression ne : upper) { + NamedExpression replacedExp = (NamedExpression) ne.transformUp(a -> { + Alias as = aliases.get(a); + return as != null ? as : a; + }, Attribute.class); + + replaced.add((NamedExpression) CleanAliases.trimNonTopLevelAliases(replacedExp)); + } + return replaced; + } + } + + + // replace attributes of foldable expressions with the foldable trees + // SELECT 5 a, a + 3, ... 
+ + static class ReplaceFoldableAttributes extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan plan) { + return rule(plan); + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + Map aliases = new LinkedHashMap<>(); + List attrs = new ArrayList<>(); + + // find aliases of all projections + plan.forEachDown(p -> { + for (NamedExpression ne : p.projections()) { + if (ne instanceof Alias) { + if (((Alias) ne).child().foldable()) { + Attribute attr = ne.toAttribute(); + attrs.add(attr); + aliases.put(attr, (Alias) ne); + } + } + } + }, Project.class); + + if (attrs.isEmpty()) { + return plan; + } + + AtomicBoolean stop = new AtomicBoolean(false); + + // propagate folding up to unary nodes + // anything higher and the propagate stops + plan = plan.transformUp(p -> { + if (stop.get() == false && canPropagateFoldable(p)) { + return p.transformExpressionsDown(e -> { + if (e instanceof Attribute && attrs.contains(e)) { + Alias as = aliases.get(e); + if (as == null) { + // might need to implement an Attribute map + throw new SqlIllegalArgumentException("unsupported"); + } + return as; + } + return e; + }); + } + + if (p.children().size() > 1) { + stop.set(true); + } + + return p; + }); + + // finally clean-up aliases + return CleanAliases.INSTANCE.apply(plan); + + } + + private boolean canPropagateFoldable(LogicalPlan p) { + return p instanceof Project || p instanceof Filter || p instanceof SubQueryAlias || p instanceof Aggregate || p instanceof Limit || p instanceof OrderBy; + } + } + + static class ConstantFolding extends OptimizerExpressionRule { + + ConstantFolding() { + super(TransformDirection.DOWN); + } + + @Override + protected Expression rule(Expression e) { + // preserve aliases + if (e instanceof Alias) { + Alias a = (Alias) e; + Expression fold = fold(a.child()); + if (fold != e) { + return new Alias(a.location(), a.name(), null, fold, a.id()); + } + return a; + } + + Expression fold = fold(e); + if (fold != e) { + // preserve the 
name through an alias + if (e instanceof NamedExpression) { + NamedExpression ne = (NamedExpression) e; + return new Alias(e.location(), ne.name(), null, fold, ne.id()); + } + return fold; + } + return e; + } + + private Expression fold(Expression e) { + // literals are always foldable, so avoid creating a duplicate + if (e.foldable() && !(e instanceof Literal)) { + return new Literal(e.location(), e.fold(), e.dataType()); + } + return e; + } + } + + static class BooleanSimplification extends OptimizerExpressionRule { + + BooleanSimplification() { + super(TransformDirection.UP); + } + + @Override + protected Expression rule(Expression e) { + if (e instanceof BinaryExpression) { + return simplifyAndOr((BinaryExpression) e); + } + if (e instanceof Not) { + return simplifyNot((Not) e); + } + + return e; + } + + private Expression simplifyAndOr(BinaryExpression bc) { + Expression l = bc.left(); + Expression r = bc.right(); + + if (bc instanceof And) { + if (TRUE.equals(l)) { + return r; + } + if (TRUE.equals(r)) { + return l; + } + + if (FALSE.equals(l) || FALSE.equals(r)) { + return FALSE; + } + if (l.semanticEquals(r)) { + return l; + } + + // + // common factor extraction -> (a || b) && (a || c) => a && (b || c) + // + List leftSplit = splitOr(l); + List rightSplit = splitOr(r); + + List common = inCommon(leftSplit, rightSplit); + if (common.isEmpty()) { + return bc; + } + List lDiff = subtract(leftSplit, common); + List rDiff = subtract(rightSplit, common); + // (a || b || c || ... ) && (a || b) => (a || b) + if (lDiff.isEmpty() || rDiff.isEmpty()) { + return combineOr(common); + } + // (a || b || c || ... ) && (a || b || d || ... ) => ((c || ...) 
&& (d || ...)) || a || b + Expression combineLeft = combineOr(lDiff); + Expression combineRight = combineOr(rDiff); + return combineOr(combine(common, new And(combineLeft.location(), combineLeft, combineRight))); + } + + if (bc instanceof Or) { + if (TRUE.equals(l) || TRUE.equals(r)) { + return TRUE; + } + + if (TRUE.equals(l)) { + return r; + } + if (TRUE.equals(r)) { + return l; + } + + if (l.semanticEquals(r)) { + return l; + } + + // + // common factor extraction -> (a && b) || (a && c) => a || (b & c) + // + List leftSplit = splitAnd(l); + List rightSplit = splitAnd(r); + + List common = inCommon(leftSplit, rightSplit); + if (common.isEmpty()) { + return bc; + } + List lDiff = subtract(leftSplit, common); + List rDiff = subtract(rightSplit, common); + // (a || b || c || ... ) && (a || b) => (a || b) + if (lDiff.isEmpty() || rDiff.isEmpty()) { + return combineAnd(common); + } + // (a || b || c || ... ) && (a || b || d || ... ) => ((c || ...) && (d || ...)) || a || b + Expression combineLeft = combineAnd(lDiff); + Expression combineRight = combineAnd(rDiff); + return combineAnd(combine(common, new Or(combineLeft.location(), combineLeft, combineRight))); + } + + // TODO: eliminate conjunction/disjunction + return bc; + } + + private Expression simplifyNot(Not n) { + Expression c = n.child(); + + if (TRUE.equals(c)) { + return FALSE; + } + if (FALSE.equals(c)) { + return TRUE; + } + + if (c instanceof Negateable) { + return ((Negateable) c).negate(); + } + + if (c instanceof Not) { + return ((Not) c).child(); + } + + return n; + } + } + + static class BinaryComparisonSimplification extends OptimizerExpressionRule { + + BinaryComparisonSimplification() { + super(TransformDirection.UP); + } + + @Override + protected Expression rule(Expression e) { + return e instanceof BinaryComparison ? 
simplify((BinaryComparison) e) : e; + } + + private Expression simplify(BinaryComparison bc) { + Expression l = bc.left(); + Expression r = bc.right(); + + // true for equality + if (bc instanceof Equals || bc instanceof GreaterThanOrEqual || bc instanceof LessThanOrEqual) { + if (!l.nullable() && !r.nullable() && l.semanticEquals(r)) { + return TRUE; + } + } + + // false for equality + if (bc instanceof GreaterThan || bc instanceof LessThan) { + if (!l.nullable() && !r.nullable() && l.semanticEquals(r)) { + return FALSE; + } + } + + return bc; + } + } + + static class BooleanLiteralsOnTheRight extends OptimizerExpressionRule { + + BooleanLiteralsOnTheRight() { + super(TransformDirection.UP); + } + + @Override + protected Expression rule(Expression e) { + return e instanceof BinaryExpression ? literalToTheRight((BinaryExpression) e) : e; + } + + private Expression literalToTheRight(BinaryExpression be) { + return be.left() instanceof Literal && !(be.right() instanceof Literal) ? be.swapLeftAndRight() : be; + } + } + + static class CombineComparisonsIntoRange extends OptimizerExpressionRule { + + CombineComparisonsIntoRange() { + super(TransformDirection.UP); + } + + @Override + protected Expression rule(Expression e) { + return e instanceof And ? 
combine((And) e) : e; + } + + private Expression combine(And and) { + Expression l = and.left(); + Expression r = and.right(); + + if (l instanceof BinaryComparison && r instanceof BinaryComparison) { + // if the same operator is used + BinaryComparison lb = (BinaryComparison) l; + BinaryComparison rb = (BinaryComparison) r; + + + if (lb.left().equals(((BinaryComparison) r).left()) && lb.right() instanceof Literal && rb.right() instanceof Literal) { + // >/>= AND />= + else if ((r instanceof GreaterThan || r instanceof GreaterThanOrEqual) + && (l instanceof LessThan || l instanceof LessThanOrEqual)) { + return new Range(and.location(), rb.left(), rb.right(), r instanceof GreaterThanOrEqual, lb.right(), + l instanceof LessThanOrEqual); + } + } + } + + return and; + } + } + + + static class SkipQueryOnLimitZero extends OptimizerRule { + @Override + protected LogicalPlan rule(Limit limit) { + if (limit.limit() instanceof Literal) { + if (Integer.valueOf(0).equals((Number) (((Literal) limit.limit()).fold()))) { + return new LocalRelation(limit.location(), new EmptyExecutable(limit.output())); + } + } + return limit; + } + } + + static class SkipQueryIfFoldingProjection extends OptimizerRule { + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (plan instanceof Project) { + Project p = (Project) plan; + List values = extractConstants(p.projections()); + if (values.size() == p.projections().size()) { + return new LocalRelation(p.location(), new SingletonExecutable(p.output(), values.toArray())); + } + } + if (plan instanceof Aggregate) { + Aggregate a = (Aggregate) plan; + List values = extractConstants(a.aggregates()); + if (values.size() == a.aggregates().size()) { + return new LocalRelation(a.location(), new SingletonExecutable(a.output(), values.toArray())); + } + } + return plan; + } + + private List extractConstants(List named) { + List values = new ArrayList<>(); + for (NamedExpression n : named) { + if (n instanceof Alias) { + Alias a = (Alias) n; + 
if (a.child().foldable()) { + values.add(a.child().fold()); + } + else { + return values; + } + } + } + return values; + } + } + + + static class SetAsOptimized extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan plan) { + plan.forEachUp(this::rule); + return plan; + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (!plan.optimized()) { + plan.setOptimized(); + } + return plan; + } + } + + + abstract static class OptimizerRule extends Rule { + + private final TransformDirection direction; + + OptimizerRule() { + this(TransformDirection.DOWN); + } + + protected OptimizerRule(TransformDirection direction) { + this.direction = direction; + } + + + @Override + public final LogicalPlan apply(LogicalPlan plan) { + return direction == TransformDirection.DOWN ? plan.transformDown(this::rule, typeToken()) : plan.transformUp(this::rule, typeToken()); + } + + @Override + protected abstract LogicalPlan rule(SubPlan plan); + } + + abstract static class OptimizerExpressionRule extends Rule { + + private final TransformDirection direction; + + OptimizerExpressionRule(TransformDirection direction) { + this.direction = direction; + } + + @Override + public final LogicalPlan apply(LogicalPlan plan) { + return direction == TransformDirection.DOWN ? plan.transformExpressionsDown(this::rule) : plan + .transformExpressionsUp(this::rule); + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + return plan; + } + + protected abstract Expression rule(Expression e); + } + + enum TransformDirection { + UP, DOWN + }; +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/AbstractBuilder.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/AbstractBuilder.java new file mode 100644 index 00000000000..2675bef9dfa --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/AbstractBuilder.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.util.Check; + +import java.util.ArrayList; +import java.util.List; + +abstract class AbstractBuilder extends SqlBaseBaseVisitor { + + @Override + public Object visit(ParseTree tree) { + Object result = super.visit(tree); + Check.notNull(result, "Don't know how to handle context [%s] with value [%s]", tree.getClass(), tree.getText()); + return result; + } + + @SuppressWarnings("unchecked") + protected T typedParsing(ParseTree ctx, Class type) { + Object result = ctx.accept(this); + if (type.isInstance(result)) { + return (T) result; + } + + throw new ParsingException(source(ctx), "Invalid query '%s'[%s] given; expected %s but found %s", + ctx.getText(), ctx.getClass().getSimpleName(), + type.getSimpleName(), (result != null ? 
result.getClass().getSimpleName() : "null")); + } + + protected LogicalPlan plan(ParseTree ctx) { + return typedParsing(ctx, LogicalPlan.class); + } + + protected List plans(List ctxs) { + return visitList(ctxs, LogicalPlan.class); + } + + protected List visitList(List contexts, Class clazz) { + List results = new ArrayList<>(contexts.size()); + for (ParserRuleContext context : contexts) { + results.add(clazz.cast(visit(context))); + } + return results; + } + + static Location source(ParseTree ctx) { + if (ctx instanceof ParserRuleContext) { + return source((ParserRuleContext) ctx); + } + return Location.EMPTY; + } + + static Location source(TerminalNode terminalNode) { + Check.notNull(terminalNode, "terminalNode is null"); + return source(terminalNode.getSymbol()); + } + + static Location source(ParserRuleContext parserRuleContext) { + Check.notNull(parserRuleContext, "parserRuleContext is null"); + return source(parserRuleContext.getStart()); + } + + static Location source(Token token) { + Check.notNull(token, "token is null"); + return new Location(token.getLine(), token.getCharPositionInLine()); + } + + /** + * Retrieves the raw text of the node (without interpreting it as a string literal). + */ + static String text(ParseTree node) { + return node == null ? null : node.getText(); + } + + /** + * Extracts the actual unescaped string (literal) value of a token. + */ + static String string(Token token) { + return token == null ? null : unquoteString(token.getText()); + } + + static String unquoteString(String text) { + // remove leading and trailing ' for strings and also eliminate escaped single quotes + return text == null ? 
null : text.substring(1, text.length() - 1).replace("''", "'"); + } + + @Override + public Object visitTerminal(TerminalNode node) { + throw new ParsingException(source(node), "Does not know how to handle %s", node.getText()); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java new file mode 100644 index 00000000000..ee59151aa70 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.parser; + +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SingleStatementContext; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; + +class AstBuilder extends CommandBuilder { + + @Override + public LogicalPlan visitSingleStatement(SingleStatementContext ctx) { + return plan(ctx.statement()); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/CaseInsensitiveStream.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/CaseInsensitiveStream.java new file mode 100644 index 00000000000..fff954ab592 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/CaseInsensitiveStream.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.ANTLRInputStream; +import org.antlr.v4.runtime.IntStream; + +import java.util.Locale; + +// extension of ANTLR that does the upper-casing once for the whole stream +// the ugly part is that it has to duplicate LA method + +// This approach is the official solution from the ANTLR authors +// in that it's both faster and easier than having a dedicated lexer +// see https://github.com/antlr/antlr4/issues/1002 +class CaseInsensitiveStream extends ANTLRInputStream { + protected char[] uppedChars; + + CaseInsensitiveStream(String input) { + super(input); + this.uppedChars = input.toUpperCase(Locale.ROOT).toCharArray(); + } + + // this part is copied from ANTLRInputStream + @Override + public int LA(int i) { + if (i == 0) { + return 0; // undefined + } + if (i < 0) { + i++; + if ((p + i - 1) < 0) { + return IntStream.EOF; + } + } + + if ((p + i - 1) >= n) { + return IntStream.EOF; + } + return uppedChars[p + i - 1]; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java new file mode 100644 index 00000000000..5adb1a6fe40 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.parser; + +import org.elasticsearch.common.Booleans; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.DebugContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ExplainContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ShowColumnsContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ShowFunctionsContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ShowSchemasContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ShowTablesContext; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.plan.logical.command.Debug; +import org.elasticsearch.xpack.sql.plan.logical.command.Explain; +import org.elasticsearch.xpack.sql.plan.logical.command.ShowColumns; +import org.elasticsearch.xpack.sql.plan.logical.command.ShowFunctions; +import org.elasticsearch.xpack.sql.plan.logical.command.ShowSchemas; +import org.elasticsearch.xpack.sql.plan.logical.command.ShowTables; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Locale; + +abstract class CommandBuilder extends LogicalPlanBuilder { + + @Override + public Command visitDebug(DebugContext ctx) { + Location loc = source(ctx); + if (ctx.FORMAT().size() > 1) { + throw new ParsingException(loc, "Debug FORMAT should be specified at most once"); + } + if (ctx.PLAN().size() > 1) { + throw new ParsingException(loc, "Debug PLAN should be specified at most once"); + } + + Debug.Type type = null; + + if (ctx.type != null) { + if (ctx.type.getType() == SqlBaseLexer.ANALYZED) { + type = Debug.Type.ANALYZED; + } + else { + type = Debug.Type.OPTIMIZED; + } + } + Debug.Format format = (ctx.format != null && ctx.format.getType() == SqlBaseLexer.GRAPHVIZ ? 
Debug.Format.GRAPHVIZ : Debug.Format.TEXT); + + return new Debug(loc, plan(ctx.statement()), type, format); + } + + + @Override + public Command visitExplain(ExplainContext ctx) { + Location loc = source(ctx); + if (ctx.PLAN().size() > 1) { + throw new ParsingException(loc, "Explain TYPE should be specified at most once"); + } + if (ctx.FORMAT().size() > 1) { + throw new ParsingException(loc, "Explain FORMAT should be specified at most once"); + } + if (ctx.VERIFY().size() > 1) { + throw new ParsingException(loc, "Explain VERIFY should be specified at most once"); + } + + Explain.Type type = null; + + if (ctx.type != null) { + switch (ctx.type.getType()) { + case SqlBaseLexer.PARSED: + type = Explain.Type.PARSED; + break; + case SqlBaseLexer.ANALYZED: + type = Explain.Type.ANALYZED; + break; + case SqlBaseLexer.OPTIMIZED: + type = Explain.Type.OPTIMIZED; + break; + case SqlBaseLexer.MAPPED: + type = Explain.Type.MAPPED; + break; + case SqlBaseLexer.EXECUTABLE: + type = Explain.Type.EXECUTABLE; + break; + default: + type = Explain.Type.ALL; + } + } + Explain.Format format = (ctx.format != null && ctx.format.getType() == SqlBaseLexer.GRAPHVIZ ? Explain.Format.GRAPHVIZ : Explain.Format.TEXT); + boolean verify = (ctx.verify != null ? 
Booleans.parseBoolean(ctx.verify.getText().toLowerCase(Locale.ROOT), true) : true); + + return new Explain(loc, plan(ctx.statement()), type, format, verify); + } + + + @Override + public Object visitShowFunctions(ShowFunctionsContext ctx) { + return new ShowFunctions(source(ctx), string(ctx.pattern)); + } + + @Override + public Object visitShowTables(ShowTablesContext ctx) { + return new ShowTables(source(ctx), string(ctx.pattern)); + } + + @Override + public Object visitShowSchemas(ShowSchemasContext ctx) { + return new ShowSchemas(source(ctx)); + } + + + @Override + public Object visitShowColumns(ShowColumnsContext ctx) { + TableIdentifier identifier = visitTableIdentifier(ctx.tableIdentifier()); + return new ShowColumns(source(ctx), identifier.index()); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java new file mode 100644 index 00000000000..93353bae477 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java @@ -0,0 +1,425 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.xpack.sql.expression.Alias; +import org.elasticsearch.xpack.sql.expression.Exists; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.expression.ScalarSubquery; +import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute; +import org.elasticsearch.xpack.sql.expression.UnresolvedStar; +import org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Add; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Div; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mod; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mul; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Neg; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Sub; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Extract; +import org.elasticsearch.xpack.sql.expression.predicate.And; +import org.elasticsearch.xpack.sql.expression.predicate.Equals; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThan; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.In; +import org.elasticsearch.xpack.sql.expression.predicate.IsNotNull; +import org.elasticsearch.xpack.sql.expression.predicate.LessThan; +import org.elasticsearch.xpack.sql.expression.predicate.LessThanOrEqual; +import 
org.elasticsearch.xpack.sql.expression.predicate.Not; +import org.elasticsearch.xpack.sql.expression.predicate.Or; +import org.elasticsearch.xpack.sql.expression.predicate.Range; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.StringQueryPredicate; +import org.elasticsearch.xpack.sql.expression.regex.Like; +import org.elasticsearch.xpack.sql.expression.regex.RLike; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ArithmeticBinaryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ArithmeticUnaryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.BooleanLiteralContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.CastContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ColumnExpressionContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ColumnReferenceContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ComparisonContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.DecimalLiteralContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.DereferenceContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ExistsContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ExtractContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.FunctionCallContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.IntegerLiteralContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.LogicalBinaryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.LogicalNotContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.MatchQueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.MultiMatchQueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.NullLiteralContext; +import 
org.elasticsearch.xpack.sql.parser.SqlBaseParser.OrderByContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ParenthesizedExpressionContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.PredicateContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.PredicatedContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.PrimitiveDataTypeContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SelectExpressionContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SingleExpressionContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StarContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StringLiteralContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StringQueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SubqueryExpressionContext; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.math.BigDecimal; +import java.util.List; +import java.util.Locale; + +import static java.lang.String.format; + +abstract class ExpressionBuilder extends IdentifierBuilder { + + protected Expression expression(ParseTree ctx) { + return typedParsing(ctx, Expression.class); + } + + protected List expressions(List contexts) { + return visitList(contexts, Expression.class); + } + + @Override + public Expression visitSingleExpression(SingleExpressionContext ctx) { + return expression(ctx.expression()); + } + + @Override + public Expression visitSelectExpression(SelectExpressionContext ctx) { + Expression exp = expression(ctx.expression()); + String alias = visitIdentifier(ctx.identifier()); + if (alias != null) { + exp = new Alias(source(ctx), alias, exp); + } + return exp; + } + + @Override + public Expression visitStar(StarContext ctx) { + return new UnresolvedStar(source(ctx), ctx.qualifier != null ? 
visitColumnExpression(ctx.qualifier) : null); + } + + @Override + public Object visitDereference(DereferenceContext ctx) { + String fieldName = visitIdentifier(ctx.fieldName); + String qualifier = null; + Expression base = expression(ctx.base); + if (base != null) { + if (base instanceof UnresolvedAttribute) { + UnresolvedAttribute b = (UnresolvedAttribute) base; + return new UnresolvedAttribute(source(ctx), b.name() + "." + fieldName, b.qualifier()); + } + else { + throw new UnsupportedOperationException(format(Locale.ROOT, "Uknown dereferencing using %s ", base.getClass())); + } + } + return new UnresolvedAttribute(source(ctx), fieldName, qualifier); + } + + @Override + public UnresolvedAttribute visitColumnExpression(ColumnExpressionContext ctx) { + String qualifier = null; + if (ctx.alias != null) { + qualifier = visitIdentifier(ctx.alias); + } + else if (ctx.table != null) { + TableIdentifier table = visitTableIdentifier(ctx.table); + qualifier = table.index(); + } + return new UnresolvedAttribute(source(ctx), visitIdentifier(ctx.name), qualifier); + } + + @Override + public Object visitColumnReference(ColumnReferenceContext ctx) { + return visitColumnExpression(ctx.columnExpression()); + } + + @Override + public Expression visitExists(ExistsContext ctx) { + return new Exists(source(ctx), plan(ctx.query())); + } + + @Override + public Expression visitComparison(ComparisonContext ctx) { + Expression left = expression(ctx.left); + Expression right = expression(ctx.right); + TerminalNode op = (TerminalNode) ctx.comparisonOperator().getChild(0); + + Location loc = source(ctx); + + switch (op.getSymbol().getType()) { + case SqlBaseParser.EQ: + return new Equals(loc, left, right); + case SqlBaseParser.NEQ: + return new Not(loc, new Equals(loc, left, right)); + case SqlBaseParser.LT: + return new LessThan(loc, left, right); + case SqlBaseParser.LTE: + return new LessThanOrEqual(loc, left, right); + case SqlBaseParser.GT: + return new GreaterThan(loc, left, right); + 
case SqlBaseParser.GTE: + return new GreaterThanOrEqual(loc, left, right); + default: + throw new ParsingException(loc, "Unknown operator %s", op.getSymbol().getText()); + } + } + + @Override + public Expression visitPredicated(PredicatedContext ctx) { + Expression exp = expression(ctx.valueExpression()); + + // no predicate, quick exit + if (ctx.predicate() == null) { + return exp; + } + + PredicateContext pCtx = ctx.predicate(); + Location loc = source(pCtx); + + Expression e = null; + switch (pCtx.kind.getType()) { + case SqlBaseParser.BETWEEN: + e = new Range(loc, exp, expression(pCtx.lower), true, expression(pCtx.upper), true); + break; + case SqlBaseParser.IN: + if (pCtx.query() != null) { + throw new ParsingException(loc, "IN query not supported yet"); + } + e = new In(loc, exp, expressions(pCtx.expression())); + break; + case SqlBaseParser.LIKE: + e = new Like(loc, exp, expression(pCtx.pattern)); + break; + case SqlBaseParser.RLIKE: + e = new RLike(loc, exp, expression(pCtx.pattern)); + break; + case SqlBaseParser.NULL:; + // shortcut to avoid double negation later on (since there's no IsNull (missing in ES is a negated exists)) + e = new IsNotNull(loc, exp); + return pCtx.NOT() != null ? e : new Not(loc, e); + default: + throw new ParsingException(loc, "Unknown predicate %s", pCtx.kind.getText()); + } + + return pCtx.NOT() != null ? 
new Not(loc, e) : e; + } + + // + // Arithmetic + // + + @Override + public Object visitArithmeticUnary(ArithmeticUnaryContext ctx) { + Expression value = expression(ctx.valueExpression()); + Location loc = source(ctx); + + switch (ctx.operator.getType()) { + case SqlBaseParser.PLUS: + return value; + case SqlBaseParser.MINUS: + return new Neg(source(ctx.operator), value); + default: + throw new ParsingException(loc, "Unknown arithemtic %s", ctx.operator.getText()); + } + } + + @Override + public Object visitArithmeticBinary(ArithmeticBinaryContext ctx) { + Expression left = expression(ctx.left); + Expression right = expression(ctx.right); + + Location loc = source(ctx.operator); + + switch (ctx.operator.getType()) { + case SqlBaseParser.ASTERISK: + return new Mul(loc, left, right); + case SqlBaseParser.SLASH: + return new Div(loc, left, right); + case SqlBaseParser.PERCENT: + return new Mod(loc, left, right); + case SqlBaseParser.PLUS: + return new Add(loc, left, right); + case SqlBaseParser.MINUS: + return new Sub(loc, left, right); + default: + throw new ParsingException(loc, "Unknown arithemtic %s", ctx.operator.getText()); + } + } + + // + // Full-text search predicates + // + @Override + public Object visitStringQuery(StringQueryContext ctx) { + return new StringQueryPredicate(source(ctx), string(ctx.queryString), string(ctx.options)); + } + + @Override + public Object visitMatchQuery(MatchQueryContext ctx) { + return new MatchQueryPredicate(source(ctx), new UnresolvedAttribute(source(ctx.singleField), visitQualifiedName(ctx.singleField)), string(ctx.queryString), string(ctx.options)); + } + + @Override + public Object visitMultiMatchQuery(MultiMatchQueryContext ctx) { + return new MultiMatchQueryPredicate(source(ctx), string(ctx.multiFields), string(ctx.queryString), string(ctx.options)); + } + + @Override + public Order visitOrderBy(OrderByContext ctx) { + return new Order(source(ctx), expression(ctx.expression()), (ctx.DESC() != null ? 
Order.OrderDirection.DESC : Order.OrderDirection.ASC)); + } + + @Override + public Object visitCast(CastContext ctx) { + return new Cast(source(ctx), expression(ctx.expression()), typedParsing(ctx.dataType(), DataType.class)); + } + + @Override + public DataType visitPrimitiveDataType(PrimitiveDataTypeContext ctx) { + String type = visitIdentifier(ctx.identifier()).toLowerCase(Locale.ROOT); + + switch (type) { + case "bit": + case "bool": + case "boolean": + return DataTypes.BOOLEAN; + case "tinyint": + case "byte": + return DataTypes.BYTE; + case "smallint": + case "short": + return DataTypes.SHORT; + case "int": + case "integer": + return DataTypes.INTEGER; + case "long": + case "bigint": + return DataTypes.LONG; + case "real": + return DataTypes.FLOAT; + case "float": + case "double": + return DataTypes.DOUBLE; + case "date": + case "timestamp": + return DataTypes.DATE; + case "char": + case "varchar": + case "string": + return DataTypes.KEYWORD; + default: + throw new ParsingException(source(ctx), "Does not recognize type %s", type); + } + } + + @Override + public Object visitFunctionCall(FunctionCallContext ctx) { + String name = visitIdentifier(ctx.identifier()); + boolean isDistinct = false; + if (ctx.setQuantifier() != null) { + isDistinct = (ctx.setQuantifier().DISTINCT() != null); + } + + return new UnresolvedFunction(source(ctx), name, isDistinct, expressions(ctx.expression())); + } + + @Override + public Object visitExtract(ExtractContext ctx) { + Location source = source(ctx); + String fieldString = visitIdentifier(ctx.field); + Extract extract = null; + try { + extract = Extract.valueOf(fieldString.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException ex) { + throw new ParsingException(source, format(Locale.ROOT, "Invalid EXTRACT field %s", fieldString)); + } + return extract.toFunction(source, expression(ctx.valueExpression())); + } + + @Override + public Expression visitSubqueryExpression(SubqueryExpressionContext ctx) { + return new 
ScalarSubquery(source(ctx), plan(ctx.query())); + } + + @Override + public Expression visitParenthesizedExpression(ParenthesizedExpressionContext ctx) { + return expression(ctx.expression()); + } + + + // + // Logical constructs + // + + @Override + public Object visitLogicalNot(LogicalNotContext ctx) { + return new Not(source(ctx), expression(ctx.booleanExpression())); + } + + @Override + public Object visitLogicalBinary(LogicalBinaryContext ctx) { + int type = ctx.operator.getType(); + Location loc = source(ctx); + Expression left = expression(ctx.left); + Expression right = expression(ctx.right); + + if (type == SqlBaseParser.AND) { + return new And(loc, left, right); + } + if (type == SqlBaseParser.OR) { + return new Or(loc, left, right); + } + throw new ParsingException(loc, format(Locale.ROOT, "Don't know how to parse %s", ctx)); + } + + + // + // Literal + // + + + @Override + public Expression visitNullLiteral(NullLiteralContext ctx) { + return new Literal(source(ctx), null, DataTypes.NULL); + } + + @Override + public Expression visitBooleanLiteral(BooleanLiteralContext ctx) { + return new Literal(source(ctx), Booleans.parseBoolean(ctx.getText().toLowerCase(Locale.ROOT), false), DataTypes.BOOLEAN); + } + + @Override + public Expression visitStringLiteral(StringLiteralContext ctx) { + StringBuilder sb = new StringBuilder(); + for (TerminalNode node : ctx.STRING()) { + sb.append(unquoteString(text(node))); + } + return new Literal(source(ctx), sb.toString(), DataTypes.KEYWORD); + } + + @Override + public Object visitDecimalLiteral(DecimalLiteralContext ctx) { + return new Literal(source(ctx), new BigDecimal(ctx.getText()).doubleValue(), DataTypes.DOUBLE); + } + + @Override + public Object visitIntegerLiteral(IntegerLiteralContext ctx) { + BigDecimal bigD = new BigDecimal(ctx.getText()); + // TODO: this can be improved to use the smallest type available + return new Literal(source(ctx), bigD.longValueExact(), DataTypes.INTEGER); + } +} \ No newline at end of 
file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java new file mode 100644 index 00000000000..7099b3100be --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.parser; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.IdentifierContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QualifiedNameContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.TableIdentifierContext; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Locale; + +import static java.lang.String.format; + +abstract class IdentifierBuilder extends AbstractBuilder { + + @Override + public TableIdentifier visitTableIdentifier(TableIdentifierContext ctx) { + String index = text(ctx.index); + + Location source = source(ctx); + validateIndex(index, source); + + return new TableIdentifier(source, index); + } + + // see https://github.com/elastic/elasticsearch/issues/6736 + private static void validateIndex(String index, Location source) { + for (int i = 0; i < index.length(); i++) { + char c = index.charAt(i); + if (Character.isUpperCase(c)) { + throw new ParsingException(source, format(Locale.ROOT, "Invalid index name (needs to be lowercase) %s", index)); + } + if (c == '.' || c == '\\' || c == '/' || c == '*' || c == '?' 
|| c == '<' || c == '>' || c == '|' || c == ',') { + throw new ParsingException(source, format(Locale.ROOT, "Illegal character %c in index name %s", c, index)); + } + } + } + + @Override + public String visitIdentifier(IdentifierContext ctx) { + return ctx == null ? null : ctx.getText(); + } + + @Override + public String visitQualifiedName(QualifiedNameContext ctx) { + if (ctx == null) { + return null; + } + // TODO: maybe it makes sense to introduce a dedicated object? + return Strings.collectionToDelimitedString(visitList(ctx.identifier(), String.class), "."); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java new file mode 100644 index 00000000000..f522b15ef2e --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java @@ -0,0 +1,201 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.parser; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.expression.UnresolvedAlias; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.AliasedQueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.AliasedRelationContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.FromClauseContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.JoinCriteriaContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.JoinRelationContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.JoinTypeContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.NamedQueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryNoWithContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QuerySpecificationContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.RelationContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SubqueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.TableNameContext; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.plan.logical.Aggregate; +import org.elasticsearch.xpack.sql.plan.logical.Distinct; +import org.elasticsearch.xpack.sql.plan.logical.Filter; +import org.elasticsearch.xpack.sql.plan.logical.Join; +import org.elasticsearch.xpack.sql.plan.logical.Join.JoinType; +import org.elasticsearch.xpack.sql.plan.logical.Limit; +import org.elasticsearch.xpack.sql.plan.logical.LocalRelation; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.plan.logical.Project; 
+import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias; +import org.elasticsearch.xpack.sql.plan.logical.UnresolvedRelation; +import org.elasticsearch.xpack.sql.plan.logical.With; +import org.elasticsearch.xpack.sql.session.EmptyExecutable; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyList; +import static java.util.stream.Collectors.toList; + +abstract class LogicalPlanBuilder extends ExpressionBuilder { + + @Override + public LogicalPlan visitQuery(QueryContext ctx) { + LogicalPlan body = plan(ctx.queryNoWith()); + + List namedQueries = visitList(ctx.namedQuery(), SubQueryAlias.class); + + // unwrap query (and validate while at it) + Map cteRelations = new LinkedHashMap<>(namedQueries.size()); + for (SubQueryAlias namedQuery : namedQueries) { + if (cteRelations.put(namedQuery.alias(), namedQuery) != null) { + throw new ParsingException(namedQuery.location(), "Duplicate alias %s", namedQuery.alias()); + } + } + + // return WITH + return new With(source(ctx), body, cteRelations); + } + + @Override + public LogicalPlan visitNamedQuery(NamedQueryContext ctx) { + return new SubQueryAlias(source(ctx), plan(ctx.queryNoWith()), ctx.name.getText()); + } + + public LogicalPlan visitQueryNoWith(QueryNoWithContext ctx) { + LogicalPlan plan = plan(ctx.queryTerm()); + + if (!ctx.orderBy().isEmpty()) { + plan = new OrderBy(source(ctx.ORDER()), plan, visitList(ctx.orderBy(), Order.class)); + } + + if (ctx.limit != null && ctx.INTEGER_VALUE() != null) { + plan = new Limit(source(ctx.limit), new Literal(source(ctx), Integer.parseInt(ctx.limit.getText()), DataTypes.INTEGER), plan); + } + + return plan; + } + + @Override + public LogicalPlan visitQuerySpecification(QuerySpecificationContext ctx) { + LogicalPlan query = (ctx.fromClause() != null)? 
plan(ctx.fromClause()) : new LocalRelation(source(ctx), new EmptyExecutable(emptyList())); + + // add WHERE + if (ctx.where != null) { + query = new Filter(source(ctx), query, expression(ctx.where)); + } + + List selectTarget = emptyList(); + + // SELECT a, b, c ... + if (!ctx.selectItem().isEmpty()) { + selectTarget = expressions(ctx.selectItem()).stream() + .map(e -> (e instanceof NamedExpression) ? (NamedExpression) e : new UnresolvedAlias(e)) + .collect(toList()); + } + + // GROUP BY + if (ctx.groupBy() != null) { + List groupBy = expressions(ctx.groupBy().groupingElement()); + query = new Aggregate(source(ctx.groupBy()), query, groupBy, selectTarget); + } + else if (!selectTarget.isEmpty()) { + query = new Project(source(ctx.selectItem(0)), query, selectTarget); + } + + // HAVING + if (ctx.having != null) { + query = new Filter(source(ctx.having), query, expression(ctx.having)); + } + + if (ctx.setQuantifier() != null && ctx.setQuantifier().DISTINCT() != null) { + query = new Distinct(source(ctx.setQuantifier()), query); + } + return query; + } + + @Override + public LogicalPlan visitFromClause(FromClauseContext ctx) { + // if there are multiple FROM clauses, convert each pair in a inner join + List plans = plans(ctx.relation()); + return plans.stream() + .reduce((left, right) -> new Join(source(ctx), left, right, Join.JoinType.IMPLICIT, null)) + .get(); + } + + @Override + public LogicalPlan visitRelation(RelationContext ctx) { + // check if there are multiple join clauses. ANTLR produces a right nested tree with the left join clause + // at the top. However the fields previously references might be used in the following clauses. + // As such, swap/reverse the tree. 
+ + LogicalPlan result = plan(ctx.relationPrimary()); + for (JoinRelationContext j : ctx.joinRelation()) { + result = doJoin(result, j); + } + + return result; + } + + private Join doJoin(LogicalPlan left, JoinRelationContext ctx) { + JoinTypeContext joinType = ctx.joinType(); + + Join.JoinType type = JoinType.INNER; + if (joinType != null) { + if (joinType.FULL() != null) { + type = JoinType.FULL; + } + if (joinType.LEFT() != null) { + type = JoinType.LEFT; + } + if (joinType.RIGHT() != null) { + type = JoinType.RIGHT; + } + } + + Expression condition = null; + JoinCriteriaContext criteria = ctx.joinCriteria(); + if (criteria != null) { + if (criteria.USING() != null) { + throw new UnsupportedOperationException(); + } + if (criteria.booleanExpression() != null) { + condition = expression(criteria.booleanExpression()); + } + } + + return new Join(source(ctx), left, plan(ctx.right), type, condition); + } + + @Override + public Object visitAliasedRelation(AliasedRelationContext ctx) { + return new SubQueryAlias(source(ctx), plan(ctx.relation()), visitQualifiedName(ctx.qualifiedName())); + } + + @Override + public Object visitAliasedQuery(AliasedQueryContext ctx) { + return new SubQueryAlias(source(ctx), plan(ctx.queryNoWith()), visitQualifiedName(ctx.qualifiedName())); + } + + @Override + public Object visitSubquery(SubqueryContext ctx) { + return plan(ctx.queryNoWith()); + } + + @Override + public LogicalPlan visitTableName(TableNameContext ctx) { + String alias = visitQualifiedName(ctx.qualifiedName()); + TableIdentifier tableIdentifier = visitTableIdentifier(ctx.tableIdentifier()); + return new UnresolvedRelation(source(ctx), tableIdentifier, alias); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/ParsingException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/ParsingException.java new file mode 100644 index 00000000000..3382ef240e3 --- /dev/null +++ 
/**
 * Exception thrown while parsing SQL text, carrying the position
 * (line and column) at which the problem was detected.
 */
public class ParsingException extends ClientSqlException {
    // line/charPositionInLine follow ANTLR conventions — presumably line is
    // 1-based and the column 0-based (hence the +1 in getColumnNumber);
    // TODO confirm against the ANTLR Recognizer contract
    private final int line;
    private final int charPositionInLine;

    public ParsingException(String message, RecognitionException cause, int line, int charPositionInLine) {
        super(message, cause);

        this.line = line;
        this.charPositionInLine = charPositionInLine;
    }

    // convenience constructor for errors with no useful position
    ParsingException(String message, Object... args) {
        this(Location.EMPTY, message, args);
    }

    // formats the message eagerly with the given args (Locale.ROOT for stable output)
    public ParsingException(Location nodeLocation, String message, Object... args) {
        this(format(Locale.ROOT, message, args), null, nodeLocation.getLineNumber(), nodeLocation.getColumnNumber());
    }

    public int getLineNumber() {
        return line;
    }

    public int getColumnNumber() {
        // convert 0-based char position to a 1-based column for display
        return charPositionInLine + 1;
    }

    // the raw message, without the position prefix added by getMessage()
    public String getErrorMessage() {
        return super.getMessage();
    }

    @Override
    public String getMessage() {
        return format(Locale.ROOT, "line %s:%s: %s", getLineNumber(), getColumnNumber(), getErrorMessage());
    }
}

The default implementation does nothing.

+ */ + @Override public void enterSingleStatement(SqlBaseParser.SingleStatementContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSingleStatement(SqlBaseParser.SingleStatementContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSingleExpression(SqlBaseParser.SingleExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSingleExpression(SqlBaseParser.SingleExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterStatementDefault(SqlBaseParser.StatementDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitStatementDefault(SqlBaseParser.StatementDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterExplain(SqlBaseParser.ExplainContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitExplain(SqlBaseParser.ExplainContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterDebug(SqlBaseParser.DebugContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitDebug(SqlBaseParser.DebugContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterShowTables(SqlBaseParser.ShowTablesContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitShowTables(SqlBaseParser.ShowTablesContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterShowColumns(SqlBaseParser.ShowColumnsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitShowColumns(SqlBaseParser.ShowColumnsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterShowFunctions(SqlBaseParser.ShowFunctionsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitShowFunctions(SqlBaseParser.ShowFunctionsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterShowSchemas(SqlBaseParser.ShowSchemasContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitShowSchemas(SqlBaseParser.ShowSchemasContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterQuery(SqlBaseParser.QueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitQuery(SqlBaseParser.QueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterQueryNoWith(SqlBaseParser.QueryNoWithContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitQueryNoWith(SqlBaseParser.QueryNoWithContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterQueryPrimaryDefault(SqlBaseParser.QueryPrimaryDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitQueryPrimaryDefault(SqlBaseParser.QueryPrimaryDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSubquery(SqlBaseParser.SubqueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSubquery(SqlBaseParser.SubqueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterOrderBy(SqlBaseParser.OrderByContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitOrderBy(SqlBaseParser.OrderByContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterQuerySpecification(SqlBaseParser.QuerySpecificationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitQuerySpecification(SqlBaseParser.QuerySpecificationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterFromClause(SqlBaseParser.FromClauseContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitFromClause(SqlBaseParser.FromClauseContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterGroupBy(SqlBaseParser.GroupByContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitGroupBy(SqlBaseParser.GroupByContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterGroupingExpressions(SqlBaseParser.GroupingExpressionsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitGroupingExpressions(SqlBaseParser.GroupingExpressionsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterNamedQuery(SqlBaseParser.NamedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitNamedQuery(SqlBaseParser.NamedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSetQuantifier(SqlBaseParser.SetQuantifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSetQuantifier(SqlBaseParser.SetQuantifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSelectExpression(SqlBaseParser.SelectExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSelectExpression(SqlBaseParser.SelectExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterRelation(SqlBaseParser.RelationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitRelation(SqlBaseParser.RelationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterJoinRelation(SqlBaseParser.JoinRelationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitJoinRelation(SqlBaseParser.JoinRelationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterJoinType(SqlBaseParser.JoinTypeContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitJoinType(SqlBaseParser.JoinTypeContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterJoinCriteria(SqlBaseParser.JoinCriteriaContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitJoinCriteria(SqlBaseParser.JoinCriteriaContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterTableName(SqlBaseParser.TableNameContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitTableName(SqlBaseParser.TableNameContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterAliasedQuery(SqlBaseParser.AliasedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitAliasedQuery(SqlBaseParser.AliasedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterAliasedRelation(SqlBaseParser.AliasedRelationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitAliasedRelation(SqlBaseParser.AliasedRelationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterExpression(SqlBaseParser.ExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitExpression(SqlBaseParser.ExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterLogicalNot(SqlBaseParser.LogicalNotContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitLogicalNot(SqlBaseParser.LogicalNotContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterStringQuery(SqlBaseParser.StringQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitStringQuery(SqlBaseParser.StringQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterBooleanDefault(SqlBaseParser.BooleanDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitBooleanDefault(SqlBaseParser.BooleanDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterExists(SqlBaseParser.ExistsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitExists(SqlBaseParser.ExistsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterMultiMatchQuery(SqlBaseParser.MultiMatchQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitMultiMatchQuery(SqlBaseParser.MultiMatchQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterMatchQuery(SqlBaseParser.MatchQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitMatchQuery(SqlBaseParser.MatchQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterPredicated(SqlBaseParser.PredicatedContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitPredicated(SqlBaseParser.PredicatedContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterPredicate(SqlBaseParser.PredicateContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitPredicate(SqlBaseParser.PredicateContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterValueExpressionDefault(SqlBaseParser.ValueExpressionDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitValueExpressionDefault(SqlBaseParser.ValueExpressionDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterComparison(SqlBaseParser.ComparisonContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitComparison(SqlBaseParser.ComparisonContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterCast(SqlBaseParser.CastContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitCast(SqlBaseParser.CastContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterExtract(SqlBaseParser.ExtractContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitExtract(SqlBaseParser.ExtractContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterConstantDefault(SqlBaseParser.ConstantDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitConstantDefault(SqlBaseParser.ConstantDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterStar(SqlBaseParser.StarContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitStar(SqlBaseParser.StarContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterFunctionCall(SqlBaseParser.FunctionCallContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitFunctionCall(SqlBaseParser.FunctionCallContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSubqueryExpression(SqlBaseParser.SubqueryExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSubqueryExpression(SqlBaseParser.SubqueryExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterColumnReference(SqlBaseParser.ColumnReferenceContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitColumnReference(SqlBaseParser.ColumnReferenceContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterDereference(SqlBaseParser.DereferenceContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitDereference(SqlBaseParser.DereferenceContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterColumnExpression(SqlBaseParser.ColumnExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitColumnExpression(SqlBaseParser.ColumnExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterNullLiteral(SqlBaseParser.NullLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitNullLiteral(SqlBaseParser.NullLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterTypeConstructor(SqlBaseParser.TypeConstructorContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitTypeConstructor(SqlBaseParser.TypeConstructorContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterNumericLiteral(SqlBaseParser.NumericLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitNumericLiteral(SqlBaseParser.NumericLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterBooleanLiteral(SqlBaseParser.BooleanLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitBooleanLiteral(SqlBaseParser.BooleanLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterStringLiteral(SqlBaseParser.StringLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitStringLiteral(SqlBaseParser.StringLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterComparisonOperator(SqlBaseParser.ComparisonOperatorContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitComparisonOperator(SqlBaseParser.ComparisonOperatorContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterBooleanValue(SqlBaseParser.BooleanValueContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitBooleanValue(SqlBaseParser.BooleanValueContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterPrimitiveDataType(SqlBaseParser.PrimitiveDataTypeContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitPrimitiveDataType(SqlBaseParser.PrimitiveDataTypeContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterWhenClause(SqlBaseParser.WhenClauseContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitWhenClause(SqlBaseParser.WhenClauseContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterQualifiedName(SqlBaseParser.QualifiedNameContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitQualifiedName(SqlBaseParser.QualifiedNameContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterTableIdentifier(SqlBaseParser.TableIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitTableIdentifier(SqlBaseParser.TableIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterIdentifier(SqlBaseParser.IdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitIdentifier(SqlBaseParser.IdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterUnquotedIdentifier(SqlBaseParser.UnquotedIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitUnquotedIdentifier(SqlBaseParser.UnquotedIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterDigitIdentifier(SqlBaseParser.DigitIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitDigitIdentifier(SqlBaseParser.DigitIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterDecimalLiteral(SqlBaseParser.DecimalLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitDecimalLiteral(SqlBaseParser.DecimalLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterIntegerLiteral(SqlBaseParser.IntegerLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitIntegerLiteral(SqlBaseParser.IntegerLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterNonReserved(SqlBaseParser.NonReservedContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitNonReserved(SqlBaseParser.NonReservedContext ctx) { } + + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterEveryRule(ParserRuleContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitEveryRule(ParserRuleContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void visitTerminal(TerminalNode node) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void visitErrorNode(ErrorNode node) { } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java new file mode 100644 index 00000000000..7de81570b02 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java @@ -0,0 +1,523 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +// ANTLR GENERATED CODE: DO NOT EDIT +package org.elasticsearch.xpack.sql.parser; +import org.antlr.v4.runtime.tree.AbstractParseTreeVisitor; + +/** + * This class provides an empty implementation of {@link SqlBaseVisitor}, + * which can be extended to create a visitor which only needs to handle a subset + * of the available methods. + * + * @param The return type of the visit operation. Use {@link Void} for + * operations with no return type. + */ +class SqlBaseBaseVisitor extends AbstractParseTreeVisitor implements SqlBaseVisitor { + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSingleStatement(SqlBaseParser.SingleStatementContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSingleExpression(SqlBaseParser.SingleExpressionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitStatementDefault(SqlBaseParser.StatementDefaultContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitExplain(SqlBaseParser.ExplainContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitDebug(SqlBaseParser.DebugContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitShowTables(SqlBaseParser.ShowTablesContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitShowColumns(SqlBaseParser.ShowColumnsContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitShowFunctions(SqlBaseParser.ShowFunctionsContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitShowSchemas(SqlBaseParser.ShowSchemasContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitQuery(SqlBaseParser.QueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitQueryNoWith(SqlBaseParser.QueryNoWithContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitQueryPrimaryDefault(SqlBaseParser.QueryPrimaryDefaultContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSubquery(SqlBaseParser.SubqueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitOrderBy(SqlBaseParser.OrderByContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitQuerySpecification(SqlBaseParser.QuerySpecificationContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitFromClause(SqlBaseParser.FromClauseContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitGroupBy(SqlBaseParser.GroupByContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitGroupingExpressions(SqlBaseParser.GroupingExpressionsContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitNamedQuery(SqlBaseParser.NamedQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSetQuantifier(SqlBaseParser.SetQuantifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSelectExpression(SqlBaseParser.SelectExpressionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitRelation(SqlBaseParser.RelationContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitJoinRelation(SqlBaseParser.JoinRelationContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitJoinType(SqlBaseParser.JoinTypeContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitJoinCriteria(SqlBaseParser.JoinCriteriaContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitTableName(SqlBaseParser.TableNameContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitAliasedQuery(SqlBaseParser.AliasedQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitAliasedRelation(SqlBaseParser.AliasedRelationContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitExpression(SqlBaseParser.ExpressionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitLogicalNot(SqlBaseParser.LogicalNotContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitStringQuery(SqlBaseParser.StringQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitBooleanDefault(SqlBaseParser.BooleanDefaultContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitExists(SqlBaseParser.ExistsContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitMultiMatchQuery(SqlBaseParser.MultiMatchQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitMatchQuery(SqlBaseParser.MatchQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitPredicated(SqlBaseParser.PredicatedContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitPredicate(SqlBaseParser.PredicateContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitValueExpressionDefault(SqlBaseParser.ValueExpressionDefaultContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitComparison(SqlBaseParser.ComparisonContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitCast(SqlBaseParser.CastContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitExtract(SqlBaseParser.ExtractContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitConstantDefault(SqlBaseParser.ConstantDefaultContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitStar(SqlBaseParser.StarContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitFunctionCall(SqlBaseParser.FunctionCallContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSubqueryExpression(SqlBaseParser.SubqueryExpressionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitColumnReference(SqlBaseParser.ColumnReferenceContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitDereference(SqlBaseParser.DereferenceContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitColumnExpression(SqlBaseParser.ColumnExpressionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitNullLiteral(SqlBaseParser.NullLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitTypeConstructor(SqlBaseParser.TypeConstructorContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitNumericLiteral(SqlBaseParser.NumericLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitBooleanLiteral(SqlBaseParser.BooleanLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitStringLiteral(SqlBaseParser.StringLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitComparisonOperator(SqlBaseParser.ComparisonOperatorContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitBooleanValue(SqlBaseParser.BooleanValueContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitPrimitiveDataType(SqlBaseParser.PrimitiveDataTypeContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitWhenClause(SqlBaseParser.WhenClauseContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitQualifiedName(SqlBaseParser.QualifiedNameContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitTableIdentifier(SqlBaseParser.TableIdentifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitIdentifier(SqlBaseParser.IdentifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitUnquotedIdentifier(SqlBaseParser.UnquotedIdentifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitDigitIdentifier(SqlBaseParser.DigitIdentifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitDecimalLiteral(SqlBaseParser.DecimalLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitIntegerLiteral(SqlBaseParser.IntegerLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitNonReserved(SqlBaseParser.NonReservedContext ctx) { return visitChildren(ctx); } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java new file mode 100644 index 00000000000..d8ec6df54ad --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java @@ -0,0 +1,462 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +// ANTLR GENERATED CODE: DO NOT EDIT +package org.elasticsearch.xpack.sql.parser; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.atn.*; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.*; + +@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) +class SqlBaseLexer extends Lexer { + static { RuntimeMetaData.checkVersion("4.5.3", RuntimeMetaData.VERSION); } + + protected static final DFA[] _decisionToDFA; + protected static final PredictionContextCache _sharedContextCache = + new PredictionContextCache(); + public static final int + T__0=1, T__1=2, T__2=3, T__3=4, ALL=5, ANALYZE=6, ANALYZED=7, AND=8, ANY=9, + AS=10, ASC=11, BETWEEN=12, BY=13, CAST=14, COLUMN=15, COLUMNS=16, CROSS=17, + DEBUG=18, DESC=19, DESCRIBE=20, DISTINCT=21, EXECUTABLE=22, EXISTS=23, + EXPLAIN=24, EXTRACT=25, FALSE=26, FOR=27, FORMAT=28, FROM=29, FULL=30, + FUNCTIONS=31, GRAPHVIZ=32, GROUP=33, GROUPING=34, HAVING=35, IN=36, INNER=37, + INTEGER=38, INTO=39, IS=40, JOIN=41, LAST=42, LEFT=43, LIKE=44, LIMIT=45, + LOGICAL=46, MAPPED=47, MATCH=48, NATURAL=49, NO=50, NOT=51, NULL=52, ON=53, + OPTIMIZED=54, 
OPTION=55, OR=56, ORDER=57, OUTER=58, PARSED=59, PHYSICAL=60, + PLAN=61, QUERY=62, RESET=63, RIGHT=64, RLIKE=65, SCHEMAS=66, SELECT=67, + SESSION=68, SET=69, SETS=70, SHOW=71, TABLE=72, TABLES=73, TEXT=74, THEN=75, + TO=76, TRUE=77, TYPE=78, USE=79, USING=80, VERIFY=81, WHEN=82, WHERE=83, + WITH=84, EQ=85, NEQ=86, LT=87, LTE=88, GT=89, GTE=90, PLUS=91, MINUS=92, + ASTERISK=93, SLASH=94, PERCENT=95, CONCAT=96, STRING=97, INTEGER_VALUE=98, + DECIMAL_VALUE=99, IDENTIFIER=100, DIGIT_IDENTIFIER=101, QUOTED_IDENTIFIER=102, + BACKQUOTED_IDENTIFIER=103, SIMPLE_COMMENT=104, BRACKETED_COMMENT=105, + WS=106, UNRECOGNIZED=107; + public static String[] modeNames = { + "DEFAULT_MODE" + }; + + public static final String[] ruleNames = { + "T__0", "T__1", "T__2", "T__3", "ALL", "ANALYZE", "ANALYZED", "AND", "ANY", + "AS", "ASC", "BETWEEN", "BY", "CAST", "COLUMN", "COLUMNS", "CROSS", "DEBUG", + "DESC", "DESCRIBE", "DISTINCT", "EXECUTABLE", "EXISTS", "EXPLAIN", "EXTRACT", + "FALSE", "FOR", "FORMAT", "FROM", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP", + "GROUPING", "HAVING", "IN", "INNER", "INTEGER", "INTO", "IS", "JOIN", + "LAST", "LEFT", "LIKE", "LIMIT", "LOGICAL", "MAPPED", "MATCH", "NATURAL", + "NO", "NOT", "NULL", "ON", "OPTIMIZED", "OPTION", "OR", "ORDER", "OUTER", + "PARSED", "PHYSICAL", "PLAN", "QUERY", "RESET", "RIGHT", "RLIKE", "SCHEMAS", + "SELECT", "SESSION", "SET", "SETS", "SHOW", "TABLE", "TABLES", "TEXT", + "THEN", "TO", "TRUE", "TYPE", "USE", "USING", "VERIFY", "WHEN", "WHERE", + "WITH", "EQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", + "SLASH", "PERCENT", "CONCAT", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", + "IDENTIFIER", "DIGIT_IDENTIFIER", "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", + "EXPONENT", "DIGIT", "LETTER", "SIMPLE_COMMENT", "BRACKETED_COMMENT", + "WS", "UNRECOGNIZED" + }; + + private static final String[] _LITERAL_NAMES = { + null, "'('", "')'", "','", "'.'", "'ALL'", "'ANALYZE'", "'ANALYZED'", + "'AND'", "'ANY'", "'AS'", "'ASC'", 
"'BETWEEN'", "'BY'", "'CAST'", "'COLUMN'", + "'COLUMNS'", "'CROSS'", "'DEBUG'", "'DESC'", "'DESCRIBE'", "'DISTINCT'", + "'EXECUTABLE'", "'EXISTS'", "'EXPLAIN'", "'EXTRACT'", "'FALSE'", "'FOR'", + "'FORMAT'", "'FROM'", "'FULL'", "'FUNCTIONS'", "'GRAPHVIZ'", "'GROUP'", + "'GROUPING'", "'HAVING'", "'IN'", "'INNER'", "'INTEGER'", "'INTO'", "'IS'", + "'JOIN'", "'LAST'", "'LEFT'", "'LIKE'", "'LIMIT'", "'LOGICAL'", "'MAPPED'", + "'MATCH'", "'NATURAL'", "'NO'", "'NOT'", "'NULL'", "'ON'", "'OPTIMIZED'", + "'OPTION'", "'OR'", "'ORDER'", "'OUTER'", "'PARSED'", "'PHYSICAL'", "'PLAN'", + "'QUERY'", "'RESET'", "'RIGHT'", "'RLIKE'", "'SCHEMAS'", "'SELECT'", "'SESSION'", + "'SET'", "'SETS'", "'SHOW'", "'TABLE'", "'TABLES'", "'TEXT'", "'THEN'", + "'TO'", "'TRUE'", "'TYPE'", "'USE'", "'USING'", "'VERIFY'", "'WHEN'", + "'WHERE'", "'WITH'", "'='", null, "'<'", "'<='", "'>'", "'>='", "'+'", + "'-'", "'*'", "'/'", "'%'", "'||'" + }; + private static final String[] _SYMBOLIC_NAMES = { + null, null, null, null, null, "ALL", "ANALYZE", "ANALYZED", "AND", "ANY", + "AS", "ASC", "BETWEEN", "BY", "CAST", "COLUMN", "COLUMNS", "CROSS", "DEBUG", + "DESC", "DESCRIBE", "DISTINCT", "EXECUTABLE", "EXISTS", "EXPLAIN", "EXTRACT", + "FALSE", "FOR", "FORMAT", "FROM", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP", + "GROUPING", "HAVING", "IN", "INNER", "INTEGER", "INTO", "IS", "JOIN", + "LAST", "LEFT", "LIKE", "LIMIT", "LOGICAL", "MAPPED", "MATCH", "NATURAL", + "NO", "NOT", "NULL", "ON", "OPTIMIZED", "OPTION", "OR", "ORDER", "OUTER", + "PARSED", "PHYSICAL", "PLAN", "QUERY", "RESET", "RIGHT", "RLIKE", "SCHEMAS", + "SELECT", "SESSION", "SET", "SETS", "SHOW", "TABLE", "TABLES", "TEXT", + "THEN", "TO", "TRUE", "TYPE", "USE", "USING", "VERIFY", "WHEN", "WHERE", + "WITH", "EQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", + "SLASH", "PERCENT", "CONCAT", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", + "IDENTIFIER", "DIGIT_IDENTIFIER", "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", + "SIMPLE_COMMENT", 
"BRACKETED_COMMENT", "WS", "UNRECOGNIZED" + }; + public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); + + /** + * @deprecated Use {@link #VOCABULARY} instead. + */ + @Deprecated + public static final String[] tokenNames; + static { + tokenNames = new String[_SYMBOLIC_NAMES.length]; + for (int i = 0; i < tokenNames.length; i++) { + tokenNames[i] = VOCABULARY.getLiteralName(i); + if (tokenNames[i] == null) { + tokenNames[i] = VOCABULARY.getSymbolicName(i); + } + + if (tokenNames[i] == null) { + tokenNames[i] = ""; + } + } + } + + @Override + @Deprecated + public String[] getTokenNames() { + return tokenNames; + } + + @Override + + public Vocabulary getVocabulary() { + return VOCABULARY; + } + + + public SqlBaseLexer(CharStream input) { + super(input); + _interp = new LexerATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache); + } + + @Override + public String getGrammarFileName() { return "SqlBase.g4"; } + + @Override + public String[] getRuleNames() { return ruleNames; } + + @Override + public String getSerializedATN() { return _serializedATN; } + + @Override + public String[] getModeNames() { return modeNames; } + + @Override + public ATN getATN() { return _ATN; } + + public static final String _serializedATN = + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2m\u0387\b\1\4\2\t"+ + "\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13"+ + "\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ + "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ + "\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+ + "\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4"+ + ",\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t"+ + "\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t="+ + "\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I"+ + 
"\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT"+ + "\4U\tU\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4^\t^\4_\t_\4"+ + "`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4g\tg\4h\th\4i\ti\4j\tj\4k\t"+ + "k\4l\tl\4m\tm\4n\tn\4o\to\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\6"+ + "\3\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3"+ + "\b\3\t\3\t\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\r"+ + "\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3"+ + "\20\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3"+ + "\21\3\22\3\22\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3"+ + "\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3"+ + "\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3"+ + "\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3"+ + "\31\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3"+ + "\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3"+ + "\35\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3 \3 "+ + "\3 \3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3"+ + "\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3&\3&"+ + "\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3)\3)\3)\3"+ + "*\3*\3*\3*\3*\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3.\3.\3.\3"+ + ".\3.\3.\3/\3/\3/\3/\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\61"+ + "\3\61\3\61\3\61\3\61\3\61\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\63"+ + "\3\63\3\63\3\64\3\64\3\64\3\64\3\65\3\65\3\65\3\65\3\65\3\66\3\66\3\66"+ + "\3\67\3\67\3\67\3\67\3\67\3\67\3\67\3\67\3\67\3\67\38\38\38\38\38\38\3"+ + "8\39\39\39\3:\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3;\3<\3<\3<\3<\3<\3<\3<\3"+ + "=\3=\3=\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3?\3?\3?\3?\3?\3?\3@\3@\3@\3"+ + "@\3@\3@\3A\3A\3A\3A\3A\3A\3B\3B\3B\3B\3B\3B\3C\3C\3C\3C\3C\3C\3C\3C\3"+ + 
"D\3D\3D\3D\3D\3D\3D\3E\3E\3E\3E\3E\3E\3E\3E\3F\3F\3F\3F\3G\3G\3G\3G\3"+ + "G\3H\3H\3H\3H\3H\3I\3I\3I\3I\3I\3I\3J\3J\3J\3J\3J\3J\3J\3K\3K\3K\3K\3"+ + "K\3L\3L\3L\3L\3L\3M\3M\3M\3N\3N\3N\3N\3N\3O\3O\3O\3O\3O\3P\3P\3P\3P\3"+ + "Q\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\3R\3R\3R\3S\3S\3S\3S\3S\3T\3T\3T\3T\3T\3"+ + "T\3U\3U\3U\3U\3U\3V\3V\3W\3W\3W\3W\3W\3W\3W\5W\u02d3\nW\3X\3X\3Y\3Y\3"+ + "Y\3Z\3Z\3[\3[\3[\3\\\3\\\3]\3]\3^\3^\3_\3_\3`\3`\3a\3a\3a\3b\3b\3b\3b"+ + "\7b\u02f0\nb\fb\16b\u02f3\13b\3b\3b\3c\6c\u02f8\nc\rc\16c\u02f9\3d\6d"+ + "\u02fd\nd\rd\16d\u02fe\3d\3d\7d\u0303\nd\fd\16d\u0306\13d\3d\3d\6d\u030a"+ + "\nd\rd\16d\u030b\3d\6d\u030f\nd\rd\16d\u0310\3d\3d\7d\u0315\nd\fd\16d"+ + "\u0318\13d\5d\u031a\nd\3d\3d\3d\3d\6d\u0320\nd\rd\16d\u0321\3d\3d\5d\u0326"+ + "\nd\3e\3e\5e\u032a\ne\3e\3e\3e\7e\u032f\ne\fe\16e\u0332\13e\3f\3f\3f\3"+ + "f\6f\u0338\nf\rf\16f\u0339\3g\3g\3g\3g\7g\u0340\ng\fg\16g\u0343\13g\3"+ + "g\3g\3h\3h\3h\3h\7h\u034b\nh\fh\16h\u034e\13h\3h\3h\3i\3i\5i\u0354\ni"+ + "\3i\6i\u0357\ni\ri\16i\u0358\3j\3j\3k\3k\3l\3l\3l\3l\7l\u0363\nl\fl\16"+ + "l\u0366\13l\3l\5l\u0369\nl\3l\5l\u036c\nl\3l\3l\3m\3m\3m\3m\3m\7m\u0375"+ + "\nm\fm\16m\u0378\13m\3m\3m\3m\3m\3m\3n\6n\u0380\nn\rn\16n\u0381\3n\3n"+ + "\3o\3o\3\u0376\2p\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31"+ + "\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65"+ + "\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64"+ + "g\65i\66k\67m8o9q:s;u{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089"+ + "F\u008bG\u008dH\u008fI\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009d"+ + "P\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00afY\u00b1"+ + "Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5"+ + "d\u00c7e\u00c9f\u00cbg\u00cdh\u00cfi\u00d1\2\u00d3\2\u00d5\2\u00d7j\u00d9"+ + "k\u00dbl\u00ddm\3\2\13\3\2))\5\2<\3\2\2\2\u018a"+ + "\u018b\7H\2\2\u018b\u018c\7W\2\2\u018c\u018d\7P\2\2\u018d\u018e\7E\2\2"+ + 
"\u018e\u018f\7V\2\2\u018f\u0190\7K\2\2\u0190\u0191\7Q\2\2\u0191\u0192"+ + "\7P\2\2\u0192\u0193\7U\2\2\u0193@\3\2\2\2\u0194\u0195\7I\2\2\u0195\u0196"+ + "\7T\2\2\u0196\u0197\7C\2\2\u0197\u0198\7R\2\2\u0198\u0199\7J\2\2\u0199"+ + "\u019a\7X\2\2\u019a\u019b\7K\2\2\u019b\u019c\7\\\2\2\u019cB\3\2\2\2\u019d"+ + "\u019e\7I\2\2\u019e\u019f\7T\2\2\u019f\u01a0\7Q\2\2\u01a0\u01a1\7W\2\2"+ + "\u01a1\u01a2\7R\2\2\u01a2D\3\2\2\2\u01a3\u01a4\7I\2\2\u01a4\u01a5\7T\2"+ + "\2\u01a5\u01a6\7Q\2\2\u01a6\u01a7\7W\2\2\u01a7\u01a8\7R\2\2\u01a8\u01a9"+ + "\7K\2\2\u01a9\u01aa\7P\2\2\u01aa\u01ab\7I\2\2\u01abF\3\2\2\2\u01ac\u01ad"+ + "\7J\2\2\u01ad\u01ae\7C\2\2\u01ae\u01af\7X\2\2\u01af\u01b0\7K\2\2\u01b0"+ + "\u01b1\7P\2\2\u01b1\u01b2\7I\2\2\u01b2H\3\2\2\2\u01b3\u01b4\7K\2\2\u01b4"+ + "\u01b5\7P\2\2\u01b5J\3\2\2\2\u01b6\u01b7\7K\2\2\u01b7\u01b8\7P\2\2\u01b8"+ + "\u01b9\7P\2\2\u01b9\u01ba\7G\2\2\u01ba\u01bb\7T\2\2\u01bbL\3\2\2\2\u01bc"+ + "\u01bd\7K\2\2\u01bd\u01be\7P\2\2\u01be\u01bf\7V\2\2\u01bf\u01c0\7G\2\2"+ + "\u01c0\u01c1\7I\2\2\u01c1\u01c2\7G\2\2\u01c2\u01c3\7T\2\2\u01c3N\3\2\2"+ + "\2\u01c4\u01c5\7K\2\2\u01c5\u01c6\7P\2\2\u01c6\u01c7\7V\2\2\u01c7\u01c8"+ + "\7Q\2\2\u01c8P\3\2\2\2\u01c9\u01ca\7K\2\2\u01ca\u01cb\7U\2\2\u01cbR\3"+ + "\2\2\2\u01cc\u01cd\7L\2\2\u01cd\u01ce\7Q\2\2\u01ce\u01cf\7K\2\2\u01cf"+ + "\u01d0\7P\2\2\u01d0T\3\2\2\2\u01d1\u01d2\7N\2\2\u01d2\u01d3\7C\2\2\u01d3"+ + "\u01d4\7U\2\2\u01d4\u01d5\7V\2\2\u01d5V\3\2\2\2\u01d6\u01d7\7N\2\2\u01d7"+ + "\u01d8\7G\2\2\u01d8\u01d9\7H\2\2\u01d9\u01da\7V\2\2\u01daX\3\2\2\2\u01db"+ + "\u01dc\7N\2\2\u01dc\u01dd\7K\2\2\u01dd\u01de\7M\2\2\u01de\u01df\7G\2\2"+ + "\u01dfZ\3\2\2\2\u01e0\u01e1\7N\2\2\u01e1\u01e2\7K\2\2\u01e2\u01e3\7O\2"+ + "\2\u01e3\u01e4\7K\2\2\u01e4\u01e5\7V\2\2\u01e5\\\3\2\2\2\u01e6\u01e7\7"+ + "N\2\2\u01e7\u01e8\7Q\2\2\u01e8\u01e9\7I\2\2\u01e9\u01ea\7K\2\2\u01ea\u01eb"+ + "\7E\2\2\u01eb\u01ec\7C\2\2\u01ec\u01ed\7N\2\2\u01ed^\3\2\2\2\u01ee\u01ef"+ + "\7O\2\2\u01ef\u01f0\7C\2\2\u01f0\u01f1\7R\2\2\u01f1\u01f2\7R\2\2\u01f2"+ + 
"\u01f3\7G\2\2\u01f3\u01f4\7F\2\2\u01f4`\3\2\2\2\u01f5\u01f6\7O\2\2\u01f6"+ + "\u01f7\7C\2\2\u01f7\u01f8\7V\2\2\u01f8\u01f9\7E\2\2\u01f9\u01fa\7J\2\2"+ + "\u01fab\3\2\2\2\u01fb\u01fc\7P\2\2\u01fc\u01fd\7C\2\2\u01fd\u01fe\7V\2"+ + "\2\u01fe\u01ff\7W\2\2\u01ff\u0200\7T\2\2\u0200\u0201\7C\2\2\u0201\u0202"+ + "\7N\2\2\u0202d\3\2\2\2\u0203\u0204\7P\2\2\u0204\u0205\7Q\2\2\u0205f\3"+ + "\2\2\2\u0206\u0207\7P\2\2\u0207\u0208\7Q\2\2\u0208\u0209\7V\2\2\u0209"+ + "h\3\2\2\2\u020a\u020b\7P\2\2\u020b\u020c\7W\2\2\u020c\u020d\7N\2\2\u020d"+ + "\u020e\7N\2\2\u020ej\3\2\2\2\u020f\u0210\7Q\2\2\u0210\u0211\7P\2\2\u0211"+ + "l\3\2\2\2\u0212\u0213\7Q\2\2\u0213\u0214\7R\2\2\u0214\u0215\7V\2\2\u0215"+ + "\u0216\7K\2\2\u0216\u0217\7O\2\2\u0217\u0218\7K\2\2\u0218\u0219\7\\\2"+ + "\2\u0219\u021a\7G\2\2\u021a\u021b\7F\2\2\u021bn\3\2\2\2\u021c\u021d\7"+ + "Q\2\2\u021d\u021e\7R\2\2\u021e\u021f\7V\2\2\u021f\u0220\7K\2\2\u0220\u0221"+ + "\7Q\2\2\u0221\u0222\7P\2\2\u0222p\3\2\2\2\u0223\u0224\7Q\2\2\u0224\u0225"+ + "\7T\2\2\u0225r\3\2\2\2\u0226\u0227\7Q\2\2\u0227\u0228\7T\2\2\u0228\u0229"+ + "\7F\2\2\u0229\u022a\7G\2\2\u022a\u022b\7T\2\2\u022bt\3\2\2\2\u022c\u022d"+ + "\7Q\2\2\u022d\u022e\7W\2\2\u022e\u022f\7V\2\2\u022f\u0230\7G\2\2\u0230"+ + "\u0231\7T\2\2\u0231v\3\2\2\2\u0232\u0233\7R\2\2\u0233\u0234\7C\2\2\u0234"+ + "\u0235\7T\2\2\u0235\u0236\7U\2\2\u0236\u0237\7G\2\2\u0237\u0238\7F\2\2"+ + "\u0238x\3\2\2\2\u0239\u023a\7R\2\2\u023a\u023b\7J\2\2\u023b\u023c\7[\2"+ + "\2\u023c\u023d\7U\2\2\u023d\u023e\7K\2\2\u023e\u023f\7E\2\2\u023f\u0240"+ + "\7C\2\2\u0240\u0241\7N\2\2\u0241z\3\2\2\2\u0242\u0243\7R\2\2\u0243\u0244"+ + "\7N\2\2\u0244\u0245\7C\2\2\u0245\u0246\7P\2\2\u0246|\3\2\2\2\u0247\u0248"+ + "\7S\2\2\u0248\u0249\7W\2\2\u0249\u024a\7G\2\2\u024a\u024b\7T\2\2\u024b"+ + "\u024c\7[\2\2\u024c~\3\2\2\2\u024d\u024e\7T\2\2\u024e\u024f\7G\2\2\u024f"+ + "\u0250\7U\2\2\u0250\u0251\7G\2\2\u0251\u0252\7V\2\2\u0252\u0080\3\2\2"+ + "\2\u0253\u0254\7T\2\2\u0254\u0255\7K\2\2\u0255\u0256\7I\2\2\u0256\u0257"+ + 
"\7J\2\2\u0257\u0258\7V\2\2\u0258\u0082\3\2\2\2\u0259\u025a\7T\2\2\u025a"+ + "\u025b\7N\2\2\u025b\u025c\7K\2\2\u025c\u025d\7M\2\2\u025d\u025e\7G\2\2"+ + "\u025e\u0084\3\2\2\2\u025f\u0260\7U\2\2\u0260\u0261\7E\2\2\u0261\u0262"+ + "\7J\2\2\u0262\u0263\7G\2\2\u0263\u0264\7O\2\2\u0264\u0265\7C\2\2\u0265"+ + "\u0266\7U\2\2\u0266\u0086\3\2\2\2\u0267\u0268\7U\2\2\u0268\u0269\7G\2"+ + "\2\u0269\u026a\7N\2\2\u026a\u026b\7G\2\2\u026b\u026c\7E\2\2\u026c\u026d"+ + "\7V\2\2\u026d\u0088\3\2\2\2\u026e\u026f\7U\2\2\u026f\u0270\7G\2\2\u0270"+ + "\u0271\7U\2\2\u0271\u0272\7U\2\2\u0272\u0273\7K\2\2\u0273\u0274\7Q\2\2"+ + "\u0274\u0275\7P\2\2\u0275\u008a\3\2\2\2\u0276\u0277\7U\2\2\u0277\u0278"+ + "\7G\2\2\u0278\u0279\7V\2\2\u0279\u008c\3\2\2\2\u027a\u027b\7U\2\2\u027b"+ + "\u027c\7G\2\2\u027c\u027d\7V\2\2\u027d\u027e\7U\2\2\u027e\u008e\3\2\2"+ + "\2\u027f\u0280\7U\2\2\u0280\u0281\7J\2\2\u0281\u0282\7Q\2\2\u0282\u0283"+ + "\7Y\2\2\u0283\u0090\3\2\2\2\u0284\u0285\7V\2\2\u0285\u0286\7C\2\2\u0286"+ + "\u0287\7D\2\2\u0287\u0288\7N\2\2\u0288\u0289\7G\2\2\u0289\u0092\3\2\2"+ + "\2\u028a\u028b\7V\2\2\u028b\u028c\7C\2\2\u028c\u028d\7D\2\2\u028d\u028e"+ + "\7N\2\2\u028e\u028f\7G\2\2\u028f\u0290\7U\2\2\u0290\u0094\3\2\2\2\u0291"+ + "\u0292\7V\2\2\u0292\u0293\7G\2\2\u0293\u0294\7Z\2\2\u0294\u0295\7V\2\2"+ + "\u0295\u0096\3\2\2\2\u0296\u0297\7V\2\2\u0297\u0298\7J\2\2\u0298\u0299"+ + "\7G\2\2\u0299\u029a\7P\2\2\u029a\u0098\3\2\2\2\u029b\u029c\7V\2\2\u029c"+ + "\u029d\7Q\2\2\u029d\u009a\3\2\2\2\u029e\u029f\7V\2\2\u029f\u02a0\7T\2"+ + "\2\u02a0\u02a1\7W\2\2\u02a1\u02a2\7G\2\2\u02a2\u009c\3\2\2\2\u02a3\u02a4"+ + "\7V\2\2\u02a4\u02a5\7[\2\2\u02a5\u02a6\7R\2\2\u02a6\u02a7\7G\2\2\u02a7"+ + "\u009e\3\2\2\2\u02a8\u02a9\7W\2\2\u02a9\u02aa\7U\2\2\u02aa\u02ab\7G\2"+ + "\2\u02ab\u00a0\3\2\2\2\u02ac\u02ad\7W\2\2\u02ad\u02ae\7U\2\2\u02ae\u02af"+ + "\7K\2\2\u02af\u02b0\7P\2\2\u02b0\u02b1\7I\2\2\u02b1\u00a2\3\2\2\2\u02b2"+ + "\u02b3\7X\2\2\u02b3\u02b4\7G\2\2\u02b4\u02b5\7T\2\2\u02b5\u02b6\7K\2\2"+ + 
"\u02b6\u02b7\7H\2\2\u02b7\u02b8\7[\2\2\u02b8\u00a4\3\2\2\2\u02b9\u02ba"+ + "\7Y\2\2\u02ba\u02bb\7J\2\2\u02bb\u02bc\7G\2\2\u02bc\u02bd\7P\2\2\u02bd"+ + "\u00a6\3\2\2\2\u02be\u02bf\7Y\2\2\u02bf\u02c0\7J\2\2\u02c0\u02c1\7G\2"+ + "\2\u02c1\u02c2\7T\2\2\u02c2\u02c3\7G\2\2\u02c3\u00a8\3\2\2\2\u02c4\u02c5"+ + "\7Y\2\2\u02c5\u02c6\7K\2\2\u02c6\u02c7\7V\2\2\u02c7\u02c8\7J\2\2\u02c8"+ + "\u00aa\3\2\2\2\u02c9\u02ca\7?\2\2\u02ca\u00ac\3\2\2\2\u02cb\u02cc\7>\2"+ + "\2\u02cc\u02d3\7@\2\2\u02cd\u02ce\7#\2\2\u02ce\u02d3\7?\2\2\u02cf\u02d0"+ + "\7>\2\2\u02d0\u02d1\7?\2\2\u02d1\u02d3\7@\2\2\u02d2\u02cb\3\2\2\2\u02d2"+ + "\u02cd\3\2\2\2\u02d2\u02cf\3\2\2\2\u02d3\u00ae\3\2\2\2\u02d4\u02d5\7>"+ + "\2\2\u02d5\u00b0\3\2\2\2\u02d6\u02d7\7>\2\2\u02d7\u02d8\7?\2\2\u02d8\u00b2"+ + "\3\2\2\2\u02d9\u02da\7@\2\2\u02da\u00b4\3\2\2\2\u02db\u02dc\7@\2\2\u02dc"+ + "\u02dd\7?\2\2\u02dd\u00b6\3\2\2\2\u02de\u02df\7-\2\2\u02df\u00b8\3\2\2"+ + "\2\u02e0\u02e1\7/\2\2\u02e1\u00ba\3\2\2\2\u02e2\u02e3\7,\2\2\u02e3\u00bc"+ + "\3\2\2\2\u02e4\u02e5\7\61\2\2\u02e5\u00be\3\2\2\2\u02e6\u02e7\7\'\2\2"+ + "\u02e7\u00c0\3\2\2\2\u02e8\u02e9\7~\2\2\u02e9\u02ea\7~\2\2\u02ea\u00c2"+ + "\3\2\2\2\u02eb\u02f1\7)\2\2\u02ec\u02f0\n\2\2\2\u02ed\u02ee\7)\2\2\u02ee"+ + "\u02f0\7)\2\2\u02ef\u02ec\3\2\2\2\u02ef\u02ed\3\2\2\2\u02f0\u02f3\3\2"+ + "\2\2\u02f1\u02ef\3\2\2\2\u02f1\u02f2\3\2\2\2\u02f2\u02f4\3\2\2\2\u02f3"+ + "\u02f1\3\2\2\2\u02f4\u02f5\7)\2\2\u02f5\u00c4\3\2\2\2\u02f6\u02f8\5\u00d3"+ + "j\2\u02f7\u02f6\3\2\2\2\u02f8\u02f9\3\2\2\2\u02f9\u02f7\3\2\2\2\u02f9"+ + "\u02fa\3\2\2\2\u02fa\u00c6\3\2\2\2\u02fb\u02fd\5\u00d3j\2\u02fc\u02fb"+ + "\3\2\2\2\u02fd\u02fe\3\2\2\2\u02fe\u02fc\3\2\2\2\u02fe\u02ff\3\2\2\2\u02ff"+ + "\u0300\3\2\2\2\u0300\u0304\7\60\2\2\u0301\u0303\5\u00d3j\2\u0302\u0301"+ + "\3\2\2\2\u0303\u0306\3\2\2\2\u0304\u0302\3\2\2\2\u0304\u0305\3\2\2\2\u0305"+ + "\u0326\3\2\2\2\u0306\u0304\3\2\2\2\u0307\u0309\7\60\2\2\u0308\u030a\5"+ + "\u00d3j\2\u0309\u0308\3\2\2\2\u030a\u030b\3\2\2\2\u030b\u0309\3\2\2\2"+ + 
"\u030b\u030c\3\2\2\2\u030c\u0326\3\2\2\2\u030d\u030f\5\u00d3j\2\u030e"+ + "\u030d\3\2\2\2\u030f\u0310\3\2\2\2\u0310\u030e\3\2\2\2\u0310\u0311\3\2"+ + "\2\2\u0311\u0319\3\2\2\2\u0312\u0316\7\60\2\2\u0313\u0315\5\u00d3j\2\u0314"+ + "\u0313\3\2\2\2\u0315\u0318\3\2\2\2\u0316\u0314\3\2\2\2\u0316\u0317\3\2"+ + "\2\2\u0317\u031a\3\2\2\2\u0318\u0316\3\2\2\2\u0319\u0312\3\2\2\2\u0319"+ + "\u031a\3\2\2\2\u031a\u031b\3\2\2\2\u031b\u031c\5\u00d1i\2\u031c\u0326"+ + "\3\2\2\2\u031d\u031f\7\60\2\2\u031e\u0320\5\u00d3j\2\u031f\u031e\3\2\2"+ + "\2\u0320\u0321\3\2\2\2\u0321\u031f\3\2\2\2\u0321\u0322\3\2\2\2\u0322\u0323"+ + "\3\2\2\2\u0323\u0324\5\u00d1i\2\u0324\u0326\3\2\2\2\u0325\u02fc\3\2\2"+ + "\2\u0325\u0307\3\2\2\2\u0325\u030e\3\2\2\2\u0325\u031d\3\2\2\2\u0326\u00c8"+ + "\3\2\2\2\u0327\u032a\5\u00d5k\2\u0328\u032a\7a\2\2\u0329\u0327\3\2\2\2"+ + "\u0329\u0328\3\2\2\2\u032a\u0330\3\2\2\2\u032b\u032f\5\u00d5k\2\u032c"+ + "\u032f\5\u00d3j\2\u032d\u032f\t\3\2\2\u032e\u032b\3\2\2\2\u032e\u032c"+ + "\3\2\2\2\u032e\u032d\3\2\2\2\u032f\u0332\3\2\2\2\u0330\u032e\3\2\2\2\u0330"+ + "\u0331\3\2\2\2\u0331\u00ca\3\2\2\2\u0332\u0330\3\2\2\2\u0333\u0337\5\u00d3"+ + "j\2\u0334\u0338\5\u00d5k\2\u0335\u0338\5\u00d3j\2\u0336\u0338\t\3\2\2"+ + "\u0337\u0334\3\2\2\2\u0337\u0335\3\2\2\2\u0337\u0336\3\2\2\2\u0338\u0339"+ + "\3\2\2\2\u0339\u0337\3\2\2\2\u0339\u033a\3\2\2\2\u033a\u00cc\3\2\2\2\u033b"+ + "\u0341\7$\2\2\u033c\u0340\n\4\2\2\u033d\u033e\7$\2\2\u033e\u0340\7$\2"+ + "\2\u033f\u033c\3\2\2\2\u033f\u033d\3\2\2\2\u0340\u0343\3\2\2\2\u0341\u033f"+ + "\3\2\2\2\u0341\u0342\3\2\2\2\u0342\u0344\3\2\2\2\u0343\u0341\3\2\2\2\u0344"+ + "\u0345\7$\2\2\u0345\u00ce\3\2\2\2\u0346\u034c\7b\2\2\u0347\u034b\n\5\2"+ + "\2\u0348\u0349\7b\2\2\u0349\u034b\7b\2\2\u034a\u0347\3\2\2\2\u034a\u0348"+ + "\3\2\2\2\u034b\u034e\3\2\2\2\u034c\u034a\3\2\2\2\u034c\u034d\3\2\2\2\u034d"+ + "\u034f\3\2\2\2\u034e\u034c\3\2\2\2\u034f\u0350\7b\2\2\u0350\u00d0\3\2"+ + 
"\2\2\u0351\u0353\7G\2\2\u0352\u0354\t\6\2\2\u0353\u0352\3\2\2\2\u0353"+ + "\u0354\3\2\2\2\u0354\u0356\3\2\2\2\u0355\u0357\5\u00d3j\2\u0356\u0355"+ + "\3\2\2\2\u0357\u0358\3\2\2\2\u0358\u0356\3\2\2\2\u0358\u0359\3\2\2\2\u0359"+ + "\u00d2\3\2\2\2\u035a\u035b\t\7\2\2\u035b\u00d4\3\2\2\2\u035c\u035d\t\b"+ + "\2\2\u035d\u00d6\3\2\2\2\u035e\u035f\7/\2\2\u035f\u0360\7/\2\2\u0360\u0364"+ + "\3\2\2\2\u0361\u0363\n\t\2\2\u0362\u0361\3\2\2\2\u0363\u0366\3\2\2\2\u0364"+ + "\u0362\3\2\2\2\u0364\u0365\3\2\2\2\u0365\u0368\3\2\2\2\u0366\u0364\3\2"+ + "\2\2\u0367\u0369\7\17\2\2\u0368\u0367\3\2\2\2\u0368\u0369\3\2\2\2\u0369"+ + "\u036b\3\2\2\2\u036a\u036c\7\f\2\2\u036b\u036a\3\2\2\2\u036b\u036c\3\2"+ + "\2\2\u036c\u036d\3\2\2\2\u036d\u036e\bl\2\2\u036e\u00d8\3\2\2\2\u036f"+ + "\u0370\7\61\2\2\u0370\u0371\7,\2\2\u0371\u0376\3\2\2\2\u0372\u0375\5\u00d9"+ + "m\2\u0373\u0375\13\2\2\2\u0374\u0372\3\2\2\2\u0374\u0373\3\2\2\2\u0375"+ + "\u0378\3\2\2\2\u0376\u0377\3\2\2\2\u0376\u0374\3\2\2\2\u0377\u0379\3\2"+ + "\2\2\u0378\u0376\3\2\2\2\u0379\u037a\7,\2\2\u037a\u037b\7\61\2\2\u037b"+ + "\u037c\3\2\2\2\u037c\u037d\bm\2\2\u037d\u00da\3\2\2\2\u037e\u0380\t\n"+ + "\2\2\u037f\u037e\3\2\2\2\u0380\u0381\3\2\2\2\u0381\u037f\3\2\2\2\u0381"+ + "\u0382\3\2\2\2\u0382\u0383\3\2\2\2\u0383\u0384\bn\2\2\u0384\u00dc\3\2"+ + "\2\2\u0385\u0386\13\2\2\2\u0386\u00de\3\2\2\2 \2\u02d2\u02ef\u02f1\u02f9"+ + "\u02fe\u0304\u030b\u0310\u0316\u0319\u0321\u0325\u0329\u032e\u0330\u0337"+ + "\u0339\u033f\u0341\u034a\u034c\u0353\u0358\u0364\u0368\u036b\u0374\u0376"+ + "\u0381\3\2\3\2"; + public static final ATN _ATN = + new ATNDeserializer().deserialize(_serializedATN.toCharArray()); + static { + _decisionToDFA = new DFA[_ATN.getNumberOfDecisions()]; + for (int i = 0; i < _ATN.getNumberOfDecisions(); i++) { + _decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i); + } + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java 
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java new file mode 100644 index 00000000000..344c5ff3736 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java @@ -0,0 +1,827 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +// ANTLR GENERATED CODE: DO NOT EDIT +package org.elasticsearch.xpack.sql.parser; +import org.antlr.v4.runtime.tree.ParseTreeListener; + +/** + * This interface defines a complete listener for a parse tree produced by + * {@link SqlBaseParser}. + */ +interface SqlBaseListener extends ParseTreeListener { + /** + * Enter a parse tree produced by {@link SqlBaseParser#singleStatement}. + * @param ctx the parse tree + */ + void enterSingleStatement(SqlBaseParser.SingleStatementContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#singleStatement}. + * @param ctx the parse tree + */ + void exitSingleStatement(SqlBaseParser.SingleStatementContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#singleExpression}. + * @param ctx the parse tree + */ + void enterSingleExpression(SqlBaseParser.SingleExpressionContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#singleExpression}. + * @param ctx the parse tree + */ + void exitSingleExpression(SqlBaseParser.SingleExpressionContext ctx); + /** + * Enter a parse tree produced by the {@code statementDefault} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterStatementDefault(SqlBaseParser.StatementDefaultContext ctx); + /** + * Exit a parse tree produced by the {@code statementDefault} + * labeled alternative in {@link SqlBaseParser#statement}. 
+ * @param ctx the parse tree + */ + void exitStatementDefault(SqlBaseParser.StatementDefaultContext ctx); + /** + * Enter a parse tree produced by the {@code explain} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterExplain(SqlBaseParser.ExplainContext ctx); + /** + * Exit a parse tree produced by the {@code explain} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitExplain(SqlBaseParser.ExplainContext ctx); + /** + * Enter a parse tree produced by the {@code debug} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterDebug(SqlBaseParser.DebugContext ctx); + /** + * Exit a parse tree produced by the {@code debug} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitDebug(SqlBaseParser.DebugContext ctx); + /** + * Enter a parse tree produced by the {@code showTables} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterShowTables(SqlBaseParser.ShowTablesContext ctx); + /** + * Exit a parse tree produced by the {@code showTables} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitShowTables(SqlBaseParser.ShowTablesContext ctx); + /** + * Enter a parse tree produced by the {@code showColumns} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterShowColumns(SqlBaseParser.ShowColumnsContext ctx); + /** + * Exit a parse tree produced by the {@code showColumns} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitShowColumns(SqlBaseParser.ShowColumnsContext ctx); + /** + * Enter a parse tree produced by the {@code showFunctions} + * labeled alternative in {@link SqlBaseParser#statement}. 
+ * @param ctx the parse tree + */ + void enterShowFunctions(SqlBaseParser.ShowFunctionsContext ctx); + /** + * Exit a parse tree produced by the {@code showFunctions} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitShowFunctions(SqlBaseParser.ShowFunctionsContext ctx); + /** + * Enter a parse tree produced by the {@code showSchemas} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterShowSchemas(SqlBaseParser.ShowSchemasContext ctx); + /** + * Exit a parse tree produced by the {@code showSchemas} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitShowSchemas(SqlBaseParser.ShowSchemasContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#query}. + * @param ctx the parse tree + */ + void enterQuery(SqlBaseParser.QueryContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#query}. + * @param ctx the parse tree + */ + void exitQuery(SqlBaseParser.QueryContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#queryNoWith}. + * @param ctx the parse tree + */ + void enterQueryNoWith(SqlBaseParser.QueryNoWithContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#queryNoWith}. + * @param ctx the parse tree + */ + void exitQueryNoWith(SqlBaseParser.QueryNoWithContext ctx); + /** + * Enter a parse tree produced by the {@code queryPrimaryDefault} + * labeled alternative in {@link SqlBaseParser#queryTerm}. + * @param ctx the parse tree + */ + void enterQueryPrimaryDefault(SqlBaseParser.QueryPrimaryDefaultContext ctx); + /** + * Exit a parse tree produced by the {@code queryPrimaryDefault} + * labeled alternative in {@link SqlBaseParser#queryTerm}. 
+ * @param ctx the parse tree + */ + void exitQueryPrimaryDefault(SqlBaseParser.QueryPrimaryDefaultContext ctx); + /** + * Enter a parse tree produced by the {@code subquery} + * labeled alternative in {@link SqlBaseParser#queryTerm}. + * @param ctx the parse tree + */ + void enterSubquery(SqlBaseParser.SubqueryContext ctx); + /** + * Exit a parse tree produced by the {@code subquery} + * labeled alternative in {@link SqlBaseParser#queryTerm}. + * @param ctx the parse tree + */ + void exitSubquery(SqlBaseParser.SubqueryContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#orderBy}. + * @param ctx the parse tree + */ + void enterOrderBy(SqlBaseParser.OrderByContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#orderBy}. + * @param ctx the parse tree + */ + void exitOrderBy(SqlBaseParser.OrderByContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#querySpecification}. + * @param ctx the parse tree + */ + void enterQuerySpecification(SqlBaseParser.QuerySpecificationContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#querySpecification}. + * @param ctx the parse tree + */ + void exitQuerySpecification(SqlBaseParser.QuerySpecificationContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#fromClause}. + * @param ctx the parse tree + */ + void enterFromClause(SqlBaseParser.FromClauseContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#fromClause}. + * @param ctx the parse tree + */ + void exitFromClause(SqlBaseParser.FromClauseContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#groupBy}. + * @param ctx the parse tree + */ + void enterGroupBy(SqlBaseParser.GroupByContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#groupBy}. 
+ * @param ctx the parse tree + */ + void exitGroupBy(SqlBaseParser.GroupByContext ctx); + /** + * Enter a parse tree produced by the {@code singleGroupingSet} + * labeled alternative in {@link SqlBaseParser#groupingElement}. + * @param ctx the parse tree + */ + void enterSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext ctx); + /** + * Exit a parse tree produced by the {@code singleGroupingSet} + * labeled alternative in {@link SqlBaseParser#groupingElement}. + * @param ctx the parse tree + */ + void exitSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#groupingExpressions}. + * @param ctx the parse tree + */ + void enterGroupingExpressions(SqlBaseParser.GroupingExpressionsContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#groupingExpressions}. + * @param ctx the parse tree + */ + void exitGroupingExpressions(SqlBaseParser.GroupingExpressionsContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#namedQuery}. + * @param ctx the parse tree + */ + void enterNamedQuery(SqlBaseParser.NamedQueryContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#namedQuery}. + * @param ctx the parse tree + */ + void exitNamedQuery(SqlBaseParser.NamedQueryContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#setQuantifier}. + * @param ctx the parse tree + */ + void enterSetQuantifier(SqlBaseParser.SetQuantifierContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#setQuantifier}. + * @param ctx the parse tree + */ + void exitSetQuantifier(SqlBaseParser.SetQuantifierContext ctx); + /** + * Enter a parse tree produced by the {@code selectExpression} + * labeled alternative in {@link SqlBaseParser#selectItem}. 
+ * @param ctx the parse tree + */ + void enterSelectExpression(SqlBaseParser.SelectExpressionContext ctx); + /** + * Exit a parse tree produced by the {@code selectExpression} + * labeled alternative in {@link SqlBaseParser#selectItem}. + * @param ctx the parse tree + */ + void exitSelectExpression(SqlBaseParser.SelectExpressionContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#relation}. + * @param ctx the parse tree + */ + void enterRelation(SqlBaseParser.RelationContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#relation}. + * @param ctx the parse tree + */ + void exitRelation(SqlBaseParser.RelationContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#joinRelation}. + * @param ctx the parse tree + */ + void enterJoinRelation(SqlBaseParser.JoinRelationContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#joinRelation}. + * @param ctx the parse tree + */ + void exitJoinRelation(SqlBaseParser.JoinRelationContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#joinType}. + * @param ctx the parse tree + */ + void enterJoinType(SqlBaseParser.JoinTypeContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#joinType}. + * @param ctx the parse tree + */ + void exitJoinType(SqlBaseParser.JoinTypeContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#joinCriteria}. + * @param ctx the parse tree + */ + void enterJoinCriteria(SqlBaseParser.JoinCriteriaContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#joinCriteria}. + * @param ctx the parse tree + */ + void exitJoinCriteria(SqlBaseParser.JoinCriteriaContext ctx); + /** + * Enter a parse tree produced by the {@code tableName} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. 
+ * @param ctx the parse tree + */ + void enterTableName(SqlBaseParser.TableNameContext ctx); + /** + * Exit a parse tree produced by the {@code tableName} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + */ + void exitTableName(SqlBaseParser.TableNameContext ctx); + /** + * Enter a parse tree produced by the {@code aliasedQuery} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + */ + void enterAliasedQuery(SqlBaseParser.AliasedQueryContext ctx); + /** + * Exit a parse tree produced by the {@code aliasedQuery} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + */ + void exitAliasedQuery(SqlBaseParser.AliasedQueryContext ctx); + /** + * Enter a parse tree produced by the {@code aliasedRelation} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + */ + void enterAliasedRelation(SqlBaseParser.AliasedRelationContext ctx); + /** + * Exit a parse tree produced by the {@code aliasedRelation} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + */ + void exitAliasedRelation(SqlBaseParser.AliasedRelationContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#expression}. + * @param ctx the parse tree + */ + void enterExpression(SqlBaseParser.ExpressionContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#expression}. + * @param ctx the parse tree + */ + void exitExpression(SqlBaseParser.ExpressionContext ctx); + /** + * Enter a parse tree produced by the {@code logicalNot} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterLogicalNot(SqlBaseParser.LogicalNotContext ctx); + /** + * Exit a parse tree produced by the {@code logicalNot} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. 
+ * @param ctx the parse tree + */ + void exitLogicalNot(SqlBaseParser.LogicalNotContext ctx); + /** + * Enter a parse tree produced by the {@code stringQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterStringQuery(SqlBaseParser.StringQueryContext ctx); + /** + * Exit a parse tree produced by the {@code stringQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void exitStringQuery(SqlBaseParser.StringQueryContext ctx); + /** + * Enter a parse tree produced by the {@code booleanDefault} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterBooleanDefault(SqlBaseParser.BooleanDefaultContext ctx); + /** + * Exit a parse tree produced by the {@code booleanDefault} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void exitBooleanDefault(SqlBaseParser.BooleanDefaultContext ctx); + /** + * Enter a parse tree produced by the {@code exists} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterExists(SqlBaseParser.ExistsContext ctx); + /** + * Exit a parse tree produced by the {@code exists} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void exitExists(SqlBaseParser.ExistsContext ctx); + /** + * Enter a parse tree produced by the {@code multiMatchQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterMultiMatchQuery(SqlBaseParser.MultiMatchQueryContext ctx); + /** + * Exit a parse tree produced by the {@code multiMatchQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. 
+ * @param ctx the parse tree + */ + void exitMultiMatchQuery(SqlBaseParser.MultiMatchQueryContext ctx); + /** + * Enter a parse tree produced by the {@code matchQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterMatchQuery(SqlBaseParser.MatchQueryContext ctx); + /** + * Exit a parse tree produced by the {@code matchQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void exitMatchQuery(SqlBaseParser.MatchQueryContext ctx); + /** + * Enter a parse tree produced by the {@code logicalBinary} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx); + /** + * Exit a parse tree produced by the {@code logicalBinary} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void exitLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#predicated}. + * @param ctx the parse tree + */ + void enterPredicated(SqlBaseParser.PredicatedContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#predicated}. + * @param ctx the parse tree + */ + void exitPredicated(SqlBaseParser.PredicatedContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#predicate}. + * @param ctx the parse tree + */ + void enterPredicate(SqlBaseParser.PredicateContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#predicate}. + * @param ctx the parse tree + */ + void exitPredicate(SqlBaseParser.PredicateContext ctx); + /** + * Enter a parse tree produced by the {@code valueExpressionDefault} + * labeled alternative in {@link SqlBaseParser#valueExpression}. 
+ * @param ctx the parse tree + */ + void enterValueExpressionDefault(SqlBaseParser.ValueExpressionDefaultContext ctx); + /** + * Exit a parse tree produced by the {@code valueExpressionDefault} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void exitValueExpressionDefault(SqlBaseParser.ValueExpressionDefaultContext ctx); + /** + * Enter a parse tree produced by the {@code comparison} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void enterComparison(SqlBaseParser.ComparisonContext ctx); + /** + * Exit a parse tree produced by the {@code comparison} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void exitComparison(SqlBaseParser.ComparisonContext ctx); + /** + * Enter a parse tree produced by the {@code arithmeticBinary} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void enterArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext ctx); + /** + * Exit a parse tree produced by the {@code arithmeticBinary} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void exitArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext ctx); + /** + * Enter a parse tree produced by the {@code arithmeticUnary} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void enterArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext ctx); + /** + * Exit a parse tree produced by the {@code arithmeticUnary} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void exitArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext ctx); + /** + * Enter a parse tree produced by the {@code cast} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. 
+ * @param ctx the parse tree + */ + void enterCast(SqlBaseParser.CastContext ctx); + /** + * Exit a parse tree produced by the {@code cast} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitCast(SqlBaseParser.CastContext ctx); + /** + * Enter a parse tree produced by the {@code extract} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterExtract(SqlBaseParser.ExtractContext ctx); + /** + * Exit a parse tree produced by the {@code extract} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitExtract(SqlBaseParser.ExtractContext ctx); + /** + * Enter a parse tree produced by the {@code constantDefault} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterConstantDefault(SqlBaseParser.ConstantDefaultContext ctx); + /** + * Exit a parse tree produced by the {@code constantDefault} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitConstantDefault(SqlBaseParser.ConstantDefaultContext ctx); + /** + * Enter a parse tree produced by the {@code star} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterStar(SqlBaseParser.StarContext ctx); + /** + * Exit a parse tree produced by the {@code star} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitStar(SqlBaseParser.StarContext ctx); + /** + * Enter a parse tree produced by the {@code functionCall} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterFunctionCall(SqlBaseParser.FunctionCallContext ctx); + /** + * Exit a parse tree produced by the {@code functionCall} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. 
+ * @param ctx the parse tree + */ + void exitFunctionCall(SqlBaseParser.FunctionCallContext ctx); + /** + * Enter a parse tree produced by the {@code subqueryExpression} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterSubqueryExpression(SqlBaseParser.SubqueryExpressionContext ctx); + /** + * Exit a parse tree produced by the {@code subqueryExpression} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitSubqueryExpression(SqlBaseParser.SubqueryExpressionContext ctx); + /** + * Enter a parse tree produced by the {@code columnReference} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterColumnReference(SqlBaseParser.ColumnReferenceContext ctx); + /** + * Exit a parse tree produced by the {@code columnReference} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitColumnReference(SqlBaseParser.ColumnReferenceContext ctx); + /** + * Enter a parse tree produced by the {@code dereference} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterDereference(SqlBaseParser.DereferenceContext ctx); + /** + * Exit a parse tree produced by the {@code dereference} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitDereference(SqlBaseParser.DereferenceContext ctx); + /** + * Enter a parse tree produced by the {@code parenthesizedExpression} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx); + /** + * Exit a parse tree produced by the {@code parenthesizedExpression} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. 
+ * @param ctx the parse tree + */ + void exitParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#columnExpression}. + * @param ctx the parse tree + */ + void enterColumnExpression(SqlBaseParser.ColumnExpressionContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#columnExpression}. + * @param ctx the parse tree + */ + void exitColumnExpression(SqlBaseParser.ColumnExpressionContext ctx); + /** + * Enter a parse tree produced by the {@code nullLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void enterNullLiteral(SqlBaseParser.NullLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code nullLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void exitNullLiteral(SqlBaseParser.NullLiteralContext ctx); + /** + * Enter a parse tree produced by the {@code typeConstructor} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void enterTypeConstructor(SqlBaseParser.TypeConstructorContext ctx); + /** + * Exit a parse tree produced by the {@code typeConstructor} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void exitTypeConstructor(SqlBaseParser.TypeConstructorContext ctx); + /** + * Enter a parse tree produced by the {@code numericLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void enterNumericLiteral(SqlBaseParser.NumericLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code numericLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void exitNumericLiteral(SqlBaseParser.NumericLiteralContext ctx); + /** + * Enter a parse tree produced by the {@code booleanLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. 
+ * @param ctx the parse tree + */ + void enterBooleanLiteral(SqlBaseParser.BooleanLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code booleanLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void exitBooleanLiteral(SqlBaseParser.BooleanLiteralContext ctx); + /** + * Enter a parse tree produced by the {@code stringLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void enterStringLiteral(SqlBaseParser.StringLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code stringLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void exitStringLiteral(SqlBaseParser.StringLiteralContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#comparisonOperator}. + * @param ctx the parse tree + */ + void enterComparisonOperator(SqlBaseParser.ComparisonOperatorContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#comparisonOperator}. + * @param ctx the parse tree + */ + void exitComparisonOperator(SqlBaseParser.ComparisonOperatorContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#booleanValue}. + * @param ctx the parse tree + */ + void enterBooleanValue(SqlBaseParser.BooleanValueContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#booleanValue}. + * @param ctx the parse tree + */ + void exitBooleanValue(SqlBaseParser.BooleanValueContext ctx); + /** + * Enter a parse tree produced by the {@code primitiveDataType} + * labeled alternative in {@link SqlBaseParser#dataType}. + * @param ctx the parse tree + */ + void enterPrimitiveDataType(SqlBaseParser.PrimitiveDataTypeContext ctx); + /** + * Exit a parse tree produced by the {@code primitiveDataType} + * labeled alternative in {@link SqlBaseParser#dataType}. 
+ * @param ctx the parse tree + */ + void exitPrimitiveDataType(SqlBaseParser.PrimitiveDataTypeContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#whenClause}. + * @param ctx the parse tree + */ + void enterWhenClause(SqlBaseParser.WhenClauseContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#whenClause}. + * @param ctx the parse tree + */ + void exitWhenClause(SqlBaseParser.WhenClauseContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#qualifiedName}. + * @param ctx the parse tree + */ + void enterQualifiedName(SqlBaseParser.QualifiedNameContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#qualifiedName}. + * @param ctx the parse tree + */ + void exitQualifiedName(SqlBaseParser.QualifiedNameContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#tableIdentifier}. + * @param ctx the parse tree + */ + void enterTableIdentifier(SqlBaseParser.TableIdentifierContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#tableIdentifier}. + * @param ctx the parse tree + */ + void exitTableIdentifier(SqlBaseParser.TableIdentifierContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#identifier}. + * @param ctx the parse tree + */ + void enterIdentifier(SqlBaseParser.IdentifierContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#identifier}. + * @param ctx the parse tree + */ + void exitIdentifier(SqlBaseParser.IdentifierContext ctx); + /** + * Enter a parse tree produced by the {@code quotedIdentifier} + * labeled alternative in {@link SqlBaseParser#quoteIdentifier}. + * @param ctx the parse tree + */ + void enterQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext ctx); + /** + * Exit a parse tree produced by the {@code quotedIdentifier} + * labeled alternative in {@link SqlBaseParser#quoteIdentifier}. 
+ * @param ctx the parse tree + */ + void exitQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext ctx); + /** + * Enter a parse tree produced by the {@code backQuotedIdentifier} + * labeled alternative in {@link SqlBaseParser#quoteIdentifier}. + * @param ctx the parse tree + */ + void enterBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext ctx); + /** + * Exit a parse tree produced by the {@code backQuotedIdentifier} + * labeled alternative in {@link SqlBaseParser#quoteIdentifier}. + * @param ctx the parse tree + */ + void exitBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext ctx); + /** + * Enter a parse tree produced by the {@code unquotedIdentifier} + * labeled alternative in {@link SqlBaseParser#unquoteIdentifier}. + * @param ctx the parse tree + */ + void enterUnquotedIdentifier(SqlBaseParser.UnquotedIdentifierContext ctx); + /** + * Exit a parse tree produced by the {@code unquotedIdentifier} + * labeled alternative in {@link SqlBaseParser#unquoteIdentifier}. + * @param ctx the parse tree + */ + void exitUnquotedIdentifier(SqlBaseParser.UnquotedIdentifierContext ctx); + /** + * Enter a parse tree produced by the {@code digitIdentifier} + * labeled alternative in {@link SqlBaseParser#unquoteIdentifier}. + * @param ctx the parse tree + */ + void enterDigitIdentifier(SqlBaseParser.DigitIdentifierContext ctx); + /** + * Exit a parse tree produced by the {@code digitIdentifier} + * labeled alternative in {@link SqlBaseParser#unquoteIdentifier}. + * @param ctx the parse tree + */ + void exitDigitIdentifier(SqlBaseParser.DigitIdentifierContext ctx); + /** + * Enter a parse tree produced by the {@code decimalLiteral} + * labeled alternative in {@link SqlBaseParser#number}. + * @param ctx the parse tree + */ + void enterDecimalLiteral(SqlBaseParser.DecimalLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code decimalLiteral} + * labeled alternative in {@link SqlBaseParser#number}. 
+ * @param ctx the parse tree + */ + void exitDecimalLiteral(SqlBaseParser.DecimalLiteralContext ctx); + /** + * Enter a parse tree produced by the {@code integerLiteral} + * labeled alternative in {@link SqlBaseParser#number}. + * @param ctx the parse tree + */ + void enterIntegerLiteral(SqlBaseParser.IntegerLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code integerLiteral} + * labeled alternative in {@link SqlBaseParser#number}. + * @param ctx the parse tree + */ + void exitIntegerLiteral(SqlBaseParser.IntegerLiteralContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#nonReserved}. + * @param ctx the parse tree + */ + void enterNonReserved(SqlBaseParser.NonReservedContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#nonReserved}. + * @param ctx the parse tree + */ + void exitNonReserved(SqlBaseParser.NonReservedContext ctx); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java new file mode 100644 index 00000000000..6874b32c9e7 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java @@ -0,0 +1,4936 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +// ANTLR GENERATED CODE: DO NOT EDIT +package org.elasticsearch.xpack.sql.parser; +import org.antlr.v4.runtime.atn.*; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.misc.*; +import org.antlr.v4.runtime.tree.*; +import java.util.List; +import java.util.Iterator; +import java.util.ArrayList; + +@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) +class SqlBaseParser extends Parser { + static { RuntimeMetaData.checkVersion("4.5.3", RuntimeMetaData.VERSION); } + + protected static final DFA[] _decisionToDFA; + protected static final PredictionContextCache _sharedContextCache = + new PredictionContextCache(); + public static final int + T__0=1, T__1=2, T__2=3, T__3=4, ALL=5, ANALYZE=6, ANALYZED=7, AND=8, ANY=9, + AS=10, ASC=11, BETWEEN=12, BY=13, CAST=14, COLUMN=15, COLUMNS=16, CROSS=17, + DEBUG=18, DESC=19, DESCRIBE=20, DISTINCT=21, EXECUTABLE=22, EXISTS=23, + EXPLAIN=24, EXTRACT=25, FALSE=26, FOR=27, FORMAT=28, FROM=29, FULL=30, + FUNCTIONS=31, GRAPHVIZ=32, GROUP=33, GROUPING=34, HAVING=35, IN=36, INNER=37, + INTEGER=38, INTO=39, IS=40, JOIN=41, LAST=42, LEFT=43, LIKE=44, LIMIT=45, + LOGICAL=46, MAPPED=47, MATCH=48, NATURAL=49, NO=50, NOT=51, NULL=52, ON=53, + OPTIMIZED=54, OPTION=55, OR=56, ORDER=57, OUTER=58, PARSED=59, PHYSICAL=60, + PLAN=61, QUERY=62, RESET=63, RIGHT=64, RLIKE=65, SCHEMAS=66, SELECT=67, + SESSION=68, SET=69, SETS=70, SHOW=71, TABLE=72, TABLES=73, TEXT=74, THEN=75, + TO=76, TRUE=77, TYPE=78, USE=79, USING=80, VERIFY=81, WHEN=82, WHERE=83, + WITH=84, EQ=85, NEQ=86, LT=87, LTE=88, GT=89, GTE=90, PLUS=91, MINUS=92, + ASTERISK=93, SLASH=94, PERCENT=95, CONCAT=96, STRING=97, INTEGER_VALUE=98, + DECIMAL_VALUE=99, IDENTIFIER=100, DIGIT_IDENTIFIER=101, QUOTED_IDENTIFIER=102, + BACKQUOTED_IDENTIFIER=103, SIMPLE_COMMENT=104, BRACKETED_COMMENT=105, + WS=106, UNRECOGNIZED=107, DELIMITER=108; + public static final int + RULE_singleStatement = 0, RULE_singleExpression = 1, RULE_statement 
= 2, + RULE_query = 3, RULE_queryNoWith = 4, RULE_queryTerm = 5, RULE_orderBy = 6, + RULE_querySpecification = 7, RULE_fromClause = 8, RULE_groupBy = 9, RULE_groupingElement = 10, + RULE_groupingExpressions = 11, RULE_namedQuery = 12, RULE_setQuantifier = 13, + RULE_selectItem = 14, RULE_relation = 15, RULE_joinRelation = 16, RULE_joinType = 17, + RULE_joinCriteria = 18, RULE_relationPrimary = 19, RULE_expression = 20, + RULE_booleanExpression = 21, RULE_predicated = 22, RULE_predicate = 23, + RULE_valueExpression = 24, RULE_primaryExpression = 25, RULE_columnExpression = 26, + RULE_constant = 27, RULE_comparisonOperator = 28, RULE_booleanValue = 29, + RULE_dataType = 30, RULE_whenClause = 31, RULE_qualifiedName = 32, RULE_tableIdentifier = 33, + RULE_identifier = 34, RULE_quoteIdentifier = 35, RULE_unquoteIdentifier = 36, + RULE_number = 37, RULE_nonReserved = 38; + public static final String[] ruleNames = { + "singleStatement", "singleExpression", "statement", "query", "queryNoWith", + "queryTerm", "orderBy", "querySpecification", "fromClause", "groupBy", + "groupingElement", "groupingExpressions", "namedQuery", "setQuantifier", + "selectItem", "relation", "joinRelation", "joinType", "joinCriteria", + "relationPrimary", "expression", "booleanExpression", "predicated", "predicate", + "valueExpression", "primaryExpression", "columnExpression", "constant", + "comparisonOperator", "booleanValue", "dataType", "whenClause", "qualifiedName", + "tableIdentifier", "identifier", "quoteIdentifier", "unquoteIdentifier", + "number", "nonReserved" + }; + + private static final String[] _LITERAL_NAMES = { + null, "'('", "')'", "','", "'.'", "'ALL'", "'ANALYZE'", "'ANALYZED'", + "'AND'", "'ANY'", "'AS'", "'ASC'", "'BETWEEN'", "'BY'", "'CAST'", "'COLUMN'", + "'COLUMNS'", "'CROSS'", "'DEBUG'", "'DESC'", "'DESCRIBE'", "'DISTINCT'", + "'EXECUTABLE'", "'EXISTS'", "'EXPLAIN'", "'EXTRACT'", "'FALSE'", "'FOR'", + "'FORMAT'", "'FROM'", "'FULL'", "'FUNCTIONS'", "'GRAPHVIZ'", "'GROUP'", + 
"'GROUPING'", "'HAVING'", "'IN'", "'INNER'", "'INTEGER'", "'INTO'", "'IS'", + "'JOIN'", "'LAST'", "'LEFT'", "'LIKE'", "'LIMIT'", "'LOGICAL'", "'MAPPED'", + "'MATCH'", "'NATURAL'", "'NO'", "'NOT'", "'NULL'", "'ON'", "'OPTIMIZED'", + "'OPTION'", "'OR'", "'ORDER'", "'OUTER'", "'PARSED'", "'PHYSICAL'", "'PLAN'", + "'QUERY'", "'RESET'", "'RIGHT'", "'RLIKE'", "'SCHEMAS'", "'SELECT'", "'SESSION'", + "'SET'", "'SETS'", "'SHOW'", "'TABLE'", "'TABLES'", "'TEXT'", "'THEN'", + "'TO'", "'TRUE'", "'TYPE'", "'USE'", "'USING'", "'VERIFY'", "'WHEN'", + "'WHERE'", "'WITH'", "'='", null, "'<'", "'<='", "'>'", "'>='", "'+'", + "'-'", "'*'", "'/'", "'%'", "'||'" + }; + private static final String[] _SYMBOLIC_NAMES = { + null, null, null, null, null, "ALL", "ANALYZE", "ANALYZED", "AND", "ANY", + "AS", "ASC", "BETWEEN", "BY", "CAST", "COLUMN", "COLUMNS", "CROSS", "DEBUG", + "DESC", "DESCRIBE", "DISTINCT", "EXECUTABLE", "EXISTS", "EXPLAIN", "EXTRACT", + "FALSE", "FOR", "FORMAT", "FROM", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP", + "GROUPING", "HAVING", "IN", "INNER", "INTEGER", "INTO", "IS", "JOIN", + "LAST", "LEFT", "LIKE", "LIMIT", "LOGICAL", "MAPPED", "MATCH", "NATURAL", + "NO", "NOT", "NULL", "ON", "OPTIMIZED", "OPTION", "OR", "ORDER", "OUTER", + "PARSED", "PHYSICAL", "PLAN", "QUERY", "RESET", "RIGHT", "RLIKE", "SCHEMAS", + "SELECT", "SESSION", "SET", "SETS", "SHOW", "TABLE", "TABLES", "TEXT", + "THEN", "TO", "TRUE", "TYPE", "USE", "USING", "VERIFY", "WHEN", "WHERE", + "WITH", "EQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", + "SLASH", "PERCENT", "CONCAT", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", + "IDENTIFIER", "DIGIT_IDENTIFIER", "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", + "SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", "UNRECOGNIZED", "DELIMITER" + }; + public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); + + /** + * @deprecated Use {@link #VOCABULARY} instead. 
+ */ + @Deprecated + public static final String[] tokenNames; + static { + tokenNames = new String[_SYMBOLIC_NAMES.length]; + for (int i = 0; i < tokenNames.length; i++) { + tokenNames[i] = VOCABULARY.getLiteralName(i); + if (tokenNames[i] == null) { + tokenNames[i] = VOCABULARY.getSymbolicName(i); + } + + if (tokenNames[i] == null) { + tokenNames[i] = ""; + } + } + } + + @Override + @Deprecated + public String[] getTokenNames() { + return tokenNames; + } + + @Override + + public Vocabulary getVocabulary() { + return VOCABULARY; + } + + @Override + public String getGrammarFileName() { return "SqlBase.g4"; } + + @Override + public String[] getRuleNames() { return ruleNames; } + + @Override + public String getSerializedATN() { return _serializedATN; } + + @Override + public ATN getATN() { return _ATN; } + + public SqlBaseParser(TokenStream input) { + super(input); + _interp = new ParserATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache); + } + public static class SingleStatementContext extends ParserRuleContext { + public StatementContext statement() { + return getRuleContext(StatementContext.class,0); + } + public TerminalNode EOF() { return getToken(SqlBaseParser.EOF, 0); } + public SingleStatementContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_singleStatement; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSingleStatement(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSingleStatement(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSingleStatement(this); + else return visitor.visitChildren(this); + } + } + + public final SingleStatementContext singleStatement() 
throws RecognitionException { + SingleStatementContext _localctx = new SingleStatementContext(_ctx, getState()); + enterRule(_localctx, 0, RULE_singleStatement); + try { + enterOuterAlt(_localctx, 1); + { + setState(78); + statement(); + setState(79); + match(EOF); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class SingleExpressionContext extends ParserRuleContext { + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode EOF() { return getToken(SqlBaseParser.EOF, 0); } + public SingleExpressionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_singleExpression; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSingleExpression(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSingleExpression(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSingleExpression(this); + else return visitor.visitChildren(this); + } + } + + public final SingleExpressionContext singleExpression() throws RecognitionException { + SingleExpressionContext _localctx = new SingleExpressionContext(_ctx, getState()); + enterRule(_localctx, 2, RULE_singleExpression); + try { + enterOuterAlt(_localctx, 1); + { + setState(81); + expression(); + setState(82); + match(EOF); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public 
static class StatementContext extends ParserRuleContext { + public StatementContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_statement; } + + public StatementContext() { } + public void copyFrom(StatementContext ctx) { + super.copyFrom(ctx); + } + } + public static class ExplainContext extends StatementContext { + public Token type; + public Token format; + public BooleanValueContext verify; + public TerminalNode EXPLAIN() { return getToken(SqlBaseParser.EXPLAIN, 0); } + public StatementContext statement() { + return getRuleContext(StatementContext.class,0); + } + public List PLAN() { return getTokens(SqlBaseParser.PLAN); } + public TerminalNode PLAN(int i) { + return getToken(SqlBaseParser.PLAN, i); + } + public List FORMAT() { return getTokens(SqlBaseParser.FORMAT); } + public TerminalNode FORMAT(int i) { + return getToken(SqlBaseParser.FORMAT, i); + } + public List VERIFY() { return getTokens(SqlBaseParser.VERIFY); } + public TerminalNode VERIFY(int i) { + return getToken(SqlBaseParser.VERIFY, i); + } + public List booleanValue() { + return getRuleContexts(BooleanValueContext.class); + } + public BooleanValueContext booleanValue(int i) { + return getRuleContext(BooleanValueContext.class,i); + } + public List PARSED() { return getTokens(SqlBaseParser.PARSED); } + public TerminalNode PARSED(int i) { + return getToken(SqlBaseParser.PARSED, i); + } + public List ANALYZED() { return getTokens(SqlBaseParser.ANALYZED); } + public TerminalNode ANALYZED(int i) { + return getToken(SqlBaseParser.ANALYZED, i); + } + public List OPTIMIZED() { return getTokens(SqlBaseParser.OPTIMIZED); } + public TerminalNode OPTIMIZED(int i) { + return getToken(SqlBaseParser.OPTIMIZED, i); + } + public List MAPPED() { return getTokens(SqlBaseParser.MAPPED); } + public TerminalNode MAPPED(int i) { + return getToken(SqlBaseParser.MAPPED, i); + } + public List EXECUTABLE() { return 
getTokens(SqlBaseParser.EXECUTABLE); } + public TerminalNode EXECUTABLE(int i) { + return getToken(SqlBaseParser.EXECUTABLE, i); + } + public List ALL() { return getTokens(SqlBaseParser.ALL); } + public TerminalNode ALL(int i) { + return getToken(SqlBaseParser.ALL, i); + } + public List TEXT() { return getTokens(SqlBaseParser.TEXT); } + public TerminalNode TEXT(int i) { + return getToken(SqlBaseParser.TEXT, i); + } + public List GRAPHVIZ() { return getTokens(SqlBaseParser.GRAPHVIZ); } + public TerminalNode GRAPHVIZ(int i) { + return getToken(SqlBaseParser.GRAPHVIZ, i); + } + public ExplainContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterExplain(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitExplain(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitExplain(this); + else return visitor.visitChildren(this); + } + } + public static class DebugContext extends StatementContext { + public Token type; + public Token format; + public TerminalNode DEBUG() { return getToken(SqlBaseParser.DEBUG, 0); } + public StatementContext statement() { + return getRuleContext(StatementContext.class,0); + } + public List PLAN() { return getTokens(SqlBaseParser.PLAN); } + public TerminalNode PLAN(int i) { + return getToken(SqlBaseParser.PLAN, i); + } + public List FORMAT() { return getTokens(SqlBaseParser.FORMAT); } + public TerminalNode FORMAT(int i) { + return getToken(SqlBaseParser.FORMAT, i); + } + public List ANALYZED() { return getTokens(SqlBaseParser.ANALYZED); } + public TerminalNode ANALYZED(int i) { + return getToken(SqlBaseParser.ANALYZED, i); + } + public List OPTIMIZED() { return getTokens(SqlBaseParser.OPTIMIZED); } + public 
TerminalNode OPTIMIZED(int i) { + return getToken(SqlBaseParser.OPTIMIZED, i); + } + public List TEXT() { return getTokens(SqlBaseParser.TEXT); } + public TerminalNode TEXT(int i) { + return getToken(SqlBaseParser.TEXT, i); + } + public List GRAPHVIZ() { return getTokens(SqlBaseParser.GRAPHVIZ); } + public TerminalNode GRAPHVIZ(int i) { + return getToken(SqlBaseParser.GRAPHVIZ, i); + } + public DebugContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterDebug(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitDebug(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitDebug(this); + else return visitor.visitChildren(this); + } + } + public static class StatementDefaultContext extends StatementContext { + public QueryContext query() { + return getRuleContext(QueryContext.class,0); + } + public StatementDefaultContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterStatementDefault(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitStatementDefault(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitStatementDefault(this); + else return visitor.visitChildren(this); + } + } + public static class ShowFunctionsContext extends StatementContext { + public Token pattern; + public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } + public TerminalNode FUNCTIONS() { return 
getToken(SqlBaseParser.FUNCTIONS, 0); } + public TerminalNode STRING() { return getToken(SqlBaseParser.STRING, 0); } + public TerminalNode LIKE() { return getToken(SqlBaseParser.LIKE, 0); } + public ShowFunctionsContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterShowFunctions(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitShowFunctions(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitShowFunctions(this); + else return visitor.visitChildren(this); + } + } + public static class ShowTablesContext extends StatementContext { + public Token pattern; + public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } + public TerminalNode TABLES() { return getToken(SqlBaseParser.TABLES, 0); } + public TerminalNode STRING() { return getToken(SqlBaseParser.STRING, 0); } + public TerminalNode LIKE() { return getToken(SqlBaseParser.LIKE, 0); } + public ShowTablesContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterShowTables(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitShowTables(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitShowTables(this); + else return visitor.visitChildren(this); + } + } + public static class ShowSchemasContext extends StatementContext { + public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } + public TerminalNode SCHEMAS() { return 
getToken(SqlBaseParser.SCHEMAS, 0); } + public ShowSchemasContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterShowSchemas(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitShowSchemas(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitShowSchemas(this); + else return visitor.visitChildren(this); + } + } + public static class ShowColumnsContext extends StatementContext { + public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } + public TerminalNode COLUMNS() { return getToken(SqlBaseParser.COLUMNS, 0); } + public TableIdentifierContext tableIdentifier() { + return getRuleContext(TableIdentifierContext.class,0); + } + public TerminalNode FROM() { return getToken(SqlBaseParser.FROM, 0); } + public TerminalNode IN() { return getToken(SqlBaseParser.IN, 0); } + public TerminalNode DESCRIBE() { return getToken(SqlBaseParser.DESCRIBE, 0); } + public TerminalNode DESC() { return getToken(SqlBaseParser.DESC, 0); } + public ShowColumnsContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterShowColumns(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitShowColumns(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitShowColumns(this); + else return visitor.visitChildren(this); + } + } + + public final StatementContext statement() throws RecognitionException { + StatementContext _localctx = new 
StatementContext(_ctx, getState()); + enterRule(_localctx, 4, RULE_statement); + int _la; + try { + setState(141); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,10,_ctx) ) { + case 1: + _localctx = new StatementDefaultContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(84); + query(); + } + break; + case 2: + _localctx = new ExplainContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(85); + match(EXPLAIN); + setState(99); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,2,_ctx) ) { + case 1: + { + setState(86); + match(T__0); + setState(95); + _errHandler.sync(this); + _la = _input.LA(1); + while (((((_la - 28)) & ~0x3f) == 0 && ((1L << (_la - 28)) & ((1L << (FORMAT - 28)) | (1L << (PLAN - 28)) | (1L << (VERIFY - 28)))) != 0)) { + { + setState(93); + switch (_input.LA(1)) { + case PLAN: + { + setState(87); + match(PLAN); + setState(88); + ((ExplainContext)_localctx).type = _input.LT(1); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ALL) | (1L << ANALYZED) | (1L << EXECUTABLE) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED))) != 0)) ) { + ((ExplainContext)_localctx).type = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + } + break; + case FORMAT: + { + setState(89); + match(FORMAT); + setState(90); + ((ExplainContext)_localctx).format = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==GRAPHVIZ || _la==TEXT) ) { + ((ExplainContext)_localctx).format = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + } + break; + case VERIFY: + { + setState(91); + match(VERIFY); + setState(92); + ((ExplainContext)_localctx).verify = booleanValue(); + } + break; + default: + throw new NoViableAltException(this); + } + } + setState(97); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(98); + match(T__1); + } + break; + } + setState(101); + statement(); + } + break; + case 3: + _localctx = new 
DebugContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(102); + match(DEBUG); + setState(114); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { + case 1: + { + setState(103); + match(T__0); + setState(110); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==FORMAT || _la==PLAN) { + { + setState(108); + switch (_input.LA(1)) { + case PLAN: + { + setState(104); + match(PLAN); + setState(105); + ((DebugContext)_localctx).type = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==ANALYZED || _la==OPTIMIZED) ) { + ((DebugContext)_localctx).type = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + } + break; + case FORMAT: + { + setState(106); + match(FORMAT); + setState(107); + ((DebugContext)_localctx).format = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==GRAPHVIZ || _la==TEXT) ) { + ((DebugContext)_localctx).format = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + } + break; + default: + throw new NoViableAltException(this); + } + } + setState(112); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(113); + match(T__1); + } + break; + } + setState(116); + statement(); + } + break; + case 4: + _localctx = new ShowTablesContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(117); + match(SHOW); + setState(118); + match(TABLES); + setState(123); + _la = _input.LA(1); + if (_la==LIKE || _la==STRING) { + { + setState(120); + _la = _input.LA(1); + if (_la==LIKE) { + { + setState(119); + match(LIKE); + } + } + + setState(122); + ((ShowTablesContext)_localctx).pattern = match(STRING); + } + } + + } + break; + case 5: + _localctx = new ShowColumnsContext(_localctx); + enterOuterAlt(_localctx, 5); + { + setState(125); + match(SHOW); + setState(126); + match(COLUMNS); + setState(127); + _la = _input.LA(1); + if ( !(_la==FROM || _la==IN) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(128); + tableIdentifier(); + } + 
break; + case 6: + _localctx = new ShowColumnsContext(_localctx); + enterOuterAlt(_localctx, 6); + { + setState(129); + _la = _input.LA(1); + if ( !(_la==DESC || _la==DESCRIBE) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(130); + tableIdentifier(); + } + break; + case 7: + _localctx = new ShowFunctionsContext(_localctx); + enterOuterAlt(_localctx, 7); + { + setState(131); + match(SHOW); + setState(132); + match(FUNCTIONS); + setState(137); + _la = _input.LA(1); + if (_la==LIKE || _la==STRING) { + { + setState(134); + _la = _input.LA(1); + if (_la==LIKE) { + { + setState(133); + match(LIKE); + } + } + + setState(136); + ((ShowFunctionsContext)_localctx).pattern = match(STRING); + } + } + + } + break; + case 8: + _localctx = new ShowSchemasContext(_localctx); + enterOuterAlt(_localctx, 8); + { + setState(139); + match(SHOW); + setState(140); + match(SCHEMAS); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class QueryContext extends ParserRuleContext { + public QueryNoWithContext queryNoWith() { + return getRuleContext(QueryNoWithContext.class,0); + } + public TerminalNode WITH() { return getToken(SqlBaseParser.WITH, 0); } + public List namedQuery() { + return getRuleContexts(NamedQueryContext.class); + } + public NamedQueryContext namedQuery(int i) { + return getRuleContext(NamedQueryContext.class,i); + } + public QueryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_query; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) 
((SqlBaseListener)listener).exitQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitQuery(this); + else return visitor.visitChildren(this); + } + } + + public final QueryContext query() throws RecognitionException { + QueryContext _localctx = new QueryContext(_ctx, getState()); + enterRule(_localctx, 6, RULE_query); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(152); + _la = _input.LA(1); + if (_la==WITH) { + { + setState(143); + match(WITH); + setState(144); + namedQuery(); + setState(149); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(145); + match(T__2); + setState(146); + namedQuery(); + } + } + setState(151); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + + setState(154); + queryNoWith(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class QueryNoWithContext extends ParserRuleContext { + public Token limit; + public QueryTermContext queryTerm() { + return getRuleContext(QueryTermContext.class,0); + } + public TerminalNode ORDER() { return getToken(SqlBaseParser.ORDER, 0); } + public TerminalNode BY() { return getToken(SqlBaseParser.BY, 0); } + public List orderBy() { + return getRuleContexts(OrderByContext.class); + } + public OrderByContext orderBy(int i) { + return getRuleContext(OrderByContext.class,i); + } + public TerminalNode LIMIT() { return getToken(SqlBaseParser.LIMIT, 0); } + public TerminalNode INTEGER_VALUE() { return getToken(SqlBaseParser.INTEGER_VALUE, 0); } + public TerminalNode ALL() { return getToken(SqlBaseParser.ALL, 0); } + public QueryNoWithContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return 
RULE_queryNoWith; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterQueryNoWith(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitQueryNoWith(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitQueryNoWith(this); + else return visitor.visitChildren(this); + } + } + + public final QueryNoWithContext queryNoWith() throws RecognitionException { + QueryNoWithContext _localctx = new QueryNoWithContext(_ctx, getState()); + enterRule(_localctx, 8, RULE_queryNoWith); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(156); + queryTerm(); + setState(167); + _la = _input.LA(1); + if (_la==ORDER) { + { + setState(157); + match(ORDER); + setState(158); + match(BY); + setState(159); + orderBy(); + setState(164); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(160); + match(T__2); + setState(161); + orderBy(); + } + } + setState(166); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + + setState(171); + _la = _input.LA(1); + if (_la==LIMIT) { + { + setState(169); + match(LIMIT); + setState(170); + ((QueryNoWithContext)_localctx).limit = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==ALL || _la==INTEGER_VALUE) ) { + ((QueryNoWithContext)_localctx).limit = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + } + } + + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class QueryTermContext extends ParserRuleContext { + public QueryTermContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public 
int getRuleIndex() { return RULE_queryTerm; } + + public QueryTermContext() { } + public void copyFrom(QueryTermContext ctx) { + super.copyFrom(ctx); + } + } + public static class SubqueryContext extends QueryTermContext { + public QueryNoWithContext queryNoWith() { + return getRuleContext(QueryNoWithContext.class,0); + } + public SubqueryContext(QueryTermContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSubquery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSubquery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSubquery(this); + else return visitor.visitChildren(this); + } + } + public static class QueryPrimaryDefaultContext extends QueryTermContext { + public QuerySpecificationContext querySpecification() { + return getRuleContext(QuerySpecificationContext.class,0); + } + public QueryPrimaryDefaultContext(QueryTermContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterQueryPrimaryDefault(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitQueryPrimaryDefault(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitQueryPrimaryDefault(this); + else return visitor.visitChildren(this); + } + } + + public final QueryTermContext queryTerm() throws RecognitionException { + QueryTermContext _localctx = new QueryTermContext(_ctx, getState()); + enterRule(_localctx, 10, RULE_queryTerm); + try { + setState(178); + switch 
(_input.LA(1)) { + case SELECT: + _localctx = new QueryPrimaryDefaultContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(173); + querySpecification(); + } + break; + case T__0: + _localctx = new SubqueryContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(174); + match(T__0); + setState(175); + queryNoWith(); + setState(176); + match(T__1); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class OrderByContext extends ParserRuleContext { + public Token ordering; + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode ASC() { return getToken(SqlBaseParser.ASC, 0); } + public TerminalNode DESC() { return getToken(SqlBaseParser.DESC, 0); } + public OrderByContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_orderBy; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterOrderBy(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitOrderBy(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitOrderBy(this); + else return visitor.visitChildren(this); + } + } + + public final OrderByContext orderBy() throws RecognitionException { + OrderByContext _localctx = new OrderByContext(_ctx, getState()); + enterRule(_localctx, 12, RULE_orderBy); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(180); + expression(); + setState(182); + _la = _input.LA(1); + if (_la==ASC || 
_la==DESC) { + { + setState(181); + ((OrderByContext)_localctx).ordering = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==ASC || _la==DESC) ) { + ((OrderByContext)_localctx).ordering = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + } + } + + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class QuerySpecificationContext extends ParserRuleContext { + public BooleanExpressionContext where; + public BooleanExpressionContext having; + public TerminalNode SELECT() { return getToken(SqlBaseParser.SELECT, 0); } + public List selectItem() { + return getRuleContexts(SelectItemContext.class); + } + public SelectItemContext selectItem(int i) { + return getRuleContext(SelectItemContext.class,i); + } + public SetQuantifierContext setQuantifier() { + return getRuleContext(SetQuantifierContext.class,0); + } + public FromClauseContext fromClause() { + return getRuleContext(FromClauseContext.class,0); + } + public TerminalNode WHERE() { return getToken(SqlBaseParser.WHERE, 0); } + public TerminalNode GROUP() { return getToken(SqlBaseParser.GROUP, 0); } + public TerminalNode BY() { return getToken(SqlBaseParser.BY, 0); } + public GroupByContext groupBy() { + return getRuleContext(GroupByContext.class,0); + } + public TerminalNode HAVING() { return getToken(SqlBaseParser.HAVING, 0); } + public List booleanExpression() { + return getRuleContexts(BooleanExpressionContext.class); + } + public BooleanExpressionContext booleanExpression(int i) { + return getRuleContext(BooleanExpressionContext.class,i); + } + public QuerySpecificationContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_querySpecification; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof 
SqlBaseListener ) ((SqlBaseListener)listener).enterQuerySpecification(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitQuerySpecification(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitQuerySpecification(this); + else return visitor.visitChildren(this); + } + } + + public final QuerySpecificationContext querySpecification() throws RecognitionException { + QuerySpecificationContext _localctx = new QuerySpecificationContext(_ctx, getState()); + enterRule(_localctx, 14, RULE_querySpecification); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(184); + match(SELECT); + setState(186); + _la = _input.LA(1); + if (_la==ALL || _la==DISTINCT) { + { + setState(185); + setQuantifier(); + } + } + + setState(188); + selectItem(); + setState(193); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(189); + match(T__2); + setState(190); + selectItem(); + } + } + setState(195); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(197); + _la = _input.LA(1); + if (_la==FROM) { + { + setState(196); + fromClause(); + } + } + + setState(201); + _la = _input.LA(1); + if (_la==WHERE) { + { + setState(199); + match(WHERE); + setState(200); + ((QuerySpecificationContext)_localctx).where = booleanExpression(0); + } + } + + setState(206); + _la = _input.LA(1); + if (_la==GROUP) { + { + setState(203); + match(GROUP); + setState(204); + match(BY); + setState(205); + groupBy(); + } + } + + setState(210); + _la = _input.LA(1); + if (_la==HAVING) { + { + setState(208); + match(HAVING); + setState(209); + ((QuerySpecificationContext)_localctx).having = booleanExpression(0); + } + } + + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + 
finally { + exitRule(); + } + return _localctx; + } + + public static class FromClauseContext extends ParserRuleContext { + public TerminalNode FROM() { return getToken(SqlBaseParser.FROM, 0); } + public List relation() { + return getRuleContexts(RelationContext.class); + } + public RelationContext relation(int i) { + return getRuleContext(RelationContext.class,i); + } + public FromClauseContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_fromClause; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterFromClause(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitFromClause(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitFromClause(this); + else return visitor.visitChildren(this); + } + } + + public final FromClauseContext fromClause() throws RecognitionException { + FromClauseContext _localctx = new FromClauseContext(_ctx, getState()); + enterRule(_localctx, 16, RULE_fromClause); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(212); + match(FROM); + setState(213); + relation(); + setState(218); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(214); + match(T__2); + setState(215); + relation(); + } + } + setState(220); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class GroupByContext extends ParserRuleContext { + public List groupingElement() { + return getRuleContexts(GroupingElementContext.class); + } + 
public GroupingElementContext groupingElement(int i) { + return getRuleContext(GroupingElementContext.class,i); + } + public SetQuantifierContext setQuantifier() { + return getRuleContext(SetQuantifierContext.class,0); + } + public GroupByContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_groupBy; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterGroupBy(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitGroupBy(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitGroupBy(this); + else return visitor.visitChildren(this); + } + } + + public final GroupByContext groupBy() throws RecognitionException { + GroupByContext _localctx = new GroupByContext(_ctx, getState()); + enterRule(_localctx, 18, RULE_groupBy); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(222); + _la = _input.LA(1); + if (_la==ALL || _la==DISTINCT) { + { + setState(221); + setQuantifier(); + } + } + + setState(224); + groupingElement(); + setState(229); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(225); + match(T__2); + setState(226); + groupingElement(); + } + } + setState(231); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class GroupingElementContext extends ParserRuleContext { + public GroupingElementContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return 
RULE_groupingElement; } + + public GroupingElementContext() { } + public void copyFrom(GroupingElementContext ctx) { + super.copyFrom(ctx); + } + } + public static class SingleGroupingSetContext extends GroupingElementContext { + public GroupingExpressionsContext groupingExpressions() { + return getRuleContext(GroupingExpressionsContext.class,0); + } + public SingleGroupingSetContext(GroupingElementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSingleGroupingSet(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSingleGroupingSet(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSingleGroupingSet(this); + else return visitor.visitChildren(this); + } + } + + public final GroupingElementContext groupingElement() throws RecognitionException { + GroupingElementContext _localctx = new GroupingElementContext(_ctx, getState()); + enterRule(_localctx, 20, RULE_groupingElement); + try { + _localctx = new SingleGroupingSetContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(232); + groupingExpressions(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class GroupingExpressionsContext extends ParserRuleContext { + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public GroupingExpressionsContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return 
RULE_groupingExpressions; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterGroupingExpressions(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitGroupingExpressions(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitGroupingExpressions(this); + else return visitor.visitChildren(this); + } + } + + public final GroupingExpressionsContext groupingExpressions() throws RecognitionException { + GroupingExpressionsContext _localctx = new GroupingExpressionsContext(_ctx, getState()); + enterRule(_localctx, 22, RULE_groupingExpressions); + int _la; + try { + setState(247); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(234); + match(T__0); + setState(243); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CAST) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FORMAT) | (1L << FROM) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << LOGICAL) | (1L << MAPPED) | (1L << MATCH) | (1L << NOT) | (1L << NULL) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << QUERY) | (1L << RESET))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (RLIKE - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SESSION - 65)) | (1L << (SETS - 65)) | (1L << (SHOW - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TRUE - 65)) | (1L << (TYPE - 65)) | (1L << (USE - 65)) | (1L << (VERIFY - 65)) | (1L << (PLUS - 65)) | (1L << (MINUS - 65)) | (1L << (ASTERISK - 65)) | (1L << (STRING - 65)) | (1L << 
(INTEGER_VALUE - 65)) | (1L << (DECIMAL_VALUE - 65)) | (1L << (IDENTIFIER - 65)) | (1L << (DIGIT_IDENTIFIER - 65)) | (1L << (QUOTED_IDENTIFIER - 65)) | (1L << (BACKQUOTED_IDENTIFIER - 65)))) != 0)) { + { + setState(235); + expression(); + setState(240); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(236); + match(T__2); + setState(237); + expression(); + } + } + setState(242); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + + setState(245); + match(T__1); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(246); + expression(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class NamedQueryContext extends ParserRuleContext { + public IdentifierContext name; + public TerminalNode AS() { return getToken(SqlBaseParser.AS, 0); } + public QueryNoWithContext queryNoWith() { + return getRuleContext(QueryNoWithContext.class,0); + } + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public NamedQueryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_namedQuery; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterNamedQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitNamedQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitNamedQuery(this); + else return visitor.visitChildren(this); + } + } + + public final NamedQueryContext namedQuery() throws RecognitionException { + 
NamedQueryContext _localctx = new NamedQueryContext(_ctx, getState()); + enterRule(_localctx, 24, RULE_namedQuery); + try { + enterOuterAlt(_localctx, 1); + { + setState(249); + ((NamedQueryContext)_localctx).name = identifier(); + setState(250); + match(AS); + setState(251); + match(T__0); + setState(252); + queryNoWith(); + setState(253); + match(T__1); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class SetQuantifierContext extends ParserRuleContext { + public TerminalNode DISTINCT() { return getToken(SqlBaseParser.DISTINCT, 0); } + public TerminalNode ALL() { return getToken(SqlBaseParser.ALL, 0); } + public SetQuantifierContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_setQuantifier; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSetQuantifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSetQuantifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSetQuantifier(this); + else return visitor.visitChildren(this); + } + } + + public final SetQuantifierContext setQuantifier() throws RecognitionException { + SetQuantifierContext _localctx = new SetQuantifierContext(_ctx, getState()); + enterRule(_localctx, 26, RULE_setQuantifier); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(255); + _la = _input.LA(1); + if ( !(_la==ALL || _la==DISTINCT) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + 
_errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class SelectItemContext extends ParserRuleContext { + public SelectItemContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_selectItem; } + + public SelectItemContext() { } + public void copyFrom(SelectItemContext ctx) { + super.copyFrom(ctx); + } + } + public static class SelectExpressionContext extends SelectItemContext { + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public TerminalNode AS() { return getToken(SqlBaseParser.AS, 0); } + public SelectExpressionContext(SelectItemContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSelectExpression(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSelectExpression(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSelectExpression(this); + else return visitor.visitChildren(this); + } + } + + public final SelectItemContext selectItem() throws RecognitionException { + SelectItemContext _localctx = new SelectItemContext(_ctx, getState()); + enterRule(_localctx, 28, RULE_selectItem); + int _la; + try { + _localctx = new SelectExpressionContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(257); + expression(); + setState(262); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,31,_ctx) ) { + case 1: + { + setState(259); + _la = _input.LA(1); + if (_la==AS) { + { + setState(258); + 
match(AS); + } + } + + setState(261); + identifier(); + } + break; + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class RelationContext extends ParserRuleContext { + public RelationPrimaryContext relationPrimary() { + return getRuleContext(RelationPrimaryContext.class,0); + } + public List joinRelation() { + return getRuleContexts(JoinRelationContext.class); + } + public JoinRelationContext joinRelation(int i) { + return getRuleContext(JoinRelationContext.class,i); + } + public RelationContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_relation; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterRelation(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitRelation(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitRelation(this); + else return visitor.visitChildren(this); + } + } + + public final RelationContext relation() throws RecognitionException { + RelationContext _localctx = new RelationContext(_ctx, getState()); + enterRule(_localctx, 30, RULE_relation); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(264); + relationPrimary(); + setState(268); + _errHandler.sync(this); + _la = _input.LA(1); + while (((((_la - 30)) & ~0x3f) == 0 && ((1L << (_la - 30)) & ((1L << (FULL - 30)) | (1L << (INNER - 30)) | (1L << (JOIN - 30)) | (1L << (LEFT - 30)) | (1L << (NATURAL - 30)) | (1L << (RIGHT - 30)))) != 0)) { + { + { + setState(265); + joinRelation(); + } + } + setState(270); + _errHandler.sync(this); 
+ _la = _input.LA(1); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class JoinRelationContext extends ParserRuleContext { + public RelationPrimaryContext right; + public TerminalNode JOIN() { return getToken(SqlBaseParser.JOIN, 0); } + public RelationPrimaryContext relationPrimary() { + return getRuleContext(RelationPrimaryContext.class,0); + } + public JoinTypeContext joinType() { + return getRuleContext(JoinTypeContext.class,0); + } + public JoinCriteriaContext joinCriteria() { + return getRuleContext(JoinCriteriaContext.class,0); + } + public TerminalNode NATURAL() { return getToken(SqlBaseParser.NATURAL, 0); } + public JoinRelationContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_joinRelation; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterJoinRelation(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitJoinRelation(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitJoinRelation(this); + else return visitor.visitChildren(this); + } + } + + public final JoinRelationContext joinRelation() throws RecognitionException { + JoinRelationContext _localctx = new JoinRelationContext(_ctx, getState()); + enterRule(_localctx, 32, RULE_joinRelation); + int _la; + try { + setState(282); + switch (_input.LA(1)) { + case FULL: + case INNER: + case JOIN: + case LEFT: + case RIGHT: + enterOuterAlt(_localctx, 1); + { + { + setState(271); + joinType(); + } + setState(272); + match(JOIN); + setState(273); + 
((JoinRelationContext)_localctx).right = relationPrimary(); + setState(275); + _la = _input.LA(1); + if (_la==ON || _la==USING) { + { + setState(274); + joinCriteria(); + } + } + + } + break; + case NATURAL: + enterOuterAlt(_localctx, 2); + { + setState(277); + match(NATURAL); + setState(278); + joinType(); + setState(279); + match(JOIN); + setState(280); + ((JoinRelationContext)_localctx).right = relationPrimary(); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class JoinTypeContext extends ParserRuleContext { + public TerminalNode INNER() { return getToken(SqlBaseParser.INNER, 0); } + public TerminalNode LEFT() { return getToken(SqlBaseParser.LEFT, 0); } + public TerminalNode OUTER() { return getToken(SqlBaseParser.OUTER, 0); } + public TerminalNode RIGHT() { return getToken(SqlBaseParser.RIGHT, 0); } + public TerminalNode FULL() { return getToken(SqlBaseParser.FULL, 0); } + public JoinTypeContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_joinType; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterJoinType(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitJoinType(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitJoinType(this); + else return visitor.visitChildren(this); + } + } + + public final JoinTypeContext joinType() throws RecognitionException { + JoinTypeContext _localctx = new JoinTypeContext(_ctx, getState()); + enterRule(_localctx, 34, 
RULE_joinType); + int _la; + try { + setState(299); + switch (_input.LA(1)) { + case INNER: + case JOIN: + enterOuterAlt(_localctx, 1); + { + setState(285); + _la = _input.LA(1); + if (_la==INNER) { + { + setState(284); + match(INNER); + } + } + + } + break; + case LEFT: + enterOuterAlt(_localctx, 2); + { + setState(287); + match(LEFT); + setState(289); + _la = _input.LA(1); + if (_la==OUTER) { + { + setState(288); + match(OUTER); + } + } + + } + break; + case RIGHT: + enterOuterAlt(_localctx, 3); + { + setState(291); + match(RIGHT); + setState(293); + _la = _input.LA(1); + if (_la==OUTER) { + { + setState(292); + match(OUTER); + } + } + + } + break; + case FULL: + enterOuterAlt(_localctx, 4); + { + setState(295); + match(FULL); + setState(297); + _la = _input.LA(1); + if (_la==OUTER) { + { + setState(296); + match(OUTER); + } + } + + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class JoinCriteriaContext extends ParserRuleContext { + public TerminalNode ON() { return getToken(SqlBaseParser.ON, 0); } + public BooleanExpressionContext booleanExpression() { + return getRuleContext(BooleanExpressionContext.class,0); + } + public TerminalNode USING() { return getToken(SqlBaseParser.USING, 0); } + public List identifier() { + return getRuleContexts(IdentifierContext.class); + } + public IdentifierContext identifier(int i) { + return getRuleContext(IdentifierContext.class,i); + } + public JoinCriteriaContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_joinCriteria; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterJoinCriteria(this); + } + @Override + public 
void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitJoinCriteria(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitJoinCriteria(this); + else return visitor.visitChildren(this); + } + } + + public final JoinCriteriaContext joinCriteria() throws RecognitionException { + JoinCriteriaContext _localctx = new JoinCriteriaContext(_ctx, getState()); + enterRule(_localctx, 36, RULE_joinCriteria); + int _la; + try { + setState(315); + switch (_input.LA(1)) { + case ON: + enterOuterAlt(_localctx, 1); + { + setState(301); + match(ON); + setState(302); + booleanExpression(0); + } + break; + case USING: + enterOuterAlt(_localctx, 2); + { + setState(303); + match(USING); + setState(304); + match(T__0); + setState(305); + identifier(); + setState(310); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(306); + match(T__2); + setState(307); + identifier(); + } + } + setState(312); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(313); + match(T__1); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class RelationPrimaryContext extends ParserRuleContext { + public RelationPrimaryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_relationPrimary; } + + public RelationPrimaryContext() { } + public void copyFrom(RelationPrimaryContext ctx) { + super.copyFrom(ctx); + } + } + public static class AliasedRelationContext extends RelationPrimaryContext { + public RelationContext relation() { + return getRuleContext(RelationContext.class,0); + } + public 
QualifiedNameContext qualifiedName() { + return getRuleContext(QualifiedNameContext.class,0); + } + public TerminalNode AS() { return getToken(SqlBaseParser.AS, 0); } + public AliasedRelationContext(RelationPrimaryContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterAliasedRelation(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitAliasedRelation(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitAliasedRelation(this); + else return visitor.visitChildren(this); + } + } + public static class AliasedQueryContext extends RelationPrimaryContext { + public QueryNoWithContext queryNoWith() { + return getRuleContext(QueryNoWithContext.class,0); + } + public QualifiedNameContext qualifiedName() { + return getRuleContext(QualifiedNameContext.class,0); + } + public TerminalNode AS() { return getToken(SqlBaseParser.AS, 0); } + public AliasedQueryContext(RelationPrimaryContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterAliasedQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitAliasedQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitAliasedQuery(this); + else return visitor.visitChildren(this); + } + } + public static class TableNameContext extends RelationPrimaryContext { + public TableIdentifierContext tableIdentifier() { + return getRuleContext(TableIdentifierContext.class,0); + } + public QualifiedNameContext 
qualifiedName() { + return getRuleContext(QualifiedNameContext.class,0); + } + public TerminalNode AS() { return getToken(SqlBaseParser.AS, 0); } + public TableNameContext(RelationPrimaryContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterTableName(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitTableName(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitTableName(this); + else return visitor.visitChildren(this); + } + } + + public final RelationPrimaryContext relationPrimary() throws RecognitionException { + RelationPrimaryContext _localctx = new RelationPrimaryContext(_ctx, getState()); + enterRule(_localctx, 38, RULE_relationPrimary); + int _la; + try { + setState(342); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,48,_ctx) ) { + case 1: + _localctx = new TableNameContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(317); + tableIdentifier(); + setState(322); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FROM) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << LOGICAL) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << QUERY) | (1L << RESET))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (RLIKE - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SESSION - 65)) | (1L << (SETS - 65)) | (1L << (SHOW - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TYPE - 65)) | (1L << (USE - 65)) | (1L << (VERIFY - 65)) | (1L << (IDENTIFIER - 65)) | (1L << (DIGIT_IDENTIFIER - 
65)) | (1L << (QUOTED_IDENTIFIER - 65)) | (1L << (BACKQUOTED_IDENTIFIER - 65)))) != 0)) { + { + setState(319); + _la = _input.LA(1); + if (_la==AS) { + { + setState(318); + match(AS); + } + } + + setState(321); + qualifiedName(); + } + } + + } + break; + case 2: + _localctx = new AliasedQueryContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(324); + match(T__0); + setState(325); + queryNoWith(); + setState(326); + match(T__1); + setState(331); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FROM) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << LOGICAL) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << QUERY) | (1L << RESET))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (RLIKE - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SESSION - 65)) | (1L << (SETS - 65)) | (1L << (SHOW - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TYPE - 65)) | (1L << (USE - 65)) | (1L << (VERIFY - 65)) | (1L << (IDENTIFIER - 65)) | (1L << (DIGIT_IDENTIFIER - 65)) | (1L << (QUOTED_IDENTIFIER - 65)) | (1L << (BACKQUOTED_IDENTIFIER - 65)))) != 0)) { + { + setState(328); + _la = _input.LA(1); + if (_la==AS) { + { + setState(327); + match(AS); + } + } + + setState(330); + qualifiedName(); + } + } + + } + break; + case 3: + _localctx = new AliasedRelationContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(333); + match(T__0); + setState(334); + relation(); + setState(335); + match(T__1); + setState(340); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FROM) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << LOGICAL) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L 
<< PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << QUERY) | (1L << RESET))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (RLIKE - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SESSION - 65)) | (1L << (SETS - 65)) | (1L << (SHOW - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TYPE - 65)) | (1L << (USE - 65)) | (1L << (VERIFY - 65)) | (1L << (IDENTIFIER - 65)) | (1L << (DIGIT_IDENTIFIER - 65)) | (1L << (QUOTED_IDENTIFIER - 65)) | (1L << (BACKQUOTED_IDENTIFIER - 65)))) != 0)) { + { + setState(337); + _la = _input.LA(1); + if (_la==AS) { + { + setState(336); + match(AS); + } + } + + setState(339); + qualifiedName(); + } + } + + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class ExpressionContext extends ParserRuleContext { + public BooleanExpressionContext booleanExpression() { + return getRuleContext(BooleanExpressionContext.class,0); + } + public ExpressionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_expression; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterExpression(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitExpression(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitExpression(this); + else return visitor.visitChildren(this); + } + } + + public final ExpressionContext expression() throws RecognitionException { + ExpressionContext _localctx = new ExpressionContext(_ctx, getState()); + enterRule(_localctx, 40, RULE_expression); + try { + 
enterOuterAlt(_localctx, 1); + { + setState(344); + booleanExpression(0); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class BooleanExpressionContext extends ParserRuleContext { + public BooleanExpressionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_booleanExpression; } + + public BooleanExpressionContext() { } + public void copyFrom(BooleanExpressionContext ctx) { + super.copyFrom(ctx); + } + } + public static class LogicalNotContext extends BooleanExpressionContext { + public TerminalNode NOT() { return getToken(SqlBaseParser.NOT, 0); } + public BooleanExpressionContext booleanExpression() { + return getRuleContext(BooleanExpressionContext.class,0); + } + public LogicalNotContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterLogicalNot(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitLogicalNot(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitLogicalNot(this); + else return visitor.visitChildren(this); + } + } + public static class StringQueryContext extends BooleanExpressionContext { + public Token queryString; + public Token options; + public TerminalNode QUERY() { return getToken(SqlBaseParser.QUERY, 0); } + public List STRING() { return getTokens(SqlBaseParser.STRING); } + public TerminalNode STRING(int i) { + return getToken(SqlBaseParser.STRING, i); + } + public StringQueryContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + 
public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterStringQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitStringQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitStringQuery(this); + else return visitor.visitChildren(this); + } + } + public static class BooleanDefaultContext extends BooleanExpressionContext { + public PredicatedContext predicated() { + return getRuleContext(PredicatedContext.class,0); + } + public BooleanDefaultContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterBooleanDefault(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitBooleanDefault(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitBooleanDefault(this); + else return visitor.visitChildren(this); + } + } + public static class ExistsContext extends BooleanExpressionContext { + public TerminalNode EXISTS() { return getToken(SqlBaseParser.EXISTS, 0); } + public QueryContext query() { + return getRuleContext(QueryContext.class,0); + } + public ExistsContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterExists(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitExists(this); + } + @Override + public T 
accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitExists(this); + else return visitor.visitChildren(this); + } + } + public static class MultiMatchQueryContext extends BooleanExpressionContext { + public Token multiFields; + public Token queryString; + public Token options; + public TerminalNode MATCH() { return getToken(SqlBaseParser.MATCH, 0); } + public List STRING() { return getTokens(SqlBaseParser.STRING); } + public TerminalNode STRING(int i) { + return getToken(SqlBaseParser.STRING, i); + } + public MultiMatchQueryContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterMultiMatchQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitMultiMatchQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitMultiMatchQuery(this); + else return visitor.visitChildren(this); + } + } + public static class MatchQueryContext extends BooleanExpressionContext { + public QualifiedNameContext singleField; + public Token queryString; + public Token options; + public TerminalNode MATCH() { return getToken(SqlBaseParser.MATCH, 0); } + public QualifiedNameContext qualifiedName() { + return getRuleContext(QualifiedNameContext.class,0); + } + public List STRING() { return getTokens(SqlBaseParser.STRING); } + public TerminalNode STRING(int i) { + return getToken(SqlBaseParser.STRING, i); + } + public MatchQueryContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterMatchQuery(this); + } + @Override + public void 
exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitMatchQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitMatchQuery(this); + else return visitor.visitChildren(this); + } + } + public static class LogicalBinaryContext extends BooleanExpressionContext { + public BooleanExpressionContext left; + public Token operator; + public BooleanExpressionContext right; + public List booleanExpression() { + return getRuleContexts(BooleanExpressionContext.class); + } + public BooleanExpressionContext booleanExpression(int i) { + return getRuleContext(BooleanExpressionContext.class,i); + } + public TerminalNode AND() { return getToken(SqlBaseParser.AND, 0); } + public TerminalNode OR() { return getToken(SqlBaseParser.OR, 0); } + public LogicalBinaryContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterLogicalBinary(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitLogicalBinary(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitLogicalBinary(this); + else return visitor.visitChildren(this); + } + } + + public final BooleanExpressionContext booleanExpression() throws RecognitionException { + return booleanExpression(0); + } + + private BooleanExpressionContext booleanExpression(int _p) throws RecognitionException { + ParserRuleContext _parentctx = _ctx; + int _parentState = getState(); + BooleanExpressionContext _localctx = new BooleanExpressionContext(_ctx, _parentState); + BooleanExpressionContext _prevctx = _localctx; + int _startState = 42; + 
enterRecursionRule(_localctx, 42, RULE_booleanExpression, _p); + int _la; + try { + int _alt; + enterOuterAlt(_localctx, 1); + { + setState(393); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,52,_ctx) ) { + case 1: + { + _localctx = new LogicalNotContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + + setState(347); + match(NOT); + setState(348); + booleanExpression(8); + } + break; + case 2: + { + _localctx = new ExistsContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(349); + match(EXISTS); + setState(350); + match(T__0); + setState(351); + query(); + setState(352); + match(T__1); + } + break; + case 3: + { + _localctx = new StringQueryContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(354); + match(QUERY); + setState(355); + match(T__0); + setState(356); + ((StringQueryContext)_localctx).queryString = match(STRING); + setState(361); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(357); + match(T__2); + setState(358); + ((StringQueryContext)_localctx).options = match(STRING); + } + } + setState(363); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(364); + match(T__1); + } + break; + case 4: + { + _localctx = new MatchQueryContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(365); + match(MATCH); + setState(366); + match(T__0); + setState(367); + ((MatchQueryContext)_localctx).singleField = qualifiedName(); + setState(368); + match(T__2); + setState(369); + ((MatchQueryContext)_localctx).queryString = match(STRING); + setState(374); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(370); + match(T__2); + setState(371); + ((MatchQueryContext)_localctx).options = match(STRING); + } + } + setState(376); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(377); + match(T__1); + } + break; + case 5: + { + _localctx = new 
MultiMatchQueryContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(379); + match(MATCH); + setState(380); + match(T__0); + setState(381); + ((MultiMatchQueryContext)_localctx).multiFields = match(STRING); + setState(382); + match(T__2); + setState(383); + ((MultiMatchQueryContext)_localctx).queryString = match(STRING); + setState(388); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(384); + match(T__2); + setState(385); + ((MultiMatchQueryContext)_localctx).options = match(STRING); + } + } + setState(390); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(391); + match(T__1); + } + break; + case 6: + { + _localctx = new BooleanDefaultContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(392); + predicated(); + } + break; + } + _ctx.stop = _input.LT(-1); + setState(403); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,54,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + if ( _parseListeners!=null ) triggerExitRuleEvent(); + _prevctx = _localctx; + { + setState(401); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,53,_ctx) ) { + case 1: + { + _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState)); + ((LogicalBinaryContext)_localctx).left = _prevctx; + pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression); + setState(395); + if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); + setState(396); + ((LogicalBinaryContext)_localctx).operator = match(AND); + setState(397); + ((LogicalBinaryContext)_localctx).right = booleanExpression(3); + } + break; + case 2: + { + _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState)); + ((LogicalBinaryContext)_localctx).left = _prevctx; + pushNewRecursionContext(_localctx, _startState, 
RULE_booleanExpression); + setState(398); + if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); + setState(399); + ((LogicalBinaryContext)_localctx).operator = match(OR); + setState(400); + ((LogicalBinaryContext)_localctx).right = booleanExpression(2); + } + break; + } + } + } + setState(405); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,54,_ctx); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + unrollRecursionContexts(_parentctx); + } + return _localctx; + } + + public static class PredicatedContext extends ParserRuleContext { + public ValueExpressionContext valueExpression() { + return getRuleContext(ValueExpressionContext.class,0); + } + public PredicateContext predicate() { + return getRuleContext(PredicateContext.class,0); + } + public PredicatedContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_predicated; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterPredicated(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitPredicated(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitPredicated(this); + else return visitor.visitChildren(this); + } + } + + public final PredicatedContext predicated() throws RecognitionException { + PredicatedContext _localctx = new PredicatedContext(_ctx, getState()); + enterRule(_localctx, 44, RULE_predicated); + try { + enterOuterAlt(_localctx, 1); + { + setState(406); + valueExpression(0); + setState(408); + _errHandler.sync(this); + switch ( 
getInterpreter().adaptivePredict(_input,55,_ctx) ) { + case 1: + { + setState(407); + predicate(); + } + break; + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class PredicateContext extends ParserRuleContext { + public Token kind; + public ValueExpressionContext lower; + public ValueExpressionContext upper; + public ValueExpressionContext pattern; + public TerminalNode AND() { return getToken(SqlBaseParser.AND, 0); } + public TerminalNode BETWEEN() { return getToken(SqlBaseParser.BETWEEN, 0); } + public List valueExpression() { + return getRuleContexts(ValueExpressionContext.class); + } + public ValueExpressionContext valueExpression(int i) { + return getRuleContext(ValueExpressionContext.class,i); + } + public TerminalNode NOT() { return getToken(SqlBaseParser.NOT, 0); } + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public TerminalNode IN() { return getToken(SqlBaseParser.IN, 0); } + public QueryContext query() { + return getRuleContext(QueryContext.class,0); + } + public TerminalNode LIKE() { return getToken(SqlBaseParser.LIKE, 0); } + public TerminalNode RLIKE() { return getToken(SqlBaseParser.RLIKE, 0); } + public TerminalNode IS() { return getToken(SqlBaseParser.IS, 0); } + public TerminalNode NULL() { return getToken(SqlBaseParser.NULL, 0); } + public PredicateContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_predicate; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterPredicate(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( 
listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitPredicate(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitPredicate(this); + else return visitor.visitChildren(this); + } + } + + public final PredicateContext predicate() throws RecognitionException { + PredicateContext _localctx = new PredicateContext(_ctx, getState()); + enterRule(_localctx, 46, RULE_predicate); + int _la; + try { + setState(451); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,62,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(411); + _la = _input.LA(1); + if (_la==NOT) { + { + setState(410); + match(NOT); + } + } + + setState(413); + ((PredicateContext)_localctx).kind = match(BETWEEN); + setState(414); + ((PredicateContext)_localctx).lower = valueExpression(0); + setState(415); + match(AND); + setState(416); + ((PredicateContext)_localctx).upper = valueExpression(0); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(419); + _la = _input.LA(1); + if (_la==NOT) { + { + setState(418); + match(NOT); + } + } + + setState(421); + ((PredicateContext)_localctx).kind = match(IN); + setState(422); + match(T__0); + setState(423); + expression(); + setState(428); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(424); + match(T__2); + setState(425); + expression(); + } + } + setState(430); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(431); + match(T__1); + } + break; + case 3: + enterOuterAlt(_localctx, 3); + { + setState(434); + _la = _input.LA(1); + if (_la==NOT) { + { + setState(433); + match(NOT); + } + } + + setState(436); + ((PredicateContext)_localctx).kind = match(IN); + setState(437); + match(T__0); + setState(438); + query(); + setState(439); + match(T__1); + } + break; + case 4: + enterOuterAlt(_localctx, 4); + { + setState(442); + _la = _input.LA(1); + 
if (_la==NOT) { + { + setState(441); + match(NOT); + } + } + + setState(444); + ((PredicateContext)_localctx).kind = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==LIKE || _la==RLIKE) ) { + ((PredicateContext)_localctx).kind = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + setState(445); + ((PredicateContext)_localctx).pattern = valueExpression(0); + } + break; + case 5: + enterOuterAlt(_localctx, 5); + { + setState(446); + match(IS); + setState(448); + _la = _input.LA(1); + if (_la==NOT) { + { + setState(447); + match(NOT); + } + } + + setState(450); + ((PredicateContext)_localctx).kind = match(NULL); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class ValueExpressionContext extends ParserRuleContext { + public ValueExpressionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_valueExpression; } + + public ValueExpressionContext() { } + public void copyFrom(ValueExpressionContext ctx) { + super.copyFrom(ctx); + } + } + public static class ValueExpressionDefaultContext extends ValueExpressionContext { + public PrimaryExpressionContext primaryExpression() { + return getRuleContext(PrimaryExpressionContext.class,0); + } + public ValueExpressionDefaultContext(ValueExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterValueExpressionDefault(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitValueExpressionDefault(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return 
((SqlBaseVisitor)visitor).visitValueExpressionDefault(this); + else return visitor.visitChildren(this); + } + } + public static class ComparisonContext extends ValueExpressionContext { + public ValueExpressionContext left; + public ValueExpressionContext right; + public ComparisonOperatorContext comparisonOperator() { + return getRuleContext(ComparisonOperatorContext.class,0); + } + public List valueExpression() { + return getRuleContexts(ValueExpressionContext.class); + } + public ValueExpressionContext valueExpression(int i) { + return getRuleContext(ValueExpressionContext.class,i); + } + public ComparisonContext(ValueExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterComparison(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitComparison(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitComparison(this); + else return visitor.visitChildren(this); + } + } + public static class ArithmeticBinaryContext extends ValueExpressionContext { + public ValueExpressionContext left; + public Token operator; + public ValueExpressionContext right; + public List valueExpression() { + return getRuleContexts(ValueExpressionContext.class); + } + public ValueExpressionContext valueExpression(int i) { + return getRuleContext(ValueExpressionContext.class,i); + } + public TerminalNode ASTERISK() { return getToken(SqlBaseParser.ASTERISK, 0); } + public TerminalNode SLASH() { return getToken(SqlBaseParser.SLASH, 0); } + public TerminalNode PERCENT() { return getToken(SqlBaseParser.PERCENT, 0); } + public TerminalNode PLUS() { return getToken(SqlBaseParser.PLUS, 0); } + public TerminalNode MINUS() { return getToken(SqlBaseParser.MINUS, 0); } + public 
ArithmeticBinaryContext(ValueExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterArithmeticBinary(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitArithmeticBinary(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitArithmeticBinary(this); + else return visitor.visitChildren(this); + } + } + public static class ArithmeticUnaryContext extends ValueExpressionContext { + public Token operator; + public ValueExpressionContext valueExpression() { + return getRuleContext(ValueExpressionContext.class,0); + } + public TerminalNode MINUS() { return getToken(SqlBaseParser.MINUS, 0); } + public TerminalNode PLUS() { return getToken(SqlBaseParser.PLUS, 0); } + public ArithmeticUnaryContext(ValueExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterArithmeticUnary(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitArithmeticUnary(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitArithmeticUnary(this); + else return visitor.visitChildren(this); + } + } + + public final ValueExpressionContext valueExpression() throws RecognitionException { + return valueExpression(0); + } + + private ValueExpressionContext valueExpression(int _p) throws RecognitionException { + ParserRuleContext _parentctx = _ctx; + int _parentState = getState(); + ValueExpressionContext _localctx = new ValueExpressionContext(_ctx, _parentState); + 
ValueExpressionContext _prevctx = _localctx; + int _startState = 48; + enterRecursionRule(_localctx, 48, RULE_valueExpression, _p); + int _la; + try { + int _alt; + enterOuterAlt(_localctx, 1); + { + setState(457); + switch (_input.LA(1)) { + case T__0: + case ANALYZE: + case ANALYZED: + case CAST: + case COLUMNS: + case DEBUG: + case EXECUTABLE: + case EXPLAIN: + case EXTRACT: + case FALSE: + case FORMAT: + case FROM: + case FUNCTIONS: + case GRAPHVIZ: + case LOGICAL: + case MAPPED: + case NULL: + case OPTIMIZED: + case PARSED: + case PHYSICAL: + case PLAN: + case QUERY: + case RESET: + case RLIKE: + case SCHEMAS: + case SESSION: + case SETS: + case SHOW: + case TABLES: + case TEXT: + case TRUE: + case TYPE: + case USE: + case VERIFY: + case ASTERISK: + case STRING: + case INTEGER_VALUE: + case DECIMAL_VALUE: + case IDENTIFIER: + case DIGIT_IDENTIFIER: + case QUOTED_IDENTIFIER: + case BACKQUOTED_IDENTIFIER: + { + _localctx = new ValueExpressionDefaultContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + + setState(454); + primaryExpression(); + } + break; + case PLUS: + case MINUS: + { + _localctx = new ArithmeticUnaryContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(455); + ((ArithmeticUnaryContext)_localctx).operator = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==PLUS || _la==MINUS) ) { + ((ArithmeticUnaryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + setState(456); + valueExpression(4); + } + break; + default: + throw new NoViableAltException(this); + } + _ctx.stop = _input.LT(-1); + setState(471); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,65,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + if ( _parseListeners!=null ) triggerExitRuleEvent(); + _prevctx = _localctx; + { + setState(469); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,64,_ctx) ) { + 
case 1: + { + _localctx = new ArithmeticBinaryContext(new ValueExpressionContext(_parentctx, _parentState)); + ((ArithmeticBinaryContext)_localctx).left = _prevctx; + pushNewRecursionContext(_localctx, _startState, RULE_valueExpression); + setState(459); + if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); + setState(460); + ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); + _la = _input.LA(1); + if ( !(((((_la - 93)) & ~0x3f) == 0 && ((1L << (_la - 93)) & ((1L << (ASTERISK - 93)) | (1L << (SLASH - 93)) | (1L << (PERCENT - 93)))) != 0)) ) { + ((ArithmeticBinaryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + setState(461); + ((ArithmeticBinaryContext)_localctx).right = valueExpression(4); + } + break; + case 2: + { + _localctx = new ArithmeticBinaryContext(new ValueExpressionContext(_parentctx, _parentState)); + ((ArithmeticBinaryContext)_localctx).left = _prevctx; + pushNewRecursionContext(_localctx, _startState, RULE_valueExpression); + setState(462); + if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); + setState(463); + ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==PLUS || _la==MINUS) ) { + ((ArithmeticBinaryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + setState(464); + ((ArithmeticBinaryContext)_localctx).right = valueExpression(3); + } + break; + case 3: + { + _localctx = new ComparisonContext(new ValueExpressionContext(_parentctx, _parentState)); + ((ComparisonContext)_localctx).left = _prevctx; + pushNewRecursionContext(_localctx, _startState, RULE_valueExpression); + setState(465); + if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); + setState(466); + comparisonOperator(); + setState(467); + ((ComparisonContext)_localctx).right = valueExpression(2); + } + break; + } + } + } + 
setState(473); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,65,_ctx); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + unrollRecursionContexts(_parentctx); + } + return _localctx; + } + + public static class PrimaryExpressionContext extends ParserRuleContext { + public PrimaryExpressionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_primaryExpression; } + + public PrimaryExpressionContext() { } + public void copyFrom(PrimaryExpressionContext ctx) { + super.copyFrom(ctx); + } + } + public static class DereferenceContext extends PrimaryExpressionContext { + public ColumnExpressionContext base; + public IdentifierContext fieldName; + public ColumnExpressionContext columnExpression() { + return getRuleContext(ColumnExpressionContext.class,0); + } + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public DereferenceContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterDereference(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitDereference(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitDereference(this); + else return visitor.visitChildren(this); + } + } + public static class CastContext extends PrimaryExpressionContext { + public TerminalNode CAST() { return getToken(SqlBaseParser.CAST, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode AS() { return 
getToken(SqlBaseParser.AS, 0); } + public DataTypeContext dataType() { + return getRuleContext(DataTypeContext.class,0); + } + public CastContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterCast(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitCast(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitCast(this); + else return visitor.visitChildren(this); + } + } + public static class ConstantDefaultContext extends PrimaryExpressionContext { + public ConstantContext constant() { + return getRuleContext(ConstantContext.class,0); + } + public ConstantDefaultContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterConstantDefault(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitConstantDefault(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitConstantDefault(this); + else return visitor.visitChildren(this); + } + } + public static class ColumnReferenceContext extends PrimaryExpressionContext { + public ColumnExpressionContext columnExpression() { + return getRuleContext(ColumnExpressionContext.class,0); + } + public ColumnReferenceContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterColumnReference(this); + } + @Override + public void 
exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitColumnReference(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitColumnReference(this); + else return visitor.visitChildren(this); + } + } + public static class ExtractContext extends PrimaryExpressionContext { + public IdentifierContext field; + public TerminalNode EXTRACT() { return getToken(SqlBaseParser.EXTRACT, 0); } + public TerminalNode FROM() { return getToken(SqlBaseParser.FROM, 0); } + public ValueExpressionContext valueExpression() { + return getRuleContext(ValueExpressionContext.class,0); + } + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public ExtractContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterExtract(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitExtract(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitExtract(this); + else return visitor.visitChildren(this); + } + } + public static class ParenthesizedExpressionContext extends PrimaryExpressionContext { + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public ParenthesizedExpressionContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterParenthesizedExpression(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) 
((SqlBaseListener)listener).exitParenthesizedExpression(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitParenthesizedExpression(this); + else return visitor.visitChildren(this); + } + } + public static class StarContext extends PrimaryExpressionContext { + public ColumnExpressionContext qualifier; + public TerminalNode ASTERISK() { return getToken(SqlBaseParser.ASTERISK, 0); } + public ColumnExpressionContext columnExpression() { + return getRuleContext(ColumnExpressionContext.class,0); + } + public StarContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterStar(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitStar(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitStar(this); + else return visitor.visitChildren(this); + } + } + public static class FunctionCallContext extends PrimaryExpressionContext { + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public SetQuantifierContext setQuantifier() { + return getRuleContext(SetQuantifierContext.class,0); + } + public FunctionCallContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterFunctionCall(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof 
SqlBaseListener ) ((SqlBaseListener)listener).exitFunctionCall(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitFunctionCall(this); + else return visitor.visitChildren(this); + } + } + public static class SubqueryExpressionContext extends PrimaryExpressionContext { + public QueryContext query() { + return getRuleContext(QueryContext.class,0); + } + public SubqueryExpressionContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSubqueryExpression(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSubqueryExpression(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSubqueryExpression(this); + else return visitor.visitChildren(this); + } + } + + public final PrimaryExpressionContext primaryExpression() throws RecognitionException { + PrimaryExpressionContext _localctx = new PrimaryExpressionContext(_ctx, getState()); + enterRule(_localctx, 50, RULE_primaryExpression); + int _la; + try { + setState(526); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,70,_ctx) ) { + case 1: + _localctx = new CastContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(474); + match(CAST); + setState(475); + match(T__0); + setState(476); + expression(); + setState(477); + match(AS); + setState(478); + dataType(); + setState(479); + match(T__1); + } + break; + case 2: + _localctx = new ExtractContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(481); + match(EXTRACT); + setState(482); + match(T__0); + setState(483); + ((ExtractContext)_localctx).field = identifier(); + setState(484); + 
match(FROM); + setState(485); + valueExpression(0); + setState(486); + match(T__1); + } + break; + case 3: + _localctx = new ConstantDefaultContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(488); + constant(); + } + break; + case 4: + _localctx = new StarContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(489); + match(ASTERISK); + } + break; + case 5: + _localctx = new StarContext(_localctx); + enterOuterAlt(_localctx, 5); + { + setState(493); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FROM) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << LOGICAL) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << QUERY) | (1L << RESET))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (RLIKE - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SESSION - 65)) | (1L << (SETS - 65)) | (1L << (SHOW - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TYPE - 65)) | (1L << (USE - 65)) | (1L << (VERIFY - 65)) | (1L << (IDENTIFIER - 65)) | (1L << (DIGIT_IDENTIFIER - 65)) | (1L << (QUOTED_IDENTIFIER - 65)) | (1L << (BACKQUOTED_IDENTIFIER - 65)))) != 0)) { + { + setState(490); + ((StarContext)_localctx).qualifier = columnExpression(); + setState(491); + match(T__3); + } + } + + setState(495); + match(ASTERISK); + } + break; + case 6: + _localctx = new FunctionCallContext(_localctx); + enterOuterAlt(_localctx, 6); + { + setState(496); + identifier(); + setState(497); + match(T__0); + setState(509); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ALL) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CAST) | (1L << COLUMNS) | (1L << DEBUG) | (1L << DISTINCT) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FORMAT) | (1L << FROM) | (1L << FUNCTIONS) | 
(1L << GRAPHVIZ) | (1L << LOGICAL) | (1L << MAPPED) | (1L << MATCH) | (1L << NOT) | (1L << NULL) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << QUERY) | (1L << RESET))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (RLIKE - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SESSION - 65)) | (1L << (SETS - 65)) | (1L << (SHOW - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TRUE - 65)) | (1L << (TYPE - 65)) | (1L << (USE - 65)) | (1L << (VERIFY - 65)) | (1L << (PLUS - 65)) | (1L << (MINUS - 65)) | (1L << (ASTERISK - 65)) | (1L << (STRING - 65)) | (1L << (INTEGER_VALUE - 65)) | (1L << (DECIMAL_VALUE - 65)) | (1L << (IDENTIFIER - 65)) | (1L << (DIGIT_IDENTIFIER - 65)) | (1L << (QUOTED_IDENTIFIER - 65)) | (1L << (BACKQUOTED_IDENTIFIER - 65)))) != 0)) { + { + setState(499); + _la = _input.LA(1); + if (_la==ALL || _la==DISTINCT) { + { + setState(498); + setQuantifier(); + } + } + + setState(501); + expression(); + setState(506); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(502); + match(T__2); + setState(503); + expression(); + } + } + setState(508); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + + setState(511); + match(T__1); + } + break; + case 7: + _localctx = new SubqueryExpressionContext(_localctx); + enterOuterAlt(_localctx, 7); + { + setState(513); + match(T__0); + setState(514); + query(); + setState(515); + match(T__1); + } + break; + case 8: + _localctx = new ColumnReferenceContext(_localctx); + enterOuterAlt(_localctx, 8); + { + setState(517); + columnExpression(); + } + break; + case 9: + _localctx = new DereferenceContext(_localctx); + enterOuterAlt(_localctx, 9); + { + setState(518); + ((DereferenceContext)_localctx).base = columnExpression(); + setState(519); + match(T__3); + setState(520); + ((DereferenceContext)_localctx).fieldName = identifier(); + } + break; + case 10: + _localctx = new ParenthesizedExpressionContext(_localctx); + 
enterOuterAlt(_localctx, 10); + { + setState(522); + match(T__0); + setState(523); + expression(); + setState(524); + match(T__1); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class ColumnExpressionContext extends ParserRuleContext { + public IdentifierContext alias; + public TableIdentifierContext table; + public IdentifierContext name; + public List identifier() { + return getRuleContexts(IdentifierContext.class); + } + public IdentifierContext identifier(int i) { + return getRuleContext(IdentifierContext.class,i); + } + public TableIdentifierContext tableIdentifier() { + return getRuleContext(TableIdentifierContext.class,0); + } + public ColumnExpressionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_columnExpression; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterColumnExpression(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitColumnExpression(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitColumnExpression(this); + else return visitor.visitChildren(this); + } + } + + public final ColumnExpressionContext columnExpression() throws RecognitionException { + ColumnExpressionContext _localctx = new ColumnExpressionContext(_ctx, getState()); + enterRule(_localctx, 52, RULE_columnExpression); + try { + enterOuterAlt(_localctx, 1); + { + setState(534); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,72,_ctx) ) { + case 1: + { + setState(530); + 
_errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,71,_ctx) ) { + case 1: + { + setState(528); + ((ColumnExpressionContext)_localctx).alias = identifier(); + } + break; + case 2: + { + setState(529); + ((ColumnExpressionContext)_localctx).table = tableIdentifier(); + } + break; + } + setState(532); + match(T__3); + } + break; + } + setState(536); + ((ColumnExpressionContext)_localctx).name = identifier(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class ConstantContext extends ParserRuleContext { + public ConstantContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_constant; } + + public ConstantContext() { } + public void copyFrom(ConstantContext ctx) { + super.copyFrom(ctx); + } + } + public static class NullLiteralContext extends ConstantContext { + public TerminalNode NULL() { return getToken(SqlBaseParser.NULL, 0); } + public NullLiteralContext(ConstantContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterNullLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitNullLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitNullLiteral(this); + else return visitor.visitChildren(this); + } + } + public static class StringLiteralContext extends ConstantContext { + public List STRING() { return getTokens(SqlBaseParser.STRING); } + public TerminalNode STRING(int i) { + return getToken(SqlBaseParser.STRING, i); + } + public StringLiteralContext(ConstantContext 
ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterStringLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitStringLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitStringLiteral(this); + else return visitor.visitChildren(this); + } + } + public static class TypeConstructorContext extends ConstantContext { + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public TerminalNode STRING() { return getToken(SqlBaseParser.STRING, 0); } + public TypeConstructorContext(ConstantContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterTypeConstructor(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitTypeConstructor(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitTypeConstructor(this); + else return visitor.visitChildren(this); + } + } + public static class NumericLiteralContext extends ConstantContext { + public NumberContext number() { + return getRuleContext(NumberContext.class,0); + } + public NumericLiteralContext(ConstantContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterNumericLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) 
((SqlBaseListener)listener).exitNumericLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitNumericLiteral(this); + else return visitor.visitChildren(this); + } + } + public static class BooleanLiteralContext extends ConstantContext { + public BooleanValueContext booleanValue() { + return getRuleContext(BooleanValueContext.class,0); + } + public BooleanLiteralContext(ConstantContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterBooleanLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitBooleanLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitBooleanLiteral(this); + else return visitor.visitChildren(this); + } + } + + public final ConstantContext constant() throws RecognitionException { + ConstantContext _localctx = new ConstantContext(_ctx, getState()); + enterRule(_localctx, 54, RULE_constant); + try { + int _alt; + setState(549); + switch (_input.LA(1)) { + case NULL: + _localctx = new NullLiteralContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(538); + match(NULL); + } + break; + case ANALYZE: + case ANALYZED: + case COLUMNS: + case DEBUG: + case EXECUTABLE: + case EXPLAIN: + case FORMAT: + case FROM: + case FUNCTIONS: + case GRAPHVIZ: + case LOGICAL: + case MAPPED: + case OPTIMIZED: + case PARSED: + case PHYSICAL: + case PLAN: + case QUERY: + case RESET: + case RLIKE: + case SCHEMAS: + case SESSION: + case SETS: + case SHOW: + case TABLES: + case TEXT: + case TYPE: + case USE: + case VERIFY: + case IDENTIFIER: + case DIGIT_IDENTIFIER: + case QUOTED_IDENTIFIER: + case BACKQUOTED_IDENTIFIER: + _localctx = new 
TypeConstructorContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(539); + identifier(); + setState(540); + match(STRING); + } + break; + case INTEGER_VALUE: + case DECIMAL_VALUE: + _localctx = new NumericLiteralContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(542); + number(); + } + break; + case FALSE: + case TRUE: + _localctx = new BooleanLiteralContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(543); + booleanValue(); + } + break; + case STRING: + _localctx = new StringLiteralContext(_localctx); + enterOuterAlt(_localctx, 5); + { + setState(545); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(544); + match(STRING); + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(547); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,73,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class ComparisonOperatorContext extends ParserRuleContext { + public TerminalNode EQ() { return getToken(SqlBaseParser.EQ, 0); } + public TerminalNode NEQ() { return getToken(SqlBaseParser.NEQ, 0); } + public TerminalNode LT() { return getToken(SqlBaseParser.LT, 0); } + public TerminalNode LTE() { return getToken(SqlBaseParser.LTE, 0); } + public TerminalNode GT() { return getToken(SqlBaseParser.GT, 0); } + public TerminalNode GTE() { return getToken(SqlBaseParser.GTE, 0); } + public ComparisonOperatorContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_comparisonOperator; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( 
listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterComparisonOperator(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitComparisonOperator(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitComparisonOperator(this); + else return visitor.visitChildren(this); + } + } + + public final ComparisonOperatorContext comparisonOperator() throws RecognitionException { + ComparisonOperatorContext _localctx = new ComparisonOperatorContext(_ctx, getState()); + enterRule(_localctx, 56, RULE_comparisonOperator); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(551); + _la = _input.LA(1); + if ( !(((((_la - 85)) & ~0x3f) == 0 && ((1L << (_la - 85)) & ((1L << (EQ - 85)) | (1L << (NEQ - 85)) | (1L << (LT - 85)) | (1L << (LTE - 85)) | (1L << (GT - 85)) | (1L << (GTE - 85)))) != 0)) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class BooleanValueContext extends ParserRuleContext { + public TerminalNode TRUE() { return getToken(SqlBaseParser.TRUE, 0); } + public TerminalNode FALSE() { return getToken(SqlBaseParser.FALSE, 0); } + public BooleanValueContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_booleanValue; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterBooleanValue(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) 
((SqlBaseListener)listener).exitBooleanValue(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitBooleanValue(this); + else return visitor.visitChildren(this); + } + } + + public final BooleanValueContext booleanValue() throws RecognitionException { + BooleanValueContext _localctx = new BooleanValueContext(_ctx, getState()); + enterRule(_localctx, 58, RULE_booleanValue); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(553); + _la = _input.LA(1); + if ( !(_la==FALSE || _la==TRUE) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class DataTypeContext extends ParserRuleContext { + public DataTypeContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_dataType; } + + public DataTypeContext() { } + public void copyFrom(DataTypeContext ctx) { + super.copyFrom(ctx); + } + } + public static class PrimitiveDataTypeContext extends DataTypeContext { + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public PrimitiveDataTypeContext(DataTypeContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterPrimitiveDataType(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitPrimitiveDataType(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitPrimitiveDataType(this); + else return 
visitor.visitChildren(this); + } + } + + public final DataTypeContext dataType() throws RecognitionException { + DataTypeContext _localctx = new DataTypeContext(_ctx, getState()); + enterRule(_localctx, 60, RULE_dataType); + try { + _localctx = new PrimitiveDataTypeContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(555); + identifier(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class WhenClauseContext extends ParserRuleContext { + public ExpressionContext condition; + public ExpressionContext result; + public TerminalNode WHEN() { return getToken(SqlBaseParser.WHEN, 0); } + public TerminalNode THEN() { return getToken(SqlBaseParser.THEN, 0); } + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public WhenClauseContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_whenClause; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterWhenClause(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitWhenClause(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitWhenClause(this); + else return visitor.visitChildren(this); + } + } + + public final WhenClauseContext whenClause() throws RecognitionException { + WhenClauseContext _localctx = new WhenClauseContext(_ctx, getState()); + enterRule(_localctx, 62, RULE_whenClause); + try { + enterOuterAlt(_localctx, 1); + { + setState(557); 
+ match(WHEN); + setState(558); + ((WhenClauseContext)_localctx).condition = expression(); + setState(559); + match(THEN); + setState(560); + ((WhenClauseContext)_localctx).result = expression(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class QualifiedNameContext extends ParserRuleContext { + public List identifier() { + return getRuleContexts(IdentifierContext.class); + } + public IdentifierContext identifier(int i) { + return getRuleContext(IdentifierContext.class,i); + } + public QualifiedNameContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_qualifiedName; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterQualifiedName(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitQualifiedName(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitQualifiedName(this); + else return visitor.visitChildren(this); + } + } + + public final QualifiedNameContext qualifiedName() throws RecognitionException { + QualifiedNameContext _localctx = new QualifiedNameContext(_ctx, getState()); + enterRule(_localctx, 64, RULE_qualifiedName); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(562); + identifier(); + setState(567); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__3) { + { + { + setState(563); + match(T__3); + setState(564); + identifier(); + } + } + setState(569); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + 
_errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class TableIdentifierContext extends ParserRuleContext { + public IdentifierContext index; + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public TableIdentifierContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_tableIdentifier; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterTableIdentifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitTableIdentifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitTableIdentifier(this); + else return visitor.visitChildren(this); + } + } + + public final TableIdentifierContext tableIdentifier() throws RecognitionException { + TableIdentifierContext _localctx = new TableIdentifierContext(_ctx, getState()); + enterRule(_localctx, 66, RULE_tableIdentifier); + try { + enterOuterAlt(_localctx, 1); + { + setState(570); + ((TableIdentifierContext)_localctx).index = identifier(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class IdentifierContext extends ParserRuleContext { + public QuoteIdentifierContext quoteIdentifier() { + return getRuleContext(QuoteIdentifierContext.class,0); + } + public UnquoteIdentifierContext unquoteIdentifier() { + return getRuleContext(UnquoteIdentifierContext.class,0); + } + public IdentifierContext(ParserRuleContext parent, int invokingState) { + 
super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_identifier; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterIdentifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitIdentifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitIdentifier(this); + else return visitor.visitChildren(this); + } + } + + public final IdentifierContext identifier() throws RecognitionException { + IdentifierContext _localctx = new IdentifierContext(_ctx, getState()); + enterRule(_localctx, 68, RULE_identifier); + try { + setState(574); + switch (_input.LA(1)) { + case QUOTED_IDENTIFIER: + case BACKQUOTED_IDENTIFIER: + enterOuterAlt(_localctx, 1); + { + setState(572); + quoteIdentifier(); + } + break; + case ANALYZE: + case ANALYZED: + case COLUMNS: + case DEBUG: + case EXECUTABLE: + case EXPLAIN: + case FORMAT: + case FROM: + case FUNCTIONS: + case GRAPHVIZ: + case LOGICAL: + case MAPPED: + case OPTIMIZED: + case PARSED: + case PHYSICAL: + case PLAN: + case QUERY: + case RESET: + case RLIKE: + case SCHEMAS: + case SESSION: + case SETS: + case SHOW: + case TABLES: + case TEXT: + case TYPE: + case USE: + case VERIFY: + case IDENTIFIER: + case DIGIT_IDENTIFIER: + enterOuterAlt(_localctx, 2); + { + setState(573); + unquoteIdentifier(); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class QuoteIdentifierContext extends ParserRuleContext { + public QuoteIdentifierContext(ParserRuleContext parent, int invokingState) { + 
super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_quoteIdentifier; } + + public QuoteIdentifierContext() { } + public void copyFrom(QuoteIdentifierContext ctx) { + super.copyFrom(ctx); + } + } + public static class BackQuotedIdentifierContext extends QuoteIdentifierContext { + public TerminalNode BACKQUOTED_IDENTIFIER() { return getToken(SqlBaseParser.BACKQUOTED_IDENTIFIER, 0); } + public BackQuotedIdentifierContext(QuoteIdentifierContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterBackQuotedIdentifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitBackQuotedIdentifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitBackQuotedIdentifier(this); + else return visitor.visitChildren(this); + } + } + public static class QuotedIdentifierContext extends QuoteIdentifierContext { + public TerminalNode QUOTED_IDENTIFIER() { return getToken(SqlBaseParser.QUOTED_IDENTIFIER, 0); } + public QuotedIdentifierContext(QuoteIdentifierContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterQuotedIdentifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitQuotedIdentifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitQuotedIdentifier(this); + else return visitor.visitChildren(this); + } + } + + public final QuoteIdentifierContext quoteIdentifier() throws RecognitionException { + QuoteIdentifierContext 
_localctx = new QuoteIdentifierContext(_ctx, getState()); + enterRule(_localctx, 70, RULE_quoteIdentifier); + try { + setState(578); + switch (_input.LA(1)) { + case QUOTED_IDENTIFIER: + _localctx = new QuotedIdentifierContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(576); + match(QUOTED_IDENTIFIER); + } + break; + case BACKQUOTED_IDENTIFIER: + _localctx = new BackQuotedIdentifierContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(577); + match(BACKQUOTED_IDENTIFIER); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class UnquoteIdentifierContext extends ParserRuleContext { + public UnquoteIdentifierContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_unquoteIdentifier; } + + public UnquoteIdentifierContext() { } + public void copyFrom(UnquoteIdentifierContext ctx) { + super.copyFrom(ctx); + } + } + public static class DigitIdentifierContext extends UnquoteIdentifierContext { + public TerminalNode DIGIT_IDENTIFIER() { return getToken(SqlBaseParser.DIGIT_IDENTIFIER, 0); } + public DigitIdentifierContext(UnquoteIdentifierContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterDigitIdentifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitDigitIdentifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitDigitIdentifier(this); + else return visitor.visitChildren(this); + } + } + public static class 
UnquotedIdentifierContext extends UnquoteIdentifierContext { + public TerminalNode IDENTIFIER() { return getToken(SqlBaseParser.IDENTIFIER, 0); } + public NonReservedContext nonReserved() { + return getRuleContext(NonReservedContext.class,0); + } + public UnquotedIdentifierContext(UnquoteIdentifierContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterUnquotedIdentifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitUnquotedIdentifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitUnquotedIdentifier(this); + else return visitor.visitChildren(this); + } + } + + public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionException { + UnquoteIdentifierContext _localctx = new UnquoteIdentifierContext(_ctx, getState()); + enterRule(_localctx, 72, RULE_unquoteIdentifier); + try { + setState(583); + switch (_input.LA(1)) { + case IDENTIFIER: + _localctx = new UnquotedIdentifierContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(580); + match(IDENTIFIER); + } + break; + case ANALYZE: + case ANALYZED: + case COLUMNS: + case DEBUG: + case EXECUTABLE: + case EXPLAIN: + case FORMAT: + case FROM: + case FUNCTIONS: + case GRAPHVIZ: + case LOGICAL: + case MAPPED: + case OPTIMIZED: + case PARSED: + case PHYSICAL: + case PLAN: + case QUERY: + case RESET: + case RLIKE: + case SCHEMAS: + case SESSION: + case SETS: + case SHOW: + case TABLES: + case TEXT: + case TYPE: + case USE: + case VERIFY: + _localctx = new UnquotedIdentifierContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(581); + nonReserved(); + } + break; + case DIGIT_IDENTIFIER: + _localctx = new DigitIdentifierContext(_localctx); + 
enterOuterAlt(_localctx, 3); + { + setState(582); + match(DIGIT_IDENTIFIER); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class NumberContext extends ParserRuleContext { + public NumberContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_number; } + + public NumberContext() { } + public void copyFrom(NumberContext ctx) { + super.copyFrom(ctx); + } + } + public static class DecimalLiteralContext extends NumberContext { + public TerminalNode DECIMAL_VALUE() { return getToken(SqlBaseParser.DECIMAL_VALUE, 0); } + public DecimalLiteralContext(NumberContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterDecimalLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitDecimalLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitDecimalLiteral(this); + else return visitor.visitChildren(this); + } + } + public static class IntegerLiteralContext extends NumberContext { + public TerminalNode INTEGER_VALUE() { return getToken(SqlBaseParser.INTEGER_VALUE, 0); } + public IntegerLiteralContext(NumberContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterIntegerLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) 
((SqlBaseListener)listener).exitIntegerLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitIntegerLiteral(this); + else return visitor.visitChildren(this); + } + } + + public final NumberContext number() throws RecognitionException { + NumberContext _localctx = new NumberContext(_ctx, getState()); + enterRule(_localctx, 74, RULE_number); + try { + setState(587); + switch (_input.LA(1)) { + case DECIMAL_VALUE: + _localctx = new DecimalLiteralContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(585); + match(DECIMAL_VALUE); + } + break; + case INTEGER_VALUE: + _localctx = new IntegerLiteralContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(586); + match(INTEGER_VALUE); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class NonReservedContext extends ParserRuleContext { + public TerminalNode ANALYZE() { return getToken(SqlBaseParser.ANALYZE, 0); } + public TerminalNode ANALYZED() { return getToken(SqlBaseParser.ANALYZED, 0); } + public TerminalNode COLUMNS() { return getToken(SqlBaseParser.COLUMNS, 0); } + public TerminalNode DEBUG() { return getToken(SqlBaseParser.DEBUG, 0); } + public TerminalNode EXECUTABLE() { return getToken(SqlBaseParser.EXECUTABLE, 0); } + public TerminalNode EXPLAIN() { return getToken(SqlBaseParser.EXPLAIN, 0); } + public TerminalNode FORMAT() { return getToken(SqlBaseParser.FORMAT, 0); } + public TerminalNode FUNCTIONS() { return getToken(SqlBaseParser.FUNCTIONS, 0); } + public TerminalNode FROM() { return getToken(SqlBaseParser.FROM, 0); } + public TerminalNode GRAPHVIZ() { return getToken(SqlBaseParser.GRAPHVIZ, 0); } + public TerminalNode LOGICAL() { return 
getToken(SqlBaseParser.LOGICAL, 0); } + public TerminalNode MAPPED() { return getToken(SqlBaseParser.MAPPED, 0); } + public TerminalNode OPTIMIZED() { return getToken(SqlBaseParser.OPTIMIZED, 0); } + public TerminalNode PARSED() { return getToken(SqlBaseParser.PARSED, 0); } + public TerminalNode PHYSICAL() { return getToken(SqlBaseParser.PHYSICAL, 0); } + public TerminalNode PLAN() { return getToken(SqlBaseParser.PLAN, 0); } + public TerminalNode QUERY() { return getToken(SqlBaseParser.QUERY, 0); } + public TerminalNode RESET() { return getToken(SqlBaseParser.RESET, 0); } + public TerminalNode RLIKE() { return getToken(SqlBaseParser.RLIKE, 0); } + public TerminalNode SCHEMAS() { return getToken(SqlBaseParser.SCHEMAS, 0); } + public TerminalNode SESSION() { return getToken(SqlBaseParser.SESSION, 0); } + public TerminalNode SETS() { return getToken(SqlBaseParser.SETS, 0); } + public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } + public TerminalNode TABLES() { return getToken(SqlBaseParser.TABLES, 0); } + public TerminalNode TEXT() { return getToken(SqlBaseParser.TEXT, 0); } + public TerminalNode TYPE() { return getToken(SqlBaseParser.TYPE, 0); } + public TerminalNode USE() { return getToken(SqlBaseParser.USE, 0); } + public TerminalNode VERIFY() { return getToken(SqlBaseParser.VERIFY, 0); } + public NonReservedContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_nonReserved; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterNonReserved(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitNonReserved(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitNonReserved(this); + 
else return visitor.visitChildren(this); + } + } + + public final NonReservedContext nonReserved() throws RecognitionException { + NonReservedContext _localctx = new NonReservedContext(_ctx, getState()); + enterRule(_localctx, 76, RULE_nonReserved); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(589); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FROM) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << LOGICAL) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << QUERY) | (1L << RESET))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (RLIKE - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SESSION - 65)) | (1L << (SETS - 65)) | (1L << (SHOW - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TYPE - 65)) | (1L << (USE - 65)) | (1L << (VERIFY - 65)))) != 0)) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { + switch (ruleIndex) { + case 21: + return booleanExpression_sempred((BooleanExpressionContext)_localctx, predIndex); + case 24: + return valueExpression_sempred((ValueExpressionContext)_localctx, predIndex); + } + return true; + } + private boolean booleanExpression_sempred(BooleanExpressionContext _localctx, int predIndex) { + switch (predIndex) { + case 0: + return precpred(_ctx, 2); + case 1: + return precpred(_ctx, 1); + } + return true; + } + private boolean valueExpression_sempred(ValueExpressionContext _localctx, int predIndex) { + switch (predIndex) { + case 2: + return precpred(_ctx, 3); + case 3: + return 
precpred(_ctx, 2); + case 4: + return precpred(_ctx, 1); + } + return true; + } + + public static final String _serializedATN = + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3n\u0252\4\2\t\2\4"+ + "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ + "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ + "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ + "\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+ + "\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\3\2\3\2\3\2\3\3\3\3"+ + "\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4`\n\4\f\4\16\4c\13\4\3\4\5"+ + "\4f\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4o\n\4\f\4\16\4r\13\4\3\4\5\4u\n"+ + "\4\3\4\3\4\3\4\3\4\5\4{\n\4\3\4\5\4~\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3"+ + "\4\3\4\5\4\u0089\n\4\3\4\5\4\u008c\n\4\3\4\3\4\5\4\u0090\n\4\3\5\3\5\3"+ + "\5\3\5\7\5\u0096\n\5\f\5\16\5\u0099\13\5\5\5\u009b\n\5\3\5\3\5\3\6\3\6"+ + "\3\6\3\6\3\6\3\6\7\6\u00a5\n\6\f\6\16\6\u00a8\13\6\5\6\u00aa\n\6\3\6\3"+ + "\6\5\6\u00ae\n\6\3\7\3\7\3\7\3\7\3\7\5\7\u00b5\n\7\3\b\3\b\5\b\u00b9\n"+ + "\b\3\t\3\t\5\t\u00bd\n\t\3\t\3\t\3\t\7\t\u00c2\n\t\f\t\16\t\u00c5\13\t"+ + "\3\t\5\t\u00c8\n\t\3\t\3\t\5\t\u00cc\n\t\3\t\3\t\3\t\5\t\u00d1\n\t\3\t"+ + "\3\t\5\t\u00d5\n\t\3\n\3\n\3\n\3\n\7\n\u00db\n\n\f\n\16\n\u00de\13\n\3"+ + "\13\5\13\u00e1\n\13\3\13\3\13\3\13\7\13\u00e6\n\13\f\13\16\13\u00e9\13"+ + "\13\3\f\3\f\3\r\3\r\3\r\3\r\7\r\u00f1\n\r\f\r\16\r\u00f4\13\r\5\r\u00f6"+ + "\n\r\3\r\3\r\5\r\u00fa\n\r\3\16\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\20"+ + "\3\20\5\20\u0106\n\20\3\20\5\20\u0109\n\20\3\21\3\21\7\21\u010d\n\21\f"+ + "\21\16\21\u0110\13\21\3\22\3\22\3\22\3\22\5\22\u0116\n\22\3\22\3\22\3"+ + "\22\3\22\3\22\5\22\u011d\n\22\3\23\5\23\u0120\n\23\3\23\3\23\5\23\u0124"+ + "\n\23\3\23\3\23\5\23\u0128\n\23\3\23\3\23\5\23\u012c\n\23\5\23\u012e\n"+ + "\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\7\24\u0137\n\24\f\24\16\24\u013a"+ + 
"\13\24\3\24\3\24\5\24\u013e\n\24\3\25\3\25\5\25\u0142\n\25\3\25\5\25\u0145"+ + "\n\25\3\25\3\25\3\25\3\25\5\25\u014b\n\25\3\25\5\25\u014e\n\25\3\25\3"+ + "\25\3\25\3\25\5\25\u0154\n\25\3\25\5\25\u0157\n\25\5\25\u0159\n\25\3\26"+ + "\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27"+ + "\7\27\u016a\n\27\f\27\16\27\u016d\13\27\3\27\3\27\3\27\3\27\3\27\3\27"+ + "\3\27\3\27\7\27\u0177\n\27\f\27\16\27\u017a\13\27\3\27\3\27\3\27\3\27"+ + "\3\27\3\27\3\27\3\27\3\27\7\27\u0185\n\27\f\27\16\27\u0188\13\27\3\27"+ + "\3\27\5\27\u018c\n\27\3\27\3\27\3\27\3\27\3\27\3\27\7\27\u0194\n\27\f"+ + "\27\16\27\u0197\13\27\3\30\3\30\5\30\u019b\n\30\3\31\5\31\u019e\n\31\3"+ + "\31\3\31\3\31\3\31\3\31\3\31\5\31\u01a6\n\31\3\31\3\31\3\31\3\31\3\31"+ + "\7\31\u01ad\n\31\f\31\16\31\u01b0\13\31\3\31\3\31\3\31\5\31\u01b5\n\31"+ + "\3\31\3\31\3\31\3\31\3\31\3\31\5\31\u01bd\n\31\3\31\3\31\3\31\3\31\5\31"+ + "\u01c3\n\31\3\31\5\31\u01c6\n\31\3\32\3\32\3\32\3\32\5\32\u01cc\n\32\3"+ + "\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\7\32\u01d8\n\32\f\32"+ + "\16\32\u01db\13\32\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3"+ + "\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\5\33\u01f0\n\33\3\33\3\33"+ + "\3\33\3\33\5\33\u01f6\n\33\3\33\3\33\3\33\7\33\u01fb\n\33\f\33\16\33\u01fe"+ + "\13\33\5\33\u0200\n\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3"+ + "\33\3\33\3\33\3\33\3\33\3\33\5\33\u0211\n\33\3\34\3\34\5\34\u0215\n\34"+ + "\3\34\3\34\5\34\u0219\n\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35"+ + "\6\35\u0224\n\35\r\35\16\35\u0225\5\35\u0228\n\35\3\36\3\36\3\37\3\37"+ + "\3 \3 \3!\3!\3!\3!\3!\3\"\3\"\3\"\7\"\u0238\n\"\f\"\16\"\u023b\13\"\3"+ + "#\3#\3$\3$\5$\u0241\n$\3%\3%\5%\u0245\n%\3&\3&\3&\5&\u024a\n&\3\'\3\'"+ + "\5\'\u024e\n\'\3(\3(\3(\2\4,\62)\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36"+ + " \"$&(*,.\60\62\64\668:<>@BDFHJLN\2\20\b\2\7\7\t\t\30\30\61\6188==\4\2"+ + "\"\"LL\4\2\t\t88\4\2\37\37&&\3\2\25\26\4\2\7\7dd\4\2\r\r\25\25\4\2\7\7"+ + 
"\27\27\4\2..CC\3\2]^\3\2_a\3\2W\\\4\2\34\34OO\22\2\b\t\22\22\24\24\30"+ + "\30\32\32\36\37!\"\60\6188=ACDFFHIKLPQSS\u0298\2P\3\2\2\2\4S\3\2\2\2\6"+ + "\u008f\3\2\2\2\b\u009a\3\2\2\2\n\u009e\3\2\2\2\f\u00b4\3\2\2\2\16\u00b6"+ + "\3\2\2\2\20\u00ba\3\2\2\2\22\u00d6\3\2\2\2\24\u00e0\3\2\2\2\26\u00ea\3"+ + "\2\2\2\30\u00f9\3\2\2\2\32\u00fb\3\2\2\2\34\u0101\3\2\2\2\36\u0103\3\2"+ + "\2\2 \u010a\3\2\2\2\"\u011c\3\2\2\2$\u012d\3\2\2\2&\u013d\3\2\2\2(\u0158"+ + "\3\2\2\2*\u015a\3\2\2\2,\u018b\3\2\2\2.\u0198\3\2\2\2\60\u01c5\3\2\2\2"+ + "\62\u01cb\3\2\2\2\64\u0210\3\2\2\2\66\u0218\3\2\2\28\u0227\3\2\2\2:\u0229"+ + "\3\2\2\2<\u022b\3\2\2\2>\u022d\3\2\2\2@\u022f\3\2\2\2B\u0234\3\2\2\2D"+ + "\u023c\3\2\2\2F\u0240\3\2\2\2H\u0244\3\2\2\2J\u0249\3\2\2\2L\u024d\3\2"+ + "\2\2N\u024f\3\2\2\2PQ\5\6\4\2QR\7\2\2\3R\3\3\2\2\2ST\5*\26\2TU\7\2\2\3"+ + "U\5\3\2\2\2V\u0090\5\b\5\2We\7\32\2\2Xa\7\3\2\2YZ\7?\2\2Z`\t\2\2\2[\\"+ + "\7\36\2\2\\`\t\3\2\2]^\7S\2\2^`\5<\37\2_Y\3\2\2\2_[\3\2\2\2_]\3\2\2\2"+ + "`c\3\2\2\2a_\3\2\2\2ab\3\2\2\2bd\3\2\2\2ca\3\2\2\2df\7\4\2\2eX\3\2\2\2"+ + "ef\3\2\2\2fg\3\2\2\2g\u0090\5\6\4\2ht\7\24\2\2ip\7\3\2\2jk\7?\2\2ko\t"+ + "\4\2\2lm\7\36\2\2mo\t\3\2\2nj\3\2\2\2nl\3\2\2\2or\3\2\2\2pn\3\2\2\2pq"+ + "\3\2\2\2qs\3\2\2\2rp\3\2\2\2su\7\4\2\2ti\3\2\2\2tu\3\2\2\2uv\3\2\2\2v"+ + "\u0090\5\6\4\2wx\7I\2\2x}\7K\2\2y{\7.\2\2zy\3\2\2\2z{\3\2\2\2{|\3\2\2"+ + "\2|~\7c\2\2}z\3\2\2\2}~\3\2\2\2~\u0090\3\2\2\2\177\u0080\7I\2\2\u0080"+ + "\u0081\7\22\2\2\u0081\u0082\t\5\2\2\u0082\u0090\5D#\2\u0083\u0084\t\6"+ + "\2\2\u0084\u0090\5D#\2\u0085\u0086\7I\2\2\u0086\u008b\7!\2\2\u0087\u0089"+ + "\7.\2\2\u0088\u0087\3\2\2\2\u0088\u0089\3\2\2\2\u0089\u008a\3\2\2\2\u008a"+ + "\u008c\7c\2\2\u008b\u0088\3\2\2\2\u008b\u008c\3\2\2\2\u008c\u0090\3\2"+ + "\2\2\u008d\u008e\7I\2\2\u008e\u0090\7D\2\2\u008fV\3\2\2\2\u008fW\3\2\2"+ + "\2\u008fh\3\2\2\2\u008fw\3\2\2\2\u008f\177\3\2\2\2\u008f\u0083\3\2\2\2"+ + "\u008f\u0085\3\2\2\2\u008f\u008d\3\2\2\2\u0090\7\3\2\2\2\u0091\u0092\7"+ + 
"V\2\2\u0092\u0097\5\32\16\2\u0093\u0094\7\5\2\2\u0094\u0096\5\32\16\2"+ + "\u0095\u0093\3\2\2\2\u0096\u0099\3\2\2\2\u0097\u0095\3\2\2\2\u0097\u0098"+ + "\3\2\2\2\u0098\u009b\3\2\2\2\u0099\u0097\3\2\2\2\u009a\u0091\3\2\2\2\u009a"+ + "\u009b\3\2\2\2\u009b\u009c\3\2\2\2\u009c\u009d\5\n\6\2\u009d\t\3\2\2\2"+ + "\u009e\u00a9\5\f\7\2\u009f\u00a0\7;\2\2\u00a0\u00a1\7\17\2\2\u00a1\u00a6"+ + "\5\16\b\2\u00a2\u00a3\7\5\2\2\u00a3\u00a5\5\16\b\2\u00a4\u00a2\3\2\2\2"+ + "\u00a5\u00a8\3\2\2\2\u00a6\u00a4\3\2\2\2\u00a6\u00a7\3\2\2\2\u00a7\u00aa"+ + "\3\2\2\2\u00a8\u00a6\3\2\2\2\u00a9\u009f\3\2\2\2\u00a9\u00aa\3\2\2\2\u00aa"+ + "\u00ad\3\2\2\2\u00ab\u00ac\7/\2\2\u00ac\u00ae\t\7\2\2\u00ad\u00ab\3\2"+ + "\2\2\u00ad\u00ae\3\2\2\2\u00ae\13\3\2\2\2\u00af\u00b5\5\20\t\2\u00b0\u00b1"+ + "\7\3\2\2\u00b1\u00b2\5\n\6\2\u00b2\u00b3\7\4\2\2\u00b3\u00b5\3\2\2\2\u00b4"+ + "\u00af\3\2\2\2\u00b4\u00b0\3\2\2\2\u00b5\r\3\2\2\2\u00b6\u00b8\5*\26\2"+ + "\u00b7\u00b9\t\b\2\2\u00b8\u00b7\3\2\2\2\u00b8\u00b9\3\2\2\2\u00b9\17"+ + "\3\2\2\2\u00ba\u00bc\7E\2\2\u00bb\u00bd\5\34\17\2\u00bc\u00bb\3\2\2\2"+ + "\u00bc\u00bd\3\2\2\2\u00bd\u00be\3\2\2\2\u00be\u00c3\5\36\20\2\u00bf\u00c0"+ + "\7\5\2\2\u00c0\u00c2\5\36\20\2\u00c1\u00bf\3\2\2\2\u00c2\u00c5\3\2\2\2"+ + "\u00c3\u00c1\3\2\2\2\u00c3\u00c4\3\2\2\2\u00c4\u00c7\3\2\2\2\u00c5\u00c3"+ + "\3\2\2\2\u00c6\u00c8\5\22\n\2\u00c7\u00c6\3\2\2\2\u00c7\u00c8\3\2\2\2"+ + "\u00c8\u00cb\3\2\2\2\u00c9\u00ca\7U\2\2\u00ca\u00cc\5,\27\2\u00cb\u00c9"+ + "\3\2\2\2\u00cb\u00cc\3\2\2\2\u00cc\u00d0\3\2\2\2\u00cd\u00ce\7#\2\2\u00ce"+ + "\u00cf\7\17\2\2\u00cf\u00d1\5\24\13\2\u00d0\u00cd\3\2\2\2\u00d0\u00d1"+ + "\3\2\2\2\u00d1\u00d4\3\2\2\2\u00d2\u00d3\7%\2\2\u00d3\u00d5\5,\27\2\u00d4"+ + "\u00d2\3\2\2\2\u00d4\u00d5\3\2\2\2\u00d5\21\3\2\2\2\u00d6\u00d7\7\37\2"+ + "\2\u00d7\u00dc\5 \21\2\u00d8\u00d9\7\5\2\2\u00d9\u00db\5 \21\2\u00da\u00d8"+ + "\3\2\2\2\u00db\u00de\3\2\2\2\u00dc\u00da\3\2\2\2\u00dc\u00dd\3\2\2\2\u00dd"+ + 
"\23\3\2\2\2\u00de\u00dc\3\2\2\2\u00df\u00e1\5\34\17\2\u00e0\u00df\3\2"+ + "\2\2\u00e0\u00e1\3\2\2\2\u00e1\u00e2\3\2\2\2\u00e2\u00e7\5\26\f\2\u00e3"+ + "\u00e4\7\5\2\2\u00e4\u00e6\5\26\f\2\u00e5\u00e3\3\2\2\2\u00e6\u00e9\3"+ + "\2\2\2\u00e7\u00e5\3\2\2\2\u00e7\u00e8\3\2\2\2\u00e8\25\3\2\2\2\u00e9"+ + "\u00e7\3\2\2\2\u00ea\u00eb\5\30\r\2\u00eb\27\3\2\2\2\u00ec\u00f5\7\3\2"+ + "\2\u00ed\u00f2\5*\26\2\u00ee\u00ef\7\5\2\2\u00ef\u00f1\5*\26\2\u00f0\u00ee"+ + "\3\2\2\2\u00f1\u00f4\3\2\2\2\u00f2\u00f0\3\2\2\2\u00f2\u00f3\3\2\2\2\u00f3"+ + "\u00f6\3\2\2\2\u00f4\u00f2\3\2\2\2\u00f5\u00ed\3\2\2\2\u00f5\u00f6\3\2"+ + "\2\2\u00f6\u00f7\3\2\2\2\u00f7\u00fa\7\4\2\2\u00f8\u00fa\5*\26\2\u00f9"+ + "\u00ec\3\2\2\2\u00f9\u00f8\3\2\2\2\u00fa\31\3\2\2\2\u00fb\u00fc\5F$\2"+ + "\u00fc\u00fd\7\f\2\2\u00fd\u00fe\7\3\2\2\u00fe\u00ff\5\n\6\2\u00ff\u0100"+ + "\7\4\2\2\u0100\33\3\2\2\2\u0101\u0102\t\t\2\2\u0102\35\3\2\2\2\u0103\u0108"+ + "\5*\26\2\u0104\u0106\7\f\2\2\u0105\u0104\3\2\2\2\u0105\u0106\3\2\2\2\u0106"+ + "\u0107\3\2\2\2\u0107\u0109\5F$\2\u0108\u0105\3\2\2\2\u0108\u0109\3\2\2"+ + "\2\u0109\37\3\2\2\2\u010a\u010e\5(\25\2\u010b\u010d\5\"\22\2\u010c\u010b"+ + "\3\2\2\2\u010d\u0110\3\2\2\2\u010e\u010c\3\2\2\2\u010e\u010f\3\2\2\2\u010f"+ + "!\3\2\2\2\u0110\u010e\3\2\2\2\u0111\u0112\5$\23\2\u0112\u0113\7+\2\2\u0113"+ + "\u0115\5(\25\2\u0114\u0116\5&\24\2\u0115\u0114\3\2\2\2\u0115\u0116\3\2"+ + "\2\2\u0116\u011d\3\2\2\2\u0117\u0118\7\63\2\2\u0118\u0119\5$\23\2\u0119"+ + "\u011a\7+\2\2\u011a\u011b\5(\25\2\u011b\u011d\3\2\2\2\u011c\u0111\3\2"+ + "\2\2\u011c\u0117\3\2\2\2\u011d#\3\2\2\2\u011e\u0120\7\'\2\2\u011f\u011e"+ + "\3\2\2\2\u011f\u0120\3\2\2\2\u0120\u012e\3\2\2\2\u0121\u0123\7-\2\2\u0122"+ + "\u0124\7<\2\2\u0123\u0122\3\2\2\2\u0123\u0124\3\2\2\2\u0124\u012e\3\2"+ + "\2\2\u0125\u0127\7B\2\2\u0126\u0128\7<\2\2\u0127\u0126\3\2\2\2\u0127\u0128"+ + "\3\2\2\2\u0128\u012e\3\2\2\2\u0129\u012b\7 \2\2\u012a\u012c\7<\2\2\u012b"+ + 
"\u012a\3\2\2\2\u012b\u012c\3\2\2\2\u012c\u012e\3\2\2\2\u012d\u011f\3\2"+ + "\2\2\u012d\u0121\3\2\2\2\u012d\u0125\3\2\2\2\u012d\u0129\3\2\2\2\u012e"+ + "%\3\2\2\2\u012f\u0130\7\67\2\2\u0130\u013e\5,\27\2\u0131\u0132\7R\2\2"+ + "\u0132\u0133\7\3\2\2\u0133\u0138\5F$\2\u0134\u0135\7\5\2\2\u0135\u0137"+ + "\5F$\2\u0136\u0134\3\2\2\2\u0137\u013a\3\2\2\2\u0138\u0136\3\2\2\2\u0138"+ + "\u0139\3\2\2\2\u0139\u013b\3\2\2\2\u013a\u0138\3\2\2\2\u013b\u013c\7\4"+ + "\2\2\u013c\u013e\3\2\2\2\u013d\u012f\3\2\2\2\u013d\u0131\3\2\2\2\u013e"+ + "\'\3\2\2\2\u013f\u0144\5D#\2\u0140\u0142\7\f\2\2\u0141\u0140\3\2\2\2\u0141"+ + "\u0142\3\2\2\2\u0142\u0143\3\2\2\2\u0143\u0145\5B\"\2\u0144\u0141\3\2"+ + "\2\2\u0144\u0145\3\2\2\2\u0145\u0159\3\2\2\2\u0146\u0147\7\3\2\2\u0147"+ + "\u0148\5\n\6\2\u0148\u014d\7\4\2\2\u0149\u014b\7\f\2\2\u014a\u0149\3\2"+ + "\2\2\u014a\u014b\3\2\2\2\u014b\u014c\3\2\2\2\u014c\u014e\5B\"\2\u014d"+ + "\u014a\3\2\2\2\u014d\u014e\3\2\2\2\u014e\u0159\3\2\2\2\u014f\u0150\7\3"+ + "\2\2\u0150\u0151\5 \21\2\u0151\u0156\7\4\2\2\u0152\u0154\7\f\2\2\u0153"+ + "\u0152\3\2\2\2\u0153\u0154\3\2\2\2\u0154\u0155\3\2\2\2\u0155\u0157\5B"+ + "\"\2\u0156\u0153\3\2\2\2\u0156\u0157\3\2\2\2\u0157\u0159\3\2\2\2\u0158"+ + "\u013f\3\2\2\2\u0158\u0146\3\2\2\2\u0158\u014f\3\2\2\2\u0159)\3\2\2\2"+ + "\u015a\u015b\5,\27\2\u015b+\3\2\2\2\u015c\u015d\b\27\1\2\u015d\u015e\7"+ + "\65\2\2\u015e\u018c\5,\27\n\u015f\u0160\7\31\2\2\u0160\u0161\7\3\2\2\u0161"+ + "\u0162\5\b\5\2\u0162\u0163\7\4\2\2\u0163\u018c\3\2\2\2\u0164\u0165\7@"+ + "\2\2\u0165\u0166\7\3\2\2\u0166\u016b\7c\2\2\u0167\u0168\7\5\2\2\u0168"+ + "\u016a\7c\2\2\u0169\u0167\3\2\2\2\u016a\u016d\3\2\2\2\u016b\u0169\3\2"+ + "\2\2\u016b\u016c\3\2\2\2\u016c\u016e\3\2\2\2\u016d\u016b\3\2\2\2\u016e"+ + "\u018c\7\4\2\2\u016f\u0170\7\62\2\2\u0170\u0171\7\3\2\2\u0171\u0172\5"+ + "B\"\2\u0172\u0173\7\5\2\2\u0173\u0178\7c\2\2\u0174\u0175\7\5\2\2\u0175"+ + "\u0177\7c\2\2\u0176\u0174\3\2\2\2\u0177\u017a\3\2\2\2\u0178\u0176\3\2"+ + 
"\2\2\u0178\u0179\3\2\2\2\u0179\u017b\3\2\2\2\u017a\u0178\3\2\2\2\u017b"+ + "\u017c\7\4\2\2\u017c\u018c\3\2\2\2\u017d\u017e\7\62\2\2\u017e\u017f\7"+ + "\3\2\2\u017f\u0180\7c\2\2\u0180\u0181\7\5\2\2\u0181\u0186\7c\2\2\u0182"+ + "\u0183\7\5\2\2\u0183\u0185\7c\2\2\u0184\u0182\3\2\2\2\u0185\u0188\3\2"+ + "\2\2\u0186\u0184\3\2\2\2\u0186\u0187\3\2\2\2\u0187\u0189\3\2\2\2\u0188"+ + "\u0186\3\2\2\2\u0189\u018c\7\4\2\2\u018a\u018c\5.\30\2\u018b\u015c\3\2"+ + "\2\2\u018b\u015f\3\2\2\2\u018b\u0164\3\2\2\2\u018b\u016f\3\2\2\2\u018b"+ + "\u017d\3\2\2\2\u018b\u018a\3\2\2\2\u018c\u0195\3\2\2\2\u018d\u018e\f\4"+ + "\2\2\u018e\u018f\7\n\2\2\u018f\u0194\5,\27\5\u0190\u0191\f\3\2\2\u0191"+ + "\u0192\7:\2\2\u0192\u0194\5,\27\4\u0193\u018d\3\2\2\2\u0193\u0190\3\2"+ + "\2\2\u0194\u0197\3\2\2\2\u0195\u0193\3\2\2\2\u0195\u0196\3\2\2\2\u0196"+ + "-\3\2\2\2\u0197\u0195\3\2\2\2\u0198\u019a\5\62\32\2\u0199\u019b\5\60\31"+ + "\2\u019a\u0199\3\2\2\2\u019a\u019b\3\2\2\2\u019b/\3\2\2\2\u019c\u019e"+ + "\7\65\2\2\u019d\u019c\3\2\2\2\u019d\u019e\3\2\2\2\u019e\u019f\3\2\2\2"+ + "\u019f\u01a0\7\16\2\2\u01a0\u01a1\5\62\32\2\u01a1\u01a2\7\n\2\2\u01a2"+ + "\u01a3\5\62\32\2\u01a3\u01c6\3\2\2\2\u01a4\u01a6\7\65\2\2\u01a5\u01a4"+ + "\3\2\2\2\u01a5\u01a6\3\2\2\2\u01a6\u01a7\3\2\2\2\u01a7\u01a8\7&\2\2\u01a8"+ + "\u01a9\7\3\2\2\u01a9\u01ae\5*\26\2\u01aa\u01ab\7\5\2\2\u01ab\u01ad\5*"+ + "\26\2\u01ac\u01aa\3\2\2\2\u01ad\u01b0\3\2\2\2\u01ae\u01ac\3\2\2\2\u01ae"+ + "\u01af\3\2\2\2\u01af\u01b1\3\2\2\2\u01b0\u01ae\3\2\2\2\u01b1\u01b2\7\4"+ + "\2\2\u01b2\u01c6\3\2\2\2\u01b3\u01b5\7\65\2\2\u01b4\u01b3\3\2\2\2\u01b4"+ + "\u01b5\3\2\2\2\u01b5\u01b6\3\2\2\2\u01b6\u01b7\7&\2\2\u01b7\u01b8\7\3"+ + "\2\2\u01b8\u01b9\5\b\5\2\u01b9\u01ba\7\4\2\2\u01ba\u01c6\3\2\2\2\u01bb"+ + "\u01bd\7\65\2\2\u01bc\u01bb\3\2\2\2\u01bc\u01bd\3\2\2\2\u01bd\u01be\3"+ + "\2\2\2\u01be\u01bf\t\n\2\2\u01bf\u01c6\5\62\32\2\u01c0\u01c2\7*\2\2\u01c1"+ + "\u01c3\7\65\2\2\u01c2\u01c1\3\2\2\2\u01c2\u01c3\3\2\2\2\u01c3\u01c4\3"+ + 
"\2\2\2\u01c4\u01c6\7\66\2\2\u01c5\u019d\3\2\2\2\u01c5\u01a5\3\2\2\2\u01c5"+ + "\u01b4\3\2\2\2\u01c5\u01bc\3\2\2\2\u01c5\u01c0\3\2\2\2\u01c6\61\3\2\2"+ + "\2\u01c7\u01c8\b\32\1\2\u01c8\u01cc\5\64\33\2\u01c9\u01ca\t\13\2\2\u01ca"+ + "\u01cc\5\62\32\6\u01cb\u01c7\3\2\2\2\u01cb\u01c9\3\2\2\2\u01cc\u01d9\3"+ + "\2\2\2\u01cd\u01ce\f\5\2\2\u01ce\u01cf\t\f\2\2\u01cf\u01d8\5\62\32\6\u01d0"+ + "\u01d1\f\4\2\2\u01d1\u01d2\t\13\2\2\u01d2\u01d8\5\62\32\5\u01d3\u01d4"+ + "\f\3\2\2\u01d4\u01d5\5:\36\2\u01d5\u01d6\5\62\32\4\u01d6\u01d8\3\2\2\2"+ + "\u01d7\u01cd\3\2\2\2\u01d7\u01d0\3\2\2\2\u01d7\u01d3\3\2\2\2\u01d8\u01db"+ + "\3\2\2\2\u01d9\u01d7\3\2\2\2\u01d9\u01da\3\2\2\2\u01da\63\3\2\2\2\u01db"+ + "\u01d9\3\2\2\2\u01dc\u01dd\7\20\2\2\u01dd\u01de\7\3\2\2\u01de\u01df\5"+ + "*\26\2\u01df\u01e0\7\f\2\2\u01e0\u01e1\5> \2\u01e1\u01e2\7\4\2\2\u01e2"+ + "\u0211\3\2\2\2\u01e3\u01e4\7\33\2\2\u01e4\u01e5\7\3\2\2\u01e5\u01e6\5"+ + "F$\2\u01e6\u01e7\7\37\2\2\u01e7\u01e8\5\62\32\2\u01e8\u01e9\7\4\2\2\u01e9"+ + "\u0211\3\2\2\2\u01ea\u0211\58\35\2\u01eb\u0211\7_\2\2\u01ec\u01ed\5\66"+ + "\34\2\u01ed\u01ee\7\6\2\2\u01ee\u01f0\3\2\2\2\u01ef\u01ec\3\2\2\2\u01ef"+ + "\u01f0\3\2\2\2\u01f0\u01f1\3\2\2\2\u01f1\u0211\7_\2\2\u01f2\u01f3\5F$"+ + "\2\u01f3\u01ff\7\3\2\2\u01f4\u01f6\5\34\17\2\u01f5\u01f4\3\2\2\2\u01f5"+ + "\u01f6\3\2\2\2\u01f6\u01f7\3\2\2\2\u01f7\u01fc\5*\26\2\u01f8\u01f9\7\5"+ + "\2\2\u01f9\u01fb\5*\26\2\u01fa\u01f8\3\2\2\2\u01fb\u01fe\3\2\2\2\u01fc"+ + "\u01fa\3\2\2\2\u01fc\u01fd\3\2\2\2\u01fd\u0200\3\2\2\2\u01fe\u01fc\3\2"+ + "\2\2\u01ff\u01f5\3\2\2\2\u01ff\u0200\3\2\2\2\u0200\u0201\3\2\2\2\u0201"+ + "\u0202\7\4\2\2\u0202\u0211\3\2\2\2\u0203\u0204\7\3\2\2\u0204\u0205\5\b"+ + "\5\2\u0205\u0206\7\4\2\2\u0206\u0211\3\2\2\2\u0207\u0211\5\66\34\2\u0208"+ + "\u0209\5\66\34\2\u0209\u020a\7\6\2\2\u020a\u020b\5F$\2\u020b\u0211\3\2"+ + "\2\2\u020c\u020d\7\3\2\2\u020d\u020e\5*\26\2\u020e\u020f\7\4\2\2\u020f"+ + "\u0211\3\2\2\2\u0210\u01dc\3\2\2\2\u0210\u01e3\3\2\2\2\u0210\u01ea\3\2"+ + 
"\2\2\u0210\u01eb\3\2\2\2\u0210\u01ef\3\2\2\2\u0210\u01f2\3\2\2\2\u0210"+ + "\u0203\3\2\2\2\u0210\u0207\3\2\2\2\u0210\u0208\3\2\2\2\u0210\u020c\3\2"+ + "\2\2\u0211\65\3\2\2\2\u0212\u0215\5F$\2\u0213\u0215\5D#\2\u0214\u0212"+ + "\3\2\2\2\u0214\u0213\3\2\2\2\u0215\u0216\3\2\2\2\u0216\u0217\7\6\2\2\u0217"+ + "\u0219\3\2\2\2\u0218\u0214\3\2\2\2\u0218\u0219\3\2\2\2\u0219\u021a\3\2"+ + "\2\2\u021a\u021b\5F$\2\u021b\67\3\2\2\2\u021c\u0228\7\66\2\2\u021d\u021e"+ + "\5F$\2\u021e\u021f\7c\2\2\u021f\u0228\3\2\2\2\u0220\u0228\5L\'\2\u0221"+ + "\u0228\5<\37\2\u0222\u0224\7c\2\2\u0223\u0222\3\2\2\2\u0224\u0225\3\2"+ + "\2\2\u0225\u0223\3\2\2\2\u0225\u0226\3\2\2\2\u0226\u0228\3\2\2\2\u0227"+ + "\u021c\3\2\2\2\u0227\u021d\3\2\2\2\u0227\u0220\3\2\2\2\u0227\u0221\3\2"+ + "\2\2\u0227\u0223\3\2\2\2\u02289\3\2\2\2\u0229\u022a\t\r\2\2\u022a;\3\2"+ + "\2\2\u022b\u022c\t\16\2\2\u022c=\3\2\2\2\u022d\u022e\5F$\2\u022e?\3\2"+ + "\2\2\u022f\u0230\7T\2\2\u0230\u0231\5*\26\2\u0231\u0232\7M\2\2\u0232\u0233"+ + "\5*\26\2\u0233A\3\2\2\2\u0234\u0239\5F$\2\u0235\u0236\7\6\2\2\u0236\u0238"+ + "\5F$\2\u0237\u0235\3\2\2\2\u0238\u023b\3\2\2\2\u0239\u0237\3\2\2\2\u0239"+ + "\u023a\3\2\2\2\u023aC\3\2\2\2\u023b\u0239\3\2\2\2\u023c\u023d\5F$\2\u023d"+ + "E\3\2\2\2\u023e\u0241\5H%\2\u023f\u0241\5J&\2\u0240\u023e\3\2\2\2\u0240"+ + "\u023f\3\2\2\2\u0241G\3\2\2\2\u0242\u0245\7h\2\2\u0243\u0245\7i\2\2\u0244"+ + "\u0242\3\2\2\2\u0244\u0243\3\2\2\2\u0245I\3\2\2\2\u0246\u024a\7f\2\2\u0247"+ + "\u024a\5N(\2\u0248\u024a\7g\2\2\u0249\u0246\3\2\2\2\u0249\u0247\3\2\2"+ + "\2\u0249\u0248\3\2\2\2\u024aK\3\2\2\2\u024b\u024e\7e\2\2\u024c\u024e\7"+ + "d\2\2\u024d\u024b\3\2\2\2\u024d\u024c\3\2\2\2\u024eM\3\2\2\2\u024f\u0250"+ + "\t\17\2\2\u0250O\3\2\2\2R_aenptz}\u0088\u008b\u008f\u0097\u009a\u00a6"+ + "\u00a9\u00ad\u00b4\u00b8\u00bc\u00c3\u00c7\u00cb\u00d0\u00d4\u00dc\u00e0"+ + "\u00e7\u00f2\u00f5\u00f9\u0105\u0108\u010e\u0115\u011c\u011f\u0123\u0127"+ + 
"\u012b\u012d\u0138\u013d\u0141\u0144\u014a\u014d\u0153\u0156\u0158\u016b"+ + "\u0178\u0186\u018b\u0193\u0195\u019a\u019d\u01a5\u01ae\u01b4\u01bc\u01c2"+ + "\u01c5\u01cb\u01d7\u01d9\u01ef\u01f5\u01fc\u01ff\u0210\u0214\u0218\u0225"+ + "\u0227\u0239\u0240\u0244\u0249\u024d"; + public static final ATN _ATN = + new ATNDeserializer().deserialize(_serializedATN.toCharArray()); + static { + _decisionToDFA = new DFA[_ATN.getNumberOfDecisions()]; + for (int i = 0; i < _ATN.getNumberOfDecisions(); i++) { + _decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i); + } + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java new file mode 100644 index 00000000000..f4395577f2b --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java @@ -0,0 +1,496 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +// ANTLR GENERATED CODE: DO NOT EDIT +package org.elasticsearch.xpack.sql.parser; +import org.antlr.v4.runtime.tree.ParseTreeVisitor; + +/** + * This interface defines a complete generic visitor for a parse tree produced + * by {@link SqlBaseParser}. + * + * @param The return type of the visit operation. Use {@link Void} for + * operations with no return type. + */ +interface SqlBaseVisitor extends ParseTreeVisitor { + /** + * Visit a parse tree produced by {@link SqlBaseParser#singleStatement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSingleStatement(SqlBaseParser.SingleStatementContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#singleExpression}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitSingleExpression(SqlBaseParser.SingleExpressionContext ctx); + /** + * Visit a parse tree produced by the {@code statementDefault} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitStatementDefault(SqlBaseParser.StatementDefaultContext ctx); + /** + * Visit a parse tree produced by the {@code explain} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitExplain(SqlBaseParser.ExplainContext ctx); + /** + * Visit a parse tree produced by the {@code debug} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitDebug(SqlBaseParser.DebugContext ctx); + /** + * Visit a parse tree produced by the {@code showTables} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitShowTables(SqlBaseParser.ShowTablesContext ctx); + /** + * Visit a parse tree produced by the {@code showColumns} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitShowColumns(SqlBaseParser.ShowColumnsContext ctx); + /** + * Visit a parse tree produced by the {@code showFunctions} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitShowFunctions(SqlBaseParser.ShowFunctionsContext ctx); + /** + * Visit a parse tree produced by the {@code showSchemas} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitShowSchemas(SqlBaseParser.ShowSchemasContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#query}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitQuery(SqlBaseParser.QueryContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#queryNoWith}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitQueryNoWith(SqlBaseParser.QueryNoWithContext ctx); + /** + * Visit a parse tree produced by the {@code queryPrimaryDefault} + * labeled alternative in {@link SqlBaseParser#queryTerm}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitQueryPrimaryDefault(SqlBaseParser.QueryPrimaryDefaultContext ctx); + /** + * Visit a parse tree produced by the {@code subquery} + * labeled alternative in {@link SqlBaseParser#queryTerm}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSubquery(SqlBaseParser.SubqueryContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#orderBy}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitOrderBy(SqlBaseParser.OrderByContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#querySpecification}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitQuerySpecification(SqlBaseParser.QuerySpecificationContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#fromClause}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitFromClause(SqlBaseParser.FromClauseContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#groupBy}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitGroupBy(SqlBaseParser.GroupByContext ctx); + /** + * Visit a parse tree produced by the {@code singleGroupingSet} + * labeled alternative in {@link SqlBaseParser#groupingElement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#groupingExpressions}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitGroupingExpressions(SqlBaseParser.GroupingExpressionsContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#namedQuery}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitNamedQuery(SqlBaseParser.NamedQueryContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#setQuantifier}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSetQuantifier(SqlBaseParser.SetQuantifierContext ctx); + /** + * Visit a parse tree produced by the {@code selectExpression} + * labeled alternative in {@link SqlBaseParser#selectItem}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSelectExpression(SqlBaseParser.SelectExpressionContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#relation}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitRelation(SqlBaseParser.RelationContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#joinRelation}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitJoinRelation(SqlBaseParser.JoinRelationContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#joinType}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitJoinType(SqlBaseParser.JoinTypeContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#joinCriteria}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitJoinCriteria(SqlBaseParser.JoinCriteriaContext ctx); + /** + * Visit a parse tree produced by the {@code tableName} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitTableName(SqlBaseParser.TableNameContext ctx); + /** + * Visit a parse tree produced by the {@code aliasedQuery} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitAliasedQuery(SqlBaseParser.AliasedQueryContext ctx); + /** + * Visit a parse tree produced by the {@code aliasedRelation} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitAliasedRelation(SqlBaseParser.AliasedRelationContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#expression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitExpression(SqlBaseParser.ExpressionContext ctx); + /** + * Visit a parse tree produced by the {@code logicalNot} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitLogicalNot(SqlBaseParser.LogicalNotContext ctx); + /** + * Visit a parse tree produced by the {@code stringQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitStringQuery(SqlBaseParser.StringQueryContext ctx); + /** + * Visit a parse tree produced by the {@code booleanDefault} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitBooleanDefault(SqlBaseParser.BooleanDefaultContext ctx); + /** + * Visit a parse tree produced by the {@code exists} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitExists(SqlBaseParser.ExistsContext ctx); + /** + * Visit a parse tree produced by the {@code multiMatchQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitMultiMatchQuery(SqlBaseParser.MultiMatchQueryContext ctx); + /** + * Visit a parse tree produced by the {@code matchQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitMatchQuery(SqlBaseParser.MatchQueryContext ctx); + /** + * Visit a parse tree produced by the {@code logicalBinary} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#predicated}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitPredicated(SqlBaseParser.PredicatedContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#predicate}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitPredicate(SqlBaseParser.PredicateContext ctx); + /** + * Visit a parse tree produced by the {@code valueExpressionDefault} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitValueExpressionDefault(SqlBaseParser.ValueExpressionDefaultContext ctx); + /** + * Visit a parse tree produced by the {@code comparison} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitComparison(SqlBaseParser.ComparisonContext ctx); + /** + * Visit a parse tree produced by the {@code arithmeticBinary} + * labeled alternative in {@link SqlBaseParser#valueExpression}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext ctx); + /** + * Visit a parse tree produced by the {@code arithmeticUnary} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext ctx); + /** + * Visit a parse tree produced by the {@code cast} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitCast(SqlBaseParser.CastContext ctx); + /** + * Visit a parse tree produced by the {@code extract} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitExtract(SqlBaseParser.ExtractContext ctx); + /** + * Visit a parse tree produced by the {@code constantDefault} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitConstantDefault(SqlBaseParser.ConstantDefaultContext ctx); + /** + * Visit a parse tree produced by the {@code star} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitStar(SqlBaseParser.StarContext ctx); + /** + * Visit a parse tree produced by the {@code functionCall} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitFunctionCall(SqlBaseParser.FunctionCallContext ctx); + /** + * Visit a parse tree produced by the {@code subqueryExpression} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitSubqueryExpression(SqlBaseParser.SubqueryExpressionContext ctx); + /** + * Visit a parse tree produced by the {@code columnReference} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitColumnReference(SqlBaseParser.ColumnReferenceContext ctx); + /** + * Visit a parse tree produced by the {@code dereference} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitDereference(SqlBaseParser.DereferenceContext ctx); + /** + * Visit a parse tree produced by the {@code parenthesizedExpression} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#columnExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitColumnExpression(SqlBaseParser.ColumnExpressionContext ctx); + /** + * Visit a parse tree produced by the {@code nullLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitNullLiteral(SqlBaseParser.NullLiteralContext ctx); + /** + * Visit a parse tree produced by the {@code typeConstructor} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitTypeConstructor(SqlBaseParser.TypeConstructorContext ctx); + /** + * Visit a parse tree produced by the {@code numericLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitNumericLiteral(SqlBaseParser.NumericLiteralContext ctx); + /** + * Visit a parse tree produced by the {@code booleanLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitBooleanLiteral(SqlBaseParser.BooleanLiteralContext ctx); + /** + * Visit a parse tree produced by the {@code stringLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitStringLiteral(SqlBaseParser.StringLiteralContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#comparisonOperator}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitComparisonOperator(SqlBaseParser.ComparisonOperatorContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#booleanValue}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitBooleanValue(SqlBaseParser.BooleanValueContext ctx); + /** + * Visit a parse tree produced by the {@code primitiveDataType} + * labeled alternative in {@link SqlBaseParser#dataType}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitPrimitiveDataType(SqlBaseParser.PrimitiveDataTypeContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#whenClause}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitWhenClause(SqlBaseParser.WhenClauseContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#qualifiedName}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitQualifiedName(SqlBaseParser.QualifiedNameContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#tableIdentifier}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitTableIdentifier(SqlBaseParser.TableIdentifierContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#identifier}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitIdentifier(SqlBaseParser.IdentifierContext ctx); + /** + * Visit a parse tree produced by the {@code quotedIdentifier} + * labeled alternative in {@link SqlBaseParser#quoteIdentifier}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext ctx); + /** + * Visit a parse tree produced by the {@code backQuotedIdentifier} + * labeled alternative in {@link SqlBaseParser#quoteIdentifier}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext ctx); + /** + * Visit a parse tree produced by the {@code unquotedIdentifier} + * labeled alternative in {@link SqlBaseParser#unquoteIdentifier}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitUnquotedIdentifier(SqlBaseParser.UnquotedIdentifierContext ctx); + /** + * Visit a parse tree produced by the {@code digitIdentifier} + * labeled alternative in {@link SqlBaseParser#unquoteIdentifier}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitDigitIdentifier(SqlBaseParser.DigitIdentifierContext ctx); + /** + * Visit a parse tree produced by the {@code decimalLiteral} + * labeled alternative in {@link SqlBaseParser#number}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitDecimalLiteral(SqlBaseParser.DecimalLiteralContext ctx); + /** + * Visit a parse tree produced by the {@code integerLiteral} + * labeled alternative in {@link SqlBaseParser#number}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitIntegerLiteral(SqlBaseParser.IntegerLiteralContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#nonReserved}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitNonReserved(SqlBaseParser.NonReservedContext ctx); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java new file mode 100644 index 00000000000..c75d5e92b3a --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.BaseErrorListener; +import org.antlr.v4.runtime.CommonToken; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.RecognitionException; +import org.antlr.v4.runtime.Recognizer; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.atn.PredictionMode; +import org.antlr.v4.runtime.misc.Pair; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; + +import java.util.Arrays; +import java.util.List; +import java.util.function.Function; + +public class SqlParser { + + private static final Logger log = Loggers.getLogger(SqlParser.class); + + public LogicalPlan createStatement(String sql) { + if (log.isDebugEnabled()) { + log.debug("Parsing as statement: {}", sql); + } + return invokeParser("statement", sql, 
SqlBaseParser::singleStatement); + } + + public Expression createExpression(String expression) { + if (log.isDebugEnabled()) { + log.debug("Parsing as expression: {}", expression); + } + + return invokeParser("expression", expression, SqlBaseParser::singleExpression); + } + + @SuppressWarnings("unchecked") + private T invokeParser(String name, String sql, Function parseFunction) { + try { + SqlBaseLexer lexer = new SqlBaseLexer(new CaseInsensitiveStream(sql)); + + lexer.removeErrorListeners(); + lexer.addErrorListener(ERROR_LISTENER); + + CommonTokenStream tokenStream = new CommonTokenStream(lexer); + SqlBaseParser parser = new SqlBaseParser(tokenStream); + + parser.addParseListener(new PostProcessor(Arrays.asList(parser.getRuleNames()))); + parser.removeErrorListeners(); + parser.addErrorListener(ERROR_LISTENER); + + ParserRuleContext tree; + try { + // first, try parsing with potentially faster SLL mode + parser.getInterpreter().setPredictionMode(PredictionMode.SLL); + tree = parseFunction.apply(parser); + } + catch (Exception ex) { + // if we fail, parse with LL mode + tokenStream.reset(); // rewind input stream + parser.reset(); + + parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION); + tree = parseFunction.apply(parser); + } + + postProcess(lexer, parser, tree); + + return (T) new AstBuilder().visit(tree); + } + + catch (StackOverflowError e) { + throw new ParsingException(name + " is too large (stack overflow while parsing)"); + } + } + + protected void postProcess(SqlBaseLexer lexer, SqlBaseParser parser, ParserRuleContext tree) { + // no-op + } + + private class PostProcessor extends SqlBaseBaseListener { + private final List ruleNames; + + PostProcessor(List ruleNames) { + this.ruleNames = ruleNames; + } + + @Override + public void exitBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext context) { + Token token = context.BACKQUOTED_IDENTIFIER().getSymbol(); + throw new ParsingException( + "backquoted indetifiers 
not supported; please use double quotes instead", + null, + token.getLine(), + token.getCharPositionInLine()); + } + + @Override + public void exitDigitIdentifier(SqlBaseParser.DigitIdentifierContext context) { + Token token = context.DIGIT_IDENTIFIER().getSymbol(); + throw new ParsingException( + "identifiers must not start with a digit; please use double quotes", + null, + token.getLine(), + token.getCharPositionInLine()); + } + + @Override + public void exitQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext context) { + // Remove quotes + context.getParent().removeLastChild(); + + Token token = (Token) context.getChild(0).getPayload(); + context.getParent().addChild(new CommonToken( + new Pair<>(token.getTokenSource(), token.getInputStream()), + SqlBaseLexer.IDENTIFIER, + token.getChannel(), + token.getStartIndex() + 1, + token.getStopIndex() - 1)); + } + + @Override + public void exitNonReserved(SqlBaseParser.NonReservedContext context) { + // tree cannot be modified during rule enter/exit _unless_ it's a terminal node + if (!(context.getChild(0) instanceof TerminalNode)) { + int rule = ((ParserRuleContext) context.getChild(0)).getRuleIndex(); + throw new ParsingException("nonReserved can only contain tokens. 
Found nested rule: " + ruleNames.get(rule)); + } + + // replace nonReserved words with IDENT tokens + context.getParent().removeLastChild(); + + Token token = (Token) context.getChild(0).getPayload(); + context.getParent().addChild(new CommonToken( + new Pair<>(token.getTokenSource(), token.getInputStream()), + SqlBaseLexer.IDENTIFIER, + token.getChannel(), + token.getStartIndex(), + token.getStopIndex())); + } + } + + private static final BaseErrorListener ERROR_LISTENER = new BaseErrorListener() { + @Override + public void syntaxError(Recognizer recognizer, Object offendingSymbol, int line, int charPositionInLine, String message, RecognitionException e) { + throw new ParsingException(message, e, line, charPositionInLine); + } + }; +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/QueryPlan.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/QueryPlan.java new file mode 100644 index 00000000000..66181c5dc68 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/QueryPlan.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Function; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.AttributeSet; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.type.DataType; + +public abstract class QueryPlan> extends Node { + + private AttributeSet lazyOutputSet; + private AttributeSet lazyInputSet; + + + public QueryPlan(Location location, List children) { + super(location, children); + } + + public abstract List output(); + + public AttributeSet outputSet() { + if (lazyOutputSet == null) { + lazyOutputSet = new AttributeSet(output()); + } + return lazyOutputSet; + } + + public AttributeSet intputSet() { + if (lazyInputSet == null) { + List attrs = new ArrayList<>(); + for (PlanType child : children()) { + attrs.addAll(child.output()); + } + lazyInputSet = new AttributeSet(attrs); + } + return lazyInputSet; + } + + public PlanType transformExpressionsOnly(Function rule) { + return transformPropertiesOnly(e -> doTransformExpression(e, exp -> exp.transformDown(rule)), Object.class); + } + + public PlanType transformExpressionsDown(Function rule) { + return transformPropertiesDown(e -> doTransformExpression(e, exp -> exp.transformDown(rule)), Object.class); + } + + public PlanType transformExpressionsUp(Function rule) { + return transformPropertiesUp(e -> doTransformExpression(e, exp -> exp.transformUp(rule)), Object.class); + } + + private Object doTransformExpression(Object arg, Function traversal) { + if (arg instanceof Expression) { + return traversal.apply((Expression) arg); + } + if (arg instanceof DataType || arg instanceof Map) { + return arg; + } + + // WARNING: if the collection 
is typed, an incompatible function will be applied to it + // this results in CCE at runtime and additional filtering is required + // preserving the type information is hacky and weird (a lot of context needs to be passed around and the lambda itself + // has no type info so it's difficult to have automatic checking without having base classes). + + if (arg instanceof Collection) { + Collection c = (Collection) arg; + List transformed = new ArrayList<>(c.size()); + boolean hasChanged = false; + for (Object e : c) { + Object next = doTransformExpression(e, traversal); + if (!c.equals(next)) { + hasChanged = true; + } + else { + // use the initial value + next = e; + } + transformed.add(next); + } + + return hasChanged ? transformed : arg; + } + + return arg; + } + + public void forEachExpressionsDown(Consumer rule) { + forEachPropertiesDown(e -> doForEachExpression(e, exp -> exp.forEachDown(rule)), Object.class); + } + + public void forEachExpressionsUp(Consumer rule) { + forEachPropertiesUp(e -> doForEachExpression(e, exp -> exp.forEachUp(rule)), Object.class); + } + + public void forEachExpressions(Consumer rule) { + forEachPropertiesOnly(e -> doForEachExpression(e, rule::accept), Object.class); + } + + private void doForEachExpression(Object arg, Consumer traversal) { + if (arg instanceof Expression) { + traversal.accept((Expression) arg); + } + else if (arg instanceof Collection) { + Collection c = (Collection) arg; + for (Object o : c) { + doForEachExpression(o, traversal); + } + } + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/TableIdentifier.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/TableIdentifier.java new file mode 100644 index 00000000000..1432fa4a147 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/TableIdentifier.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan; + +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Objects; + +public class TableIdentifier { + + private final String index; + private final Location location; + + public TableIdentifier(Location location, String index) { + this.location = location; + this.index = index; + } + + public String index() { + return index; + } + + @Override + public int hashCode() { + return Objects.hash(index); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + TableIdentifier other = (TableIdentifier) obj; + return Objects.equals(index, other.index); + } + + public Location location() { + return location; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("[index="); + builder.append(index); + builder.append("]"); + return builder.toString(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Aggregate.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Aggregate.java new file mode 100644 index 00000000000..a04923d6ccb --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Aggregate.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import org.elasticsearch.xpack.sql.capabilities.Resolvables; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; +import java.util.Objects; + +public class Aggregate extends UnaryPlan { + + private final List groupings; + private final List aggregates; + + public Aggregate(Location location, LogicalPlan child, List groupings, List aggregates) { + super(location, child); + this.groupings = groupings; + this.aggregates = aggregates; + } + + public List groupings() { + return groupings; + } + + public List aggregates() { + return aggregates; + } + + @Override + public boolean expressionsResolved() { + return Resolvables.resolved(groupings) && Resolvables.resolved(aggregates); + } + + @Override + public List output() { + return Expressions.asAttributes(aggregates); + } + + @Override + public int hashCode() { + return Objects.hash(groupings, aggregates, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Aggregate other = (Aggregate) obj; + return Objects.equals(groupings, other.groupings) + && Objects.equals(aggregates, other.aggregates) + && Objects.equals(child(), other.child()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/BinaryPlan.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/BinaryPlan.java new file mode 100644 index 00000000000..daee3a97ee8 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/BinaryPlan.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.Arrays; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.tree.Location; + +abstract class BinaryPlan extends LogicalPlan { + + private final LogicalPlan left, right; + + BinaryPlan(Location location, LogicalPlan left, LogicalPlan right) { + super(location, Arrays.asList(left, right)); + this.left = left; + this.right = right; + } + + public LogicalPlan left() { + return left; + } + + public LogicalPlan right() { + return right; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + BinaryPlan other = (BinaryPlan) obj; + + return Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } + + @Override + public int hashCode() { + return Objects.hash(left, right); + } + +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Distinct.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Distinct.java new file mode 100644 index 00000000000..be79e3a4ffc --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Distinct.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
// ---- Distinct.java ----
package org.elasticsearch.xpack.sql.plan.logical;

import org.elasticsearch.xpack.sql.tree.Location;

/**
 * SELECT DISTINCT marker node; carries no expressions of its own.
 */
public class Distinct extends UnaryPlan {

    public Distinct(Location location, LogicalPlan child) {
        super(location, child);
    }

    @Override
    public boolean expressionsResolved() {
        // no expressions to resolve
        return true;
    }
}

// ---- EsRelation.java ----
package org.elasticsearch.xpack.sql.plan.logical;

import org.elasticsearch.xpack.sql.analysis.index.EsIndex;
import org.elasticsearch.xpack.sql.analysis.index.MappingException;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.NestedFieldAttribute;
import org.elasticsearch.xpack.sql.expression.RootFieldAttribute;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.type.NestedType;
import org.elasticsearch.xpack.sql.type.StringType;
import org.elasticsearch.xpack.sql.util.StringUtils;

import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Stream;

import static java.util.Collections.emptyList;
import static java.util.stream.Collectors.toList;
import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine;

/**
 * Leaf relation backed by a concrete Elasticsearch index. The index mapping is
 * flattened eagerly into a list of attributes (dotted names for sub-fields,
 * nested parents tracked separately for nested types).
 */
public class EsRelation extends LeafPlan {

    private final EsIndex index;
    private final List<Attribute> attrs;

    public EsRelation(Location location, EsIndex index) {
        super(location);
        this.index = index;
        this.attrs = flatten(location, index.mapping()).collect(toList());
    }

    private static Stream<Attribute> flatten(Location location, Map<String, DataType> mapping) {
        return flatten(location, mapping, null, emptyList());
    }

    /**
     * Recursively walks the mapping, producing one attribute per field.
     * {@code parent} is the dotted prefix so far; {@code nestedParents} the
     * chain of enclosing nested fields (needed by NestedFieldAttribute).
     */
    private static Stream<Attribute> flatten(Location location, Map<String, DataType> mapping,
            String parent, List<String> nestedParents) {
        return mapping.entrySet().stream()
                .filter(e -> e.getValue() != null)
                .flatMap(e -> {
                    String name = parent == null ? e.getKey() : parent + "." + e.getKey();
                    DataType t = e.getValue();
                    // complex types (except strings, which carry sub-fields) need recursion
                    if (t.isComplex() && !(t instanceof StringType)) {
                        if (t instanceof NestedType) {
                            // emit the nested field itself, then everything under it
                            Attribute nested = new NestedFieldAttribute(location, name, t, nestedParents);
                            return Stream.concat(
                                    Stream.of(nested),
                                    flatten(location, ((NestedType) t).properties(), name, combine(nestedParents, name)));
                        }
                        // NOTE(review): object types are not handled yet (only nested);
                        // anything else complex is rejected outright
                        throw new MappingException("Does not know how to handle complex type %s", t);
                    }
                    Attribute att = nestedParents.isEmpty()
                            ? new RootFieldAttribute(location, name, t)
                            : new NestedFieldAttribute(location, name, t, nestedParents);
                    return Stream.of(att);
                });
    }

    public EsIndex index() {
        return index;
    }

    @Override
    public List<Attribute> output() {
        return attrs;
    }

    @Override
    public boolean expressionsResolved() {
        return true;
    }

    @Override
    public int hashCode() {
        return Objects.hash(index);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return Objects.equals(index, ((EsRelation) obj).index);
    }

    @Override
    public String nodeString() {
        return nodeName() + "[" + index + "]" + StringUtils.limitedToString(attrs);
    }
}
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Filter extends UnaryPlan { + + private final Expression condition; + + public Filter(Location location, LogicalPlan child, Expression condition) { + super(location, child); + this.condition = condition; + } + + public Expression condition() { + return condition; + } + + @Override + public boolean expressionsResolved() { + return condition.resolved(); + } + + @Override + public int hashCode() { + return Objects.hash(condition, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Filter other = (Filter) obj; + + return Objects.equals(condition, other.condition) + && Objects.equals(child(), other.child()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Join.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Join.java new file mode 100644 index 00000000000..799f8d85580 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Join.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
// ---- Join.java ----
package org.elasticsearch.xpack.sql.plan.logical;

import java.util.List;
import java.util.Objects;

import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataTypes;

import static java.util.stream.Collectors.toList;

import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine;

/**
 * Join of two plans on an optional boolean {@code condition}. Outer joins mark
 * the attributes of the side that may be missing as nullable.
 */
public class Join extends BinaryPlan {

    private final JoinType type;
    private final Expression condition;

    public enum JoinType {
        INNER,
        LEFT,     // OUTER
        RIGHT,    // OUTER
        FULL,     // OUTER
        IMPLICIT,
    }

    public Join(Location location, LogicalPlan left, LogicalPlan right, JoinType type, Expression condition) {
        super(location, left, right);
        this.type = type;
        this.condition = condition;
    }

    public JoinType type() {
        return type;
    }

    public Expression condition() {
        return condition;
    }

    @Override
    public List<Attribute> output() {
        List<Attribute> leftOutput = left().output();
        List<Attribute> rightOutput = right().output();
        switch (type) {
            case LEFT:
                // right side can be null
                return combine(leftOutput, makeNullable(rightOutput));
            case RIGHT:
                // left side can be null
                return combine(makeNullable(leftOutput), rightOutput);
            case FULL:
                // both sides can be null
                return combine(makeNullable(leftOutput), makeNullable(rightOutput));
            default:
                // INNER (and IMPLICIT)
                return combine(leftOutput, rightOutput);
        }
    }

    private static List<Attribute> makeNullable(List<Attribute> output) {
        return output.stream()
                .map(a -> a.withNullability(true))
                .collect(toList());
    }

    @Override
    public boolean expressionsResolved() {
        // a cross/implicit join may legitimately have no condition
        return condition == null || condition.resolved();
    }

    /** True when the two sides expose no conflicting (shared) attributes. */
    public boolean duplicatesResolved() {
        return left().outputSet().intersect(right().outputSet()).isEmpty();
    }

    @Override
    public boolean resolved() {
        // resolve the join if
        // - the children are resolved
        // - there are no conflicts in output
        // - the condition (if present) is resolved to a boolean
        return childrenResolved()
                && duplicatesResolved()
                && expressionsResolved()
                && (condition == null || DataTypes.BOOLEAN.equals(condition.dataType()));
    }

    @Override
    public int hashCode() {
        return Objects.hash(type, condition, left(), right());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        Join other = (Join) obj;
        return Objects.equals(type, other.type)
                && Objects.equals(condition, other.condition)
                && Objects.equals(left(), other.left())
                && Objects.equals(right(), other.right());
    }
}

// ---- LeafPlan.java ----
package org.elasticsearch.xpack.sql.plan.logical;

import java.util.Collections;

import org.elasticsearch.xpack.sql.tree.Location;

/**
 * A logical plan node with no children (a relation or a command).
 */
abstract class LeafPlan extends LogicalPlan {

    protected LeafPlan(Location location) {
        super(location, Collections.emptyList());
    }
}
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Limit extends UnaryPlan { + + private final Expression limit; + + public Limit(Location location, Expression limit, LogicalPlan child) { + super(location, child); + this.limit = limit; + } + + public Expression limit() { + return limit; + } + + @Override + public boolean expressionsResolved() { + return limit.resolved(); + } + + @Override + public int hashCode() { + return Objects.hash(limit, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Limit other = (Limit) obj; + + return Objects.equals(limit, other.limit) + && Objects.equals(child(), other.child()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LocalRelation.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LocalRelation.java new file mode 100644 index 00000000000..d02726ed582 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LocalRelation.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.session.Executable; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.emptyList; + +public class LocalRelation extends LogicalPlan implements Executable { + + private final Executable executable; + + public LocalRelation(Location location, Executable executable) { + super(location, emptyList()); + this.executable = executable; + } + + public Executable executable() { + return executable; + } + + @Override + public boolean expressionsResolved() { + return true; + } + + @Override + public List output() { + return executable.output(); + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + executable.execute(session, listener); + } + + @Override + public int hashCode() { + return executable.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + LocalRelation other = (LocalRelation) obj; + return Objects.equals(executable, other.executable); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LogicalPlan.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LogicalPlan.java new file mode 100644 index 00000000000..189204101c3 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LogicalPlan.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import org.elasticsearch.xpack.sql.capabilities.Resolvable; +import org.elasticsearch.xpack.sql.capabilities.Resolvables; +import org.elasticsearch.xpack.sql.plan.QueryPlan; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; + +public abstract class LogicalPlan extends QueryPlan implements Resolvable { + + /** + * Order is important in the enum; any values should be added at the end. + */ + public enum Stage { + PARSED, + PRE_ANALYZED, + ANALYZED, + OPTIMIZED; + } + + private Stage stage = Stage.PARSED; + private Boolean lazyChildrenResolved = null; + private Boolean lazyResolved = null; + + public LogicalPlan(Location location, List children) { + super(location, children); + } + + public boolean preAnalyzed() { + return stage.ordinal() >= Stage.PRE_ANALYZED.ordinal(); + } + + public void setPreAnalyzed() { + stage = Stage.PRE_ANALYZED; + } + + public boolean analyzed() { + return stage.ordinal() >= Stage.ANALYZED.ordinal(); + } + + public void setAnalyzed() { + stage = Stage.ANALYZED; + } + + public boolean optimized() { + return stage.ordinal() >= Stage.OPTIMIZED.ordinal(); + } + + public void setOptimized() { + stage = Stage.OPTIMIZED; + } + + public final boolean childrenResolved() { + if (lazyChildrenResolved == null) { + lazyChildrenResolved = Boolean.valueOf(Resolvables.resolved(children())); + } + return lazyChildrenResolved; + } + + @Override + public boolean resolved() { + if (lazyResolved == null) { + lazyResolved = expressionsResolved() && childrenResolved(); + } + return lazyResolved; + } + + public abstract boolean expressionsResolved(); + + @Override + public abstract int hashCode(); + + @Override + public abstract boolean equals(Object obj); +} \ No newline at end of file diff --git 
a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/OrderBy.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/OrderBy.java new file mode 100644 index 00000000000..95a5bdecbdc --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/OrderBy.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.capabilities.Resolvables; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.tree.Location; + +public class OrderBy extends UnaryPlan { + + private final List order; + + public OrderBy(Location location, LogicalPlan child, List order) { + super(location, child); + this.order = order; + } + + public List order() { + return order; + } + + @Override + public boolean expressionsResolved() { + return Resolvables.resolved(order); + } + + @Override + public int hashCode() { + return Objects.hash(order, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + OrderBy other = (OrderBy) obj; + return Objects.equals(order, other.order) + && Objects.equals(child(), other.child()); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Project.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Project.java new file mode 100644 index 00000000000..e48278e21a8 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Project.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.capabilities.Resolvables; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.function.Functions; +import org.elasticsearch.xpack.sql.tree.Location; + +public class Project extends UnaryPlan { + + private final List projections; + + public Project(Location location, LogicalPlan child, List projections) { + super(location, child); + this.projections = projections; + } + + public List projections() { + return projections; + } + + @Override + public boolean resolved() { + return super.resolved() && !Expressions.anyMatch(projections, Functions::isAggregate); + } + + @Override + public boolean expressionsResolved() { + return Resolvables.resolved(projections); + } + + @Override + public List output() { + return Expressions.asAttributes(projections); + } + + @Override + public int hashCode() { + return Objects.hash(projections, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Project other = (Project) obj; + + return Objects.equals(projections, other.projections) + && Objects.equals(child(), other.child()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/SubQueryAlias.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/SubQueryAlias.java new file mode 100644 index 00000000000..e7180399507 --- /dev/null +++ 
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/SubQueryAlias.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.tree.Location; + +public class SubQueryAlias extends UnaryPlan { + + private final String alias; + + public SubQueryAlias(Location location, LogicalPlan child, String alias) { + super(location, child); + this.alias = alias; + } + + public String alias() { + return alias; + } + + @Override + public List output() { + return (alias == null ? child().output() : + child().output().stream() + .map(e -> e.withQualifier(alias)) + .collect(Collectors.toList()) + ); + } + + @Override + public boolean expressionsResolved() { + return true; + } + + @Override + public int hashCode() { + return Objects.hash(alias, child()); + } + + @Override + public boolean equals(Object obj) { + if (!super.equals(obj)) { + return false; + } + + SubQueryAlias other = (SubQueryAlias) obj; + return Objects.equals(alias, other.alias); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnaryPlan.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnaryPlan.java new file mode 100644 index 00000000000..3b4399e2e9f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnaryPlan.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.tree.Location; + +public abstract class UnaryPlan extends LogicalPlan { + + private final LogicalPlan child; + + UnaryPlan(Location location, LogicalPlan child) { + super(location, Collections.singletonList(child)); + this.child = child; + } + + public LogicalPlan child() { + return child; + } + + @Override + public List output() { + return child.output(); + } + + @Override + public int hashCode() { + return Objects.hashCode(child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnaryPlan other = (UnaryPlan) obj; + + return Objects.equals(child, other.child); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelation.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelation.java new file mode 100644 index 00000000000..eacd5475821 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelation.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import org.elasticsearch.xpack.sql.capabilities.Unresolvable; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class UnresolvedRelation extends LeafPlan implements Unresolvable { + + private final TableIdentifier table; + private final String alias; + private final String unresolvedMsg; + + public UnresolvedRelation(Location location, TableIdentifier table, String alias) { + this(location, table, alias, null); + } + + public UnresolvedRelation(Location location, TableIdentifier table, String alias, String unresolvedMessage) { + super(location); + this.table = table; + this.alias = alias; + this.unresolvedMsg = unresolvedMessage == null ? "Unknown index [" + table.index() + "]" : unresolvedMessage; + } + + public TableIdentifier table() { + return table; + } + + public String alias() { + return alias; + } + + @Override + public boolean resolved() { + return false; + } + + @Override + public boolean expressionsResolved() { + return false; + } + + @Override + public List output() { + return Collections.emptyList(); + } + + @Override + public String unresolvedMessage() { + return unresolvedMsg; + } + + @Override + public int hashCode() { + return Objects.hash(table); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnresolvedRelation other = (UnresolvedRelation) obj; + return Objects.equals(table, other.table); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/With.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/With.java new file mode 100644 index 00000000000..10ccc5cc0e0 --- /dev/null +++ 
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/With.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.Map; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.tree.Location; + +public class With extends UnaryPlan { + private final Map subQueries; + + public With(Location location, LogicalPlan child, Map subQueries) { + super(location, child); + this.subQueries = subQueries; + } + + public Map subQueries() { + return subQueries; + } + + @Override + public boolean expressionsResolved() { + return true; + } + + @Override + public int hashCode() { + return Objects.hash(child(), subQueries); + } + + @Override + public boolean equals(Object obj) { + if (!super.equals(obj)) { + return false; + } + + With other = (With) obj; + return Objects.equals(subQueries, other.subQueries); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Command.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Command.java new file mode 100644 index 00000000000..49693be8e17 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Command.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical.command; + +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.session.Executable; +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.emptyList; + +public abstract class Command extends LogicalPlan implements Executable { + + public Command(Location location) { + super(location, emptyList()); + } + + @Override + public boolean expressionsResolved() { + return true; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Debug.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Debug.java new file mode 100644 index 00000000000..cb5bb6b67ac --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Debug.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.plan.logical.command;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.RootFieldAttribute;
import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.rule.RuleExecutor.Batch;
import org.elasticsearch.xpack.sql.rule.RuleExecutor.ExecutionInfo;
import org.elasticsearch.xpack.sql.rule.RuleExecutor.Transformation;
import org.elasticsearch.xpack.sql.session.Rows;
import org.elasticsearch.xpack.sql.session.SchemaRowSet;
import org.elasticsearch.xpack.sql.session.SqlSession;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.tree.Node;
import org.elasticsearch.xpack.sql.tree.NodeUtils;
import org.elasticsearch.xpack.sql.type.DataTypes;
import org.elasticsearch.xpack.sql.util.Graphviz;

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;

import static java.util.Collections.singletonList;
import static org.elasticsearch.action.ActionListener.wrap;

/**
 * DEBUG command: reports the rule-by-rule transformations applied by either the
 * analyzer or the optimizer to a plan, rendered either as plain text diffs or
 * as a Graphviz dot graph. The result is a single-column, single-row set.
 */
public class Debug extends Command {

    public enum Type {
        ANALYZED, OPTIMIZED;
    }

    public enum Format {
        TEXT, GRAPHVIZ
    }

    private final LogicalPlan plan;
    private final Format format;
    private final Type type;

    /**
     * @param plan   plan to debug
     * @param type   phase to trace; {@code null} defaults to {@link Type#OPTIMIZED}
     * @param format output rendering; {@code null} defaults to {@link Format#TEXT}
     */
    public Debug(Location location, LogicalPlan plan, Type type, Format format) {
        super(location);
        this.plan = plan;
        this.format = format == null ? Format.TEXT : format;
        this.type = type == null ? Type.OPTIMIZED : type;
    }

    public LogicalPlan plan() {
        return plan;
    }

    public Format format() {
        return format;
    }

    public Type type() {
        return type;
    }

    @Override
    public List<Attribute> output() {
        return singletonList(new RootFieldAttribute(location(), "plan", DataTypes.KEYWORD));
    }

    @Override
    public void execute(SqlSession session, ActionListener<SchemaRowSet> listener) {
        switch (type) {
            case ANALYZED:
                session.debugAnalyzedPlan(plan, wrap(i -> handleInfo(i, listener), listener::onFailure));
                break;
            case OPTIMIZED:
                session.analyzedPlan(plan, true,
                        wrap(analyzedPlan -> handleInfo(session.optimizer().debugOptimize(analyzedPlan), listener), listener::onFailure));
                break;
            default:
                // unreachable given the constructor defaults, but never leave the
                // listener un-notified (the original silently dropped it here)
                listener.onFailure(new IllegalArgumentException("Unknown debug type [" + type + "]"));
        }
    }

    // ExecutionInfo/Batch/Transformation are inner types of the generic RuleExecutor,
    // hence the rawtypes suppression
    @SuppressWarnings({ "rawtypes", "unchecked" })
    private void handleInfo(ExecutionInfo info, ActionListener<SchemaRowSet> listener) {
        String planString = null;

        if (format == Format.TEXT) {
            StringBuilder sb = new StringBuilder();
            if (info == null) {
                // nothing was transformed - print the plan as-is
                sb.append(plan.toString());
            } else {
                Map<Batch, List<Transformation>> map = info.transformations();

                for (Entry<Batch, List<Transformation>> entry : map.entrySet()) {
                    // for each batch: header followed by each rule's before/after diff
                    sb.append("***");
                    sb.append(entry.getKey().name());
                    sb.append("***");
                    for (Transformation tf : entry.getValue()) {
                        sb.append(tf.ruleName());
                        sb.append("\n");
                        sb.append(NodeUtils.diffString(tf.before(), tf.after()));
                        sb.append("\n");
                    }
                }
            }
            planString = sb.toString();
        } else {
            if (info == null) {
                planString = Graphviz.dot("Planned", plan);
            } else {
                Map<String, Node<?>> plans = new LinkedHashMap<>();
                Map<Batch, List<Transformation>> map = info.transformations();
                plans.put("start", info.before());

                for (Entry<Batch, List<Transformation>> entry : map.entrySet()) {
                    // keep only the rules that actually changed the plan
                    int counter = 0;
                    for (Transformation tf : entry.getValue()) {
                        if (tf.hasChanged()) {
                            plans.put(tf.ruleName() + "#" + ++counter, tf.after());
                        }
                    }
                }
                planString = Graphviz.dot(plans, true);
            }
        }

        listener.onResponse(Rows.singleton(output(), planString));
    }

    @Override
    public int hashCode() {
        return Objects.hash(plan, type, format);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        Debug o = (Debug) obj;
        return Objects.equals(format, o.format)
                && Objects.equals(type, o.type)
                && Objects.equals(plan, o.plan);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.plan.logical.command;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.Strings;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.RootFieldAttribute;
import org.elasticsearch.xpack.sql.plan.QueryPlan;
import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan;
import org.elasticsearch.xpack.sql.planner.Planner;
import org.elasticsearch.xpack.sql.session.Rows;
import org.elasticsearch.xpack.sql.session.SchemaRowSet;
import org.elasticsearch.xpack.sql.session.SqlSession;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataTypes;
import org.elasticsearch.xpack.sql.util.Graphviz;

import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;

import static java.util.Collections.singletonList;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.action.ActionListener.wrap;

/**
 * EXPLAIN command: renders the plan at a requested compilation stage (or all of
 * them) as text or Graphviz. With {@code verify == false}, verification errors do
 * not abort execution; instead the command reports the plans computed so far.
 */
public class Explain extends Command {

    public enum Type {
        PARSED, ANALYZED, OPTIMIZED, MAPPED, EXECUTABLE, ALL;

        public String printableName() {
            return Strings.capitalize(name().toLowerCase(Locale.ROOT));
        }
    }

    public enum Format {
        TEXT, GRAPHVIZ
    }

    private final LogicalPlan plan;
    private final boolean verify;
    private final Format format;
    private final Type type;

    /**
     * @param plan   parsed plan to explain
     * @param type   stage to show; {@code null} defaults to {@link Type#ANALYZED}
     * @param format rendering; {@code null} defaults to {@link Format#TEXT}
     * @param verify whether analysis/mapping failures should surface as errors
     */
    public Explain(Location location, LogicalPlan plan, Type type, Format format, boolean verify) {
        super(location);
        this.plan = plan;
        this.verify = verify;
        this.format = format == null ? Format.TEXT : format;
        this.type = type == null ? Type.ANALYZED : type;
    }

    public LogicalPlan plan() {
        return plan;
    }

    public boolean verify() {
        return verify;
    }

    public Format format() {
        return format;
    }

    public Type type() {
        return type;
    }

    @Override
    public List<Attribute> output() {
        return singletonList(new RootFieldAttribute(location(), "plan", DataTypes.KEYWORD));
    }

    @Override
    public void execute(SqlSession session, ActionListener<SchemaRowSet> listener) {

        if (type == Type.PARSED) {
            listener.onResponse(Rows.singleton(output(), formatPlan(format, plan)));
            return;
        }

        // to avoid duplicating code, the type/verification filtering happens inside the listeners instead of outside using a CASE
        session.analyzedPlan(plan, verify, wrap(analyzedPlan -> {

            if (type == Type.ANALYZED) {
                listener.onResponse(Rows.singleton(output(), formatPlan(format, analyzedPlan)));
                return;
            }

            Planner planner = session.planner();
            // verification is on, exceptions can be thrown
            if (verify) {
                session.optimizedPlan(analyzedPlan, wrap(optimizedPlan -> {
                    if (type == Type.OPTIMIZED) {
                        listener.onResponse(Rows.singleton(output(), formatPlan(format, optimizedPlan)));
                        return;
                    }

                    PhysicalPlan mappedPlan = planner.mapPlan(optimizedPlan, verify);
                    if (type == Type.MAPPED) {
                        listener.onResponse(Rows.singleton(output(), formatPlan(format, mappedPlan)));
                        return;
                    }

                    PhysicalPlan executablePlan = planner.foldPlan(mappedPlan, verify);
                    if (type == Type.EXECUTABLE) {
                        listener.onResponse(Rows.singleton(output(), formatPlan(format, executablePlan)));
                        return;
                    }

                    // Type.ALL
                    listener.onResponse(
                            Rows.singleton(output(), printPlans(format, plan, analyzedPlan, optimizedPlan, mappedPlan, executablePlan)));
                }, listener::onFailure));
            }

            // check errors manually to see how far the plans work out
            else {
                // no analysis failure, can move on
                if (session.analyzer().verifyFailures(analyzedPlan).isEmpty()) {
                    session.optimizedPlan(analyzedPlan, wrap(optimizedPlan -> {
                        if (type == Type.OPTIMIZED) {
                            listener.onResponse(Rows.singleton(output(), formatPlan(format, optimizedPlan)));
                            return;
                        }

                        PhysicalPlan mappedPlan = planner.mapPlan(optimizedPlan, verify);

                        if (type == Type.MAPPED) {
                            listener.onResponse(Rows.singleton(output(), formatPlan(format, mappedPlan)));
                            return;
                        }

                        if (planner.verifyMappingPlanFailures(mappedPlan).isEmpty()) {
                            PhysicalPlan executablePlan = planner.foldPlan(mappedPlan, verify);

                            if (type == Type.EXECUTABLE) {
                                listener.onResponse(Rows.singleton(output(), formatPlan(format, executablePlan)));
                                return;
                            }

                            listener.onResponse(Rows.singleton(output(),
                                    printPlans(format, plan, analyzedPlan, optimizedPlan, mappedPlan, executablePlan)));
                            return;
                        }
                        // mapping failed - stop at the mapped plan
                        if (type != Type.ALL) {
                            listener.onResponse(Rows.singleton(output(), formatPlan(format, mappedPlan)));
                            return;
                        }

                        listener.onResponse(
                                Rows.singleton(output(), printPlans(format, plan, analyzedPlan, optimizedPlan, mappedPlan, null)));
                    }, listener::onFailure));
                    // cannot continue
                } else {
                    if (type != Type.ALL) {
                        listener.onResponse(Rows.singleton(output(), formatPlan(format, analyzedPlan)));
                    }
                    else {
                        listener.onResponse(Rows.singleton(output(), printPlans(format, plan, analyzedPlan, null, null, null)));
                    }
                }
            }
        }, listener::onFailure));
    }

    /**
     * Renders all available plans; later stages may be {@code null} when
     * compilation stopped early (verification failures with verify off).
     */
    private static String printPlans(Format format, LogicalPlan parsed, LogicalPlan analyzedPlan, LogicalPlan optimizedPlan,
            PhysicalPlan mappedPlan, PhysicalPlan executionPlan) {
        if (format == Format.TEXT) {
            StringBuilder sb = new StringBuilder();
            sb.append("Parsed\n");
            sb.append("-----------\n");
            sb.append(parsed.toString());
            sb.append("\nAnalyzed\n");
            sb.append("--------\n");
            sb.append(analyzedPlan.toString());
            sb.append("\nOptimized\n");
            sb.append("---------\n");
            sb.append(nullablePlan(optimizedPlan));
            sb.append("\nMapped\n");
            sb.append("---------\n");
            sb.append(nullablePlan(mappedPlan));
            sb.append("\nExecutable\n");
            sb.append("---------\n");
            sb.append(nullablePlan(executionPlan));

            return sb.toString();
        } else {
            // only include plans that were actually produced: the original put
            // null values in the map when a later stage failed, which Graphviz
            // rendering cannot handle
            Map<String, QueryPlan<?>> plans = new HashMap<>();
            plans.put("Parsed", parsed);
            plans.put("Analyzed", analyzedPlan);

            if (optimizedPlan != null) {
                plans.put("Optimized", optimizedPlan);
            }
            if (mappedPlan != null) {
                plans.put("Mapped", mappedPlan);
            }
            if (executionPlan != null) {
                plans.put("Execution", executionPlan);
            }
            return Graphviz.dot(unmodifiableMap(plans), false);
        }
    }

    private static String nullablePlan(QueryPlan<?> plan) {
        return plan != null ? plan.toString() : "<not computed>";
    }

    private String formatPlan(Format format, QueryPlan<?> plan) {
        return (format == Format.TEXT ? nullablePlan(plan) : Graphviz.dot(type.printableName(), plan));
    }

    @Override
    public int hashCode() {
        return Objects.hash(plan, type, format, verify);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        Explain o = (Explain) obj;
        return verify == o.verify
                && Objects.equals(format, o.format)
                && Objects.equals(type, o.type)
                && Objects.equals(plan, o.plan);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.plan.logical.command;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.RootFieldAttribute;
import org.elasticsearch.xpack.sql.session.Rows;
import org.elasticsearch.xpack.sql.session.SchemaRowSet;
import org.elasticsearch.xpack.sql.session.SqlSession;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.CompoundDataType;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.type.DataTypes;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;

import static java.util.Arrays.asList;
import static java.util.Collections.emptyList;

/**
 * SHOW COLUMNS command: lists the (dotted) field names of an index together
 * with their SQL type, recursing into object/nested fields. An unresolvable
 * index yields an empty result rather than an error.
 */
public class ShowColumns extends Command {

    private final String index;

    public ShowColumns(Location location, String index) {
        super(location);
        this.index = index;
    }

    public String index() {
        return index;
    }

    @Override
    public List<Attribute> output() {
        return asList(new RootFieldAttribute(location(), "column", DataTypes.KEYWORD),
                      new RootFieldAttribute(location(), "type", DataTypes.KEYWORD));
    }

    @Override
    public void execute(SqlSession session, ActionListener<SchemaRowSet> listener) {
        session.indexResolver().asIndex(index, ActionListener.wrap(
                indexResult -> {
                    List<List<?>> rows = emptyList();
                    if (indexResult.isValid()) {
                        rows = new ArrayList<>();
                        fillInRows(indexResult.get().mapping(), null, rows);
                    }
                    listener.onResponse(Rows.of(output(), rows));
                },
                listener::onFailure
        ));
    }

    /**
     * Recursively flattens {@code mapping} into (name, sql-type) rows,
     * prefixing nested fields with their parent path ({@code parent.child}).
     * Stateless, hence static.
     */
    private static void fillInRows(Map<String, DataType> mapping, String prefix, List<List<?>> rows) {
        for (Entry<String, DataType> e : mapping.entrySet()) {
            DataType dt = e.getValue();
            String name = e.getKey();
            if (dt != null) {
                // compute the qualified name once (the original rebuilt it for the recursion)
                String fullName = prefix != null ? prefix + "." + name : name;
                rows.add(asList(fullName, dt.sqlName()));
                if (dt instanceof CompoundDataType) {
                    fillInRows(((CompoundDataType) dt).properties(), fullName, rows);
                }
            }
        }
    }

    @Override
    public int hashCode() {
        return Objects.hash(index);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }

        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }

        ShowColumns other = (ShowColumns) obj;
        return Objects.equals(index, other.index);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.plan.logical.command;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.RootFieldAttribute;
import org.elasticsearch.xpack.sql.expression.function.FunctionDefinition;
import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry;
import org.elasticsearch.xpack.sql.session.Rows;
import org.elasticsearch.xpack.sql.session.SchemaRowSet;
import org.elasticsearch.xpack.sql.session.SqlSession;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataTypes;

import java.util.Collection;
import java.util.List;
import java.util.Objects;

import static java.util.Arrays.asList;
import static java.util.stream.Collectors.toList;

/**
 * SHOW FUNCTIONS command: lists the registered functions (name and type),
 * optionally filtered by a name pattern.
 */
public class ShowFunctions extends Command {

    private final String pattern;

    public ShowFunctions(Location location, String pattern) {
        super(location);
        this.pattern = pattern;
    }

    public String pattern() {
        return pattern;
    }

    @Override
    public List<Attribute> output() {
        return asList(new RootFieldAttribute(location(), "name", DataTypes.KEYWORD),
                      new RootFieldAttribute(location(), "type", DataTypes.KEYWORD));
    }

    @Override
    public void execute(SqlSession session, ActionListener<SchemaRowSet> listener) {
        FunctionRegistry registry = session.functionRegistry();
        Collection<FunctionDefinition> functions = registry.listFunctions(pattern);

        // one (name, type) row per matching function definition
        List<List<?>> rows = functions.stream()
                .map(f -> asList(f.name(), f.type().name()))
                .collect(toList());
        listener.onResponse(Rows.of(output(), rows));
    }

    @Override
    public int hashCode() {
        return Objects.hash(pattern);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        return Objects.equals(pattern, ((ShowFunctions) obj).pattern);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.plan.logical.command;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.RootFieldAttribute;
import org.elasticsearch.xpack.sql.session.RowSet;
import org.elasticsearch.xpack.sql.session.Rows;
import org.elasticsearch.xpack.sql.session.SchemaRowSet;
import org.elasticsearch.xpack.sql.session.SqlSession;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataTypes;

import java.util.List;

import static java.util.Collections.singletonList;

/**
 * SHOW SCHEMAS command. Elasticsearch has no schema/catalog concept, so this
 * always answers with an empty result set (kept for JDBC/SQL compatibility).
 */
public class ShowSchemas extends Command {

    public ShowSchemas(Location location) {
        super(location);
    }

    @Override
    public List<Attribute> output() {
        return singletonList(new RootFieldAttribute(location(), "schema", DataTypes.KEYWORD));
    }

    @Override
    public void execute(SqlSession session, ActionListener<SchemaRowSet> listener) {
        // no schemas to report - always empty
        listener.onResponse(Rows.empty(output()));
    }

    @Override
    public int hashCode() {
        return getClass().hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        // stateless: any two instances of the exact same class are equal
        return obj == this || (obj != null && getClass() == obj.getClass());
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.plan.logical.command;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.RootFieldAttribute;
import org.elasticsearch.xpack.sql.session.Rows;
import org.elasticsearch.xpack.sql.session.SchemaRowSet;
import org.elasticsearch.xpack.sql.session.SqlSession;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataTypes;
import org.elasticsearch.xpack.sql.util.StringUtils;

import java.util.Collections;
import java.util.List;
import java.util.Objects;

import static java.util.Collections.singletonList;
import static java.util.stream.Collectors.toList;

/**
 * SHOW TABLES command: lists the indices visible to the current user,
 * optionally filtered by a JDBC-style name pattern (translated to an
 * Elasticsearch wildcard expression).
 */
public class ShowTables extends Command {

    @Nullable
    private final String pattern;

    /**
     * @param pattern JDBC LIKE-style pattern, or {@code null}/blank to list everything
     */
    public ShowTables(Location location, @Nullable String pattern) {
        super(location);
        this.pattern = pattern;
    }

    public String pattern() {
        return pattern;
    }

    @Override
    public List<Attribute> output() {
        return Collections.singletonList(new RootFieldAttribute(location(), "table", DataTypes.KEYWORD));
    }

    @Override
    public final void execute(SqlSession session, ActionListener<SchemaRowSet> listener) {
        // renamed from 'pattern' so the local no longer shadows the field
        String indexPattern = Strings.hasText(pattern) ? StringUtils.jdbcToEsPattern(pattern) : "*";
        session.indexResolver().asList(indexPattern, ActionListener.wrap(result -> {
            listener.onResponse(Rows.of(output(), result.stream()
                    .map(t -> singletonList(t.name()))
                    .collect(toList())));
        }, listener::onFailure));
    }

    @Override
    public int hashCode() {
        return Objects.hash(pattern);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }

        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }

        ShowTables other = (ShowTables) obj;
        return Objects.equals(pattern, other.pattern);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.plan.physical;

import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.Expressions;
import org.elasticsearch.xpack.sql.expression.NamedExpression;

import java.util.List;
import java.util.Objects;

/**
 * Physical aggregation node: groups the child's rows by {@code groupings} and
 * produces {@code aggregates}. Not directly executable - it is folded into the
 * query by the planner.
 */
public class AggregateExec extends UnaryExec implements Unexecutable {

    private final List<? extends Expression> groupings;
    private final List<? extends NamedExpression> aggregates;

    public AggregateExec(PhysicalPlan child,
                         List<? extends Expression> groupings,
                         List<? extends NamedExpression> aggregates) {
        super(child);
        this.groupings = groupings;
        this.aggregates = aggregates;
    }

    public List<? extends Expression> groupings() {
        return groupings;
    }

    public List<? extends NamedExpression> aggregates() {
        return aggregates;
    }

    @Override
    public List<Attribute> output() {
        // the node's output is exactly the attributes of its aggregate expressions
        return Expressions.asAttributes(aggregates);
    }

    @Override
    public int hashCode() {
        return Objects.hash(groupings, aggregates, child());
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        AggregateExec other = (AggregateExec) obj;
        return Objects.equals(groupings, other.groupings)
                && Objects.equals(aggregates, other.aggregates)
                && Objects.equals(child(), other.child());
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.plan.physical;

import java.util.Arrays;
import java.util.Objects;

import org.elasticsearch.xpack.sql.tree.Location;

/**
 * Base class for physical plan nodes with exactly two children (left/right).
 */
abstract class BinaryExec extends PhysicalPlan {

    private final PhysicalPlan left;
    private final PhysicalPlan right;

    protected BinaryExec(Location location, PhysicalPlan left, PhysicalPlan right) {
        super(location, Arrays.asList(left, right));
        this.left = left;
        this.right = right;
    }

    public PhysicalPlan left() {
        return left;
    }

    public PhysicalPlan right() {
        return right;
    }

    @Override
    public int hashCode() {
        return Objects.hash(left, right);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        BinaryExec other = (BinaryExec) obj;
        return Objects.equals(left, other.left) && Objects.equals(right, other.right);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.plan.physical;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.plan.logical.command.Command;
import org.elasticsearch.xpack.sql.session.SchemaRowSet;
import org.elasticsearch.xpack.sql.session.SqlSession;

import java.util.List;
import java.util.Objects;

/**
 * Physical leaf node wrapping a logical {@link Command}; execution and output
 * are delegated straight to the wrapped command.
 */
public class CommandExec extends LeafExec {

    private final Command command;

    public CommandExec(Command command) {
        super(command.location());
        this.command = command;
    }

    public Command command() {
        return command;
    }

    @Override
    public void execute(SqlSession session, ActionListener<SchemaRowSet> listener) {
        // pure delegation - the command does all the work
        command.execute(session, listener);
    }

    @Override
    public List<Attribute> output() {
        return command.output();
    }

    @Override
    public int hashCode() {
        return Objects.hash(command);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        return Objects.equals(command, ((CommandExec) obj).command);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.plan.physical;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.xpack.sql.execution.search.Scroller;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer;
import org.elasticsearch.xpack.sql.session.Rows;
import org.elasticsearch.xpack.sql.session.SchemaRowSet;
import org.elasticsearch.xpack.sql.session.SqlSession;
import org.elasticsearch.xpack.sql.tree.Location;

import java.util.List;
import java.util.Objects;

/**
 * Executable leaf node: runs the translated {@link QueryContainer} against an
 * index via a scroll and streams the hits back as rows.
 */
public class EsQueryExec extends LeafExec {

    private final String index;
    private final List<Attribute> output;
    private final QueryContainer queryContainer;

    public EsQueryExec(Location location, String index, List<Attribute> output, QueryContainer queryContainer) {
        super(location);
        this.index = index;
        this.output = output;
        this.queryContainer = queryContainer;
    }

    /** Copy of this node with a different query container (same index/output). */
    public EsQueryExec with(QueryContainer queryContainer) {
        return new EsQueryExec(location(), index, output, queryContainer);
    }

    public String index() {
        return index;
    }

    public QueryContainer queryContainer() {
        return queryContainer;
    }

    @Override
    public List<Attribute> output() {
        return output;
    }

    @Override
    public void execute(SqlSession session, ActionListener<SchemaRowSet> listener) {
        Scroller scroller = new Scroller(session.client(), session.settings());
        scroller.scroll(Rows.schema(output), queryContainer, index, listener);
    }

    @Override
    public int hashCode() {
        return Objects.hash(index, queryContainer, output);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        EsQueryExec other = (EsQueryExec) obj;
        return Objects.equals(index, other.index)
                && Objects.equals(queryContainer, other.queryContainer)
                && Objects.equals(output, other.output);
    }

    @Override
    public String nodeString() {
        return nodeName() + "[" + index + "," + queryContainer + "]";
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.plan.physical;

import java.util.List;
import java.util.Objects;

import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.Expression;

/**
 * Physical filter node (WHERE / HAVING). Not directly executable; folded into
 * the query by the planner.
 */
public class FilterExec extends UnaryExec implements Unexecutable {

    private final Expression condition;
    // indicates whether the filter is regular or agg-based (HAVING xxx)
    // gets setup automatically and then copied over during cloning
    private final boolean isHaving;

    public FilterExec(PhysicalPlan child, Expression condition) {
        // a filter directly on top of an aggregation acts as HAVING
        this(child, condition, child instanceof AggregateExec);
    }

    public FilterExec(PhysicalPlan child, Expression condition, boolean isHaving) {
        super(child);
        this.condition = condition;
        this.isHaving = isHaving;
    }

    public Expression condition() {
        return condition;
    }

    public boolean isHaving() {
        return isHaving;
    }

    @Override
    public List<Attribute> output() {
        // filtering does not change the schema
        return child().output();
    }

    @Override
    public int hashCode() {
        return Objects.hash(condition, isHaving, child());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }

        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }

        FilterExec other = (FilterExec) obj;
        // compare isHaving as well: hashCode already includes it, and omitting it
        // (as the original did) made WHERE and HAVING filters compare equal while
        // hashing differently - an equals/hashCode contract violation
        return isHaving == other.isHaving
                && Objects.equals(condition, other.condition)
                && Objects.equals(child(), other.child());
    }
}
+ && Objects.equals(child(), other.child()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LeafExec.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LeafExec.java new file mode 100644 index 00000000000..99fd22a6f45 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LeafExec.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.Collections; + +import org.elasticsearch.xpack.sql.tree.Location; + +abstract class LeafExec extends PhysicalPlan { + LeafExec(Location location) { + super(location, Collections.emptyList()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LimitExec.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LimitExec.java new file mode 100644 index 00000000000..e768fcb62d6 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LimitExec.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Expression; + +public class LimitExec extends UnaryExec implements Unexecutable { + + private final Expression limit; + + public LimitExec(PhysicalPlan child, Expression limit) { + super(child); + this.limit = limit; + } + + public Expression limit() { + return limit; + } + + @Override + public int hashCode() { + return Objects.hash(limit, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + LimitExec other = (LimitExec) obj; + return Objects.equals(limit, other.limit) + && Objects.equals(child(), other.child()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LocalExec.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LocalExec.java new file mode 100644 index 00000000000..d27bc97ed5d --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LocalExec.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.session.EmptyExecutable; +import org.elasticsearch.xpack.sql.session.Executable; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; +import java.util.Objects; + +public class LocalExec extends LeafExec { + + private final Executable executable; + + public LocalExec(Location location, Executable executable) { + super(location); + this.executable = executable; + } + + public Executable executable() { + return executable; + } + + @Override + public List output() { + return executable.output(); + } + + public boolean isEmpty() { + return executable instanceof EmptyExecutable; + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + executable.execute(session, listener); + } + + @Override + public int hashCode() { + return Objects.hash(executable); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + LocalExec other = (LocalExec) obj; + return Objects.equals(executable, other.executable); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/OrderExec.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/OrderExec.java new file mode 100644 index 00000000000..21182f03b5f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/OrderExec.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Order; + +public class OrderExec extends UnaryExec implements Unexecutable { + + private final List order; + + public OrderExec(PhysicalPlan child, List order) { + super(child); + this.order = order; + } + + public List order() { + return order; + } + + @Override + public int hashCode() { + return Objects.hash(order, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + OrderExec other = (OrderExec) obj; + + return Objects.equals(order, other.order) + && Objects.equals(child(), other.child()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/PhysicalPlan.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/PhysicalPlan.java new file mode 100644 index 00000000000..802e81ed8b6 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/PhysicalPlan.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.List; + +import org.elasticsearch.xpack.sql.plan.QueryPlan; +import org.elasticsearch.xpack.sql.session.Executable; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.Schema; + +public abstract class PhysicalPlan extends QueryPlan implements Executable { + + private Schema lazySchema; + + public PhysicalPlan(Location location, List children) { + super(location, children); + } + + public Schema schema() { + if (lazySchema == null) { + lazySchema = Rows.schema(output()); + } + return lazySchema; + } + + @Override + public abstract int hashCode(); + + @Override + public abstract boolean equals(Object obj); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/ProjectExec.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/ProjectExec.java new file mode 100644 index 00000000000..10977fb8f60 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/ProjectExec.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.NamedExpression; + +public class ProjectExec extends UnaryExec implements Unexecutable { + + private final List projections; + + public ProjectExec(PhysicalPlan child, List projections) { + super(child); + this.projections = projections; + } + + public List projections() { + return projections; + } + + @Override + public List output() { + return Expressions.asAttributes(projections); + } + + @Override + public int hashCode() { + return Objects.hash(projections, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ProjectExec other = (ProjectExec) obj; + + return Objects.equals(projections, other.projections) + && Objects.equals(child(), other.child()); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/UnaryExec.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/UnaryExec.java new file mode 100644 index 00000000000..404cef106e7 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/UnaryExec.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; + +abstract class UnaryExec extends PhysicalPlan { + + private final PhysicalPlan child; + + UnaryExec(PhysicalPlan child) { + super(child.location(), Collections.singletonList(child)); + this.child = child; + } + + public PhysicalPlan child() { + return child; + } + + @Override + public List output() { + return child.output(); + } + + @Override + public int hashCode() { + return Objects.hashCode(child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnaryExec other = (UnaryExec) obj; + + return Objects.equals(child, other.child); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/Unexecutable.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/Unexecutable.java new file mode 100644 index 00000000000..df351c2115c --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/Unexecutable.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.planner.PlanningException; +import org.elasticsearch.xpack.sql.session.Executable; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; + +import java.util.Locale; + +import static java.lang.String.format; + +// this is mainly a marker interface to validate a plan before being executed +public interface Unexecutable extends Executable { + + default void execute(SqlSession session, ActionListener listener) { + throw new PlanningException(format(Locale.ROOT, "Current plan %s is not executable", this)); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/UnplannedExec.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/UnplannedExec.java new file mode 100644 index 00000000000..e78e63b1a1d --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plan/physical/UnplannedExec.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; + +public class UnplannedExec extends LeafExec implements Unexecutable { + + private final LogicalPlan plan; + + public UnplannedExec(LogicalPlan plan) { + super(plan.location()); + this.plan = plan; + } + + public LogicalPlan plan() { + return plan; + } + + @Override + public List output() { + return plan.output(); + } + + @Override + public int hashCode() { + return plan.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnplannedExec other = (UnplannedExec) obj; + return Objects.equals(plan, other.plan); + } + + @Override + public String nodeString() { + return nodeName() + "[" + plan.nodeString() + "]"; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/FoldingException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/FoldingException.java new file mode 100644 index 00000000000..7ffe33e20f7 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/FoldingException.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.sql.ClientSqlException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.Node; + +import java.util.Locale; + +import static java.lang.String.format; + +public class FoldingException extends ClientSqlException { + + private final int line; + private final int column; + + public FoldingException(Node source, String message, Object... args) { + super(message, args); + + Location loc = Location.EMPTY; + if (source != null && source.location() != null) { + loc = source.location(); + } + this.line = loc.getLineNumber(); + this.column = loc.getColumnNumber(); + } + + public FoldingException(Node source, String message, Throwable cause) { + super(message, cause); + + Location loc = Location.EMPTY; + if (source != null && source.location() != null) { + loc = source.location(); + } + this.line = loc.getLineNumber(); + this.column = loc.getColumnNumber(); + } + + public int getLineNumber() { + return line; + } + + public int getColumnNumber() { + return column; + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + + @Override + public String getMessage() { + return format(Locale.ROOT, "line %s:%s: %s", getLineNumber(), getColumnNumber(), super.getMessage()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java new file mode 100644 index 00000000000..caa0734ce37 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.plan.logical.Aggregate; +import org.elasticsearch.xpack.sql.plan.logical.EsRelation; +import org.elasticsearch.xpack.sql.plan.logical.Filter; +import org.elasticsearch.xpack.sql.plan.logical.Join; +import org.elasticsearch.xpack.sql.plan.logical.Limit; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.plan.logical.Project; +import org.elasticsearch.xpack.sql.plan.logical.LocalRelation; +import org.elasticsearch.xpack.sql.plan.logical.With; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.plan.physical.AggregateExec; +import org.elasticsearch.xpack.sql.plan.physical.CommandExec; +import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.sql.plan.physical.FilterExec; +import org.elasticsearch.xpack.sql.plan.physical.LimitExec; +import org.elasticsearch.xpack.sql.plan.physical.OrderExec; +import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.sql.plan.physical.ProjectExec; +import org.elasticsearch.xpack.sql.plan.physical.LocalExec; +import org.elasticsearch.xpack.sql.plan.physical.UnplannedExec; +import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; +import org.elasticsearch.xpack.sql.rule.Rule; +import org.elasticsearch.xpack.sql.rule.RuleExecutor; +import org.elasticsearch.xpack.sql.util.ReflectionUtils; + +import java.util.Arrays; +import java.util.List; + +class Mapper extends RuleExecutor { + + public PhysicalPlan map(LogicalPlan plan) { + return execute(planLater(plan)); + } + + @Override + protected Iterable.Batch> batches() { + Batch conversion = new Batch("Mapping", + new JoinMapper(), + new SimpleExecMapper() + ); + + return Arrays.asList(conversion); + } + + private 
static PhysicalPlan planLater(LogicalPlan plan) { + return new UnplannedExec(plan); + } + + private static class SimpleExecMapper extends MapExecRule { + + @Override + protected PhysicalPlan map(LogicalPlan p) { + if (p instanceof Command) { + return new CommandExec((Command) p); + } + + if (p instanceof LocalRelation) { + return new LocalExec(p.location(), (LocalRelation) p); + } + + if (p instanceof Project) { + Project pj = (Project) p; + return new ProjectExec(map(pj.child()), pj.projections()); + } + + if (p instanceof Filter) { + Filter fl = (Filter) p; + return new FilterExec(map(fl.child()), fl.condition()); + } + + if (p instanceof OrderBy) { + OrderBy o = (OrderBy) p; + return new OrderExec(map(o.child()), o.order()); + } + + if (p instanceof Aggregate) { + Aggregate a = (Aggregate) p; + // analysis and optimizations have converted the grouping into actual attributes + return new AggregateExec(map(a.child()), a.groupings(), a.aggregates()); + } + + if (p instanceof EsRelation) { + EsRelation c = (EsRelation) p; + List output = c.output(); + return new EsQueryExec(c.location(), c.index().name(), output, new QueryContainer()); + } + + if (p instanceof Limit) { + Limit l = (Limit) p; + return new LimitExec(map(l.child()), l.limit()); + } + // TODO: Translate With in a subplan + if (p instanceof With) { + throw new UnsupportedOperationException("With should have been translated already"); + } + + return planLater(p); + } + } + + private static class JoinMapper extends MapExecRule { + + @Override + protected PhysicalPlan map(Join j) { + return join(j); + } + + private PhysicalPlan join(Join join) { + //TODO: pick up on nested/parent-child docs + // 2. Hash? + // 3. Cartesian + // 3. 
Fallback to nested loop + + + throw new UnsupportedOperationException("Don't know how to handle join " + join.nodeString()); + } + } + + abstract static class MapExecRule extends Rule { + + private final Class subPlanToken = ReflectionUtils.detectSuperTypeForRuleLike(getClass()); + + @Override + public final PhysicalPlan apply(PhysicalPlan plan) { + return plan.transformUp(this::rule, UnplannedExec.class); + } + + @SuppressWarnings("unchecked") + @Override + protected final PhysicalPlan rule(UnplannedExec plan) { + LogicalPlan subPlan = plan.plan(); + if (subPlanToken.isInstance(subPlan)) { + return map((SubPlan) subPlan); + } + return plan; + } + + protected abstract PhysicalPlan map(SubPlan plan); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/Planner.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/Planner.java new file mode 100644 index 00000000000..5bda469853d --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/Planner.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.planner; + +import java.util.List; +import java.util.Map; + +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.sql.planner.Verifier.Failure; +import org.elasticsearch.xpack.sql.tree.Node; + +import static java.util.stream.Collectors.toMap; + +public class Planner { + + private final Mapper mapper = new Mapper(); + private final QueryFolder folder = new QueryFolder(); + + public PhysicalPlan plan(LogicalPlan plan) { + return plan(plan, true); + } + + public PhysicalPlan plan(LogicalPlan plan, boolean verify) { + return foldPlan(mapPlan(plan, verify), verify); + } + + // first, map the logical plan + public PhysicalPlan mapPlan(LogicalPlan plan, boolean verify) { + return verify ? verifyMappingPlan(mapper.map(plan)) : mapper.map(plan); + } + + // second, pack it up + public PhysicalPlan foldPlan(PhysicalPlan mapped, boolean verify) { + return verify ? 
verifyExecutingPlan(folder.fold(mapped)) : folder.fold(mapped); + } + + // verify the mapped plan + public PhysicalPlan verifyMappingPlan(PhysicalPlan plan) { + List failures = Verifier.verifyMappingPlan(plan); + if (!failures.isEmpty()) { + throw new PlanningException(failures); + } + return plan; + } + + public Map, String> verifyMappingPlanFailures(PhysicalPlan plan) { + List failures = Verifier.verifyMappingPlan(plan); + return failures.stream().collect(toMap(Failure::source, Failure::message)); + } + + public PhysicalPlan verifyExecutingPlan(PhysicalPlan plan) { + List failures = Verifier.verifyExecutingPlan(plan); + if (!failures.isEmpty()) { + throw new PlanningException(failures); + } + return plan; + } + + public Map, String> verifyExecutingPlanFailures(PhysicalPlan plan) { + List failures = Verifier.verifyExecutingPlan(plan); + return failures.stream().collect(toMap(Failure::source, Failure::message)); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/PlanningException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/PlanningException.java new file mode 100644 index 00000000000..0537550944b --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/PlanningException.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.xpack.sql.ClientSqlException; +import org.elasticsearch.xpack.sql.planner.Verifier.Failure; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.Collection; +import java.util.Locale; +import java.util.stream.Collectors; + +import static java.lang.String.format; + +public class PlanningException extends ClientSqlException { + + public PlanningException(String message, Object... args) { + super(message, args); + } + + public PlanningException(Collection sources) { + super(extractMessage(sources)); + } + + private static String extractMessage(Collection failures) { + return failures.stream() + .map(f -> format(Locale.ROOT, "line %s:%s: %s", f.source().location().getLineNumber(), f.source().location().getColumnNumber(), f.message())) + .collect(Collectors.joining(StringUtils.NEW_LINE, "Found " + failures.size() + " problem(s)\n", StringUtils.EMPTY)); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java new file mode 100644 index 00000000000..b62ac5aaea7 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java @@ -0,0 +1,570 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.xpack.sql.expression.Alias; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Foldables; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.Functions; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.aggregate.CompoundNumericAggregate; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.sql.expression.function.aggregate.InnerAggregate; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeHistogramFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.AggPathInput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.plan.physical.AggregateExec; +import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.sql.plan.physical.FilterExec; +import 
org.elasticsearch.xpack.sql.plan.physical.LimitExec; +import org.elasticsearch.xpack.sql.plan.physical.LocalExec; +import org.elasticsearch.xpack.sql.plan.physical.OrderExec; +import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.sql.plan.physical.ProjectExec; +import org.elasticsearch.xpack.sql.planner.QueryTranslator.GroupingContext; +import org.elasticsearch.xpack.sql.planner.QueryTranslator.QueryTranslation; +import org.elasticsearch.xpack.sql.querydsl.agg.AggFilter; +import org.elasticsearch.xpack.sql.querydsl.agg.AggPath; +import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupingAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.LeafAgg; +import org.elasticsearch.xpack.sql.querydsl.container.AttributeSort; +import org.elasticsearch.xpack.sql.querydsl.container.ComputedRef; +import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; +import org.elasticsearch.xpack.sql.querydsl.container.ScriptSort; +import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction; +import org.elasticsearch.xpack.sql.querydsl.container.TotalCountRef; +import org.elasticsearch.xpack.sql.querydsl.query.Query; +import org.elasticsearch.xpack.sql.rule.Rule; +import org.elasticsearch.xpack.sql.rule.RuleExecutor; +import org.elasticsearch.xpack.sql.session.EmptyExecutable; +import org.elasticsearch.xpack.sql.util.Check; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.xpack.sql.planner.QueryTranslator.and; +import static org.elasticsearch.xpack.sql.planner.QueryTranslator.toAgg; +import static org.elasticsearch.xpack.sql.planner.QueryTranslator.toQuery; +import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine; + +class QueryFolder extends RuleExecutor { + PhysicalPlan 
fold(PhysicalPlan plan) { + return execute(plan); + } + + @Override + protected Iterable.Batch> batches() { + Batch rollup = new Batch("Fold queries", + new FoldAggregate(), + new FoldProject(), + new FoldFilter(), + new FoldOrderBy(), + new FoldLimit() + ); + + Batch local = new Batch("Local queries", + new PropagateEmptyLocal(), + new LocalLimit() + ); + + Batch finish = new Batch("Finish query", Limiter.ONCE, + new PlanOutputToQueryRef() + ); + + return Arrays.asList(rollup, local, finish); + } + + private static class FoldProject extends FoldingRule { + + @Override + protected PhysicalPlan rule(ProjectExec project) { + if (project.child() instanceof EsQueryExec) { + EsQueryExec exec = (EsQueryExec) project.child(); + QueryContainer queryC = exec.queryContainer(); + + Map aliases = new LinkedHashMap<>(queryC.aliases()); + Map processors = new LinkedHashMap<>(queryC.scalarFunctions()); + + for (NamedExpression pj : project.projections()) { + if (pj instanceof Alias) { + Attribute aliasAttr = pj.toAttribute(); + Expression e = ((Alias) pj).child(); + + if (e instanceof NamedExpression) { + Attribute attr = ((NamedExpression) e).toAttribute(); + aliases.put(aliasAttr, attr); + // add placeholder for each scalar function + if (e instanceof ScalarFunction) { + processors.put(attr, ProcessorDefinitions.toProcessorDefinition(e)); + } + } else { + processors.put(aliasAttr, ProcessorDefinitions.toProcessorDefinition(e)); + } + } + else { + // for named expressions nothing is recorded as these are resolved last + // otherwise 'intermediate' projects might pollute the + // output + + if (pj instanceof ScalarFunction) { + ScalarFunction f = (ScalarFunction) pj; + processors.put(f.toAttribute(), f.asProcessorDefinition()); + } + } + } + + QueryContainer clone = new QueryContainer(queryC.query(), queryC.aggs(), queryC.columns(), aliases, queryC.pseudoFunctions(), processors, queryC.sort(), queryC.limit()); + return new EsQueryExec(exec.location(), exec.index(), 
project.output(), clone); + } + return project; + } + } + + private static class FoldFilter extends FoldingRule { + @Override + protected PhysicalPlan rule(FilterExec plan) { + + if (plan.child() instanceof EsQueryExec) { + EsQueryExec exec = (EsQueryExec) plan.child(); + QueryContainer qContainer = exec.queryContainer(); + + QueryTranslation qt = toQuery(plan.condition(), plan.isHaving()); + + Query query = (qContainer.query() != null || qt.query != null) ? and(plan.location(), qContainer.query(), qt.query) : null; + Aggs aggs = addPipelineAggs(qContainer, qt, plan); + + qContainer = new QueryContainer(query, aggs, qContainer.columns(), qContainer.aliases(), + qContainer.pseudoFunctions(), + qContainer.scalarFunctions(), + qContainer.sort(), + qContainer.limit()); + + return exec.with(qContainer); + } + return plan; + } + + private Aggs addPipelineAggs(QueryContainer qContainer, QueryTranslation qt, FilterExec fexec) { + AggFilter filter = qt.aggFilter; + Aggs aggs = qContainer.aggs(); + + if (filter == null) { + return qContainer.aggs(); + } + + // find the relevant groups and compute the shortest path (the highest group in the hierarchy) + Map groupPaths = new LinkedHashMap<>(); + // root group + String shortestPath = null; + GroupingAgg targetGroup = null; + + for (String refId : filter.aggRefs()) { + // is it root group or agg property (_count) + if (refId == null) { + shortestPath = StringUtils.EMPTY; + } + else { + // find function group + GroupingAgg groupAgg = qContainer.findGroupForAgg(refId); + + if (groupAgg == null) { + groupAgg = qContainer.pseudoFunctions().get(refId); + } + + if (groupAgg == null) { + throw new FoldingException(fexec, "Cannot find group for agg %s referrenced by agg filter %s(%s)", refId, filter.name(), filter); + } + + String path = groupAgg.asParentPath(); + if (shortestPath == null || shortestPath.length() > path.length()) { + shortestPath = path; + targetGroup = groupAgg; + } + groupPaths.put(refId, groupAgg); + } + } + + // and 
finally update the agg groups + if (targetGroup == GroupingAgg.DEFAULT_GROUP) { + throw new PlanningException("Aggregation filtering not supported (yet) without explicit grouping"); + //aggs = aggs.addAgg(null, filter); + } + if (targetGroup == null) { + throw new PlanningException("Cannot determine group column; likely an invalid query - please report"); + } + else { + aggs = aggs.updateGroup(targetGroup.withPipelines(combine(targetGroup.subPipelines(), filter))); + } + + return aggs; + } + } + + private static class FoldAggregate extends FoldingRule { + @Override + protected PhysicalPlan rule(AggregateExec a) { + + if (a.child() instanceof EsQueryExec) { + EsQueryExec exec = (EsQueryExec) a.child(); + + // build the group aggregation + // and also collect info about it (since the group columns might be used inside the select) + + GroupingContext groupingContext = QueryTranslator.groupBy(a.groupings()); + + QueryContainer queryC = exec.queryContainer(); + if (groupingContext != null) { + queryC = queryC.addGroups(groupingContext.groupMap.values()); + } + + Map aliases = new LinkedHashMap<>(); + // tracker for compound aggs seen in a group + Map compoundAggMap = new LinkedHashMap<>(); + + // followed by actual aggregates + for (NamedExpression ne : a.aggregates()) { + + // unwrap alias - it can be + // - an attribute (since we support aliases inside group-by) + // SELECT emp_no ... GROUP BY emp_no + // SELECT YEAR(hire_date) ... GROUP BY YEAR(hire_date) + + // - an agg function (typically) + // SELECT COUNT(*), AVG(salary) ... GROUP BY salary; + + // - a scalar function, which can be applied on an attribute or aggregate and can require one or multiple inputs + + // SELECT SIN(emp_no) ... GROUP BY emp_no + // SELECT CAST(YEAR(hire_date)) ... GROUP BY YEAR(hire_date) + // SELECT CAST(AVG(salary)) ... GROUP BY salary + // SELECT AVG(salary) + SIN(MIN(salary)) ... 
GROUP BY salary + + if (ne instanceof Alias || ne instanceof Function) { + Alias as = ne instanceof Alias ? (Alias) ne : null; + Expression child = as != null ? as.child() : ne; + + // record aliases in case they are later referred in the tree + if (as != null && as.child() instanceof NamedExpression) { + aliases.put(as.toAttribute(), ((NamedExpression) as.child()).toAttribute()); + } + + // + // look first for scalar functions which might wrap the actual grouped target + // (e.g. + // CAST(field) GROUP BY field or + // ABS(YEAR(field)) GROUP BY YEAR(field) or + // ABS(AVG(salary)) ... GROUP BY salary + // ) + if (child instanceof ScalarFunction) { + ScalarFunction f = (ScalarFunction) child; + ProcessorDefinition proc = f.asProcessorDefinition(); + + final AtomicReference qC = new AtomicReference<>(queryC); + + proc = proc.transformUp(p -> { + // bail out if the def is resolved + if (p.resolved()) { + return p; + } + + // get the backing expression and check if it belongs to a agg group or whether it's an expression in the first place + Expression exp = p.expression(); + GroupingAgg matchingGroup = null; + if (groupingContext != null) { + // is there a group (aggregation) for this expression ? 
+ matchingGroup = groupingContext.groupFor(exp); + } + else { + // a scalar function can be used only if has already been mentioned for grouping + // (otherwise it is the opposite of grouping) + if (exp instanceof ScalarFunction) { + throw new FoldingException(exp, "Scalar function %s can be used only if included already in grouping", exp.toString()); + } + } + + // found match for expression; if it's an attribute or scalar, end the processing chain with the reference to the backing agg + if (matchingGroup != null) { + if (exp instanceof Attribute || exp instanceof ScalarFunction) { + Processor action = null; + // special handling of dates since aggs return the typed Date object which needs extraction + // instead of handling this in the scroller, the folder handles this as it already got access to the extraction action + if (exp instanceof DateTimeHistogramFunction) { + action = ((UnaryProcessorDefinition) p).action(); + } + return new AggPathInput(exp, matchingGroup.propertyPath(), null, action); + } + } + // or found an aggregate expression (which has to work on an attribute used for grouping) + // (can happen when dealing with a root group) + if (Functions.isAggregate(exp)) { + Tuple withFunction = addAggFunction(matchingGroup, (AggregateFunction) exp, compoundAggMap, qC.get()); + qC.set(withFunction.v1()); + return withFunction.v2(); + } + // not an aggregate and no matching - go to a higher node (likely a function YEAR(birth_date)) + return p; + }); + + if (!proc.resolved()) { + throw new FoldingException(child, "Cannot find grouping for '%s'", Expressions.name(child)); + } + + // add the computed column + queryC = qC.get().addColumn(new ComputedRef(proc)); + + // TODO: is this needed? 
+ // redirect the alias to the scalar group id (changing the id altogether doesn't work it is already used in the aggpath) + //aliases.put(as.toAttribute(), sf.toAttribute()); + } + // apply the same logic above (for function inputs) to non-scalar functions with small variantions: + // instead of adding things as input, add them as full blown column + else { + GroupingAgg matchingGroup = null; + if (groupingContext != null) { + // is there a group (aggregation) for this expression ? + matchingGroup = groupingContext.groupFor(child); + } + // attributes can only refer to declared groups + if (child instanceof Attribute) { + Check.notNull(matchingGroup, "Cannot find group '%s'", Expressions.name(child)); + queryC = queryC.addAggColumn(matchingGroup.propertyPath()); + } + else { + // the only thing left is agg function + Check.isTrue(Functions.isAggregate(child), "Expected aggregate function inside alias; got %s", child.nodeString()); + Tuple withAgg = addAggFunction(matchingGroup, (AggregateFunction) child, compoundAggMap, queryC); + //FIXME: what about inner key + queryC = withAgg.v1().addAggColumn(withAgg.v2().context()); + if (withAgg.v2().innerKey() != null) { + throw new PlanningException("innerkey/matrix stats not handled (yet)"); + } + } + } + } + // not an Alias or a Function, means it's an Attribute so apply the same logic as above + else { + GroupingAgg matchingGroup = null; + if (groupingContext != null) { + matchingGroup = groupingContext.groupFor(ne); + Check.notNull(matchingGroup, "Cannot find group '%s'", Expressions.name(ne)); + queryC = queryC.addAggColumn(matchingGroup.propertyPath()); + } + } + } + + if (!aliases.isEmpty()) { + Map newAliases = new LinkedHashMap<>(queryC.aliases()); + newAliases.putAll(aliases); + queryC = queryC.withAliases(newAliases); + } + return new EsQueryExec(exec.location(), exec.index(), a.output(), queryC); + } + return a; + } + + private Tuple addAggFunction(GroupingAgg parentAgg, AggregateFunction f, Map compoundAggMap, 
QueryContainer queryC) { + String functionId = f.functionId(); + // handle count as a special case agg + if (f instanceof Count) { + Count c = (Count) f; + if (!c.distinct()) { + String path = parentAgg == null ? TotalCountRef.PATH : AggPath.bucketCount(parentAgg.asParentPath()); + Map pseudoFunctions = new LinkedHashMap<>(queryC.pseudoFunctions()); + pseudoFunctions.put(functionId, parentAgg); + return new Tuple<>(queryC.withPseudoFunctions(pseudoFunctions), new AggPathInput(f, path)); + } + } + + AggPathInput aggInput = null; + + // otherwise translate the function into an actual agg + String parentPath = parentAgg != null ? parentAgg.asParentPath() : null; + String groupId = parentAgg != null ? parentAgg.id() : null; + + if (f instanceof InnerAggregate) { + InnerAggregate ia = (InnerAggregate) f; + CompoundNumericAggregate outer = ia.outer(); + String cAggPath = compoundAggMap.get(outer); + + // the compound agg hasn't been seen before so initialize it + if (cAggPath == null) { + LeafAgg leafAgg = toAgg(parentPath, functionId, outer); + cAggPath = leafAgg.propertyPath(); + compoundAggMap.put(outer, cAggPath); + // add the agg (without any reference) + queryC = queryC.with(queryC.aggs().addAgg(leafAgg)); + } + + String aggPath = AggPath.metricValue(cAggPath, ia.innerId()); + // FIXME: concern leak - hack around MatrixAgg which is not + // generalized (afaik) + aggInput = new AggPathInput(f, aggPath, ia.innerKey() != null ? 
QueryTranslator.nameOf(ia.innerKey()) : null); + } + else { + LeafAgg leafAgg = toAgg(parentPath, functionId, f); + aggInput = new AggPathInput(f, leafAgg.propertyPath()); + queryC = queryC.with(queryC.aggs().addAgg(groupId, leafAgg)); + } + + return new Tuple<>(queryC, aggInput); + } + } + + private static class FoldOrderBy extends FoldingRule { + @Override + protected PhysicalPlan rule(OrderExec plan) { + + if (plan.child() instanceof EsQueryExec) { + EsQueryExec exec = (EsQueryExec) plan.child(); + QueryContainer qContainer = exec.queryContainer(); + + for (Order order : plan.order()) { + Direction direction = Direction.from(order.direction()); + + // check whether sorting is on an group (and thus nested agg) or field + Attribute attr = ((NamedExpression) order.child()).toAttribute(); + // check whether there's an alias (occurs with scalar functions which are not named) + attr = qContainer.aliases().getOrDefault(attr, attr); + String lookup = attr.id().toString(); + GroupingAgg group = qContainer.findGroupForAgg(lookup); + + // TODO: might need to validate whether the target field or group actually exist + if (group != null && group != GroupingAgg.DEFAULT_GROUP) { + // check whether the lookup matches a group + if (group.id().equals(lookup)) { + qContainer = qContainer.updateGroup(group.with(direction)); + } + // else it's a leafAgg + else { + qContainer = qContainer.updateGroup(group.with(lookup, direction)); + } + } + else { + // scalar functions typically require script ordering + if (attr instanceof ScalarFunctionAttribute) { + ScalarFunctionAttribute sfa = (ScalarFunctionAttribute) attr; + // is there an expression to order by? 
+ if (sfa.orderBy() != null) { + Expression ob = sfa.orderBy(); + if (ob instanceof NamedExpression) { + Attribute at = ((NamedExpression) ob).toAttribute(); + at = qContainer.aliases().getOrDefault(at, at); + qContainer = qContainer.sort(new AttributeSort(at, direction)); + } + // ignore constant + else if (!ob.foldable()) { + throw new PlanningException("does not know how to order by expression %s", ob); + } + } + // nope, use scripted sorting + else { + qContainer = qContainer.sort(new ScriptSort(sfa.script(), direction)); + } + } + else { + qContainer = qContainer.sort(new AttributeSort(attr, direction)); + } + } + } + + return exec.with(qContainer); + } + return plan; + } + } + + + private static class FoldLimit extends FoldingRule { + + @Override + protected PhysicalPlan rule(LimitExec plan) { + if (plan.child() instanceof EsQueryExec) { + EsQueryExec exec = (EsQueryExec) plan.child(); + int limit = Foldables.intValueOf(plan.limit()); + int currentSize = exec.queryContainer().limit(); + int newSize = currentSize < 0 ? 
limit : Math.min(currentSize, limit); + return exec.with(exec.queryContainer().withLimit(newSize)); + } + return plan; + } + } + + private static class PlanOutputToQueryRef extends FoldingRule { + @Override + protected PhysicalPlan rule(EsQueryExec exec) { + QueryContainer qContainer = exec.queryContainer(); + + // references (aka aggs) are in place + if (qContainer.hasColumns()) { + return exec; + } + + for (Attribute attr : exec.output()) { + qContainer = qContainer.addColumn(attr); + } + + // after all attributes have been resolved + return exec.with(qContainer); + } + } + + // + // local + // + + private static class PropagateEmptyLocal extends FoldingRule { + + @Override + protected PhysicalPlan rule(PhysicalPlan plan) { + if (plan.children().size() == 1) { + PhysicalPlan p = plan.children().get(0); + if (p instanceof LocalExec && ((LocalExec) p).isEmpty()) { + return new LocalExec(plan.location(), new EmptyExecutable(plan.output())); + } + } + return plan; + } + } + + // local exec currently means empty or one entry so limit can't really be applied + private static class LocalLimit extends FoldingRule { + + @Override + protected PhysicalPlan rule(LimitExec plan) { + if (plan.child() instanceof LocalExec) { + return plan.child(); + } + return plan; + } + } + + // rule for folding physical plans together + abstract static class FoldingRule extends Rule { + + @Override + public final PhysicalPlan apply(PhysicalPlan plan) { + return plan.transformUp(this::rule, typeToken()); + } + + @Override + protected abstract PhysicalPlan rule(SubPlan plan); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java new file mode 100644 index 00000000000..c87668fd901 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -0,0 +1,880 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.BinaryExpression; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.NestedFieldAttribute; +import org.elasticsearch.xpack.sql.expression.RootFieldAttribute; +import org.elasticsearch.xpack.sql.expression.UnaryExpression; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.Functions; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Avg; +import org.elasticsearch.xpack.sql.expression.function.aggregate.CompoundNumericAggregate; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.sql.expression.function.aggregate.ExtendedStats; +import org.elasticsearch.xpack.sql.expression.function.aggregate.MatrixStats; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Max; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Min; +import org.elasticsearch.xpack.sql.expression.function.aggregate.PercentileRanks; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Percentiles; +import 
org.elasticsearch.xpack.sql.expression.function.aggregate.Stats; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Sum; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeHistogramFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.expression.predicate.And; +import org.elasticsearch.xpack.sql.expression.predicate.BinaryComparison; +import org.elasticsearch.xpack.sql.expression.predicate.Equals; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThan; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.IsNotNull; +import org.elasticsearch.xpack.sql.expression.predicate.LessThan; +import org.elasticsearch.xpack.sql.expression.predicate.LessThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.Not; +import org.elasticsearch.xpack.sql.expression.predicate.Or; +import org.elasticsearch.xpack.sql.expression.predicate.Range; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.StringQueryPredicate; +import org.elasticsearch.xpack.sql.expression.regex.Like; +import org.elasticsearch.xpack.sql.expression.regex.RLike; +import org.elasticsearch.xpack.sql.querydsl.agg.AggFilter; +import org.elasticsearch.xpack.sql.querydsl.agg.AggPath; +import org.elasticsearch.xpack.sql.querydsl.agg.AndAggFilter; +import 
org.elasticsearch.xpack.sql.querydsl.agg.AvgAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.CardinalityAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.ExtendedStatsAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByColumnAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByDateAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByScriptAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupingAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.LeafAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.MatrixStatsAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.MaxAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.MinAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.OrAggFilter; +import org.elasticsearch.xpack.sql.querydsl.agg.PercentileRanksAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.PercentilesAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.StatsAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.SumAgg; +import org.elasticsearch.xpack.sql.querydsl.query.AndQuery; +import org.elasticsearch.xpack.sql.querydsl.query.ExistsQuery; +import org.elasticsearch.xpack.sql.querydsl.query.MatchQuery; +import org.elasticsearch.xpack.sql.querydsl.query.MultiMatchQuery; +import org.elasticsearch.xpack.sql.querydsl.query.NestedQuery; +import org.elasticsearch.xpack.sql.querydsl.query.NotQuery; +import org.elasticsearch.xpack.sql.querydsl.query.OrQuery; +import org.elasticsearch.xpack.sql.querydsl.query.Query; +import org.elasticsearch.xpack.sql.querydsl.query.QueryStringQuery; +import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery; +import org.elasticsearch.xpack.sql.querydsl.query.RegexQuery; +import org.elasticsearch.xpack.sql.querydsl.query.ScriptQuery; +import org.elasticsearch.xpack.sql.querydsl.query.TermQuery; +import org.elasticsearch.xpack.sql.querydsl.query.WildcardQuery; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataTypes; +import 
org.elasticsearch.xpack.sql.util.Check; +import org.elasticsearch.xpack.sql.util.ReflectionUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; + +import static java.lang.String.format; +import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.sql.expression.Foldables.doubleValuesOf; +import static org.elasticsearch.xpack.sql.expression.Foldables.stringValueOf; +import static org.elasticsearch.xpack.sql.expression.Foldables.valueOf; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate; + +abstract class QueryTranslator { + + static final List> QUERY_TRANSLATORS = Arrays.asList( + new BinaryComparisons(), + new Ranges(), + new BinaryLogic(), + new Nots(), + new Nulls(), + new Likes(), + new StringQueries(), + new Matches(), + new MultiMatches() + ); + + static final List> AGG_TRANSLATORS = Arrays.asList( + new Maxes(), + new Mins(), + new Avgs(), + new Sums(), + new StatsAggs(), + new ExtendedStatsAggs(), + new MatrixStatsAggs(), + new PercentilesAggs(), + new PercentileRanksAggs(), + new DistinctCounts(), + new DateTimes() + ); + + static class QueryTranslation { + final Query query; + // Agg filter / Function or Agg association + final AggFilter aggFilter; + + QueryTranslation(Query query) { + this(query, null); + } + + QueryTranslation(AggFilter aggFilter) { + this(null, aggFilter); + } + + QueryTranslation(Query query, AggFilter aggFilter) { + this.query = query; + this.aggFilter = aggFilter; + } + } + + static QueryTranslation toQuery(Expression e, boolean onAggs) { + QueryTranslation translation = null; + for (ExpressionTranslator translator : QUERY_TRANSLATORS) { + translation = 
translator.translate(e, onAggs); + if (translation != null) { + return translation; + } + } + + throw new UnsupportedOperationException(format(Locale.ROOT, "Don't know how to translate %s %s", e.nodeName(), e)); + } + + static LeafAgg toAgg(String parent, String id, Function f) { + + for (AggTranslator translator : AGG_TRANSLATORS) { + LeafAgg agg = translator.apply(id, parent, f); + if (agg != null) { + return agg; + } + } + + throw new UnsupportedOperationException(format(Locale.ROOT, "Don't know how to translate %s %s", f.nodeName(), f)); + } + + static class GroupingContext { + final GroupingAgg head; + final GroupingAgg tail; + final ExpressionId headAggId; + + final Map groupMap; + final List aggNames; + final String groupPath; + + GroupingContext(Map groupMap, String propertyPath) { + this.groupMap = groupMap; + this.groupPath = propertyPath; + + aggNames = new ArrayList<>(groupMap.size()); + for (GroupingAgg gAgg : groupMap.values()) { + aggNames.add(gAgg.id()); + } + + Iterator> iterator = groupMap.entrySet().iterator(); + + Entry entry = iterator.next(); + headAggId = entry.getKey(); + head = entry.getValue(); + + GroupingAgg lastAgg = head; + + while (iterator.hasNext()) { + lastAgg = iterator.next().getValue(); + } + tail = lastAgg; + } + + + GroupingAgg groupFor(Expression exp) { + if (Functions.isAggregate(exp)) { + AggregateFunction f = (AggregateFunction) exp; + // if there's at least one agg in the tree + if (groupPath != null) { + GroupingAgg matchingGroup = null; + // group found - finding the dedicated agg + if (f.field() instanceof NamedExpression) { + matchingGroup = groupMap.get(((NamedExpression) f.field()).id()); + } + // return matching group or the tail (last group) + return matchingGroup != null ? 
matchingGroup : tail; + } + else { + return null; + } + } + if (exp instanceof NamedExpression) { + return groupMap.get(((NamedExpression) exp).id()); + } + throw new SqlIllegalArgumentException("Don't know how to find group for expression %s", exp); + } + + @Override + public String toString() { + return groupMap.toString(); + } + } + + // creates a tree of GroupBy aggs plus some extra information + // useful for tree validation/group referencing + static GroupingContext groupBy(List groupings) { + if (groupings.isEmpty()) { + return null; + } + + Map aggMap = new LinkedHashMap<>(); + + // nested the aggs but also + // identify each agg by an expression for later referencing + String propertyPath = ""; + for (Expression exp : groupings) { + String aggId; + if (exp instanceof NamedExpression) { + NamedExpression ne = (NamedExpression) exp; + + // change analyzed to non non-analyzed attributes + if (exp instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) exp; + if (fa.isAnalyzed()) { + ne = fa.notAnalyzedAttribute(); + } + } + aggId = ne.id().toString(); + + propertyPath = AggPath.path(propertyPath, aggId); + + GroupingAgg agg = null; + + // handle functions differently + if (exp instanceof Function) { + // dates are handled differently because of date histograms + if (exp instanceof DateTimeHistogramFunction) { + DateTimeHistogramFunction dthf = (DateTimeHistogramFunction) exp; + agg = new GroupByDateAgg(aggId, AggPath.bucketValue(propertyPath), nameOf(exp), dthf.interval(), dthf.timeZone()); + } + // all other scalar functions become a script + else if (exp instanceof ScalarFunction) { + ScalarFunction sf = (ScalarFunction) exp; + agg = new GroupByScriptAgg(aggId, AggPath.bucketValue(propertyPath), nameOf(exp), sf.asScript()); + } + else { + throw new SqlIllegalArgumentException("Cannot GROUP BY function %s", exp); + } + } + else { + agg = new GroupByColumnAgg(aggId, AggPath.bucketValue(propertyPath), ne.name()); + } + + aggMap.put(ne.id(), agg); + 
} + else { + throw new SqlIllegalArgumentException("Don't know how to group on %s", exp.nodeString()); + } + } + return new GroupingContext(aggMap, propertyPath); + } + + static QueryTranslation and(Location loc, QueryTranslation left, QueryTranslation right) { + Check.isTrue(left != null || right != null, "Both expressions are null"); + if (left == null) { + return right; + } + if (right == null) { + return left; + } + + Query newQ = null; + if (left.query != null || right.query != null) { + newQ = and(loc, left.query, right.query); + } + + AggFilter aggFilter = null; + + if (left.aggFilter == null) { + aggFilter = right.aggFilter; + } + else if (right.aggFilter == null) { + aggFilter = left.aggFilter; + } + else { + aggFilter = new AndAggFilter(left.aggFilter, right.aggFilter); + } + + return new QueryTranslation(newQ, aggFilter); + } + + static Query and(Location loc, Query left, Query right) { + Check.isTrue(left != null || right != null, "Both expressions are null"); + if (left == null) { + return right; + } + if (right == null) { + return left; + } + return new AndQuery(loc, left, right); + } + + static QueryTranslation or(Location loc, QueryTranslation left, QueryTranslation right) { + Check.isTrue(left != null || right != null, "Both expressions are null"); + if (left == null) { + return right; + } + if (right == null) { + return left; + } + + Query newQ = null; + if (left.query != null || right.query != null) { + newQ = or(loc, left.query, right.query); + } + + AggFilter aggFilter = null; + + if (left.aggFilter == null) { + aggFilter = right.aggFilter; + } + else if (right.aggFilter == null) { + aggFilter = left.aggFilter; + } + else { + aggFilter = new OrAggFilter(left.aggFilter, right.aggFilter); + } + + return new QueryTranslation(newQ, aggFilter); + } + + static Query or(Location loc, Query left, Query right) { + Check.isTrue(left != null || right != null, "Both expressions are null"); + + if (left == null) { + return right; + } + if (right == null) { 
+ return left; + } + return new OrQuery(loc, left, right); + } + + static Query not(Query query) { + Check.isTrue(query != null, "Expressions is null"); + return new NotQuery(query.location(), query); + } + + static String nameOf(Expression e) { + if (e instanceof DateTimeFunction) { + return nameOf(((DateTimeFunction) e).field()); + } + if (e instanceof NamedExpression) { + return ((NamedExpression) e).name(); + } + if (e instanceof Literal) { + return String.valueOf(e.fold()); + } + throw new SqlIllegalArgumentException("Cannot determine name for %s", e); + } + + static String idOf(Expression e) { + if (e instanceof NamedExpression) { + return ((NamedExpression) e).id().toString(); + } + throw new SqlIllegalArgumentException("Cannot determine id for %s", e); + } + + static String dateFormat(Expression e) { + if (e instanceof DateTimeFunction) { + return ((DateTimeFunction) e).dateTimeFormat(); + } + return null; + } + + static String field(AggregateFunction af) { + Expression arg = af.field(); + if (arg instanceof RootFieldAttribute) { + return ((RootFieldAttribute) arg).name(); + } + if (arg instanceof Literal) { + return String.valueOf(((Literal) arg).value()); + } + throw new SqlIllegalArgumentException("Does not know how to convert argument %s for function %s", arg.nodeString(), af.nodeString()); + } + + // TODO: need to optimize on ngram + // TODO: see whether escaping is needed + static class Likes extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(BinaryExpression e, boolean onAggs) { + Query q = null; + boolean analyzed = true; + String target = null; + + if (e.left() instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) e.left(); + analyzed = fa.isAnalyzed(); + target = nameOf(analyzed ? 
fa : fa.notAnalyzedAttribute()); + } + + String pattern = sqlToEsPatternMatching(stringValueOf(e.right())); + if (e instanceof Like) { + if (analyzed) { + q = new QueryStringQuery(e.location(), pattern, target); + } + else { + q = new WildcardQuery(e.location(), nameOf(e.left()), pattern); + } + } + + if (e instanceof RLike) { + if (analyzed) { + q = new QueryStringQuery(e.location(), "/" + pattern + "/", target); + } + else { + q = new RegexQuery(e.location(), nameOf(e.left()), sqlToEsPatternMatching(stringValueOf(e.right()))); + } + } + + return q != null ? new QueryTranslation(wrapIfNested(q, e.left())) : null; + } + + private static String sqlToEsPatternMatching(String pattern) { + return pattern.replace("%", "*").replace("_", "?"); + } + } + + static class StringQueries extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(StringQueryPredicate q, boolean onAggs) { + return new QueryTranslation(new QueryStringQuery(q.location(), q.query(), q.fields(), q)); + } + } + + static class Matches extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(MatchQueryPredicate q, boolean onAggs) { + return new QueryTranslation(wrapIfNested(new MatchQuery(q.location(), nameOf(q.field()), q.query(), q), q.field())); + } + } + + static class MultiMatches extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(MultiMatchQueryPredicate q, boolean onAggs) { + return new QueryTranslation(new MultiMatchQuery(q.location(), q.query(), q.fields(), q)); + } + } + + static class BinaryLogic extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(BinaryExpression e, boolean onAggs) { + if (e instanceof And) { + return and(e.location(), toQuery(e.left(), onAggs), toQuery(e.right(), onAggs)); + } + if (e instanceof Or) { + return or(e.location(), toQuery(e.left(), onAggs), toQuery(e.right(), onAggs)); + } + + return null; + } + } + + static class Nots extends ExpressionTranslator { + + 
@Override + protected QueryTranslation asQuery(Not not, boolean onAggs) { + QueryTranslation translation = toQuery(not.child(), onAggs); + return new QueryTranslation(not(translation.query), translation.aggFilter); + } + } + + static class Nulls extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(UnaryExpression ue, boolean onAggs) { + // TODO: handle onAggs - missing bucket aggregation + if (ue instanceof IsNotNull) { + return new QueryTranslation(new ExistsQuery(ue.location(), nameOf(ue.child()))); + } + return null; + } + } + + // assume the Optimizer properly orders the predicates to ease the translation + static class BinaryComparisons extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(BinaryComparison bc, boolean onAggs) { + Check.isTrue(bc.right().foldable(), + "Line %d:%d - Comparisons against variables are not (currently) supported; offender %s in %s", + bc.right().location().getLineNumber(), bc.right().location().getColumnNumber(), bc.right().nodeName(), bc.nodeName()); + + if (bc.left() instanceof NamedExpression) { + NamedExpression ne = (NamedExpression) bc.left(); + + Query query = null; + AggFilter aggFilter = null; + + Attribute at = ne.toAttribute(); + + // scalar function can appear in both WHERE and HAVING so handle it first + // in both cases the function script is used - script-query/query for the former, bucket-selector/aggFilter for the latter + + if (at instanceof ScalarFunctionAttribute) { + ScalarFunctionAttribute sfa = (ScalarFunctionAttribute) at; + ScriptTemplate scriptTemplate = sfa.script(); + + String template = formatTemplate(format(Locale.ROOT, "%s %s {}", scriptTemplate.template(), bc.symbol())); + // no need to bind the wrapped/target agg - it is already available through the nested script (needed to create the script itself) + Params params = paramsBuilder().script(scriptTemplate.params()).variable(valueOf(bc.right())).build(); + ScriptTemplate script = new 
ScriptTemplate(template, params, DataTypes.BOOLEAN); + if (onAggs) { + aggFilter = new AggFilter(at.id().toString(), script); + } + else { + query = new ScriptQuery(at.location(), script); + } + } + + // + // Agg context means HAVING -> PipelineAggs + // + else if (onAggs) { + String template = null; + Params params = null; + + // agg function + if (at instanceof AggregateFunctionAttribute) { + AggregateFunctionAttribute fa = (AggregateFunctionAttribute) at; + + // TODO: handle case where both sides of the comparison are functions + template = formatTemplate(format(Locale.ROOT, "{} %s {}", bc.symbol())); + + // bind the agg and the variable to the script + params = paramsBuilder().agg(fa).variable(valueOf(bc.right())).build(); + } + + aggFilter = new AggFilter(at.id().toString(), new ScriptTemplate(template, params, DataTypes.BOOLEAN)); + } + + // + // No Agg context means WHERE clause + // + else { + if (at instanceof FieldAttribute) { + query = wrapIfNested(translateQuery(bc), ne); + } + } + + return new QueryTranslation(query, aggFilter); + } + + else { + throw new UnsupportedOperationException("No idea how to translate " + bc.left()); + } + } + + private static Query translateQuery(BinaryComparison bc) { + Location loc = bc.location(); + String name = nameOf(bc.left()); + Object value = valueOf(bc.right()); + String format = dateFormat(bc.left()); + + if (bc instanceof GreaterThan) { + return new RangeQuery(loc, name, value, false, null, false, format); + } + if (bc instanceof GreaterThanOrEqual) { + return new RangeQuery(loc, name, value, true, null, false, format); + } + if (bc instanceof LessThan) { + return new RangeQuery(loc, name, null, false, value, false, format); + } + if (bc instanceof LessThanOrEqual) { + return new RangeQuery(loc, name, null, false, value, true, format); + } + if (bc instanceof Equals) { + if (bc.left() instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) bc.left(); + if (fa.isAnalyzed()) { + return new 
MatchQuery(loc, name, value); + } + } + return new TermQuery(loc, name, value); + } + + Check.isTrue(false, "don't know how to translate binary comparison %s in %s", bc.right().nodeString(), bc); + return null; + } + } + + static class Ranges extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(Range r, boolean onAggs) { + Object lower = valueOf(r.lower()); + Object upper = valueOf(r.upper()); + + Expression e = r.value(); + + + if (e instanceof NamedExpression) { + NamedExpression ne = (NamedExpression) e; + + Query query = null; + AggFilter aggFilter = null; + + Attribute at = ne.toAttribute(); + + // scalar function can appear in both WHERE and HAVING so handle it first + // in both cases the function script is used - script-query/query for the former, bucket-selector/aggFilter for the latter + + if (at instanceof ScalarFunctionAttribute) { + ScalarFunctionAttribute sfa = (ScalarFunctionAttribute) at; + ScriptTemplate scriptTemplate = sfa.script(); + + String template = formatTemplate(format(Locale.ROOT, "({} %s %s) && (%s %s {})", + r.includeLower() ? "<=" : "<", + scriptTemplate.template(), + scriptTemplate.template(), + r.includeUpper() ? 
"<=" : "<")); + + // no need to bind the wrapped/target - it is already available through the nested script (needed to create the script itself) + Params params = paramsBuilder().variable(lower) + .script(scriptTemplate.params()) + .script(scriptTemplate.params()) + .variable(upper) + .build(); + + ScriptTemplate script = new ScriptTemplate(template, params, DataTypes.BOOLEAN); + + if (onAggs) { + aggFilter = new AggFilter(at.id().toString(), script); + } + else { + query = new ScriptQuery(at.location(), script); + } + } + + // + // HAVING + // + else if (onAggs) { + String template = null; + Params params = null; + + // agg function + if (at instanceof AggregateFunctionAttribute) { + AggregateFunctionAttribute fa = (AggregateFunctionAttribute) at; + + template = formatTemplate(format(Locale.ROOT, "{} %s {} && {} %s {}", + r.includeLower() ? "<=" : "<", + r.includeUpper() ? "<=" : "<")); + + params = paramsBuilder().variable(lower) + .agg(fa) + .agg(fa) + .variable(upper) + .build(); + + } + aggFilter = new AggFilter(((NamedExpression) r.value()).id().toString(), new ScriptTemplate(template, params, DataTypes.BOOLEAN)); + } + // + // WHERE + // + else { + // typical range + if (at instanceof FieldAttribute) { + RangeQuery rangeQuery = new RangeQuery(r.location(), nameOf(r.value()), + valueOf(r.lower()), r.includeLower(), valueOf(r.upper()), r.includeUpper(), dateFormat(r.value())); + query = wrapIfNested(rangeQuery, r.value()); + } + } + + return new QueryTranslation(query, aggFilter); + } + else { + throw new UnsupportedOperationException("No idea how to translate " + e); + } + + } + } + + + // + // Agg translators + // + + static class DistinctCounts extends SingleValueAggTranslator { + + @Override + protected LeafAgg toAgg(String id, String path, Count c) { + if (!c.distinct()) { + return null; + } + return new CardinalityAgg(id, path, field(c)); + } + } + + static class Sums extends SingleValueAggTranslator { + + @Override + protected LeafAgg toAgg(String id, 
String path, Sum s) { + return new SumAgg(id, path, field(s)); + } + } + + static class Avgs extends SingleValueAggTranslator { + + @Override + protected LeafAgg toAgg(String id, String path, Avg a) { + return new AvgAgg(id, path, field(a)); + } + } + + static class Maxes extends SingleValueAggTranslator { + + @Override + protected LeafAgg toAgg(String id, String path, Max m) { + return new MaxAgg(id, path, field(m)); + } + } + + static class Mins extends SingleValueAggTranslator { + + @Override + protected LeafAgg toAgg(String id, String path, Min m) { + return new MinAgg(id, path, field(m)); + } + } + + static class StatsAggs extends CompoundAggTranslator { + + @Override + protected LeafAgg toAgg(String id, String path, Stats s) { + return new StatsAgg(id, path, field(s)); + } + } + + static class ExtendedStatsAggs extends CompoundAggTranslator { + + @Override + protected LeafAgg toAgg(String id, String path, ExtendedStats e) { + return new ExtendedStatsAgg(id, path, field(e)); + } + } + + static class MatrixStatsAggs extends CompoundAggTranslator { + + @Override + protected LeafAgg toAgg(String id, String path, MatrixStats m) { + return new MatrixStatsAgg(id, path, singletonList(field(m))); + } + } + + static class PercentilesAggs extends CompoundAggTranslator { + + @Override + protected LeafAgg toAgg(String id, String path, Percentiles p) { + return new PercentilesAgg(id, path, field(p), doubleValuesOf(p.percents())); + } + } + + static class PercentileRanksAggs extends CompoundAggTranslator { + + @Override + protected LeafAgg toAgg(String id, String path, PercentileRanks p) { + return new PercentileRanksAgg(id, path, field(p), doubleValuesOf(p.values())); + } + } + + static class DateTimes extends SingleValueAggTranslator { + + @Override + protected LeafAgg toAgg(String id, String path, Min m) { + return new MinAgg(id, path, field(m)); + } + } + + abstract static class AggTranslator { + + private final Class typeToken = 
ReflectionUtils.detectSuperTypeForRuleLike(getClass()); + + @SuppressWarnings("unchecked") + public final LeafAgg apply(String id, String parent, Function f) { + return (typeToken.isInstance(f) ? asAgg(id, parent, (F) f) : null); + } + + protected abstract LeafAgg asAgg(String id, String parent, F f); + } + + abstract static class SingleValueAggTranslator extends AggTranslator { + + @Override + protected final LeafAgg asAgg(String id, String parent, F function) { + String path = parent == null ? id : AggPath.path(parent, id); + return toAgg(id, AggPath.metricValue(path), function); + } + + protected abstract LeafAgg toAgg(String id, String path, F f); + } + + abstract static class CompoundAggTranslator extends AggTranslator { + + @Override + protected final LeafAgg asAgg(String id, String parent, C function) { + String path = parent == null ? id : AggPath.path(parent, id); + return toAgg(id, path, function); + } + + protected abstract LeafAgg toAgg(String id, String path, C f); + } + + + abstract static class ExpressionTranslator { + + private final Class typeToken = ReflectionUtils.detectSuperTypeForRuleLike(getClass()); + + @SuppressWarnings("unchecked") + public QueryTranslation translate(Expression exp, boolean onAggs) { + return (typeToken.isInstance(exp) ? 
asQuery((E) exp, onAggs) : null); + } + + protected abstract QueryTranslation asQuery(E e, boolean onAggs); + + protected static Query wrapIfNested(Query query, Expression exp) { + if (exp instanceof NestedFieldAttribute) { + NestedFieldAttribute nfa = (NestedFieldAttribute) exp; + return new NestedQuery(nfa.location(), nfa.parentPath(), query); + } + return query; + } + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/Verifier.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/Verifier.java new file mode 100644 index 00000000000..c9a1b9fa0b1 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/planner/Verifier.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.plan.physical.AggregateExec; +import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.sql.plan.physical.Unexecutable; +import org.elasticsearch.xpack.sql.plan.physical.UnplannedExec; +import org.elasticsearch.xpack.sql.tree.Node; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +abstract class Verifier { + + static class Failure { + private final Node source; + private final String message; + + Failure(Node source, String message) { + this.source = source; + this.message = message; + } + + Node source() { + return source; + } + + String message() { + return message; + } + + @Override + public int hashCode() { + return source.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + 
return false; + } + + Verifier.Failure other = (Verifier.Failure) obj; + return Objects.equals(source, other.source); + } + } + + private static Failure fail(Node source, String message) { + return new Failure(source, message); + } + + static List verifyMappingPlan(PhysicalPlan plan) { + List failures = new ArrayList<>(); + + plan.forEachUp(p -> { + if (p instanceof UnplannedExec) { + failures.add(fail(p, "Unplanned item")); + } + p.forEachExpressionsUp(e -> { + if (e.childrenResolved() && !e.resolved()) { + failures.add(fail(e, "Unresolved expression")); + } + }); + + if (p instanceof AggregateExec) { + forbidMultiFieldGroupBy((AggregateExec) p, failures); + } + }); + + return failures; + } + + private static void forbidMultiFieldGroupBy(AggregateExec a, List failures) { + if (a.groupings().size() > 1) { + failures.add(fail(a.groupings().get(0), "Currently, only a single expression can be used with GROUP BY; please select one of " + + Expressions.names(a.groupings()))); + } + } + + + static List verifyExecutingPlan(PhysicalPlan plan) { + List failures = new ArrayList<>(); + + plan.forEachUp(p -> { + if (p instanceof Unexecutable) { + failures.add(fail(p, "Unexecutable item")); + } + p.forEachExpressionsUp(e -> { + if (e.childrenResolved() && !e.resolved()) { + failures.add(fail(e, "Unresolved expression")); + } + }); + }); + + return failures; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlProtocolRestAction.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlProtocolRestAction.java new file mode 100644 index 00000000000..45646cece9b --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlProtocolRestAction.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestRequest.Method; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.action.RestResponseListener; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.protocol.shared.Response; +import org.elasticsearch.xpack.sql.session.Cursor; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.function.Function; + +import static org.elasticsearch.rest.BytesRestResponse.TEXT_CONTENT_TYPE; +import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.xpack.sql.util.StringUtils.EMPTY; + +public abstract class AbstractSqlProtocolRestAction extends BaseRestHandler { + public static final NamedWriteableRegistry CURSOR_REGISTRY = new NamedWriteableRegistry(Cursor.getNamedWriteables()); + private final AbstractProto proto; + + protected AbstractSqlProtocolRestAction(Settings settings, AbstractProto proto) { + super(settings); + this.proto = proto; + } + + protected abstract RestChannelConsumer innerPrepareRequest(Request request, Client client) throws IOException; + + protected ActionListener 
toActionListener(RestChannel channel, Function responseBuilder) { + return new RestResponseListener(channel) { + @Override + public RestResponse buildResponse(T response) throws Exception { + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + try (DataOutputStream dataOutputStream = new DataOutputStream(bytesStreamOutput)) { + // TODO use the version from the client + // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3080 + proto.writeResponse(responseBuilder.apply(response), Proto.CURRENT_VERSION, dataOutputStream); + } + return new BytesRestResponse(OK, TEXT_CONTENT_TYPE, bytesStreamOutput.bytes()); + } + } + }; + } + + @Override + protected final RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + if (restRequest.method() == Method.HEAD) { + return channel -> channel.sendResponse(new BytesRestResponse(OK, EMPTY)); + } + Request request; + try (DataInputStream in = new DataInputStream(restRequest.content().streamInput())) { + request = proto.readRequest(in); + } + return innerPrepareRequest(request, client); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java new file mode 100644 index 00000000000..377eea04c82 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java @@ -0,0 +1,167 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * Formats {@link SqlResponse} for the CLI. {@linkplain Writeable} so + * that its state can be saved between pages of results. + */ +public class CliFormatter implements Writeable { + /** + * The minimum width for any column in the formatted results. + */ + private static final int MIN_COLUMN_WIDTH = 15; + + private int[] width; + + /** + * Create a new {@linkplain CliFormatter} for formatting responses similar + * to the provided {@link SqlResponse}. + */ + public CliFormatter(SqlResponse response) { + // Figure out the column widths: + // 1. Start with the widths of the column names + width = new int[response.columns().size()]; + for (int i = 0; i < width.length; i++) { + // TODO read the width from the data type? + width[i] = Math.max(MIN_COLUMN_WIDTH, response.columns().get(i).name().length()); + } + + // 2. Expand columns to fit the largest value + for (List row : response.rows()) { + for (int i = 0; i < width.length; i++) { + // TODO are we sure toString is correct here? What about dates that come back as longs. + // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 + width[i] = Math.max(width[i], Objects.toString(row.get(i)).length()); + } + } + } + + public CliFormatter(StreamInput in) throws IOException { + width = in.readIntArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeIntArray(width); + } + + /** + * Format the provided {@linkplain SqlResponse} for the CLI + * including the header lines. 
+ */ + public String formatWithHeader(SqlResponse response) { + // The header lines + StringBuilder sb = new StringBuilder(estimateSize(response.rows().size() + 2)); + for (int i = 0; i < width.length; i++) { + if (i > 0) { + sb.append('|'); + } + + String name = response.columns().get(i).name(); + // left padding + int leftPadding = (width[i] - name.length()) / 2; + for (int j = 0; j < leftPadding; j++) { + sb.append(' '); + } + sb.append(name); + // right padding + for (int j = 0; j < width[i] - name.length() - leftPadding; j++) { + sb.append(' '); + } + } + sb.append('\n'); + + for (int i = 0; i < width.length; i++) { + if (i > 0) { + sb.append('+'); + } + for (int j = 0; j < width[i]; j++) { + sb.append('-'); // emdash creates issues + } + } + sb.append('\n'); + + + /* Now format the results. Sadly, this means that column + * widths are entirely determined by the first batch of + * results. */ + return formatWithoutHeader(sb, response); + } + + /** + * Format the provided {@linkplain SqlResponse} for the CLI + * without the header lines. + */ + public String formatWithoutHeader(SqlResponse response) { + return formatWithoutHeader(new StringBuilder(estimateSize(response.rows().size())), response).toString(); + } + + private String formatWithoutHeader(StringBuilder sb, SqlResponse response) { + for (List row : response.rows()) { + for (int i = 0; i < width.length; i++) { + if (i > 0) { + sb.append('|'); + } + + // TODO are we sure toString is correct here? What about dates that come back as longs. 
+ // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 + String string = Objects.toString(row.get(i)); + if (string.length() <= width[i]) { + // Pad + sb.append(string); + int padding = width[i] - string.length(); + for (int p = 0; p < padding; p++) { + sb.append(' '); + } + } else { + // Trim + sb.append(string.substring(0, width[i] - 1)); + sb.append('~'); + } + } + sb.append('\n'); + } + return sb.toString(); + } + + /** + * Pick a good estimate of the buffer size needed to contain the rows. + */ + int estimateSize(int rows) { + /* Each column has either a '|' or a '\n' after it + * so initialize size to number of columns then add + * up the actual widths of each column. */ + int rowWidthEstimate = width.length; + for (int w : width) { + rowWidthEstimate += w; + } + return rowWidthEstimate * rows; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CliFormatter that = (CliFormatter) o; + return Arrays.equals(width, that.width); + } + + @Override + public int hashCode() { + return Arrays.hashCode(width); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java new file mode 100644 index 00000000000..fc03f47f922 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.RowSet; + +import java.io.IOException; +import java.util.Objects; + +/** + * The cursor that wraps all necessary information for textual representation of the result table + */ +public class CliFormatterCursor implements Cursor { + public static final String NAME = "f"; + + private final Cursor delegate; + private final CliFormatter formatter; + + /** + * If the newCursor is empty, returns an empty cursor. Otherwise, creates a new + * CliFormatterCursor that wraps the newCursor. + */ + public static Cursor wrap(Cursor newCursor, CliFormatter formatter) { + if (newCursor == EMPTY) { + return EMPTY; + } + return new CliFormatterCursor(newCursor, formatter); + } + + private CliFormatterCursor(Cursor delegate, CliFormatter formatter) { + this.delegate = delegate; + this.formatter = formatter; + } + + public CliFormatterCursor(StreamInput in) throws IOException { + delegate = in.readNamedWriteable(Cursor.class); + formatter = new CliFormatter(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(delegate); + formatter.writeTo(out); + } + + public CliFormatter getCliFormatter() { + return formatter; + } + + @Override + public void nextPage(Configuration cfg, Client client, ActionListener listener) { + delegate.nextPage(cfg, client, listener); + } + + @Override + public void clear(Configuration cfg, Client client, ActionListener listener) { + delegate.clear(cfg, client, listener); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean equals(Object o) { + if (this 
== o) return true; + if (o == null || getClass() != o.getClass()) return false; + CliFormatterCursor that = (CliFormatterCursor) o; + return Objects.equals(delegate, that.delegate) && + Objects.equals(formatter, that.formatter); + } + + @Override + public int hashCode() { + return Objects.hash(delegate, formatter); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/JdbcCursor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/JdbcCursor.java new file mode 100644 index 00000000000..cbe62182cb4 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/JdbcCursor.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.RowSet; + +import java.io.IOException; +import java.sql.JDBCType; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +/** + * The cursor that wraps all necessary information for jdbc + */ +public class JdbcCursor implements Cursor { + public static final String NAME = "j"; + private final Cursor delegate; + private final List types; + + /** + * If the newCursor is empty, returns an empty cursor. Otherwise, creates a new + * CliFormatterCursor that wraps the newCursor. 
+ */ + public static Cursor wrap(Cursor newCursor, List types) { + if (newCursor == EMPTY) { + return EMPTY; + } + return new JdbcCursor(newCursor, types); + } + + private JdbcCursor(Cursor delegate, List types) { + this.delegate = delegate; + this.types = types; + } + + public JdbcCursor(StreamInput in) throws IOException { + delegate = in.readNamedWriteable(Cursor.class); + int size = in.readVInt(); + types = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + types.add(JDBCType.valueOf(in.readVInt())); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(delegate); + out.writeVInt(types.size()); + for (JDBCType type : types) { + out.writeVInt(type.getVendorTypeNumber()); + } + } + + public List getTypes() { + return types; + } + + @Override + public void nextPage(Configuration cfg, Client client, ActionListener listener) { + delegate.nextPage(cfg, client, listener); + } + + @Override + public void clear(Configuration cfg, Client client, ActionListener listener) { + delegate.clear(cfg, client, listener); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + JdbcCursor that = (JdbcCursor) o; + return Objects.equals(delegate, that.delegate) && + Objects.equals(types, that.types); + } + + @Override + public int hashCode() { + return Objects.hash(delegate, types); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlCliAction.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlCliAction.java new file mode 100644 index 00000000000..f998f16850f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlCliAction.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.Version; +import org.elasticsearch.action.main.MainAction; +import org.elasticsearch.action.main.MainRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.xpack.sql.cli.net.protocol.InfoResponse; +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto; +import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.cli.net.protocol.QueryCloseRequest; +import org.elasticsearch.xpack.sql.cli.net.protocol.QueryInitRequest; +import org.elasticsearch.xpack.sql.cli.net.protocol.QueryInitResponse; +import org.elasticsearch.xpack.sql.cli.net.protocol.QueryPageRequest; +import org.elasticsearch.xpack.sql.cli.net.protocol.QueryPageResponse; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryCloseResponse; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlRequest; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.function.Consumer; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public class RestSqlCliAction extends AbstractSqlProtocolRestAction { + public RestSqlCliAction(Settings settings, RestController controller) { + super(settings, Proto.INSTANCE); + controller.registerHandler(POST, "/_xpack/sql/cli", this); + } + + @Override + public String getName() { + return "xpack_sql_cli_action"; + } + + @Override + protected RestChannelConsumer innerPrepareRequest(Request request, Client 
client) throws IOException { + Consumer consumer = operation(request, client); + return consumer::accept; + } + + /** + * Actual implementation of the operation + */ + public Consumer operation(Request request, Client client) + throws IOException { + RequestType requestType = (RequestType) request.requestType(); + switch (requestType) { + case INFO: + return channel -> client.execute(MainAction.INSTANCE, new MainRequest(), toActionListener(channel, response -> + new InfoResponse(response.getNodeName(), response.getClusterName().value(), + response.getVersion().major, response.getVersion().minor, response.getVersion().toString(), + response.getBuild().shortHash(), response.getBuild().date()))); + case QUERY_INIT: + return queryInit(client, (QueryInitRequest) request); + case QUERY_PAGE: + return queryPage(client, (QueryPageRequest) request); + case QUERY_CLOSE: + return queryClose(client, (QueryCloseRequest) request); + default: + throw new IllegalArgumentException("Unsupported action [" + requestType + "]"); + } + } + + private Consumer queryInit(Client client, QueryInitRequest request) { + // TODO time zone support for CLI + SqlRequest sqlRequest = new SqlRequest(request.query, null, DateTimeZone.forTimeZone(request.timeZone), request.fetchSize, + TimeValue.timeValueMillis(request.timeout.requestTimeout), + TimeValue.timeValueMillis(request.timeout.pageTimeout), + Cursor.EMPTY); + long start = System.nanoTime(); + return channel -> client.execute(SqlAction.INSTANCE, sqlRequest, toActionListener(channel, response -> { + CliFormatter formatter = new CliFormatter(response); + String data = formatter.formatWithHeader(response); + return new QueryInitResponse(System.nanoTime() - start, + Cursor.encodeToString(Version.CURRENT, CliFormatterCursor.wrap(response.cursor(), formatter)), data); + })); + } + + private Consumer queryPage(Client client, QueryPageRequest request) { + Cursor cursor = Cursor.decodeFromString(request.cursor); + if (cursor instanceof 
CliFormatterCursor == false) { + throw new IllegalArgumentException("Unexpected cursor type: [" + cursor + "]"); + } + CliFormatter formatter = ((CliFormatterCursor)cursor).getCliFormatter(); + SqlRequest sqlRequest = new SqlRequest("", null, SqlRequest.DEFAULT_TIME_ZONE, 0, + TimeValue.timeValueMillis(request.timeout.requestTimeout), + TimeValue.timeValueMillis(request.timeout.pageTimeout), + cursor); + + long start = System.nanoTime(); + return channel -> client.execute(SqlAction.INSTANCE, sqlRequest, toActionListener(channel, response -> { + String data = formatter.formatWithoutHeader(response); + return new QueryPageResponse(System.nanoTime() - start, + Cursor.encodeToString(Version.CURRENT, CliFormatterCursor.wrap(response.cursor(), formatter)), data); + })); + } + + private Consumer queryClose(Client client, QueryCloseRequest request) { + Cursor cursor = Cursor.decodeFromString(request.cursor); + SqlClearCursorAction.Request sqlRequest = new SqlClearCursorAction.Request(cursor); + return channel -> client.execute(SqlClearCursorAction.INSTANCE, sqlRequest, toActionListener(channel, + response -> new QueryCloseResponse(response.isSucceeded()))); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlJdbcAction.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlJdbcAction.java new file mode 100644 index 00000000000..f3fb969a3f6 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlJdbcAction.java @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.Version; +import org.elasticsearch.action.main.MainAction; +import org.elasticsearch.action.main.MainRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoResponse; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnInfo; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnRequest; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnResponse; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableRequest; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableResponse; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryCloseRequest; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryCloseResponse; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitRequest; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitResponse; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageRequest; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageResponse; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlRequest; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse; +import org.elasticsearch.xpack.sql.protocol.shared.Request; +import org.elasticsearch.xpack.sql.session.Cursor; +import 
org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.StringUtils; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.sql.JDBCType; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.regex.Pattern; + +import static java.util.stream.Collectors.toList; +import static org.elasticsearch.common.Strings.hasText; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xpack.sql.util.StringUtils.EMPTY; + +public class RestSqlJdbcAction extends AbstractSqlProtocolRestAction { + private final SqlLicenseChecker sqlLicenseChecker; + private final IndexResolver indexResolver; + + public RestSqlJdbcAction(Settings settings, RestController controller, SqlLicenseChecker sqlLicenseChecker, + IndexResolver indexResolver) { + super(settings, Proto.INSTANCE); + controller.registerHandler(POST, "/_xpack/sql/jdbc", this); + this.sqlLicenseChecker = sqlLicenseChecker; + this.indexResolver = indexResolver; + } + + @Override + public String getName() { + return "xpack_sql_jdbc_action"; + } + + @Override + protected RestChannelConsumer innerPrepareRequest(Request request, Client client) + throws IOException { + Consumer consumer = operation(request, client); + return consumer::accept; + } + + /** + * Actual implementation of the operation + */ + public Consumer operation(Request request, Client client) { + sqlLicenseChecker.checkIfJdbcAllowed(); + RequestType requestType = (RequestType) request.requestType(); + switch (requestType) { + case INFO: + return channel -> client.execute(MainAction.INSTANCE, new MainRequest(), toActionListener(channel, response -> + new InfoResponse(response.getNodeName(), response.getClusterName().value(), + response.getVersion().major, response.getVersion().minor, response.getVersion().toString(), + response.getBuild().shortHash(), response.getBuild().date()))); + case META_TABLE: + 
return metaTable((MetaTableRequest) request); + case META_COLUMN: + return metaColumn((MetaColumnRequest) request); + case QUERY_INIT: + return queryInit(client, (QueryInitRequest) request); + case QUERY_PAGE: + return queryPage(client, (QueryPageRequest) request); + case QUERY_CLOSE: + return queryClose(client, (QueryCloseRequest) request); + default: + throw new IllegalArgumentException("Unsupported action [" + requestType + "]"); + } + } + + private Consumer metaTable(MetaTableRequest request) { + String indexPattern = hasText(request.pattern()) ? StringUtils.jdbcToEsPattern(request.pattern()) : "*"; + return channel -> indexResolver.asList(indexPattern, toActionListener(channel, list -> + new MetaTableResponse(list.stream() + .map(EsIndex::name) + .collect(toList())))); + } + + private Consumer metaColumn(MetaColumnRequest request) { + String indexPattern = Strings.hasText(request.tablePattern()) ? StringUtils.jdbcToEsPattern(request.tablePattern()) : "*"; + Pattern columnMatcher = hasText(request.columnPattern()) ? 
StringUtils.likeRegex(request.columnPattern()) : null; + return channel -> indexResolver.asList(indexPattern, toActionListener(channel, esIndices -> { + List columns = new ArrayList<>(); + for (EsIndex esIndex : esIndices) { + int pos = 0; + for (Map.Entry entry : esIndex.mapping().entrySet()) { + pos++; + String name = entry.getKey(); + if (columnMatcher == null || columnMatcher.matcher(name).matches()) { + DataType type = entry.getValue(); + // the column size it's actually its precision (based on the Javadocs) + columns.add(new MetaColumnInfo(esIndex.name(), name, type.sqlType(), type.precision(), pos)); + } + } + } + return new MetaColumnResponse(columns); + })); + } + + private Consumer queryInit(Client client, QueryInitRequest request) { + SqlRequest sqlRequest = new SqlRequest(request.query, null, DateTimeZone.forTimeZone(request.timeZone), request.fetchSize, + TimeValue.timeValueMillis(request.timeout.requestTimeout), + TimeValue.timeValueMillis(request.timeout.pageTimeout), + Cursor.EMPTY); + long start = System.nanoTime(); + return channel -> client.execute(SqlAction.INSTANCE, sqlRequest, toActionListener(channel, response -> { + List types = new ArrayList<>(response.columns().size()); + List columns = new ArrayList<>(response.columns().size()); + for (SqlResponse.ColumnInfo info : response.columns()) { + types.add(info.jdbcType()); + columns.add(new ColumnInfo(info.name(), info.jdbcType(), EMPTY, EMPTY, EMPTY, EMPTY, info.displaySize())); + } + return new QueryInitResponse(System.nanoTime() - start, + Cursor.encodeToString(Version.CURRENT, JdbcCursor.wrap(response.cursor(), types)), columns, + new SqlResponsePayload(types, response.rows())); + })); + } + + private Consumer queryPage(Client client, QueryPageRequest request) { + Cursor cursor = Cursor.decodeFromString(request.cursor); + if (cursor instanceof JdbcCursor == false) { + throw new IllegalArgumentException("Unexpected cursor type: [" + cursor + "]"); + } + List types = 
((JdbcCursor)cursor).getTypes(); + // NB: the timezone and page size are locked already by the query so pass in defaults (as they are not read anyway) + SqlRequest sqlRequest = new SqlRequest(EMPTY, null, SqlRequest.DEFAULT_TIME_ZONE, 0, + TimeValue.timeValueMillis(request.timeout.requestTimeout), + TimeValue.timeValueMillis(request.timeout.pageTimeout), + cursor); + long start = System.nanoTime(); + return channel -> client.execute(SqlAction.INSTANCE, sqlRequest, toActionListener(channel, + response -> new QueryPageResponse(System.nanoTime() - start, + Cursor.encodeToString(Version.CURRENT, JdbcCursor.wrap(response.cursor(), types)), + new SqlResponsePayload(types, response.rows())))); + } + + private Consumer queryClose(Client client, QueryCloseRequest request) { + Cursor cursor = Cursor.decodeFromString(request.cursor); + SqlClearCursorAction.Request sqlRequest = new SqlClearCursorAction.Request(cursor); + return channel -> client.execute(SqlClearCursorAction.INSTANCE, sqlRequest, toActionListener(channel, + response -> new QueryCloseResponse(response.isSucceeded()))); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java new file mode 100644 index 00000000000..1abca32187f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java @@ -0,0 +1,261 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.StatusToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.sql.execution.PlanExecutor; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursor; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestStatus.NOT_FOUND; +import static org.elasticsearch.rest.RestStatus.OK; + +public 
class SqlClearCursorAction + extends Action { + + public static final SqlClearCursorAction INSTANCE = new SqlClearCursorAction(); + public static final String NAME = "indices:data/read/sql/close_cursor"; + + private SqlClearCursorAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest { + + public static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + public static final ParseField CURSOR = new ParseField("cursor"); + + static { + PARSER.declareString((request, nextPage) -> request.setCursor(Cursor.decodeFromString(nextPage)), CURSOR); + } + + private Cursor cursor; + + public Request() { + + } + + public Request(Cursor cursor) { + this.cursor = cursor; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (getCursor() == null) { + validationException = addValidationError("cursor is required", validationException); + } + return validationException; + } + + public Cursor getCursor() { + return cursor; + } + + public Request setCursor(Cursor cursor) { + this.cursor = cursor; + return this; + } + + @Override + public String getDescription() { + return "SQL Clean cursor [" + getCursor() + "]"; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + cursor = in.readNamedWriteable(Cursor.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeNamedWriteable(cursor); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(cursor, request.cursor); + } + + @Override + public int 
hashCode() { + return Objects.hash(cursor); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + public RequestBuilder(ElasticsearchClient client, SqlClearCursorAction action, Cursor cursor) { + super(client, action, new Request(cursor)); + } + + public RequestBuilder(ElasticsearchClient client, SqlClearCursorAction action) { + super(client, action, new Request()); + } + + public RequestBuilder cursor(Cursor cursor) { + request.setCursor(cursor); + return this; + } + } + + public static class Response extends ActionResponse implements StatusToXContentObject { + + private static final ParseField SUCCEEDED = new ParseField("succeeded"); + + private boolean succeeded; + + public Response(boolean succeeded) { + this.succeeded = succeeded; + } + + Response() { + } + + /** + * @return Whether the attempt to clear a cursor was successful. + */ + public boolean isSucceeded() { + return succeeded; + } + + public Response setSucceeded(boolean succeeded) { + this.succeeded = succeeded; + return this; + } + + @Override + public RestStatus status() { + return succeeded ? 
NOT_FOUND : OK; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(SUCCEEDED.getPreferredName(), succeeded); + builder.endObject(); + return builder; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + succeeded = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(succeeded); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return succeeded == response.succeeded; + } + + @Override + public int hashCode() { + return Objects.hash(succeeded); + } + } + + + public static class TransportAction extends HandledTransportAction { + private final PlanExecutor planExecutor; + private final SqlLicenseChecker sqlLicenseChecker; + + @Inject + public TransportAction(Settings settings, ThreadPool threadPool, + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + PlanExecutor planExecutor, + SqlLicenseChecker sqlLicenseChecker) { + super(settings, NAME, threadPool, transportService, actionFilters, + indexNameExpressionResolver, Request::new); + this.planExecutor = planExecutor; + this.sqlLicenseChecker = sqlLicenseChecker; + } + + @Override + protected void doExecute(Request request, ActionListener listener) { + sqlLicenseChecker.checkIfSqlAllowed(); + Cursor cursor = request.getCursor(); + planExecutor.cleanCursor(Configuration.DEFAULT, cursor, ActionListener.wrap( + success -> listener.onResponse(new Response(success)), listener::onFailure)); + } + } + + public static class RestAction extends BaseRestHandler { + public RestAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(POST, 
"/_xpack/sql/close", this); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + Request sqlRequest; + try (XContentParser parser = request.contentOrSourceParamParser()) { + sqlRequest = Request.PARSER.apply(parser, null); + } + return channel -> client.executeLocally(SqlClearCursorAction.INSTANCE, sqlRequest, new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "sql_translate_action"; + } + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java new file mode 100644 index 00000000000..0b61960d7e2 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +/** + * Determines if different features of SQL should be enabled + */ +public class SqlLicenseChecker { + + private final Runnable checkIfSqlAllowed; + private final Runnable checkIfJdbcAllowed; + + public SqlLicenseChecker(Runnable checkIfSqlAllowed, Runnable checkIfJdbcAllowed) { + this.checkIfSqlAllowed = checkIfSqlAllowed; + this.checkIfJdbcAllowed = checkIfJdbcAllowed; + } + + /** + * Throws an ElasticsearchSecurityException if sql is not allowed + */ + public void checkIfSqlAllowed() { + checkIfSqlAllowed.run(); + } + + /** + * Throws an ElasticsearchSecurityException if jdbc is not allowed + */ + public void checkIfJdbcAllowed() { + checkIfJdbcAllowed.run(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java new file mode 100644 index 00000000000..e8ef6b26b97 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.execution.PlanExecutor; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction; +import org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction; +import org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction; +import org.elasticsearch.xpack.sql.session.Cursor; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; + +public class SqlPlugin implements ActionPlugin { + + public static List getNamedWriteables() { + return Cursor.getNamedWriteables(); + } + + private final boolean enabled; + private final SqlLicenseChecker sqlLicenseChecker; + private IndexResolver indexResolver; + + public SqlPlugin(boolean enabled, SqlLicenseChecker sqlLicenseChecker) { + this.enabled = enabled; + this.sqlLicenseChecker = sqlLicenseChecker; + } + + /** + * Create components used by the sql plugin. 
+ */ + public Collection createComponents(Client client) { + if (false == enabled) { + return emptyList(); + } + indexResolver = new IndexResolver(client); + return Arrays.asList(sqlLicenseChecker, indexResolver, new PlanExecutor(client, indexResolver)); + } + + @Override + public List getRestHandlers(Settings settings, RestController restController, + ClusterSettings clusterSettings, IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, Supplier nodesInCluster) { + + if (false == enabled) { + return emptyList(); + } + + return Arrays.asList(new RestSqlAction(settings, restController), + new SqlTranslateAction.RestAction(settings, restController), + new RestSqlCliAction(settings, restController), + new RestSqlJdbcAction(settings, restController, sqlLicenseChecker, indexResolver), + new SqlClearCursorAction.RestAction(settings, restController)); + } + + @Override + public List> getActions() { + if (false == enabled) { + return emptyList(); + } + + return Arrays.asList(new ActionHandler<>(SqlAction.INSTANCE, TransportSqlAction.class), + new ActionHandler<>(SqlTranslateAction.INSTANCE, SqlTranslateAction.TransportAction.class), + new ActionHandler<>(SqlClearCursorAction.INSTANCE, SqlClearCursorAction.TransportAction.class)); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlResponsePayload.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlResponsePayload.java new file mode 100644 index 00000000000..3dfdeab4eb3 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlResponsePayload.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Payload; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ProtoUtils; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput; +import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput; + +import java.io.IOException; +import java.sql.JDBCType; +import java.util.List; +import java.util.Objects; + +/** + * Implementation {@link Payload} that adapts it to data from + * {@link SqlResponse}. + */ +class SqlResponsePayload implements Payload { + private final List typeLookup; + private final List> rows; + + SqlResponsePayload(List typeLookup, List> rows) { + this.typeLookup = typeLookup; + this.rows = rows; + } + + @Override + public void readFrom(SqlDataInput in) throws IOException { + throw new UnsupportedOperationException("This class can only be serialized"); + } + + @Override + public void writeTo(SqlDataOutput out) throws IOException { + out.writeInt(rows.size()); + + // unroll forEach manually to avoid a Consumer + try/catch for each value... 
+ for (List row : rows) { + for (int c = 0; c < row.size(); c++) { + JDBCType type = typeLookup.get(c); + Object value = row.get(c); + ProtoUtils.writeValue(out, value, type); + } + } + } + + @Override + public int hashCode() { + return Objects.hash(typeLookup, rows); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + SqlResponsePayload other = (SqlResponsePayload) obj; + return Objects.equals(typeLookup, other.typeLookup) + && Objects.equals(rows, other.rows); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateAction.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateAction.java new file mode 100644 index 00000000000..498157f6bcd --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateAction.java @@ -0,0 +1,218 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
package org.elasticsearch.xpack.sql.plugin;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestToXContentListener;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.sql.execution.PlanExecutor;
import org.elasticsearch.xpack.sql.plugin.sql.action.AbstractSqlRequest;
import org.elasticsearch.xpack.sql.session.Configuration;
import org.joda.time.DateTimeZone;

import java.io.IOException;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.xpack.sql.plugin.sql.action.AbstractSqlRequest.DEFAULT_FETCH_SIZE;
import static org.elasticsearch.xpack.sql.plugin.sql.action.AbstractSqlRequest.DEFAULT_PAGE_TIMEOUT;
import static org.elasticsearch.xpack.sql.plugin.sql.action.AbstractSqlRequest.DEFAULT_REQUEST_TIMEOUT;
import static org.elasticsearch.xpack.sql.plugin.sql.action.AbstractSqlRequest.DEFAULT_TIME_ZONE;

/**
 * "Translate" action: compiles a SQL statement into the equivalent
 * {@link SearchSourceBuilder} without executing it. Bundles the request,
 * response, transport handler and REST handler as nested classes.
 */
public class SqlTranslateAction
        extends Action<SqlTranslateAction.Request, SqlTranslateAction.Response, SqlTranslateAction.RequestBuilder> {

    public static final SqlTranslateAction INSTANCE = new SqlTranslateAction();
    // "indices:data/read/*" so the action falls under the read privilege group.
    public static final String NAME = "indices:data/read/sql/translate";

    private SqlTranslateAction() {
        super(NAME);
    }

    @Override
    public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new RequestBuilder(client, this);
    }

    @Override
    public Response newResponse() {
        return new Response();
    }

    /**
     * Request for the translate action. Reuses the shared SQL request fields
     * (query, filter, time zone, fetch size, timeouts) from {@link AbstractSqlRequest}.
     */
    public static class Request extends AbstractSqlRequest {
        public static final ObjectParser<Request, Void> PARSER = objectParser(Request::new);

        public Request() {}

        public Request(String query, QueryBuilder filter, DateTimeZone timeZone, int fetchSize, TimeValue requestTimeout,
                TimeValue pageTimeout) {
            super(query, filter, timeZone, fetchSize, requestTimeout, pageTimeout);
        }

        @Override
        public ActionRequestValidationException validate() {
            // Unlike the query action there is no cursor to resume from, so the
            // query text itself is mandatory.
            ActionRequestValidationException validationException = null;
            if ((false == Strings.hasText(query()))) {
                validationException = addValidationError("query is required", validationException);
            }
            return validationException;
        }

        @Override
        public String getDescription() {
            return "SQL Translate [" + query() + "][" + filter() + "]";
        }
    }

    /** Fluent builder; only query and time zone have dedicated setters here. */
    public static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
        public RequestBuilder(ElasticsearchClient client, SqlTranslateAction action) {
            this(client, action, null, null, DEFAULT_TIME_ZONE, DEFAULT_FETCH_SIZE, DEFAULT_REQUEST_TIMEOUT,
                    DEFAULT_PAGE_TIMEOUT);
        }

        public RequestBuilder(ElasticsearchClient client, SqlTranslateAction action, String query, QueryBuilder filter,
                DateTimeZone timeZone, int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout) {
            super(client, action, new Request(query, filter, timeZone, fetchSize, requestTimeout, pageTimeout));
        }

        public RequestBuilder query(String query) {
            request.query(query);
            return this;
        }

        public RequestBuilder timeZone(DateTimeZone timeZone) {
            request.timeZone(timeZone);
            return this;
        }
    }

    /**
     * Response wrapping the compiled {@link SearchSourceBuilder}. Its XContent
     * body is the search source itself (no outer wrapper object).
     */
    public static class Response extends ActionResponse implements ToXContentObject {
        private SearchSourceBuilder source;

        public Response() {
        }

        public Response(SearchSourceBuilder source) {
            this.source = source;
        }

        public SearchSourceBuilder source() {
            return source;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            // NOTE(review): does not call super.readFrom(in), unlike the request
            // classes — confirm this matches ActionResponse's contract here.
            source = new SearchSourceBuilder(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            source.writeTo(out);
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }

            if (obj == null || getClass() != obj.getClass()) {
                return false;
            }

            Response other = (Response) obj;
            return Objects.equals(source, other.source);
        }

        @Override
        public int hashCode() {
            return Objects.hash(source);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            // Delegate: the response body *is* the translated search source.
            return source.toXContent(builder, params);
        }
    }

    /**
     * Transport-layer handler: checks the license, then asks the
     * {@link PlanExecutor} for the search source of the parsed query.
     */
    public static class TransportAction extends HandledTransportAction<Request, Response> {
        private final PlanExecutor planExecutor;
        private final SqlLicenseChecker sqlLicenseChecker;

        @Inject
        public TransportAction(Settings settings, ThreadPool threadPool,
                TransportService transportService, ActionFilters actionFilters,
                IndexNameExpressionResolver indexNameExpressionResolver,
                PlanExecutor planExecutor,
                SqlLicenseChecker sqlLicenseChecker) {
            super(settings, SqlTranslateAction.NAME, threadPool, transportService, actionFilters,
                    indexNameExpressionResolver, Request::new);

            this.planExecutor = planExecutor;
            this.sqlLicenseChecker = sqlLicenseChecker;
        }

        @Override
        protected void doExecute(Request request, ActionListener<Response> listener) {
            // License gate first; throws if SQL isn't allowed on this cluster.
            sqlLicenseChecker.checkIfSqlAllowed();
            String query = request.query();

            Configuration cfg = new Configuration(request.timeZone(), request.fetchSize(),
                    request.requestTimeout(), request.pageTimeout(), request.filter());

            planExecutor.searchSource(query, cfg, ActionListener.wrap(
                    searchSourceBuilder -> listener.onResponse(new Response(searchSourceBuilder)), listener::onFailure));
        }
    }

    /** REST handler for GET/POST /_xpack/sql/translate; body is parsed by Request.PARSER. */
    public static class RestAction extends BaseRestHandler {
        public RestAction(Settings settings, RestController controller) {
            super(settings);
            controller.registerHandler(GET, "/_xpack/sql/translate", this);
            controller.registerHandler(POST, "/_xpack/sql/translate", this);
        }

        @Override
        protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
            Request sqlRequest;
            try (XContentParser parser = request.contentOrSourceParamParser()) {
                sqlRequest = Request.PARSER.apply(parser, null);
            }
            return channel -> client.executeLocally(SqlTranslateAction.INSTANCE, sqlRequest, new RestToXContentListener<>(channel));
        }

        @Override
        public String getName() {
            return "sql_translate_action";
        }
    }
}
package org.elasticsearch.xpack.sql.plugin.sql.action;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryInitRequest;
import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo;
import org.joda.time.DateTimeZone;

import java.io.IOException;
import java.util.Objects;
import java.util.function.Supplier;

/**
 * Fields shared by the SQL query and translate requests: the query text, an
 * optional Query DSL filter, the client time zone, the fetch size hint and the
 * request/page timeouts. Subclasses add their own fields (e.g. the cursor) and
 * wire them into the parser returned by {@link #objectParser(Supplier)}.
 */
public abstract class AbstractSqlRequest extends ActionRequest implements CompositeIndicesRequest {

    public static final DateTimeZone DEFAULT_TIME_ZONE = DateTimeZone.UTC;
    public static final int DEFAULT_FETCH_SIZE = AbstractQueryInitRequest.DEFAULT_FETCH_SIZE;
    public static final TimeValue DEFAULT_REQUEST_TIMEOUT = TimeValue.timeValueMillis(TimeoutInfo.DEFAULT_REQUEST_TIMEOUT);
    public static final TimeValue DEFAULT_PAGE_TIMEOUT = TimeValue.timeValueMillis(TimeoutInfo.DEFAULT_PAGE_TIMEOUT);

    private String query = "";
    private DateTimeZone timeZone = DEFAULT_TIME_ZONE;
    private int fetchSize = DEFAULT_FETCH_SIZE;
    private TimeValue requestTimeout = DEFAULT_REQUEST_TIMEOUT;
    private TimeValue pageTimeout = DEFAULT_PAGE_TIMEOUT;
    @Nullable
    private QueryBuilder filter = null;

    public AbstractSqlRequest() {
        super();
    }

    public AbstractSqlRequest(String query, QueryBuilder filter, DateTimeZone timeZone, int fetchSize, TimeValue requestTimeout,
            TimeValue pageTimeout) {
        this.query = query;
        // BUG FIX: the filter argument was previously dropped here, so requests
        // built through this constructor silently lost their filter.
        this.filter = filter;
        this.timeZone = timeZone;
        this.fetchSize = fetchSize;
        this.requestTimeout = requestTimeout;
        this.pageTimeout = pageTimeout;
    }

    /**
     * Builds the shared part of the body parser; subclasses declare their own
     * extra fields on the returned parser in a static initializer.
     */
    public static <R extends AbstractSqlRequest> ObjectParser<R, Void> objectParser(Supplier<R> supplier) {
        ObjectParser<R, Void> parser = new ObjectParser<>("sql/query", supplier);

        parser.declareString(AbstractSqlRequest::query, new ParseField("query"));
        parser.declareString((request, zoneId) -> request.timeZone(DateTimeZone.forID(zoneId)), new ParseField("time_zone"));
        parser.declareInt(AbstractSqlRequest::fetchSize, new ParseField("fetch_size"));
        parser.declareString(
                (request, timeout) -> request.requestTimeout(TimeValue.parseTimeValue(timeout, DEFAULT_REQUEST_TIMEOUT, "request_timeout")),
                new ParseField("request_timeout"));
        parser.declareString(
                (request, timeout) -> request.pageTimeout(TimeValue.parseTimeValue(timeout, DEFAULT_PAGE_TIMEOUT, "page_timeout")),
                new ParseField("page_timeout"));
        parser.declareObject(AbstractSqlRequest::filter,
                (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), new ParseField("filter"));

        return parser;
    }

    public String query() {
        return query;
    }

    public AbstractSqlRequest query(String query) {
        if (query == null) {
            throw new IllegalArgumentException("query may not be null.");
        }
        this.query = query;
        return this;
    }

    public DateTimeZone timeZone() {
        return timeZone;
    }

    public AbstractSqlRequest timeZone(DateTimeZone timeZone) {
        // BUG FIX: this previously checked `query == null`, so a null time zone
        // slipped through (and a null query wrongly tripped this message).
        if (timeZone == null) {
            throw new IllegalArgumentException("time zone may not be null.");
        }
        this.timeZone = timeZone;
        return this;
    }

    /**
     * Hint about how many results to fetch at once.
     */
    public int fetchSize() {
        return fetchSize;
    }

    /**
     * Hint about how many results to fetch at once.
     */
    public AbstractSqlRequest fetchSize(int fetchSize) {
        if (fetchSize <= 0) {
            throw new IllegalArgumentException("fetch_size must be more than 0.");
        }
        this.fetchSize = fetchSize;
        return this;
    }

    public TimeValue requestTimeout() {
        return requestTimeout;
    }

    public AbstractSqlRequest requestTimeout(TimeValue requestTimeout) {
        this.requestTimeout = requestTimeout;
        return this;
    }

    public TimeValue pageTimeout() {
        return pageTimeout;
    }

    public AbstractSqlRequest pageTimeout(TimeValue pageTimeout) {
        this.pageTimeout = pageTimeout;
        return this;
    }

    /**
     * An optional Query DSL defined query that can added as a filter on the top of the SQL query
     */
    public AbstractSqlRequest filter(QueryBuilder filter) {
        this.filter = filter;
        return this;
    }

    /**
     * An optional Query DSL defined query that can added as a filter on the top of the SQL query
     */
    public QueryBuilder filter() {
        return filter;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        // Wire order must stay in sync with writeTo below.
        super.readFrom(in);
        query = in.readString();
        timeZone = DateTimeZone.forID(in.readString());
        fetchSize = in.readVInt();
        requestTimeout = new TimeValue(in);
        pageTimeout = new TimeValue(in);
        filter = in.readOptionalNamedWriteable(QueryBuilder.class);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(query);
        out.writeString(timeZone.getID());
        out.writeVInt(fetchSize);
        requestTimeout.writeTo(out);
        pageTimeout.writeTo(out);
        out.writeOptionalNamedWriteable(filter);
    }

    @Override
    public int hashCode() {
        return Objects.hash(query, timeZone, fetchSize, requestTimeout, pageTimeout, filter);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }

        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }

        AbstractSqlRequest other = (AbstractSqlRequest) obj;
        return Objects.equals(query, other.query)
                && Objects.equals(timeZone, other.timeZone)
                && fetchSize == other.fetchSize
                && Objects.equals(requestTimeout, other.requestTimeout)
                && Objects.equals(pageTimeout, other.pageTimeout)
                && Objects.equals(filter, other.filter);
    }
}
package org.elasticsearch.xpack.sql.plugin.sql.action;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.xpack.sql.session.Cursor;
import org.joda.time.DateTimeZone;

import java.io.IOException;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;

/**
 * Request for the SQL query action. On top of the shared fields it carries a
 * {@link Cursor}: {@link Cursor#EMPTY} means "run this query", anything else
 * means "fetch the next page of a previous query".
 */
public class SqlRequest extends AbstractSqlRequest {

    public static final ObjectParser<SqlRequest, Void> PARSER = objectParser(SqlRequest::new);

    public static final ParseField CURSOR = new ParseField("cursor");
    public static final ParseField FILTER = new ParseField("filter");

    static {
        // Extra body fields beyond the ones AbstractSqlRequest declares.
        PARSER.declareString((request, nextPage) -> request.cursor(Cursor.decodeFromString(nextPage)), CURSOR);
        PARSER.declareObject(SqlRequest::filter,
                (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), FILTER);
    }

    private Cursor cursor = Cursor.EMPTY;

    public SqlRequest() {}

    public SqlRequest(String query, QueryBuilder filter, DateTimeZone timeZone, int fetchSize, TimeValue requestTimeout,
            TimeValue pageTimeout, Cursor cursor) {
        super(query, filter, timeZone, fetchSize, requestTimeout, pageTimeout);
        this.cursor = cursor;
    }

    @Override
    public ActionRequestValidationException validate() {
        // Either a query to run or a cursor to resume — at least one is needed.
        ActionRequestValidationException validationException = null;
        boolean hasQuery = Strings.hasText(query());
        if (hasQuery == false && cursor == Cursor.EMPTY) {
            validationException = addValidationError("one of [query] or [cursor] is required", validationException);
        }
        return validationException;
    }

    /**
     * The key that must be sent back to SQL to access the next page of
     * results.
     */
    public Cursor cursor() {
        return cursor;
    }

    /**
     * The key that must be sent back to SQL to access the next page of
     * results.
     */
    public SqlRequest cursor(Cursor cursor) {
        if (cursor == null) {
            throw new IllegalArgumentException("cursor may not be null.");
        }
        this.cursor = cursor;
        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        cursor = in.readNamedWriteable(Cursor.class);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeNamedWriteable(cursor);
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), cursor);
    }

    @Override
    public boolean equals(Object obj) {
        // The superclass already rejects null and class mismatches, so the
        // cast below is safe whenever it returns true.
        if (super.equals(obj) == false) {
            return false;
        }
        SqlRequest other = (SqlRequest) obj;
        return Objects.equals(cursor, other.cursor);
    }

    @Override
    public String getDescription() {
        return "SQL [" + query() + "][" + filter() + "]";
    }
}
+ */ +package org.elasticsearch.xpack.sql.plugin.sql.action; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.joda.time.DateTimeZone; + +import static org.elasticsearch.xpack.sql.plugin.sql.action.AbstractSqlRequest.DEFAULT_FETCH_SIZE; +import static org.elasticsearch.xpack.sql.plugin.sql.action.AbstractSqlRequest.DEFAULT_PAGE_TIMEOUT; +import static org.elasticsearch.xpack.sql.plugin.sql.action.AbstractSqlRequest.DEFAULT_REQUEST_TIMEOUT; +import static org.elasticsearch.xpack.sql.plugin.sql.action.AbstractSqlRequest.DEFAULT_TIME_ZONE; + +public class SqlRequestBuilder extends ActionRequestBuilder { + + public SqlRequestBuilder(ElasticsearchClient client, SqlAction action) { + this(client, action, "", null, DEFAULT_TIME_ZONE, DEFAULT_FETCH_SIZE, DEFAULT_REQUEST_TIMEOUT, DEFAULT_PAGE_TIMEOUT, Cursor.EMPTY); + } + + public SqlRequestBuilder(ElasticsearchClient client, SqlAction action, String query, QueryBuilder filter, DateTimeZone timeZone, + int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout, Cursor nextPageInfo) { + super(client, action, new SqlRequest(query, filter, timeZone, fetchSize, requestTimeout, pageTimeout, nextPageInfo)); + } + + public SqlRequestBuilder query(String query) { + request.query(query); + return this; + } + + public SqlRequestBuilder cursor(Cursor cursor) { + request.cursor(cursor); + return this; + } + + public SqlRequestBuilder filter(QueryBuilder filter) { + request.filter(filter); + return this; + } + + public SqlRequestBuilder timeZone(DateTimeZone timeZone) { + request.timeZone(timeZone); + return this; + } + + public SqlRequestBuilder requestTimeout(TimeValue timeout) { + request.requestTimeout(timeout); + return this; + } + + public SqlRequestBuilder pageTimeout(TimeValue timeout) { + 
package org.elasticsearch.xpack.sql.plugin.sql.action;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.sql.session.Cursor;

import java.io.IOException;
import java.sql.JDBCType;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

import static java.util.Collections.unmodifiableList;

/**
 * One page of SQL results: column metadata (on the first page), row data and
 * the cursor needed to fetch the next page.
 */
public class SqlResponse extends ActionResponse implements ToXContentObject {
    private Cursor cursor;
    private long size;
    private int columnCount;
    // Null when this response is a follow-up page — see the readFrom/writeTo
    // null flag below and TransportSqlAction.createResponse(rowSet, null).
    private List<ColumnInfo> columns;
    // TODO investigate reusing Page here - it probably is much more efficient
    private List<List<Object>> rows;

    public SqlResponse() {
    }

    public SqlResponse(Cursor cursor, long size, int columnCount, @Nullable List<ColumnInfo> columns, List<List<Object>> rows) {
        this.cursor = cursor;
        this.size = size;
        // Size isn't the total number of results like ES uses, it is the size of the current rows list.
        // While not necessary internally, it is useful for REST responses
        this.columnCount = columnCount;
        this.columns = columns;
        this.rows = rows;
    }

    /**
     * The key that must be sent back to SQL to access the next page of
     * results. If equal to {@link Cursor#EMPTY} then there is no next page.
     */
    public Cursor cursor() {
        return cursor;
    }

    public long size() {
        return size;
    }

    public List<ColumnInfo> columns() {
        return columns;
    }

    public List<List<Object>> rows() {
        return rows;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        // Wire order must mirror writeTo exactly:
        // cursor, size, columnCount, [columns?], rows.
        cursor = in.readNamedWriteable(Cursor.class);
        size = in.readVLong();
        columnCount = in.readVInt();
        // Boolean flag distinguishes "no columns" (next page) from an empty list.
        if (in.readBoolean()) {
            List<ColumnInfo> columns = new ArrayList<>(columnCount);
            for (int c = 0; c < columnCount; c++) {
                columns.add(new ColumnInfo(in));
            }
            this.columns = unmodifiableList(columns);
        } else {
            this.columns = null;
        }
        int rowCount = in.readVInt();
        List<List<Object>> rows = new ArrayList<>(rowCount);
        for (int r = 0; r < rowCount; r++) {
            List<Object> row = new ArrayList<>(columnCount);
            for (int c = 0; c < columnCount; c++) {
                row.add(in.readGenericValue());
            }
            rows.add(unmodifiableList(row));
        }
        this.rows = unmodifiableList(rows);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeNamedWriteable(cursor);
        out.writeVLong(size);
        out.writeVInt(columnCount);
        if (columns == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            assert columns.size() == columnCount;
            for (ColumnInfo column : columns) {
                column.writeTo(out);
            }
        }
        out.writeVInt(rows.size());
        for (List<Object> row : rows) {
            assert row.size() == columnCount;
            for (Object value : row) {
                out.writeGenericValue(value);
            }
        }
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        {
            builder.field("size", size());
            // "columns" is only emitted on the first page (columns == null on later pages).
            if (columns != null) {
                builder.startArray("columns"); {
                    for (ColumnInfo column : columns) {
                        column.toXContent(builder, params);
                    }
                }
                builder.endArray();
            }

            builder.startArray("rows");
            for (List<Object> row : rows()) {
                builder.startArray();
                for (Object value : row) {
                    builder.value(value);
                }
                builder.endArray();
            }
            builder.endArray();

            // Only advertise a cursor when there actually is a next page.
            if (cursor != Cursor.EMPTY) {
                builder.field(SqlRequest.CURSOR.getPreferredName(), Cursor.encodeToString(Version.CURRENT, cursor));
            }
        }
        return builder.endObject();
    }

    @Override
    public boolean equals(Object o) {
        // NOTE(review): columnCount is intentionally(?) excluded here and in
        // hashCode — it is derivable from columns/rows, but confirm.
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        SqlResponse that = (SqlResponse) o;
        return size == that.size &&
                Objects.equals(cursor, that.cursor) &&
                Objects.equals(columns, that.columns) &&
                Objects.equals(rows, that.rows);
    }

    @Override
    public int hashCode() {
        return Objects.hash(cursor, size, columns, rows);
    }

    @Override
    public String toString() {
        return Strings.toString(this);
    }

    /**
     * Information about a column.
     */
    public static final class ColumnInfo implements Writeable, ToXContentObject {
        private final String name;
        private final String esType;
        private final JDBCType jdbcType;
        private final int displaySize;

        public ColumnInfo(String name, String esType, JDBCType jdbcType, int displaySize) {
            this.name = name;
            this.esType = esType;
            this.jdbcType = jdbcType;
            this.displaySize = displaySize;
        }

        ColumnInfo(StreamInput in) throws IOException {
            name = in.readString();
            esType = in.readString();
            // JDBCType is serialized by its vendor type number.
            jdbcType = JDBCType.valueOf(in.readVInt());
            displaySize = in.readVInt();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(name);
            out.writeString(esType);
            out.writeVInt(jdbcType.getVendorTypeNumber());
            out.writeVInt(displaySize);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field("name", name);
            builder.field("type", esType);
            // TODO include jdbc_type?
            return builder.endObject();
        }

        /**
         * Name of the column.
         */
        public String name() {
            return name;
        }

        /**
         * The type of the column in Elasticsearch.
         */
        public String esType() {
            return esType;
        }

        /**
         * The type of the column as it would be returned by a JDBC driver.
         */
        public JDBCType jdbcType() {
            return jdbcType;
        }

        /**
         * Used by JDBC
         */
        public int displaySize() {
            return displaySize;
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || obj.getClass() != getClass()) {
                return false;
            }
            ColumnInfo other = (ColumnInfo) obj;
            return name.equals(other.name)
                    && esType.equals(other.esType)
                    && jdbcType.equals(other.jdbcType)
                    && displaySize == other.displaySize;
        }

        @Override
        public int hashCode() {
            return Objects.hash(name, esType, jdbcType, displaySize);
        }

        @Override
        public String toString() {
            return Strings.toString(this);
        }
    }
}
package org.elasticsearch.xpack.sql.plugin.sql.action;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.sql.execution.PlanExecutor;
import org.elasticsearch.xpack.sql.plugin.SqlLicenseChecker;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse.ColumnInfo;
import org.elasticsearch.xpack.sql.session.Configuration;
import org.elasticsearch.xpack.sql.session.Cursor;
import org.elasticsearch.xpack.sql.session.RowSet;
import org.elasticsearch.xpack.sql.session.SchemaRowSet;
import org.elasticsearch.xpack.sql.type.Schema;

import java.util.ArrayList;
import java.util.List;

import static java.util.Collections.unmodifiableList;

/**
 * Transport handler for the SQL query action: license check, then either run a
 * fresh query or continue an existing one from its cursor.
 */
public class TransportSqlAction extends HandledTransportAction<SqlRequest, SqlResponse> {
    private final PlanExecutor planExecutor;
    private final SqlLicenseChecker sqlLicenseChecker;

    @Inject
    public TransportSqlAction(Settings settings, ThreadPool threadPool,
            TransportService transportService, ActionFilters actionFilters,
            IndexNameExpressionResolver indexNameExpressionResolver,
            PlanExecutor planExecutor,
            SqlLicenseChecker sqlLicenseChecker) {
        super(settings, SqlAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SqlRequest::new);

        this.planExecutor = planExecutor;
        this.sqlLicenseChecker = sqlLicenseChecker;
    }

    @Override
    protected void doExecute(SqlRequest request, ActionListener<SqlResponse> listener) {
        sqlLicenseChecker.checkIfSqlAllowed();
        operation(planExecutor, request, listener);
    }

    /**
     * Actual implementation of the action. Statically available to support embedded mode.
     */
    public static void operation(PlanExecutor planExecutor, SqlRequest request, ActionListener<SqlResponse> listener) {
        // The configuration is always created however when dealing with the next page, only the timeouts are relevant
        // the rest having default values (since the query is already created)
        Configuration configuration = new Configuration(request.timeZone(), request.fetchSize(), request.requestTimeout(),
                request.pageTimeout(), request.filter());

        boolean startOfNewQuery = request.cursor() == Cursor.EMPTY;
        if (startOfNewQuery) {
            planExecutor.sql(configuration, request.query(),
                    ActionListener.wrap(rowSet -> listener.onResponse(createResponse(rowSet)), listener::onFailure));
        } else {
            // A follow-up page carries no schema, hence the null columns below.
            planExecutor.nextPage(configuration, request.cursor(),
                    ActionListener.wrap(rowSet -> listener.onResponse(createResponse(rowSet, null)), listener::onFailure));
        }
    }

    /** Builds a first-page response, deriving column metadata from the row set's schema. */
    static SqlResponse createResponse(SchemaRowSet rowSet) {
        List<ColumnInfo> columnInfos = new ArrayList<>(rowSet.columnCount());
        for (Schema.Entry entry : rowSet.schema()) {
            columnInfos.add(new ColumnInfo(entry.name(), entry.type().esName(), entry.type().sqlType(), entry.type().displaySize()));
        }
        return createResponse(rowSet, unmodifiableList(columnInfos));
    }

    /** Builds a response from row data; {@code columns} is null for follow-up pages. */
    static SqlResponse createResponse(RowSet rowSet, List<ColumnInfo> columns) {
        List<List<Object>> rows = new ArrayList<>();
        rowSet.forEachRow(rowView -> {
            List<Object> row = new ArrayList<>(rowView.columnCount());
            rowView.forEachColumn(row::add);
            rows.add(unmodifiableList(row));
        });

        return new SqlResponse(
                rowSet.nextPageCursor(),
                rowSet.size(),
                rowSet.columnCount(),
                columns,
                rows);
    }
}
package org.elasticsearch.xpack.sql.plugin.sql.rest;

import org.elasticsearch.Version;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestResponseListener;
import org.elasticsearch.rest.action.RestToXContentListener;
import org.elasticsearch.xpack.sql.plugin.CliFormatter;
import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlRequest;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse;
import org.elasticsearch.xpack.sql.session.Cursor;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.POST;

/**
 * REST endpoint for SQL queries (GET/POST /_xpack/sql). When the client asks
 * for an XContent format (via the {@code format} parameter or {@code Accept}
 * header) the response is plain XContent; otherwise the results are rendered
 * as a plain-text table by {@link CliFormatter}, with the scroll cursor and
 * timing returned in the {@code Cursor} and {@code Took-nanos} headers.
 */
public class RestSqlAction extends BaseRestHandler {
    public RestSqlAction(Settings settings, RestController controller) {
        super(settings);
        controller.registerHandler(GET, "/_xpack/sql", this);
        controller.registerHandler(POST, "/_xpack/sql", this);
    }

    @Override
    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
        SqlRequest sqlRequest;
        try (XContentParser parser = request.contentOrSourceParamParser()) {
            sqlRequest = SqlRequest.PARSER.apply(parser, null);
        }

        // The "format" parameter takes precedence over the Accept header.
        XContentType xContentType = XContentType.fromMediaTypeOrFormat(request.param("format", request.header("Accept")));
        if (xContentType != null) {
            // The client expects us to send back results in a XContent format
            return channel -> client.executeLocally(SqlAction.INSTANCE, sqlRequest, new RestToXContentListener<>(channel));
        }
        // The client accepts plain text; time the whole round trip for Took-nanos.
        long startNanos = System.nanoTime();

        // CONSISTENCY FIX: use executeLocally like the XContent branch above
        // (and SqlTranslateAction.RestAction); this previously used client.execute.
        return channel -> client.executeLocally(SqlAction.INSTANCE, sqlRequest, new RestResponseListener<SqlResponse>(channel) {
            @Override
            public RestResponse buildResponse(SqlResponse response) throws Exception {
                final String data;
                final CliFormatter formatter;
                if (sqlRequest.cursor() != Cursor.EMPTY) {
                    // Follow-up page: reuse the formatter stored in the incoming cursor
                    // so column widths match the first page, and skip the header row.
                    formatter = ((CliFormatterCursor) sqlRequest.cursor()).getCliFormatter();
                    data = formatter.formatWithoutHeader(response);
                } else {
                    formatter = new CliFormatter(response);
                    data = formatter.formatWithHeader(response);
                }

                // Wrap the outgoing cursor with the formatter so the next page can reuse it.
                return buildTextResponse(CliFormatterCursor.wrap(response.cursor(), formatter), System.nanoTime() - startNanos, data);
            }
        });
    }

    /**
     * Builds the text/plain response, attaching the {@code Cursor} header only
     * when there is a next page, plus the {@code Took-nanos} timing header.
     */
    private RestResponse buildTextResponse(Cursor responseCursor, long tookNanos, String data)
            throws IOException {
        RestResponse restResponse = new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE,
                data.getBytes(StandardCharsets.UTF_8));
        if (responseCursor != Cursor.EMPTY) {
            restResponse.addHeader("Cursor", Cursor.encodeToString(Version.CURRENT, responseCursor));
        }
        restResponse.addHeader("Took-nanos", Long.toString(tookNanos));
        return restResponse;
    }

    @Override
    public String getName() {
        return "xpack_sql_action";
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.querydsl.agg;

import java.util.Locale;
import java.util.Objects;

import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.xpack.sql.util.StringUtils;

import static java.lang.String.format;

/**
 * Base class for the aggregations generated from SQL. Carries the agg id,
 * the field it targets and the ES aggregation property path used to read the
 * aggregated value back from the response.
 */
public abstract class Agg {

    private final String id;
    private final String fieldName;
    private final String propertyPath;
    private final String asParentPath;

    Agg(String id, String propertyPath, String fieldName) {
        this.id = id;
        this.propertyPath = propertyPath;
        // everything before the last dot is the parent path (empty for top-level paths)
        int lastDot = propertyPath.lastIndexOf(".");
        this.asParentPath = lastDot > 0 ? propertyPath.substring(0, lastDot) : StringUtils.EMPTY;
        this.fieldName = fieldName;
    }

    public String id() {
        return id;
    }

    public String propertyPath() {
        return propertyPath;
    }

    public String asParentPath() {
        return asParentPath;
    }

    public String fieldName() {
        return fieldName;
    }

    /** Turns this logical agg into an actual ES aggregation builder. */
    abstract AggregationBuilder toBuilder();

    @Override
    public int hashCode() {
        return Objects.hash(id, propertyPath);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        Agg other = (Agg) obj;
        return Objects.equals(id, other.id)
                && Objects.equals(fieldName, other.fieldName)
                && Objects.equals(propertyPath, other.propertyPath);
    }

    @Override
    public String toString() {
        return format(Locale.ROOT, "%s(%s)#%s", getClass().getSimpleName(), fieldName, propertyPath);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.querydsl.agg;

import java.util.Collection;
import java.util.Map;
import java.util.Objects;

import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate;
import org.elasticsearch.xpack.sql.util.Check;

import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketSelector;

/**
 * HAVING-like filtering over aggregation buckets, implemented as a
 * {@code bucket_selector} pipeline agg driven by a painless script.
 */
public class AggFilter extends PipelineAgg {

    private final ScriptTemplate scriptTemplate;
    // agg name -> bucket path referenced by the script
    // NOTE(review): generic parameters reconstructed; bucketSelector expects Map<String, String> — confirm
    private final Map<String, String> aggPaths;

    public AggFilter(String name, ScriptTemplate scriptTemplate) {
        super(name);
        Check.isTrue(scriptTemplate != null, "a valid script is required");
        this.scriptTemplate = scriptTemplate;
        this.aggPaths = scriptTemplate.aggPaths();
    }

    public Map<String, String> aggPaths() {
        return aggPaths;
    }

    public Collection<String> aggRefs() {
        return scriptTemplate.aggRefs();
    }

    public ScriptTemplate scriptTemplate() {
        return scriptTemplate;
    }

    @Override
    PipelineAggregationBuilder toBuilder() {
        Script script = scriptTemplate.toPainless();
        return bucketSelector(name(), aggPaths, script);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name(), scriptTemplate);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        AggFilter other = (AggFilter) obj;
        return Objects.equals(name(), other.name())
                && Objects.equals(scriptTemplate(), other.scriptTemplate());
    }

    @Override
    public String toString() {
        return scriptTemplate.toString();
    }
}
(AggFilter) obj; + return Objects.equals(name(), other.name()) + && Objects.equals(scriptTemplate(), other.scriptTemplate()); + } + + @Override + public String toString() { + return scriptTemplate.toString(); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggPath.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggPath.java new file mode 100644 index 00000000000..162111ed8fe --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggPath.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.sql.util.StringUtils; + +// utility class around constructing and parsing ES specific aggregation path +// see org.elasticsearch.search.aggregations.support.AggregationPath +public abstract class AggPath { + + private static final char PATH_DELIMITER_CHAR = '>'; + private static final String PATH_DELIMITER = String.valueOf(PATH_DELIMITER_CHAR); + private static final String VALUE_DELIMITER = "."; + private static final String PATH_BUCKET_VALUE = "._key"; + private static final String PATH_BUCKET_COUNT = "._count"; + private static final String PATH_BUCKET_VALUE_FORMATTED = "._key_as_string"; + private static final String PATH_DEFAULT_VALUE = ".value"; + + public static String bucketCount(String aggPath) { + return aggPath + PATH_BUCKET_COUNT; + } + + public static String bucketValue(String aggPath) { + return aggPath + PATH_BUCKET_VALUE; + } + + public static boolean isBucketValueFormatted(String path) { + return path.endsWith(PATH_BUCKET_VALUE_FORMATTED); + } + + public static String bucketValueWithoutFormat(String path) { 
+ return path.substring(0, path.length() - PATH_BUCKET_VALUE_FORMATTED.length()); + } + + public static String metricValue(String aggPath) { + return aggPath + PATH_DEFAULT_VALUE; + } + + public static String metricValue(String aggPath, String valueName) { + // handle aggPath inconsistency (for percentiles and percentileRanks) percentile[99.9] (valid) vs percentile.99.9 (invalid) + return valueName.startsWith("[") ? aggPath + valueName : aggPath + VALUE_DELIMITER + valueName; + } + + public static String path(String parent, String child) { + return (Strings.hasLength(parent) ? parent + PATH_DELIMITER : StringUtils.EMPTY) + child; + } + + // + // The depth indicates the level of an agg excluding the root agg (because all aggs in SQL require a group). However all other bucket aggs are counted. + // Since the path does not indicate the type of agg used, to differentiate between metric properties and bucket properties, the bucket value is considered. + // This is needed since one might refer to the keys or count of a bucket path. + // As the opposite side there are metric aggs which have the same level as their parent (their nesting is an ES implementation detail) + + // Default examples: + // + // agg1 = 0 ; agg1 = default/root group + // agg1>agg2._count = 1 ; ._count indicates agg2 is a bucket agg and thus it counted - agg1 (default group), depth=0, agg2 (bucketed), depth=1 + // agg1>agg2>agg3.value = 1 ; agg3.value indicates a metric bucket thus only agg1 and agg2 are counted -> depth=2. 
In other words, agg3.value has the same depth as agg2._count + // agg1>agg2>agg3._count = 2 ; ._count indicates agg3 is a bucket agg, so count it for depth -> depth = 2 + // agg1>agg2>agg3.sum = 1 ; .sum indicates agg3 is a metric agg, only agg1 and agg2 are bucket and with agg1 being the default group -> depth = 1 + public static int depth(String path) { + int depth = countCharIn(path, PATH_DELIMITER_CHAR); + // a metric value always has .foo while a bucket prop with ._foo + int dot = path.lastIndexOf("."); + if (depth > 0 && dot > 0) { + String prop = path.substring(dot + 1); + if (!prop.startsWith("_")) { + return Math.max(0, depth - 1); + } + } + return depth; + } + + private static int countCharIn(CharSequence sequence, char c) { + int count = 0; + for (int i = 0; i < sequence.length(); i++) { + if (c == sequence.charAt(i)) { + count++; + } + } + return count; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java new file mode 100644 index 00000000000..ccb97f3d692 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java @@ -0,0 +1,229 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.querydsl.agg;

import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Objects;

import static java.util.Collections.emptyList;
import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine;

/**
 * Aggregations associated with a query.
 *
 * This maps the SQL target namely:
 * - aggregations are either at the root (default or nameless group) or belong to a named group
 * - groups are chained in a linear fashion (group1 -> group2 -> group3)
 *
 * Aggregations are of two types - concrete metric aggs or pipeline/filtering aggs.
 *
 * Pipeline notes: as pipeline aggs can refer to aggs across different groups (different tree
 * levels), pay attention before adding them to find the lowest level and make everything
 * relative to that. For example sum > 100 && count < 10 might imply sum referring to level 2
 * while count is on level 3; the pipeline would be added to group/level 2 (referring to sum)
 * while count is made relative to group/level 3, namely group3>count.
 */
public class Aggs {

    // NOTE(review): element types reconstructed from usage — confirm against callers
    private final List<GroupingAgg> groups;
    private final List<LeafAgg> rootAggs;
    private final List<PipelineAgg> rootPipelineAggs;

    public Aggs() {
        this(emptyList(), emptyList(), emptyList());
    }

    public Aggs(List<LeafAgg> rootAggs, List<PipelineAgg> rootPipelineAggs, List<GroupingAgg> groups) {
        this.rootAggs = rootAggs;
        this.rootPipelineAggs = rootPipelineAggs;
        this.groups = groups;
    }

    public List<GroupingAgg> groups() {
        return groups;
    }

    /** Builders for the root metric aggs plus the linearly-chained group aggs. */
    public List<AggregationBuilder> asAggBuilders() {
        List<AggregationBuilder> aggBuilders = new ArrayList<>(rootAggs.size() + 1);

        for (Agg agg : rootAggs) {
            aggBuilders.add(agg.toBuilder());
        }

        if (!groups.isEmpty()) {
            AggregationBuilder root = null, last = null;
            for (GroupingAgg groupBy : groups) {
                AggregationBuilder builder = groupBy.toBuilder();
                if (root == null) {
                    root = builder;
                }
                // chain each group under the previous one
                if (last != null) {
                    last.subAggregation(builder);
                }
                last = builder;
            }
            aggBuilders.add(root);
        }

        return aggBuilders;
    }

    /** Builders for the root pipeline aggs. */
    public List<PipelineAggregationBuilder> asPipelineBuilders() {
        List<PipelineAggregationBuilder> aggBuilders = new ArrayList<>(rootPipelineAggs.size() + 1);
        for (PipelineAgg agg : rootPipelineAggs) {
            aggBuilders.add(agg.toBuilder());
        }
        return aggBuilders;
    }

    public Aggs addGroups(Collection<GroupingAgg> groups) {
        return new Aggs(rootAggs, rootPipelineAggs, combine(this.groups, groups));
    }

    /**
     * Adds a grouping agg (appended to the group chain) or a leaf agg
     * (attached to the last group, or to the root when there are no groups).
     *
     * @throws SqlIllegalArgumentException for any other Agg subtype
     */
    public Aggs addAgg(Agg agg) {
        if (agg instanceof GroupingAgg) {
            GroupingAgg group = (GroupingAgg) agg;
            return with(combine(this.groups, group));
        }
        if (agg instanceof LeafAgg) {
            LeafAgg leaf = (LeafAgg) agg;
            if (groups.isEmpty()) {
                return new Aggs(combine(rootAggs, leaf), rootPipelineAggs, this.groups);
            }
            // copy the groups and replace the tail
            List<GroupingAgg> groups = new ArrayList<>(this.groups);
            GroupingAgg tail = groups.get(groups.size() - 1);
            groups.set(groups.size() - 1, tail.withAggs(combine(tail.subAggs(), leaf)));
            return with(groups);
        }

        throw new SqlIllegalArgumentException("Does not know how to handle type %s", agg);
    }

    /** Adds a pipeline agg to the last group, or to the root when there are no groups. */
    public Aggs addAgg(PipelineAgg pipelineAgg) {
        if (groups.isEmpty()) {
            return new Aggs(rootAggs, combine(rootPipelineAggs, pipelineAgg), this.groups);
        }
        // copy the groups and replace the tail
        List<GroupingAgg> groups = new ArrayList<>(this.groups);
        GroupingAgg tail = groups.get(groups.size() - 1);
        groups.set(groups.size() - 1, tail.withPipelines(combine(tail.subPipelines(), pipelineAgg)));
        return with(groups);
    }

    /**
     * Adds a leaf agg under the group with the given id;
     * a null id means the root level.
     *
     * @throws SqlIllegalArgumentException if no group matches {@code groupId}
     */
    public Aggs addAgg(String groupId, LeafAgg child) {
        if (groupId == null) {
            return new Aggs(combine(rootAggs, child), rootPipelineAggs, groups());
        }

        List<GroupingAgg> groups = new ArrayList<>(this.groups);
        for (int i = 0; i < groups.size(); i++) {
            GroupingAgg group = groups.get(i);
            if (groupId.equals(group.id())) {
                groups.set(i, group.withAggs(combine(group.subAggs(), child)));
                return with(groups);
            }
        }
        throw new SqlIllegalArgumentException("Could not find group named %s", groupId);
    }

    /**
     * Adds a pipeline agg under the group with the given id;
     * a null id means the root level.
     *
     * @throws SqlIllegalArgumentException if no group matches {@code groupId}
     */
    public Aggs addAgg(String groupId, PipelineAgg child) {
        if (groupId == null) {
            return new Aggs(rootAggs, combine(rootPipelineAggs, child), groups());
        }

        List<GroupingAgg> groups = new ArrayList<>(this.groups);
        for (int i = 0; i < groups.size(); i++) {
            GroupingAgg group = groups.get(i);
            if (groupId.equals(group.id())) {
                groups.set(i, group.withPipelines(combine(group.subPipelines(), child)));
                return with(groups);
            }
        }
        throw new SqlIllegalArgumentException("Could not find group named %s", groupId);
    }

    /**
     * Returns the group with the given id, or the group containing the agg with
     * that id; root aggs map to {@link GroupingAgg#DEFAULT_GROUP}. Null if absent.
     */
    public GroupingAgg findGroupForAgg(String groupOrAggId) {
        for (GroupingAgg group : this.groups) {
            if (groupOrAggId.equals(group.id())) {
                return group;
            }
            for (LeafAgg subAgg : group.subAggs()) {
                if (groupOrAggId.equals(subAgg.id())) {
                    return group;
                }
            }
        }

        // is it maybe a root agg
        for (Agg agg : rootAggs) {
            if (groupOrAggId.equals(agg.id())) {
                return GroupingAgg.DEFAULT_GROUP;
            }
        }

        return null;
    }

    /**
     * Replaces the group with the same id as {@code group}.
     *
     * @throws SqlIllegalArgumentException if no such group exists
     */
    public Aggs updateGroup(GroupingAgg group) {
        List<GroupingAgg> groups = new ArrayList<>(this.groups);
        for (int i = 0; i < groups.size(); i++) {
            GroupingAgg g = groups.get(i);
            if (group.id().equals(g.id())) {
                groups.set(i, group);
                return with(groups);
            }
        }
        throw new SqlIllegalArgumentException("Could not find group named %s", group.id());
    }

    public Aggs with(List<GroupingAgg> groups) {
        return new Aggs(rootAggs, rootPipelineAggs, groups);
    }

    @Override
    public int hashCode() {
        return Objects.hash(rootAggs, rootPipelineAggs, groups);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        Aggs other = (Aggs) obj;
        return Objects.equals(rootAggs, other.rootAggs)
                && Objects.equals(rootPipelineAggs, other.rootPipelineAggs)
                && Objects.equals(groups, other.groups);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.querydsl.agg;

import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params;
import org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder;
import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate;
import org.elasticsearch.xpack.sql.type.DataTypes;

import java.util.Locale;

import static java.lang.String.format;

/**
 * Conjunction of two {@link AggFilter}s: the resulting bucket-selector script
 * is the painless {@code &&} of the two child scripts.
 */
public class AndAggFilter extends AggFilter {

    public AndAggFilter(AggFilter left, AggFilter right) {
        this(left.name() + "_&_" + right.name(), left, right);
    }

    public AndAggFilter(String name, AggFilter left, AggFilter right) {
        super(name, and(left.scriptTemplate(), right.scriptTemplate()));
    }

    // combine the two scripts into one boolean template, merging their params
    private static ScriptTemplate and(ScriptTemplate left, ScriptTemplate right) {
        String template = format(Locale.ROOT, "( %s ) && ( %s )", left.template(), right.template());
        Params params = new ParamsBuilder().script(left.params()).script(right.params()).build();
        return new ScriptTemplate(template, params, DataTypes.BOOLEAN);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.querydsl.agg;

import org.elasticsearch.search.aggregations.AggregationBuilder;

import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;

/** SQL AVG(field), mapped onto the ES {@code avg} metric agg. */
public class AvgAgg extends LeafAgg {

    public AvgAgg(String id, String propertyPath, String fieldName) {
        super(id, propertyPath, fieldName);
    }

    @Override
    AggregationBuilder toBuilder() {
        return avg(id()).field(fieldName());
    }
}

// ---------------------------------------------------------------------------
// CardinalityAgg.java
// ---------------------------------------------------------------------------

/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.querydsl.agg;

import org.elasticsearch.search.aggregations.AggregationBuilder;

import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality;

/** SQL COUNT(DISTINCT field), mapped onto the (approximate) ES {@code cardinality} agg. */
public class CardinalityAgg extends LeafAgg {

    public CardinalityAgg(String id, String propertyPath, String fieldName) {
        super(id, propertyPath, fieldName);
    }

    // @Override moved to its own line for consistency with the sibling leaf aggs
    @Override
    AggregationBuilder toBuilder() {
        return cardinality(id()).field(fieldName());
    }
}
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats; + +public class ExtendedStatsAgg extends LeafAgg { + + public ExtendedStatsAgg(String id, String propertyPath, String fieldName) { + super(id, propertyPath, fieldName); + } + + @Override + AggregationBuilder toBuilder() { + return extendedStats(id()).field(fieldName()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnAgg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnAgg.java new file mode 100644 index 00000000000..a442551110d --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnAgg.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.querydsl.agg;

import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.xpack.sql.querydsl.container.Sort;
import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;

/**
 * GROUP BY over a plain column, mapped onto the ES {@code terms} agg.
 * The number of returned buckets is capped at {@link #DEFAULT_LIMIT}.
 */
public class GroupByColumnAgg extends GroupingAgg {

    private static final int DEFAULT_LIMIT = 512;
    // effective bucket limit; always in (0, DEFAULT_LIMIT]
    private final int limit;

    public GroupByColumnAgg(String id, String propertyPath, String fieldName) {
        this(id, propertyPath, fieldName, emptyList(), emptyList(), emptyMap(), -1);
    }

    // NOTE(review): generic parameters reconstructed from the GroupingAgg accessors — confirm
    public GroupByColumnAgg(String id, String propertyPath, String fieldName, List<LeafAgg> subAggs,
            List<PipelineAgg> subPipelines, Map<String, Direction> order, int limit) {
        super(id, propertyPath, fieldName, subAggs, subPipelines, order);
        // negative means "no explicit limit" -> default; otherwise cap at the default
        this.limit = limit < 0 ? DEFAULT_LIMIT : Math.min(limit, DEFAULT_LIMIT);
    }

    public int limit() {
        return limit;
    }

    @Override
    protected AggregationBuilder toGroupingAgg() {
        // TODO: the size should be configurable
        TermsAggregationBuilder terms = termsTarget(terms(id()).size(limit));

        if (!order().isEmpty()) {
            List<BucketOrder> termOrders = new ArrayList<>();
            for (Entry<String, Direction> entry : order().entrySet()) {
                String key = entry.getKey();
                boolean asc = entry.getValue() == Direction.ASC;
                BucketOrder o;
                // special sorting keys referring to the bucket itself
                if (GROUP_KEY_SORTING.equals(key)) {
                    o = BucketOrder.key(asc);
                }
                else if (GROUP_COUNT_SORTING.equals(key)) {
                    o = BucketOrder.count(asc);
                }
                else {
                    o = BucketOrder.aggregation(key, asc);
                }
                termOrders.add(o);
            }
            terms.order(termOrders);
        }

        // skip empty buckets
        terms.minDocCount(1);
        return terms;
    }

    /** Hook for subclasses to redirect the terms agg (e.g. to a script instead of a field). */
    protected TermsAggregationBuilder termsTarget(TermsAggregationBuilder builder) {
        return builder.field(fieldName());
    }

    @Override
    protected GroupByColumnAgg copy(String id, String propertyPath, String fieldName, List<LeafAgg> subAggs,
            List<PipelineAgg> subPipelines, Map<String, Direction> order) {
        return new GroupByColumnAgg(id, propertyPath, fieldName, subAggs, subPipelines, order, limit);
    }

    public GroupByColumnAgg withLimit(int limit) {
        return new GroupByColumnAgg(id(), propertyPath(), fieldName(), subAggs(), subPipelines(), order(), limit);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.querydsl.agg;

import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.xpack.sql.querydsl.container.Sort;
import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction;
import org.joda.time.DateTimeZone;

import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram;

/**
 * GROUP BY over a date column, mapped onto the ES {@code date_histogram} agg
 * with the given interval and time zone.
 */
public class GroupByDateAgg extends GroupingAgg {

    private final String interval;
    private final DateTimeZone timeZone;

    public GroupByDateAgg(String id, String propertyPath, String fieldName, String interval, DateTimeZone timeZone) {
        this(id, propertyPath, fieldName, interval, timeZone, emptyList(), emptyList(), emptyMap());
    }

    // NOTE(review): generic parameters reconstructed from the GroupingAgg accessors — confirm
    public GroupByDateAgg(String id, String propertyPath, String fieldName, String interval, DateTimeZone timeZone,
            List<LeafAgg> subAggs, List<PipelineAgg> subPipelines, Map<String, Direction> order) {
        super(id, propertyPath, fieldName, subAggs, subPipelines, order);
        this.interval = interval;
        this.timeZone = timeZone;
    }

    public String interval() {
        return interval;
    }

    @Override
    protected AggregationBuilder toGroupingAgg() {
        DateHistogramAggregationBuilder dhab = dateHistogram(id())
                .field(fieldName())
                .timeZone(timeZone)
                .dateHistogramInterval(new DateHistogramInterval(interval));
        if (!order().isEmpty()) {
            for (Entry<String, Direction> entry : order().entrySet()) {
                String key = entry.getKey();
                boolean asc = entry.getValue() == Direction.ASC;
                // special sorting keys referring to the bucket itself
                if (GROUP_KEY_SORTING.equals(key)) {
                    dhab.order(BucketOrder.key(asc));
                }
                else if (GROUP_COUNT_SORTING.equals(key)) {
                    dhab.order(BucketOrder.count(asc));
                }
                else {
                    dhab.order(BucketOrder.aggregation(key, asc));
                }
            }
        }

        // skip empty buckets
        dhab.minDocCount(1);

        return dhab;
    }

    @Override
    protected GroupingAgg copy(String id, String propertyPath, String fieldName, List<LeafAgg> subAggs,
            List<PipelineAgg> subPipelines, Map<String, Direction> order) {
        return new GroupByDateAgg(id, propertyPath, fieldName, interval, timeZone, subAggs, subPipelines, order);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.querydsl.agg;

import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate;
import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction;

import java.util.List;
import java.util.Map;

import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;

/**
 * GROUP BY over a computed expression: a terms agg whose target is a painless
 * script instead of a plain field.
 */
public class GroupByScriptAgg extends GroupByColumnAgg {

    private final ScriptTemplate script;

    public GroupByScriptAgg(String id, String propertyPath, String fieldName, ScriptTemplate script) {
        this(id, propertyPath, fieldName, script, emptyList(), emptyList(), emptyMap(), -1);
    }

    // NOTE(review): generic parameters reconstructed from the GroupingAgg accessors — confirm
    public GroupByScriptAgg(String id, String propertyPath, String fieldName, ScriptTemplate script,
            List<LeafAgg> subAggs, List<PipelineAgg> subPipelines, Map<String, Direction> order, int limit) {
        super(id, propertyPath, fieldName, subAggs, subPipelines, order, limit);
        this.script = script;
    }

    public ScriptTemplate script() {
        return script;
    }

    @Override
    protected TermsAggregationBuilder termsTarget(TermsAggregationBuilder builder) {
        builder.script(script.toPainless());
        // help ES pick the right bucket representation for numeric scripts
        if (script.outputType().isNumeric()) {
            builder.valueType(ValueType.NUMBER);
        }
        return builder;
    }

    @Override
    protected GroupByScriptAgg copy(String id, String propertyPath, String fieldName, List<LeafAgg> subAggs,
            List<PipelineAgg> subPipelines, Map<String, Direction> order) {
        return new GroupByScriptAgg(id, propertyPath, fieldName, script, subAggs, subPipelines, order, limit());
    }

    @Override
    public GroupByScriptAgg withLimit(int limit) {
        return new GroupByScriptAgg(id(), propertyPath(), fieldName(), script, subAggs(), subPipelines(), order(), limit);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.querydsl.agg;

import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction;

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import static java.util.Collections.emptyList;
import static org.elasticsearch.xpack.sql.util.StringUtils.EMPTY;

/**
 * A bucketing agg (GROUP BY) holding its sub (leaf) aggs, sub pipeline aggs
 * and the ordering of its buckets. Instances are immutable; all with*/copy
 * methods return new objects.
 */
public abstract class GroupingAgg extends Agg {

    protected static final String GROUP_KEY_SORTING = "_key";
    protected static final String GROUP_COUNT_SORTING = "_count";

    /** Sentinel for the root/nameless group; cannot build aggs nor be copied. */
    public static final GroupingAgg DEFAULT_GROUP = new GroupingAgg(EMPTY, EMPTY, EMPTY, emptyList(), emptyList(), null) {

        @Override
        protected AggregationBuilder toGroupingAgg() {
            throw new SqlIllegalArgumentException("Default group cannot build aggregations");
        }

        @Override
        protected GroupingAgg copy(String id, String propertyPath, String fieldName, List<LeafAgg> subAggs,
                List<PipelineAgg> subPipelines, Map<String, Direction> order) {
            throw new SqlIllegalArgumentException("Default group cannot be cloned");
        }
    };

    // NOTE(review): generic parameters reconstructed from usage — confirm
    private final List<LeafAgg> subAggs;
    private final List<PipelineAgg> subPipelines;
    // leaf agg id (or _key/_count) -> sort direction; insertion order matters
    private final Map<String, Direction> order;

    GroupingAgg(String id, String propertyPath, String fieldName, List<LeafAgg> subAggs,
            List<PipelineAgg> subPipelines, Map<String, Direction> order) {
        super(id, propertyPath, fieldName);
        this.subAggs = subAggs;
        this.subPipelines = subPipelines;
        this.order = order;
    }

    public List<LeafAgg> subAggs() {
        return subAggs;
    }

    public List<PipelineAgg> subPipelines() {
        return subPipelines;
    }

    public Map<String, Direction> order() {
        return order;
    }

    @Override
    AggregationBuilder toBuilder() {
        AggregationBuilder groupingAgg = toGroupingAgg();
        for (LeafAgg leafAgg : subAggs) {
            groupingAgg.subAggregation(leafAgg.toBuilder());
        }
        for (PipelineAgg pipelineAgg : subPipelines) {
            groupingAgg.subAggregation(pipelineAgg.toBuilder());
        }
        return groupingAgg;
    }

    /** Builds the bucketing agg itself (without the sub aggs). */
    protected abstract AggregationBuilder toGroupingAgg();

    public GroupingAgg withAggs(List<LeafAgg> subAggs) {
        return copy(id(), propertyPath(), fieldName(), subAggs, subPipelines, order);
    }

    public GroupingAgg withPipelines(List<PipelineAgg> subPipelines) {
        return copy(id(), propertyPath(), fieldName(), subAggs, subPipelines, order);
    }

    public GroupingAgg with(String id) {
        return Objects.equals(id(), id) ? this : copy(id, propertyPath(), fieldName(), subAggs, subPipelines, order);
    }

    public GroupingAgg with(Direction order) {
        return with(GROUP_KEY_SORTING, order);
    }

    public GroupingAgg with(String leafAggId, Direction order) {
        if (Objects.equals(this.order.get(leafAggId), order)) {
            return this;
        }
        Map<String, Direction> newOrder = new LinkedHashMap<>(this.order);
        newOrder.put(leafAggId, order);
        return copy(id(), propertyPath(), fieldName(), subAggs, subPipelines, newOrder);
    }

    protected abstract GroupingAgg copy(String id, String propertyPath, String fieldName, List<LeafAgg> subAggs,
            List<PipelineAgg> subPipelines, Map<String, Direction> order);

    @Override
    public int hashCode() {
        return Objects.hash(order, id(), propertyPath(), fieldName(), subAggs(), subPipelines());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        // BUGFIX: cast to GroupingAgg, not GroupByColumnAgg - the getClass() check only
        // guarantees obj is the same concrete subclass as `this` (e.g. GroupByDateAgg),
        // so the previous narrower cast could throw ClassCastException
        GroupingAgg other = (GroupingAgg) obj;
        // BUGFIX: compare `order` as well - hashCode includes it, so omitting it here
        // broke the equals/hashCode contract (equal objects with different hashes)
        return Objects.equals(id(), other.id())
                && Objects.equals(propertyPath(), other.propertyPath())
                && Objects.equals(fieldName(), other.fieldName())
                && Objects.equals(order, other.order)
                && Objects.equals(subAggs(), other.subAggs())
                && Objects.equals(subPipelines(), other.subPipelines());
    }

    @Override
    public String toString() {
        return super.toString() + "=" + subAggs() + "|" + subPipelines();
    }
}
other.fieldName()) + && Objects.equals(subAggs(), other.subAggs()) + && Objects.equals(subPipelines(), other.subPipelines()); + } + + @Override + public String toString() { + return super.toString() + "=" + subAggs() + "|" + subPipelines(); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/LeafAgg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/LeafAgg.java new file mode 100644 index 00000000000..6215809695a --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/LeafAgg.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +public abstract class LeafAgg extends Agg { + + LeafAgg(String id, String propertyPath, String fieldName) { + super(id, propertyPath, fieldName); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MatrixStatsAgg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MatrixStatsAgg.java new file mode 100644 index 00000000000..e72ad870e16 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MatrixStatsAgg.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import java.util.List; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.MatrixStatsAggregationBuilders.matrixStats; + +public class MatrixStatsAgg extends LeafAgg { + + private final List fields; + + public MatrixStatsAgg(String id, String propertyPath, List fields) { + super(id, propertyPath, ""); + this.fields = fields; + } + + @Override + AggregationBuilder toBuilder() { + return matrixStats(id()).fields(fields); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MaxAgg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MaxAgg.java new file mode 100644 index 00000000000..14aac7fb0dd --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MaxAgg.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.max; + +public class MaxAgg extends LeafAgg { + + public MaxAgg(String id, String propertyPath, String fieldName) { + super(id, propertyPath, fieldName); + } + + @Override + AggregationBuilder toBuilder() { + return max(id()).field(fieldName()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MinAgg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MinAgg.java new file mode 100644 index 00000000000..afd5622c85f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MinAgg.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.min; + +public class MinAgg extends LeafAgg { + + public MinAgg(String id, String propertyPath, String fieldName) { + super(id, propertyPath, fieldName); + } + + @Override + AggregationBuilder toBuilder() { + return min(id()).field(fieldName()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MultiFieldAgg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MultiFieldAgg.java new file mode 100644 index 00000000000..ccce4573170 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MultiFieldAgg.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +public abstract class MultiFieldAgg { + +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/OrAggFilter.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/OrAggFilter.java new file mode 100644 index 00000000000..5bf2d78a222 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/OrAggFilter.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.Locale; + +import static java.lang.String.format; + +public class OrAggFilter extends AggFilter { + + public OrAggFilter(AggFilter left, AggFilter right) { + this(left.name() + "_|_" + right.name(), left, right); + } + + public OrAggFilter(String name, AggFilter left, AggFilter right) { + super(name, and(left.scriptTemplate(), right.scriptTemplate())); + } + + private static ScriptTemplate and(ScriptTemplate left, ScriptTemplate right) { + String template = format(Locale.ROOT, "( %s ) || ( %s )", left.template(), right.template()); + Params params = new ParamsBuilder().script(left.params()).script(right.params()).build(); + return new ScriptTemplate(template, params, DataTypes.BOOLEAN); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PercentileRanksAgg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PercentileRanksAgg.java new file mode 100644 index 00000000000..a77f87aabf3 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PercentileRanksAgg.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import java.util.List; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.percentileRanks; + +public class PercentileRanksAgg extends LeafAgg { + + private final List values; + + public PercentileRanksAgg(String id, String propertyPath, String fieldName, List values) { + super(id, propertyPath, fieldName); + this.values = values; + } + + public List percents() { + return values; + } + + @Override + AggregationBuilder toBuilder() { + return percentileRanks(id(), values.stream().mapToDouble(Double::doubleValue).toArray()) + .field(fieldName()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PercentilesAgg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PercentilesAgg.java new file mode 100644 index 00000000000..6a3ae123022 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PercentilesAgg.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import java.util.List; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles; + +public class PercentilesAgg extends LeafAgg { + + private final List percents; + + public PercentilesAgg(String id, String propertyPath, String fieldName, List percents) { + super(id, propertyPath, fieldName); + this.percents = percents; + } + + public List percents() { + return percents; + } + + @Override + AggregationBuilder toBuilder() { + // TODO: look at keyed + return percentiles(id()) + .field(fieldName()) + .percentiles(percents.stream().mapToDouble(Double::doubleValue).toArray()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PipelineAgg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PipelineAgg.java new file mode 100644 index 00000000000..3b78eba8304 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PipelineAgg.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; + +public abstract class PipelineAgg { + + private final String name; + + public PipelineAgg(String name) { + this.name = name; + } + + public String name() { + return name; + } + + abstract PipelineAggregationBuilder toBuilder(); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/StatsAgg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/StatsAgg.java new file mode 100644 index 00000000000..7c83f99c01c --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/StatsAgg.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; + +public class StatsAgg extends LeafAgg { + + public StatsAgg(String id, String propertyPath, String fieldName) { + super(id, propertyPath, fieldName); + } + + @Override + AggregationBuilder toBuilder() { + return stats(id()).field(fieldName()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/SumAgg.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/SumAgg.java new file mode 100644 index 00000000000..85916cb462d --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/SumAgg.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; + +public class SumAgg extends LeafAgg { + + public SumAgg(String id, String propertyPath, String fieldName) { + super(id, propertyPath, fieldName); + } + + @Override AggregationBuilder toBuilder() { + return sum(id()).field(fieldName()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/AggRef.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/AggRef.java new file mode 100644 index 00000000000..9b6fed48522 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/AggRef.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.xpack.sql.querydsl.agg.AggPath; + +public class AggRef implements ColumnReference { + private final String path; + private final int depth; + + public AggRef(String path) { + this.path = path; + depth = AggPath.depth(path); + } + + @Override + public String toString() { + return path; + } + + @Override + public int depth() { + return depth; + } + + public String path() { + return path; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/AttributeSort.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/AttributeSort.java new file mode 100644 index 00000000000..0fde127f634 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/AttributeSort.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; + +public class AttributeSort extends Sort { + + private final Attribute attribute; + + public AttributeSort(Attribute attribute, Direction direction) { + super(direction); + this.attribute = attribute; + } + + public Attribute attribute() { + return attribute; + } + + @Override + public int hashCode() { + return Objects.hash(attribute, direction()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + AttributeSort other = (AttributeSort) obj; + return Objects.equals(direction(), other.direction()) + && Objects.equals(attribute, other.attribute); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ColumnReference.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ColumnReference.java new file mode 100644 index 00000000000..17a8c551c7f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ColumnReference.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +/** + * Entity representing a 'column' backed by one or multiple results from ES. + */ +public interface ColumnReference { + + /** + * Indicates the depth of the result. Used for counting the actual size of a + * result by knowing how many nested levels there are. Typically used by + * aggregations. 
+ * + * @return depth of the result + */ + int depth(); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ComputedRef.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ComputedRef.java new file mode 100644 index 00000000000..de152e8ea95 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ComputedRef.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ReferenceInput; + +import java.util.concurrent.atomic.AtomicInteger; + +public class ComputedRef implements ColumnReference { + + private final ProcessorDefinition processor; + private final int depth; + + public ComputedRef(ProcessorDefinition processor) { + this.processor = processor; + + // compute maximum depth + AtomicInteger d = new AtomicInteger(0); + processor.forEachDown(i -> { + ColumnReference ref = i.context(); + if (ref.depth() > d.get()) { + d.set(ref.depth()); + } + }, ReferenceInput.class); + + depth = d.get(); + } + + public ProcessorDefinition processor() { + return processor; + } + + @Override + public int depth() { + return depth; + } + + @Override + public String toString() { + return processor + "(" + processor + ")"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/FieldReference.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/FieldReference.java new file mode 100644 index 00000000000..b3d411749ff --- /dev/null +++ 
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/FieldReference.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +public interface FieldReference extends ColumnReference { + + @Override + default int depth() { + return 0; + } + + /** + * Field name. + * + * @return field name. + */ + String name(); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/NestedFieldRef.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/NestedFieldRef.java new file mode 100644 index 00000000000..c5d5f68a1e4 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/NestedFieldRef.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.container; + +public class NestedFieldRef implements FieldReference { + private final String parent, name; + private final boolean docValue; + + public NestedFieldRef(String parent, String name, boolean useDocValueInsteadOfSource) { + this.parent = parent; + this.name = name; + this.docValue = useDocValueInsteadOfSource; + } + + public String parent() { + return parent; + } + + @Override + public String name() { + return name; + } + + public boolean useDocValue() { + return docValue; + } + + @Override + public String toString() { + return name; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java new file mode 100644 index 00000000000..e95208333b2 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java @@ -0,0 +1,374 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.execution.search.SourceGenerator; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.LiteralAttribute; +import org.elasticsearch.xpack.sql.expression.NestedFieldAttribute; +import org.elasticsearch.xpack.sql.expression.RootFieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.AttributeInput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ReferenceInput; +import org.elasticsearch.xpack.sql.querydsl.agg.AggPath; +import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupingAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.LeafAgg; +import org.elasticsearch.xpack.sql.querydsl.query.AndQuery; +import org.elasticsearch.xpack.sql.querydsl.query.MatchAll; +import org.elasticsearch.xpack.sql.querydsl.query.NestedQuery; +import org.elasticsearch.xpack.sql.querydsl.query.Query; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Collections.emptyList; +import static 
java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine; + +public class QueryContainer { + + private final Aggs aggs; + private final Query query; + + // final output seen by the client (hence the list or ordering) + // gets converted by the Scroller into Extractors for hits or actual results in case of aggregations + private final List columns; + + // aliases (maps an alias to its actual resolved attribute) + private final Map aliases; + + // pseudo functions (like count) - that are 'extracted' from other aggs + private final Map pseudoFunctions; + + // scalar function processors - recorded as functions get folded; + // at scrolling, their inputs (leaves) get updated + private final Map scalarFunctions; + + private final Set sort; + private final int limit; + + // computed + private final boolean aggsOnly; + private final int aggDepth; + + public QueryContainer() { + this(null, null, null, null, null, null, null, -1); + } + + public QueryContainer(Query query, Aggs aggs, List refs, Map aliases, + Map pseudoFunctions, + Map scalarFunctions, + Set sort, int limit) { + this.query = query; + this.aggs = aggs == null ? new Aggs() : aggs; + this.aliases = aliases == null || aliases.isEmpty() ? emptyMap() : aliases; + this.pseudoFunctions = pseudoFunctions == null || pseudoFunctions.isEmpty() ? emptyMap() : pseudoFunctions; + this.scalarFunctions = scalarFunctions == null || scalarFunctions.isEmpty() ? emptyMap() : scalarFunctions; + this.columns = refs == null || refs.isEmpty() ? emptyList() : refs; + this.sort = sort == null || sort.isEmpty() ? 
emptySet() : sort; + this.limit = limit; + + int aggLevel = 0; + boolean onlyAggs = true; + + for (ColumnReference ref : this.columns) { + if (ref.depth() > aggLevel) { + aggLevel = ref.depth(); + } + if (ref instanceof ComputedRef) { + // check field references + if (((ComputedRef) ref).processor().anyMatch(p -> p instanceof ReferenceInput && ((ReferenceInput) p).context() instanceof FieldReference)) { + onlyAggs = false; + } + } + if (ref instanceof FieldReference) { + onlyAggs = false; + } + } + aggsOnly = onlyAggs; + aggDepth = aggLevel; + } + + public Query query() { + return query; + } + + public Aggs aggs() { + return aggs; + } + + public List columns() { + return columns; + } + + public Map aliases() { + return aliases; + } + + public Map pseudoFunctions() { + return pseudoFunctions; + } + + public Set sort() { + return sort; + } + + public int limit() { + return limit; + } + + public boolean isAggsOnly() { + return aggsOnly; + } + + public int aggDepth() { + return aggDepth; + } + + public boolean hasColumns() { + return !columns.isEmpty(); + } + + // + // copy methods + // + + public QueryContainer with(Query q) { + return new QueryContainer(q, aggs, columns, aliases, pseudoFunctions, scalarFunctions, sort, limit); + } + + public QueryContainer with(List r) { + return new QueryContainer(query, aggs, r, aliases, pseudoFunctions, scalarFunctions, sort, limit); + } + + public QueryContainer withAliases(Map a) { + return new QueryContainer(query, aggs, columns, a, pseudoFunctions, scalarFunctions, sort, limit); + } + + public QueryContainer withPseudoFunctions(Map p) { + return new QueryContainer(query, aggs, columns, aliases, p, scalarFunctions, sort, limit); + } + + public QueryContainer with(Aggs a) { + return new QueryContainer(query, a, columns, aliases, pseudoFunctions, scalarFunctions, sort, limit); + } + + public QueryContainer withLimit(int l) { + return l == limit ? 
this : new QueryContainer(query, aggs, columns, aliases, pseudoFunctions, scalarFunctions, sort, l); + } + + public QueryContainer withScalarProcessors(Map procs) { + return new QueryContainer(query, aggs, columns, aliases, pseudoFunctions, procs, sort, limit); + } + + public QueryContainer sort(Sort sortable) { + Set sort = new LinkedHashSet<>(this.sort); + sort.add(sortable); + return new QueryContainer(query, aggs, columns, aliases, pseudoFunctions, scalarFunctions, sort, limit); + } + + private String aliasName(Attribute attr) { + return aliases.getOrDefault(attr, attr).name(); + } + + // + // reference methods + // + private ColumnReference fieldRef(RootFieldAttribute fieldAttr) { + return new SearchHitFieldRef(aliasName(fieldAttr), fieldAttr.dataType().hasDocValues()); + } + + private Tuple nestedFieldRef(NestedFieldAttribute attr) { + // attach the field to the relevant nested query + List nestedRefs = new ArrayList<>(); + + String parent = attr.parentPath(); + String name = aliasName(attr); + + Query q = query; + Map field = singletonMap(name, attr.dataType().hasDocValues()); + if (q == null) { + q = new NestedQuery(attr.location(), parent, field, new MatchAll(attr.location())); + } + else { + AtomicBoolean foundMatch = new AtomicBoolean(false); + q = q.transformDown(n -> { + if (parent.equals(n.path())) { + if (!n.fields().keySet().contains(name)) { + foundMatch.set(true); + Map fields = new LinkedHashMap<>(n.fields()); + fields.putAll(field); + return new NestedQuery(n.location(), n.path(), fields, n.child()); + } + } + return n; + }, NestedQuery.class); + + // no nested query exists for the given field, add one to retrieve its content + if (!foundMatch.get()) { + NestedQuery nested = new NestedQuery(attr.location(), parent, field, new MatchAll(attr.location())); + q = new AndQuery(attr.location(), q, nested); + } + } + + NestedFieldRef nestedFieldRef = new NestedFieldRef(attr.parentPath(), attr.name(), attr.dataType().hasDocValues()); + 
nestedRefs.add(nestedFieldRef); + + return new Tuple<>(new QueryContainer(q, aggs, columns, aliases, pseudoFunctions, scalarFunctions, sort, limit), nestedFieldRef); + } + + // replace function's input with references + private Tuple computingRef(ScalarFunctionAttribute sfa) { + Attribute name = aliases.getOrDefault(sfa, sfa); + ProcessorDefinition proc = scalarFunctions.get(name); + + // check the attribute itself + if (proc == null) { + if (name instanceof ScalarFunctionAttribute) { + sfa = (ScalarFunctionAttribute) name; + } + proc = sfa.processorDef(); + } + AtomicReference containerRef = new AtomicReference<>(this); + + // find the processor inputs (Attributes) and convert them into references + // no need to promote them to the top since the container doesn't have to be aware + proc = proc.transformUp(l -> { + Attribute attr = aliases.getOrDefault(l.context(), l.context()); + Tuple ref = containerRef.get().toReference(attr); + containerRef.set(ref.v1()); + return new ReferenceInput(l.expression(), ref.v2()); + }, AttributeInput.class); + + QueryContainer qContainer = containerRef.get(); + // update proc + Map procs = new LinkedHashMap<>(qContainer.scalarFunctions()); + procs.put(name, proc); + qContainer = qContainer.withScalarProcessors(procs); + return new Tuple<>(qContainer, new ComputedRef(proc)); + } + + public QueryContainer addColumn(Attribute attr) { + Tuple tuple = toReference(attr); + return tuple.v1().addColumn(tuple.v2()); + } + + private Tuple toReference(Attribute attr) { + if (attr instanceof RootFieldAttribute) { + return new Tuple<>(this, fieldRef((RootFieldAttribute) attr)); + } + if (attr instanceof NestedFieldAttribute) { + return nestedFieldRef((NestedFieldAttribute) attr); + } + if (attr instanceof ScalarFunctionAttribute) { + return computingRef((ScalarFunctionAttribute) attr); + } + if (attr instanceof LiteralAttribute) { + return new Tuple<>(this, new ComputedRef(((LiteralAttribute) attr).asProcessorDefinition())); + } + + throw new 
SqlIllegalArgumentException("Unknown output attribute %s", attr); + } + + public QueryContainer addColumn(ColumnReference ref) { + return with(combine(columns, ref)); + } + + public Map scalarFunctions() { + return scalarFunctions; + } + + // + // agg methods + // + public QueryContainer addAggColumn(String aggPath) { + return with(combine(columns, new AggRef(aggPath))); + } + + public QueryContainer addAggCount(GroupingAgg parentGroup, String functionId) { + ColumnReference ref = parentGroup == null ? TotalCountRef.INSTANCE : new AggRef(AggPath.bucketCount(parentGroup.asParentPath())); + Map pseudoFunctions = new LinkedHashMap<>(this.pseudoFunctions); + pseudoFunctions.put(functionId, parentGroup); + return new QueryContainer(query, aggs, combine(columns, ref), aliases, pseudoFunctions, scalarFunctions, sort, limit); + } + + public QueryContainer addAgg(String groupId, LeafAgg agg) { + return addAgg(groupId, agg, agg.propertyPath()); + } + + public QueryContainer addAgg(String groupId, LeafAgg agg, String aggRefPath) { + return new QueryContainer(query, aggs.addAgg(groupId, agg), columns, aliases, pseudoFunctions, scalarFunctions, sort, limit); + } + + public QueryContainer addGroups(Collection values) { + return with(aggs.addGroups(values)); + } + + public QueryContainer updateGroup(GroupingAgg group) { + return with(aggs.updateGroup(group)); + } + + public GroupingAgg findGroupForAgg(String aggId) { + return aggs.findGroupForAgg(aggId); + } + + // + // boiler plate + // + + @Override + public int hashCode() { + return Objects.hash(query, aggs, columns, aliases); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + QueryContainer other = (QueryContainer) obj; + return Objects.equals(query, other.query) + && Objects.equals(aggs, other.aggs) + && Objects.equals(columns, other.columns) + && Objects.equals(aliases, other.aliases) + && 
Objects.equals(sort, other.sort) + && Objects.equals(limit, other.limit); + } + + @Override + public String toString() { + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.humanReadable(true).prettyPrint(); + SourceGenerator.sourceBuilder(this, null, null).toXContent(builder, ToXContent.EMPTY_PARAMS); + return builder.string(); + } catch (IOException e) { + throw new RuntimeException("error rendering", e); + } + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScriptFieldRef.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScriptFieldRef.java new file mode 100644 index 00000000000..fd50528bb6d --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScriptFieldRef.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; + +public class ScriptFieldRef implements FieldReference { + + private final String name; + private final ScriptTemplate script; + + public ScriptFieldRef(String name, ScriptTemplate script) { + this.name = name; + this.script = script; + } + + @Override + public String name() { + return name; + } + + public ScriptTemplate script() { + return script; + } + + @Override + public String toString() { + return "{" + name + "}"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScriptSort.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScriptSort.java new file mode 100644 index 00000000000..62c3750f638 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScriptSort.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; + +public class ScriptSort extends Sort { + + private final ScriptTemplate script; + + public ScriptSort(ScriptTemplate script, Direction direction) { + super(direction); + this.script = script; + } + + public ScriptTemplate script() { + return script; + } + + @Override + public int hashCode() { + return Objects.hash(direction(), script); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ScriptSort other = (ScriptSort) obj; + return Objects.equals(direction(), other.direction()) + && Objects.equals(script, other.script); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java new file mode 100644 index 00000000000..2791d01de2c --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.container; + +public class SearchHitFieldRef implements FieldReference { + private final String name; + private final boolean docValue; + + public SearchHitFieldRef(String name, boolean useDocValueInsteadOfSource) { + this.name = name; + this.docValue = useDocValueInsteadOfSource; + } + + @Override + public String name() { + return name; + } + + public boolean useDocValue() { + return docValue; + } + + @Override + public String toString() { + return name; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/Sort.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/Sort.java new file mode 100644 index 00000000000..f2154a5a79c --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/Sort.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.xpack.sql.expression.Order.OrderDirection; + +public class Sort { + + public enum Direction { + ASC, DESC; + + public static Direction from(OrderDirection dir) { + return dir == null || dir == OrderDirection.ASC ? 
ASC : DESC; + } + } + + private final Direction direction; + + protected Sort(Direction direction) { + this.direction = direction; + } + + public Direction direction() { + return direction; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/TotalCountRef.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/TotalCountRef.java new file mode 100644 index 00000000000..8ce817c4943 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/TotalCountRef.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.xpack.sql.util.StringUtils; + +// somewhat of a fake agg (since it gets optimized and it gets its value from the response) +public final class TotalCountRef extends AggRef { + public static final TotalCountRef INSTANCE = new TotalCountRef(); + + public static final String PATH = "#_count_#"; + + TotalCountRef() { + super(StringUtils.EMPTY); + } + + @Override + public String toString() { + return "TotalCountRef"; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/AndQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/AndQuery.java new file mode 100644 index 00000000000..d288e9af32f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/AndQuery.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Arrays; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; + +public class AndQuery extends Query { + + private final Query left, right; + + public AndQuery(Location location, Query left, Query right) { + super(location, Arrays.asList(left, right)); + this.left = left; + this.right = right; + } + + public Query left() { + return left; + } + + public Query right() { + return right; + } + + @Override + public QueryBuilder asBuilder() { + BoolQueryBuilder boolQuery = boolQuery(); + if (left != null) { + boolQuery.filter(left.asBuilder()); + } + if (right != null) { + boolQuery.filter(right.asBuilder()); + } + return boolQuery; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/ExistsQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/ExistsQuery.java new file mode 100644 index 00000000000..b9087186da4 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/ExistsQuery.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.existsQuery; + +public class ExistsQuery extends LeafQuery { + + private final String name; + + public ExistsQuery(Location location, String name) { + super(location); + this.name = name; + } + + @Override + public QueryBuilder asBuilder() { + return existsQuery(name); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQuery.java new file mode 100644 index 00000000000..990ee2d1c98 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQuery.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.emptyList; + +abstract class LeafQuery extends Query { + + LeafQuery(Location location) { + super(location, emptyList()); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchAll.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchAll.java new file mode 100644 index 00000000000..71085add799 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchAll.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.emptyList; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; + +public class MatchAll extends Query { + + public MatchAll(Location location) { + super(location, emptyList()); + } + + @Override + public QueryBuilder asBuilder() { + return matchAllQuery(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java new file mode 100644 index 00000000000..cf43069d605 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Objects; + +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.FullTextPredicate.Operator; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; + +public class MatchQuery extends LeafQuery { + + private final String name; + private final Object text; + private final Operator operator; + private final MatchQueryPredicate predicate; + + public MatchQuery(Location location, String name, Object text) { + this(location, name, text, Operator.AND, null); + } + + public MatchQuery(Location location, String name, Object text, MatchQueryPredicate predicate) { + this(location, name, text, null, predicate); + } + + private MatchQuery(Location location, String name, Object text, Operator operator, MatchQueryPredicate predicate) { + super(location); + this.name = name; + this.text = text; + this.predicate = predicate; + this.operator = operator != null ? 
operator : predicate.operator(); + } + + @Override + public QueryBuilder asBuilder() { + MatchQueryBuilder queryBuilder = matchQuery(name, text); + if (operator != null) { + queryBuilder.operator(operator.toEs()); + } + if (predicate != null) { + queryBuilder.analyzer(predicate.analyzer()); + } + return queryBuilder; + } + + @Override + public int hashCode() { + return Objects.hash(text, name, operator, predicate); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + MatchQuery other = (MatchQuery) obj; + return Objects.equals(text, other.text) + && Objects.equals(name, other.name) + && Objects.equals(operator, other.operator) + && Objects.equals(predicate, other.predicate); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java new file mode 100644 index 00000000000..ce63dbaf7ab --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Map; +import java.util.Objects; + +import org.elasticsearch.index.query.MultiMatchQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; + +public class MultiMatchQuery extends LeafQuery { + + private final String query; + private final Map fields; + private final MultiMatchQueryPredicate predicate; + + public MultiMatchQuery(Location location, String query, Map fields, MultiMatchQueryPredicate predicate) { + super(location); + this.query = query; + this.fields = fields; + this.predicate = predicate; + } + + @Override + public QueryBuilder asBuilder() { + MultiMatchQueryBuilder queryBuilder = multiMatchQuery(query); + queryBuilder.fields(fields); + queryBuilder.analyzer(predicate.analyzer()); + if (predicate.operator() != null) { + queryBuilder.operator(predicate.operator().toEs()); + } + return queryBuilder; + } + + @Override + public int hashCode() { + return Objects.hash(query, fields, predicate); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + MultiMatchQuery other = (MultiMatchQuery) obj; + return Objects.equals(query, other.query) + && Objects.equals(fields, other.fields) + && Objects.equals(predicate, other.predicate); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQuery.java new file mode 100644 index 00000000000..e2e1d1f0f74 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQuery.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; + +import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.index.query.InnerHitBuilder; +import org.elasticsearch.index.query.NestedQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.fetch.StoredFieldsContext; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; + +import static org.elasticsearch.index.query.QueryBuilders.nestedQuery; + +public class NestedQuery extends UnaryQuery { + + // TODO: make this configurable + private static final int MAX_INNER_HITS = 99; + private static final List NO_STORED_FIELD = singletonList(StoredFieldsContext._NONE_); + + private final String path; + private final Map fields; + + public NestedQuery(Location location, String path, Query child) { + this(location, path, emptyMap(), child); + } + + public NestedQuery(Location location, String path, Map fields, Query child) { + super(location, child); + this.path = path; + this.fields = fields; + } + + public String path() { + return path; + } + + public Map fields() { + return fields; + } + + @Override + public QueryBuilder asBuilder() { + // disable source + + NestedQueryBuilder query = nestedQuery(path, child().asBuilder(), ScoreMode.None); + + if (!fields.isEmpty()) { + InnerHitBuilder ihb = new InnerHitBuilder(); + ihb.setSize(0); + ihb.setSize(MAX_INNER_HITS); + + boolean noSourceNeeded = true; + List sourceFields = new ArrayList<>(); + + for (Entry entry : 
fields.entrySet()) { + if (entry.getValue()) { + ihb.addDocValueField(entry.getKey()); + } + else { + sourceFields.add(entry.getKey()); + noSourceNeeded = false; + } + } + + if (noSourceNeeded) { + ihb.setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE); + ihb.setStoredFieldNames(NO_STORED_FIELD); + } + else { + ihb.setFetchSourceContext(new FetchSourceContext(true, sourceFields.toArray(new String[sourceFields.size()]), null)); + } + + + query.innerHit(ihb); + } + + return query; + } + + @Override + public int hashCode() { + return Objects.hash(path, fields, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + NestedQuery other = (NestedQuery) obj; + return Objects.equals(path, other.path) + && Objects.equals(fields, other.fields) + && Objects.equals(child(), other.child()); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NotQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NotQuery.java new file mode 100644 index 00000000000..fce28a177c6 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NotQuery.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; + +public class NotQuery extends UnaryQuery { + + public NotQuery(Location location, Query child) { + super(location, child); + } + + @Override + public QueryBuilder asBuilder() { + return boolQuery().mustNot(child().asBuilder()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/OrQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/OrQuery.java new file mode 100644 index 00000000000..21e079d76a7 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/OrQuery.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Arrays; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; + +public class OrQuery extends Query { + + private final Query left, right; + + public OrQuery(Location location, Query left, Query right) { + super(location, Arrays.asList(left, right)); + this.left = left; + this.right = right; + } + + public Query left() { + return left; + } + + public Query right() { + return right; + } + + @Override + public QueryBuilder asBuilder() { + BoolQueryBuilder boolQuery = boolQuery(); + if (left != null) { + boolQuery.should(left.asBuilder()); + } + if (right != null) { + boolQuery.should(right.asBuilder()); + } + return boolQuery; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/Query.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/Query.java new file mode 100644 index 00000000000..edaef5f1450 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/Query.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.List; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.Node; + +public abstract class Query extends Node { + + Query(Location location, List children) { + super(location, children); + } + + public abstract QueryBuilder asBuilder(); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java new file mode 100644 index 00000000000..0ea6b5ef2b6 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Map; +import java.util.Objects; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryStringQueryBuilder; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.StringQueryPredicate; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.FullTextPredicate.Operator; +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.singletonMap; + +import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; + +public class QueryStringQuery extends LeafQuery { + + private final String query; + private final Map fields; + private Operator operator; + private StringQueryPredicate predicate; + + // dedicated constructor for QueryTranslator + public QueryStringQuery(Location location, String query, String fieldName) { + this(location, query, singletonMap(fieldName, Float.valueOf(1.0f)), Operator.AND, null); + } + + public QueryStringQuery(Location location, String query, Map fields, StringQueryPredicate predicate) { + this(location, query, fields, null, predicate); + } + + private QueryStringQuery(Location location, String query, Map fields, Operator operator, StringQueryPredicate predicate) { + super(location); + this.query = query; + this.fields = fields; + this.predicate = predicate; + this.operator = operator != null ? 
operator : predicate.defaultOperator(); + } + + @Override + public QueryBuilder asBuilder() { + QueryStringQueryBuilder queryBuilder = queryStringQuery(query); + queryBuilder.fields(fields); + if (operator != null) { + queryBuilder.defaultOperator(operator.toEs()); + } + if (predicate != null) { + queryBuilder.analyzer(predicate.analyzer()); + } + return queryBuilder; + } + + @Override + public int hashCode() { + return Objects.hash(query, fields, operator, predicate); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + QueryStringQuery other = (QueryStringQuery) obj; + return Objects.equals(query, other.query) + && Objects.equals(fields, other.fields) + && Objects.equals(operator, other.operator) + && Objects.equals(predicate, other.predicate); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/RangeQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/RangeQuery.java new file mode 100644 index 00000000000..b824c31808f --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/RangeQuery.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Objects; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; + +public class RangeQuery extends LeafQuery { + + private final String field; + private final Object lower, upper; + private final boolean includeLower, includeUpper; + private final String format; + + public RangeQuery(Location location, String field, Object lower, boolean includeLower, Object upper, boolean includeUpper) { + this(location, field, lower, includeLower, upper, includeUpper, null); + } + + public RangeQuery(Location location, String field, Object lower, boolean includeLower, Object upper, boolean includeUpper, String format) { + super(location); + this.field = field; + this.lower = lower; + this.upper = upper; + this.includeLower = includeLower; + this.includeUpper = includeUpper; + this.format = format; + } + + public String field() { + return field; + } + + public Object lower() { + return lower; + } + + public Object upper() { + return upper; + } + + public boolean includeLower() { + return includeLower; + } + + public boolean includeUpper() { + return includeUpper; + } + + public String format() { + return format; + } + + @Override + public QueryBuilder asBuilder() { + RangeQueryBuilder queryBuilder = rangeQuery(field).from(lower, includeLower).to(upper, includeUpper); + if (Strings.hasText(format)) { + queryBuilder.format(format); + } + + return queryBuilder; + } + + @Override + public int hashCode() { + return Objects.hash(field, lower, upper, includeLower, includeUpper, format); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + RangeQuery other = (RangeQuery) 
obj; + return Objects.equals(field, other.field) && + Objects.equals(includeLower, other.includeLower) && + Objects.equals(includeUpper, other.includeUpper) && + Objects.equals(lower, other.lower) && + Objects.equals(upper, other.upper) && + Objects.equals(format, other.format); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/RegexQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/RegexQuery.java new file mode 100644 index 00000000000..16a7b53d5c1 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/RegexQuery.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Objects; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.regexpQuery; + +public class RegexQuery extends LeafQuery { + + private final String field, regex; + + public RegexQuery(Location location, String field, String regex) { + super(location); + this.field = field; + this.regex = regex; + } + + public String field() { + return field; + } + + public String regex() { + return regex; + } + + @Override + public QueryBuilder asBuilder() { + return regexpQuery(field, regex); + } + + @Override + public int hashCode() { + return Objects.hash(field, regex); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + RegexQuery other = (RegexQuery) obj; + return Objects.equals(field, other.field) + && Objects.equals(regex, other.regex); + } +} \ No newline at end of file 
diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/ScriptQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/ScriptQuery.java new file mode 100644 index 00000000000..d8f1c37f9b5 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/ScriptQuery.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Objects; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.scriptQuery; + +public class ScriptQuery extends LeafQuery { + + private final ScriptTemplate script; + + public ScriptQuery(Location location, ScriptTemplate script) { + super(location); + this.script = script; + } + + public ScriptTemplate script() { + return script; + } + + @Override + public QueryBuilder asBuilder() { + return scriptQuery(script.toPainless()); + } + + @Override + public int hashCode() { + return Objects.hash(script); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ScriptQuery other = (ScriptQuery) obj; + return Objects.equals(script, other.script); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermQuery.java new file mode 100644 index 00000000000..2774624dfe3 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermQuery.java @@ -0,0 +1,58 
@@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Objects; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.termQuery; + +public class TermQuery extends LeafQuery { + + private final String term; + private final Object value; + + public TermQuery(Location location, String term, Object value) { + super(location); + this.term = term; + this.value = value; + } + + public String term() { + return term; + } + + public Object value() { + return value; + } + + @Override + public QueryBuilder asBuilder() { + return termQuery(term, value); + } + + @Override + public int hashCode() { + return Objects.hash(term, value); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + TermQuery other = (TermQuery) obj; + return Objects.equals(term, other.term) + && Objects.equals(value, other.value); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/UnaryQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/UnaryQuery.java new file mode 100644 index 00000000000..0c48f3a6236 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/UnaryQuery.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.singletonList; + +abstract class UnaryQuery extends Query { + + private final Query child; + + UnaryQuery(Location location, Query child) { + super(location, singletonList(child)); + this.child = child; + } + + public Query child() { + return child; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/WildcardQuery.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/WildcardQuery.java new file mode 100644 index 00000000000..95bf6e0b8bc --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/WildcardQuery.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Objects; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; + +public class WildcardQuery extends LeafQuery { + + private final String field, query; + + public WildcardQuery(Location location, String field, String query) { + super(location); + this.field = field; + this.query = query; + } + + public String field() { + return field; + } + + public String query() { + return query; + } + + @Override + public QueryBuilder asBuilder() { + return wildcardQuery(field, query); + } + + @Override + public int hashCode() { + return Objects.hash(field, query); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + WildcardQuery other = (WildcardQuery) obj; + return Objects.equals(field, other.field) + && Objects.equals(query, other.query); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/rule/Rule.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/rule/Rule.java new file mode 100644 index 00000000000..fe519ee7435 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/rule/Rule.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.rule; + +import java.util.function.UnaryOperator; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.util.ReflectionUtils; + +// Rule class that applies a transformation to a tree. 
+// In addition, performs type filtering so that a rule that works only on a type node can be skipped if necessary just based on the class signature. + +// Implementation detail: +// While lambdas are nice, actual node rules tend to be fairly large and are much better suited as full blown classes. +// In addition as they already embed their type information (all rule implementations end up with the generic information on them) +// this can be leveraged to perform the type filtering (as mentioned above). +// As a side note, getting the generic information from lambdas is very hacky and not portable (not without completely messing the JVM SM) + +// apply - indicates how to apply the rule (transformUp/Down, transformExpressions..) on the target +// rule - contains the actual rule logic. +public abstract class Rule> implements UnaryOperator { + + protected Logger log = Loggers.getLogger(getClass()); + + private final String name; + private final Class typeToken = ReflectionUtils.detectSuperTypeForRuleLike(getClass()); + + protected Rule() { + this(null); + } + + protected Rule(String name) { + this.name = (name == null ? ReflectionUtils.ruleLikeNaming(getClass()) : name); + } + + public Class typeToken() { + return typeToken; + } + + public String name() { + return name; + } + + protected abstract T rule(E e); + + @Override + public String toString() { + return name(); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutionException.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutionException.java new file mode 100644 index 00000000000..ab5bf8ec4ce --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutionException.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.rule; + +import org.elasticsearch.xpack.sql.ServerSqlException; + +public class RuleExecutionException extends ServerSqlException { + + public RuleExecutionException(String message, Object... args) { + super(message, args); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java new file mode 100644 index 00000000000..1643cab3a84 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.rule; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.tree.NodeUtils; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +public abstract class RuleExecutor> { + + private final Logger log = Loggers.getLogger(getClass()); + + public static class Limiter { + public static final Limiter DEFAULT = new Limiter(100); + public static final Limiter ONCE = new Limiter(1) { + + @Override + boolean reached(int runs) { + return runs >= 1; + } + }; + + private final int runs; + + public Limiter(int maximumRuns) { + this.runs = maximumRuns; + } + + boolean reached(int runs) { + if (runs >= this.runs) { + throw new RuleExecutionException("Rule execution limit %d reached", runs); + } + return false; + } + } + + public class Batch { + private final String name; + 
private final Rule[] rules; + private final Limiter limit; + + @SafeVarargs + @SuppressWarnings("varargs") + public Batch(String name, Limiter limit, Rule... rules) { + this.name = name; + this.limit = limit; + this.rules = rules; + } + + @SafeVarargs + public Batch(String name, Rule... rules) { + this(name, Limiter.DEFAULT, rules); + } + + public String name() { + return name; + } + } + + private final Iterable batches = batches(); + + protected abstract Iterable.Batch> batches(); + + public class Transformation { + private final TreeType before, after; + private final Rule rule; + private Boolean lazyHasChanged; + + Transformation(TreeType plan, Rule rule) { + this.rule = rule; + before = plan; + after = rule.apply(before); + } + + public boolean hasChanged() { + if (lazyHasChanged == null) { + lazyHasChanged = !before.equals(after); + } + return lazyHasChanged; + } + + public String ruleName() { + return rule.name(); + } + + public TreeType before() { + return before; + } + + public TreeType after() { + return after; + } + } + + public class ExecutionInfo { + + private final TreeType before, after; + private final Map> transformations; + + ExecutionInfo(TreeType before, TreeType after, Map> transformations) { + this.before = before; + this.after = after; + this.transformations = transformations; + } + + public TreeType before() { + return before; + } + + public TreeType after() { + return after; + } + + public Map> transformations() { + return transformations; + } + } + + protected TreeType execute(TreeType plan) { + return executeWithInfo(plan).after; + } + + protected ExecutionInfo executeWithInfo(TreeType plan) { + TreeType currentPlan = plan; + + long totalDuration = 0; + + Map> transformations = new LinkedHashMap<>(); + + for (Batch batch : batches) { + int batchRuns = 0; + List tfs = new ArrayList(); + transformations.put(batch, tfs); + + boolean hasChanged = false; + long batchStart = System.currentTimeMillis(); + long batchDuration = 0; + + // run each 
batch until no change occurs or the limit is reached + do { + hasChanged = false; + batchRuns++; + + for (Rule rule : batch.rules) { + Transformation tf = new Transformation(currentPlan, rule); + tfs.add(tf); + currentPlan = tf.after; + + if (tf.hasChanged()) { + hasChanged = true; + if (log.isTraceEnabled()) { + log.trace("Rule {} applied\n{}", rule, NodeUtils.diffString(tf.before, tf.after)); + } + } + else { + if (log.isTraceEnabled()) { + log.trace("Rule {} applied w/o changes", rule); + } + } + } + batchDuration = System.currentTimeMillis() - batchStart; + } while (hasChanged && !batch.limit.reached(batchRuns)); + + totalDuration += batchDuration; + + if (log.isTraceEnabled()) { + TreeType before = plan; + TreeType after = plan; + if (!tfs.isEmpty()) { + before = tfs.get(0).before; + after = tfs.get(tfs.size() - 1).after; + } + log.trace("Batch {} applied took {}\n{}", batch.name, TimeValue.timeValueMillis(batchDuration), NodeUtils.diffString(before, after)); + } + } + + if (!currentPlan.equals(plan)) { + if (log.isDebugEnabled()) { + log.debug("Tree transformation took {}\n{}", TimeValue.timeValueMillis(totalDuration), NodeUtils.diffString(plan, currentPlan)); + } + } + + return new ExecutionInfo(plan, currentPlan, transformations); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/AbstractRowSet.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/AbstractRowSet.java new file mode 100644 index 00000000000..ed36871d3fc --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/AbstractRowSet.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.xpack.sql.util.Check; + +public abstract class AbstractRowSet implements RowSet { + private boolean terminated = false; + + @Override + public Object column(int index) { + Check.isTrue(index >= 0, "Invalid index %d; needs to be positive", index); + Check.isTrue(index < columnCount(), "Invalid index %d for row of size %d", index, columnCount()); + Check.isTrue(hasCurrentRow(), "RowSet contains no (more) entries; use hasCurrent() to check its status"); + return getColumn(index); + } + + protected abstract Object getColumn(int column); + + @Override + public boolean hasCurrentRow() { + return terminated ? false : doHasCurrent(); + } + + @Override + public boolean advanceRow() { + if (terminated) { + return false; + } + if (!doNext()) { + terminated = true; + return false; + } + return true; + } + + protected abstract boolean doHasCurrent(); + + protected abstract boolean doNext(); + + @Override + public void reset() { + terminated = false; + doReset(); + } + + protected abstract void doReset(); + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + if (hasCurrentRow()) { + for (int column = 0; column < columnCount(); column++) { + if (column > 0) { + sb.append("|"); + } + + String val = String.valueOf(getColumn(column)); + // the value might contain multiple lines (plan execution for example) + // TODO: this needs to be improved to properly scale each row across multiple lines + String[] split = val.split("\\n"); + + for (int splitIndex = 0; splitIndex < split.length; splitIndex++) { + if (splitIndex > 0) { + sb.append("\n"); + } + String string = split[splitIndex]; + sb.append(string); + } + } + sb.append("\n"); + } + + return sb.toString(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java new file mode 100644 index 
00000000000..773b67ce84c --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.plugin.sql.action.AbstractSqlRequest; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryInitRequest; +import org.elasticsearch.xpack.sql.protocol.shared.Nullable; +import org.joda.time.DateTimeZone; + +// Typed object holding properties for a given +public class Configuration { + public static final Configuration DEFAULT = new Configuration(DateTimeZone.UTC, + AbstractQueryInitRequest.DEFAULT_FETCH_SIZE, + AbstractSqlRequest.DEFAULT_REQUEST_TIMEOUT, + AbstractSqlRequest.DEFAULT_PAGE_TIMEOUT, + null); + + private DateTimeZone timeZone; + private int pageSize; + private TimeValue requestTimeout; + private TimeValue pageTimeout; + @Nullable + private QueryBuilder filter; + + public Configuration(DateTimeZone tz, int pageSize, TimeValue requestTimeout, TimeValue pageTimeout, QueryBuilder filter) { + this.timeZone = tz; + this.pageSize = pageSize; + this.requestTimeout = requestTimeout; + this.pageTimeout = pageTimeout; + this.filter = filter; + } + + public DateTimeZone timeZone() { + return timeZone; + } + + public int pageSize() { + return pageSize; + } + + public TimeValue requestTimeout() { + return requestTimeout; + } + + public TimeValue pageTimeout() { + return pageTimeout; + } + + public QueryBuilder filter() { + return filter; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/Cursor.java 
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/Cursor.java new file mode 100644 index 00000000000..5b0bb5a925b --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/Cursor.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.execution.search.ScrollCursor; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractors; +import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor; +import org.elasticsearch.xpack.sql.plugin.JdbcCursor; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; + +import static org.elasticsearch.xpack.sql.plugin.AbstractSqlProtocolRestAction.CURSOR_REGISTRY; + +/** + * Information required to access the next page of response. + */ +public interface Cursor extends NamedWriteable { + Cursor EMPTY = EmptyCursor.INSTANCE; + + /** + * Request the next page of data. 
+ */ + void nextPage(Configuration cfg, Client client, ActionListener listener); + + /** + * Cleans the resources associated with the cursor + */ + void clear(Configuration cfg, Client client, ActionListener listener); + + /** + * The {@link NamedWriteable}s required to deserialize {@link Cursor}s. + */ + static List getNamedWriteables() { + List entries = new ArrayList<>(); + entries.addAll(HitExtractors.getNamedWriteables()); + entries.add(new NamedWriteableRegistry.Entry(Cursor.class, EmptyCursor.NAME, in -> EMPTY)); + entries.add(new NamedWriteableRegistry.Entry(Cursor.class, ScrollCursor.NAME, ScrollCursor::new)); + entries.add(new NamedWriteableRegistry.Entry(Cursor.class, CliFormatterCursor.NAME, CliFormatterCursor::new)); + entries.add(new NamedWriteableRegistry.Entry(Cursor.class, JdbcCursor.NAME, JdbcCursor::new)); + return entries; + } + + /** + * Write a {@linkplain Cursor} to a string for serialization across xcontent. + */ + static String encodeToString(Version version, Cursor info) { + if(info == EMPTY) { + return ""; + } + try (ByteArrayOutputStream os = new ByteArrayOutputStream()) { + try (OutputStream base64 = Base64.getEncoder().wrap(os); + StreamOutput out = new OutputStreamStreamOutput(base64)) { + Version.writeVersion(version, out); + out.writeNamedWriteable(info); + } + return os.toString(StandardCharsets.UTF_8.name()); + } catch (IOException ex) { + throw new RuntimeException("unexpected failure converting next page info to a string", ex); + } + } + + + /** + * Read a {@linkplain Cursor} from a string. 
+ */ + static Cursor decodeFromString(String info) { + if (info.isEmpty()) { + return EMPTY; + } + byte[] bytes = info.getBytes(StandardCharsets.UTF_8); + try (StreamInput delegate = new InputStreamStreamInput(Base64.getDecoder().wrap(new ByteArrayInputStream(bytes))); + StreamInput in = new NamedWriteableAwareStreamInput(delegate, CURSOR_REGISTRY)) { + Version version = Version.readVersion(in); + if (version.after(Version.CURRENT)) { + throw new RuntimeException("Unsupported scroll version " + version); + } + in.setVersion(version); + return in.readNamedWriteable(Cursor.class); + } catch (IOException ex) { + throw new RuntimeException("unexpected failure decoding cursor", ex); + } + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/EmptyCursor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/EmptyCursor.java new file mode 100644 index 00000000000..7e640c446cf --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/EmptyCursor.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +class EmptyCursor implements Cursor { + static final String NAME = "0"; + static final EmptyCursor INSTANCE = new EmptyCursor(); + + private EmptyCursor() { + // Only one instance allowed + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + // Nothing to write + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void nextPage(Configuration cfg, Client client, ActionListener listener) { + throw new IllegalArgumentException("there is no next page"); + } + + @Override + public void clear(Configuration cfg, Client client, ActionListener listener) { + // There is nothing to clean + listener.onResponse(false); + } + + @Override + public boolean equals(Object obj) { + return obj == this; + } + + @Override + public int hashCode() { + return 27; + } + + @Override + public String toString() { + return "no next page"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/EmptyExecutable.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/EmptyExecutable.java new file mode 100644 index 00000000000..09e0d3ac2a3 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/EmptyExecutable.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; + +import java.util.List; +import java.util.Objects; + +public class EmptyExecutable implements Executable { + + private final List output; + + public EmptyExecutable(List output) { + this.output = output; + } + + @Override + public List output() { + return output; + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + listener.onResponse(Rows.empty(output)); + } + + @Override + public int hashCode() { + return output.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + EmptyExecutable other = (EmptyExecutable) obj; + return Objects.equals(output, other.output); + } + + @Override + public String toString() { + return output.toString(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/EmptyRowSetCursor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/EmptyRowSetCursor.java new file mode 100644 index 00000000000..7e943931e91 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/EmptyRowSetCursor.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.xpack.sql.type.Schema; + +class EmptyRowSetCursor extends AbstractRowSet implements SchemaRowSet { + private final Schema schema; + + EmptyRowSetCursor(Schema schema) { + this.schema = schema; + } + + @Override + protected boolean doHasCurrent() { + return false; + } + + @Override + protected boolean doNext() { + return false; + } + + @Override + protected Object getColumn(int index) { + throw new UnsupportedOperationException(); + } + + @Override + protected void doReset() { + // no-op + } + + @Override + public int size() { + return 0; + } + + @Override + public Cursor nextPageCursor() { + return Cursor.EMPTY; + } + + @Override + public Schema schema() { + return schema; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/Executable.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/Executable.java new file mode 100644 index 00000000000..dbc16317029 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/Executable.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.session; + +import java.util.List; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; + +public interface Executable { + + List output(); + + void execute(SqlSession session, ActionListener listener); +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/ListRowSetCursor.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/ListRowSetCursor.java new file mode 100644 index 00000000000..39987d21ac6 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/ListRowSetCursor.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.xpack.sql.type.Schema; + +import java.util.List; + +class ListRowSetCursor extends AbstractRowSet implements SchemaRowSet { + + private final Schema schema; + private final List> list; + private int pos = 0; + + ListRowSetCursor(Schema schema, List> list) { + this.schema = schema; + this.list = list; + } + + @Override + protected boolean doHasCurrent() { + return pos < list.size(); + } + + @Override + protected boolean doNext() { + if (pos + 1 < list.size()) { + pos++; + return true; + } + return false; + } + + @Override + protected Object getColumn(int index) { + return list.get(pos).get(index); + } + + @Override + protected void doReset() { + pos = 0; + } + + @Override + public int size() { + return list.size(); + } + + @Override + public Cursor nextPageCursor() { + return Cursor.EMPTY; + } + + @Override + public Schema schema() { + return schema; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/RowSet.java 
b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/RowSet.java new file mode 100644 index 00000000000..38a22ff73f1 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/RowSet.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import java.util.function.Consumer; + +/** + * A set of rows to be returned at one time and a way + * to get the next set of rows. + */ +public interface RowSet extends RowView { + + boolean hasCurrentRow(); + + boolean advanceRow(); + + // number of rows in this set; while not really necessary (the return of advanceRow works) + int size(); + + void reset(); + + /** + * The key used by PlanExecutor#nextPage to fetch the next page. + */ + Cursor nextPageCursor(); + + default void forEachRow(Consumer action) { + for (boolean hasRows = hasCurrentRow(); hasRows; hasRows = advanceRow()) { + action.accept(this); + } + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/RowView.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/RowView.java new file mode 100644 index 00000000000..c37b018d524 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/RowView.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ * Offers access to the data but it shouldn't be held since it is not a data container. + */ +public interface RowView extends Iterable { + /** + * Number of columns in this row. + */ + int columnCount(); + + Object column(int index); + + default T column(int index, Class type) { + return type.cast(column(index)); + } + + @Override + default void forEach(Consumer action) { + forEachColumn(action::accept); + } + + default void forEachColumn(Consumer action) { + Objects.requireNonNull(action); + int rowSize = columnCount(); + for (int i = 0; i < rowSize; i++) { + action.accept(column(i)); + } + } + + @Override + default Iterator iterator() { + return new Iterator() { + private int pos = 0; + private final int rowSize = columnCount(); + + @Override + public boolean hasNext() { + return pos < rowSize; + } + + @Override + public Object next() { + if (pos >= rowSize) { + throw new NoSuchElementException(); + } + return column(pos++); + } + }; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/Rows.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/Rows.java new file mode 100644 index 00000000000..a5a99645fee --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/Rows.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.session; + +import java.util.ArrayList; +import java.util.List; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.Schema; +import org.elasticsearch.xpack.sql.util.Check; + +import static java.util.Collections.singletonList; + +import static java.util.Arrays.asList; + +public abstract class Rows { + + public static Schema schema(List attr) { + List names = new ArrayList<>(attr.size()); + List types = new ArrayList<>(attr.size()); + + for (Attribute a : attr) { + names.add(a.name()); + types.add(a.dataType()); + } + return new Schema(names, types); + } + + public static Schema schema(String n1, DataType t1) { + return new Schema(singletonList(n1), singletonList(t1)); + } + + public static Schema schema(String n1, DataType t1, String n2, DataType t2) { + return new Schema(asList(n1, n2), asList(t1, t2)); + } + + public static Schema schema(String n1, DataType t1, String n2, DataType t2, String n3, DataType t3) { + return new Schema(asList(n1, n2, n3), asList(t1, t2, t3)); + } + + public static Schema schema(String n1, DataType t1, String n2, DataType t2, String n3, DataType t3, String n4, DataType t4) { + return new Schema(asList(n1, n2, n3, n4), asList(t1, t2, t3, t4)); + } + + public static Schema schema(String n1, DataType t1, String n2, DataType t2, String n3, DataType t3, String n4, DataType t4, String n5, DataType t5) { + return new Schema(asList(n1, n2, n3, n4, n5), asList(t1, t2, t3, t4, t5)); + } + + public static SchemaRowSet of(List attrs, List> values) { + if (values.isEmpty()) { + return empty(attrs); + } + + if (values.size() == 1) { + return singleton(attrs, values.get(0).toArray()); + } + + Schema schema = schema(attrs); + return new ListRowSetCursor(schema, values); + } + + public static SchemaRowSet singleton(List attrs, Object... 
values) { + Check.isTrue(attrs.size() == values.length, "Schema %s and values %s are out of sync", attrs, values); + return new SingletonRowSet(schema(attrs), values); + } + + public static SchemaRowSet empty(Schema schema) { + return new EmptyRowSetCursor(schema); + } + + public static SchemaRowSet empty(List attrs) { + return new EmptyRowSetCursor(schema(attrs)); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/SchemaRowSet.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/SchemaRowSet.java new file mode 100644 index 00000000000..88c89b40543 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/SchemaRowSet.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.xpack.sql.type.Schema; + +/** + * A {@linkplain RowSet} with the {@link Schema} for the results + * attached. + */ +public interface SchemaRowSet extends RowSet { + /** + * Schema for the results. + */ + Schema schema(); + + @Override + default int columnCount() { + return schema().names().size(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java new file mode 100644 index 00000000000..126c978ef4e --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.util.Check; + +import java.util.List; + +public class SingletonExecutable implements Executable { + + private final List output; + private final Object[] values; + + public SingletonExecutable(List output, Object... values) { + Check.isTrue(output.size() == values.length, "Output %s and values %s are out of sync", output, values); + this.output = output; + this.values = values; + } + + @Override + public List output() { + return output; + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + listener.onResponse(Rows.singleton(output, values)); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < values.length; i++) { + sb.append(output.get(i)); + sb.append("="); + sb.append(values[i]); + } + return sb.toString(); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/SingletonRowSet.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/SingletonRowSet.java new file mode 100644 index 00000000000..c8a4e5eddfb --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/SingletonRowSet.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.xpack.sql.type.Schema; + +//TODO is it worth keeping this when we have ListRowSet? 
+class SingletonRowSet extends AbstractRowSet implements SchemaRowSet { + + private final Schema schema; + private final Object[] values; + + SingletonRowSet(Schema schema, Object[] values) { + this.schema = schema; + this.values = values; + } + + @Override + protected boolean doHasCurrent() { + return true; + } + + @Override + protected boolean doNext() { + return false; + } + + @Override + protected Object getColumn(int index) { + return values[index]; + } + + @Override + protected void doReset() { + // no-op + } + + @Override + public int size() { + return 1; + } + + @Override + public Cursor nextPageCursor() { + return Cursor.EMPTY; + } + + @Override + public Schema schema() { + return schema; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java new file mode 100644 index 00000000000..4a4e5644176 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java @@ -0,0 +1,215 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.session;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer;
import org.elasticsearch.xpack.sql.analysis.analyzer.PreAnalyzer;
import org.elasticsearch.xpack.sql.analysis.analyzer.PreAnalyzer.PreAnalysis;
import org.elasticsearch.xpack.sql.analysis.index.GetIndexResult;
import org.elasticsearch.xpack.sql.analysis.index.IndexResolver;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry;
import org.elasticsearch.xpack.sql.optimizer.Optimizer;
import org.elasticsearch.xpack.sql.parser.SqlParser;
import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan;
import org.elasticsearch.xpack.sql.planner.Planner;
import org.elasticsearch.xpack.sql.rule.RuleExecutor;

import java.util.function.Function;

import static org.elasticsearch.action.ActionListener.wrap;

/**
 * A SQL "session": bundles the services needed to turn a SQL string into an
 * executable plan (parser, pre-analyzer, analyzer, optimizer, planner) together
 * with the {@link Client} used to talk to the cluster. The compilation phases
 * share per-request state through the {@link #CURRENT_CONTEXT} thread-local.
 */
public class SqlSession {

    private final Client client;

    private final SqlParser parser;
    private final FunctionRegistry functionRegistry;
    private final IndexResolver indexResolver;
    private final PreAnalyzer preAnalyzer;
    private final Analyzer analyzer;
    private final Optimizer optimizer;
    private final Planner planner;

    // NOTE(review): never reassigned after construction - could be final
    private Configuration settings;

    /**
     * Immutable per-request state shared across the compilation phases
     * (configuration plus the resolved index).
     */
    public static class SessionContext {

        public final Configuration configuration;
        public final GetIndexResult getIndexResult;

        SessionContext(Configuration configuration, GetIndexResult getIndexResult) {
            this.configuration = configuration;
            this.getIndexResult = getIndexResult;
        }
    }

    // thread-local used for sharing settings across the plan compilation
    // Currently this is used during:
    //  1. parsing - to set the TZ in date time functions (if they are used)
    //  2. analysis - to compute the ESIndex and share it across the rules
    // Might be used in
    //  3. Optimization - to pass in configs around plan hints/settings
    //  4. Folding/mapping - same as above

    // TODO investigate removing
    static final ThreadLocal<SessionContext> CURRENT_CONTEXT = new ThreadLocal<SessionContext>() {
        @Override
        public String toString() {
            return "SQL SessionContext";
        }
    };

    /** Copy constructor: the new session shares every service and the settings of {@code other}. */
    public SqlSession(SqlSession other) {
        this(other.settings, other.client, other.functionRegistry, other.parser, other.indexResolver,
                other.preAnalyzer, other.analyzer, other.optimizer,other.planner);
    }

    public SqlSession(Configuration settings, Client client, FunctionRegistry functionRegistry,
            SqlParser parser,
            IndexResolver indexResolver,
            PreAnalyzer preAnalyzer,
            Analyzer analyzer,
            Optimizer optimizer,
            Planner planner) {
        this.client = client;
        this.functionRegistry = functionRegistry;

        this.parser = parser;
        this.indexResolver = indexResolver;
        this.preAnalyzer = preAnalyzer;
        this.analyzer = analyzer;
        this.optimizer = optimizer;
        this.planner = planner;

        this.settings = settings;
    }

    /**
     * Context of the in-flight compilation.
     *
     * @throws SqlIllegalArgumentException if called outside a compilation phase
     *         (i.e. when the thread-local is unset)
     */
    public static SessionContext currentContext() {
        SessionContext ctx = CURRENT_CONTEXT.get();
        if (ctx == null) {
            throw new SqlIllegalArgumentException("Context is accessible only during the session");
        }
        return ctx;
    }

    public FunctionRegistry functionRegistry() {
        return functionRegistry;
    }

    public Client client() {
        return client;
    }

    public Planner planner() {
        return planner;
    }

    public IndexResolver indexResolver() {
        return indexResolver;
    }

    public Analyzer analyzer() {
        return analyzer;
    }

    public Optimizer optimizer() {
        return optimizer;
    }

    /** Parses a standalone expression (not a full statement). */
    public Expression expression(String expression) {
        return parser.createExpression(expression);
    }

    /** Parses a full statement, exposing the settings to the parser via the thread-local. */
    private LogicalPlan doParse(String sql) {
        try {
            // NB: it's okay for the catalog to be empty - parsing only cares about the configuration
            //TODO find a better way to replace the empty catalog
            CURRENT_CONTEXT.set(new SessionContext(settings, GetIndexResult.invalid("_na_")));
            return parser.createStatement(sql);
        } finally {
            // always clear the thread-local so state cannot leak into unrelated requests
            CURRENT_CONTEXT.remove();
        }
    }

    /**
     * Resolves the index referenced by {@code parsed} and runs the analyzer on it,
     * optionally verifying the result. Already-analyzed plans are passed through.
     */
    public void analyzedPlan(LogicalPlan parsed, boolean verify, ActionListener<LogicalPlan> listener) {
        if (parsed.analyzed()) {
            listener.onResponse(parsed);
            return;
        }

        preAnalyze(parsed, c -> {
            try {
                CURRENT_CONTEXT.set(new SessionContext(settings, c));
                return verify ? analyzer.verify(analyzer.analyze(parsed)) : analyzer.analyze(parsed);
            } finally {
                CURRENT_CONTEXT.remove();
            }
        }, listener);
    }

    // NOTE(review): listener generic reconstructed as RuleExecutor<?>.ExecutionInfo
    // (the return type of Analyzer#debugAnalyze) - confirm against the original source.
    public void debugAnalyzedPlan(LogicalPlan parsed, ActionListener<RuleExecutor<?>.ExecutionInfo> listener) {
        if (parsed.analyzed()) {
            // NOTE(review): an already-analyzed plan yields a null ExecutionInfo
            listener.onResponse(null);
            return;
        }

        preAnalyze(parsed, getIndexResult -> {
            try {
                CURRENT_CONTEXT.set(new SessionContext(settings, getIndexResult));
                return analyzer.debugAnalyze(parsed);
            } finally {
                CURRENT_CONTEXT.remove();
            }
        }, listener);
    }

    /**
     * Resolves the (single) index referenced by the plan, then applies {@code action}
     * to the resolution result, forwarding the outcome (or any failure) to {@code listener}.
     */
    private <T> void preAnalyze(LogicalPlan parsed, Function<GetIndexResult, T> action, ActionListener<T> listener) {
        PreAnalysis preAnalysis = preAnalyzer.preAnalyze(parsed);
        //TODO why do we have a list if we only support one single element? Seems like it's the wrong data structure?
        if (preAnalysis.indices.size() > 1) {
            listener.onFailure(new SqlIllegalArgumentException("Queries with multiple indices are not supported"));
        } else if (preAnalysis.indices.size() == 1) {
            indexResolver.asIndex(preAnalysis.indices.get(0),
                    wrap(indexResult -> listener.onResponse(action.apply(indexResult)), listener::onFailure));
        } else {
            try {
                //TODO when can this ever happen? shouldn't it be an exception instead?
                listener.onResponse(action.apply(GetIndexResult.invalid("_na_")));
            } catch (Exception ex) {
                listener.onFailure(ex);
            }
        }
    }

    /** Analyzes (with verification) and then optimizes the plan. */
    public void optimizedPlan(LogicalPlan verified, ActionListener<LogicalPlan> listener) {
        analyzedPlan(verified, true, wrap(v -> listener.onResponse(optimizer.optimize(v)), listener::onFailure));
    }

    /** Optimizes the plan and maps it to a physical (executable) plan. */
    public void physicalPlan(LogicalPlan optimized, boolean verify, ActionListener<PhysicalPlan> listener) {
        optimizedPlan(optimized, wrap(o -> listener.onResponse(planner.plan(o, verify)), listener::onFailure));
    }

    /** Compiles and immediately executes {@code sql}, delivering the results to {@code listener}. */
    public void sql(String sql, ActionListener<SchemaRowSet> listener) {
        sqlExecutable(sql, wrap(e -> e.execute(this, listener), listener::onFailure));
    }

    /** Compiles {@code sql} down to an executable physical plan without running it. */
    public void sqlExecutable(String sql, ActionListener<PhysicalPlan> listener) {
        try {
            physicalPlan(doParse(sql), true, listener);
        } catch (Exception ex) {
            listener.onFailure(ex);
        }
    }

    public Configuration settings() {
        return settings;
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

/**
 * Source position (line and column) of a node in the original SQL text.
 * Immutable value type.
 */
public final class Location {
    private final int line;
    private final int charPositionInLine;

    /**
     * Placeholder for nodes without a known source position.
     * Reports line -1 and (because of the +1 column adjustment) column -1.
     */
    public static final Location EMPTY = new Location(-1, -2);

    /**
     * @param line 1-based line number
     * @param charPositionInLine 0-based character offset within the line
     */
    public Location(int line, int charPositionInLine) {
        this.line = line;
        this.charPositionInLine = charPositionInLine;
    }

    public int getLineNumber() {
        return line;
    }

    /** 1-based column, derived from the 0-based character offset. */
    public int getColumnNumber() {
        return charPositionInLine + 1;
    }

    // FIX: being a value type used as tree metadata, Location previously fell back to
    // identity equality; equals/hashCode are added so equal positions compare equal.
    @Override
    public int hashCode() {
        return 31 * line + charPositionInLine;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        Location other = (Location) obj;
        return line == other.line && charPositionInLine == other.charPositionInLine;
    }

    @Override
    public String toString() {
        return "@" + getLineNumber() + ":" + getColumnNumber();
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.tree;

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;

import static java.util.Collections.emptyList;

/**
 * Immutable tree structure.
 * The traversal is done depth-first, pre-order (first the node then its children), that is seeks up and then goes down.
 * Alternative method for post-order (children first, then node) is also offered, that is seeks down and then goes up.
 *
 * Allows transformation which returns the same tree (if no change has been performed) or a new tree otherwise.
 *
 * While it tries as much as possible to use functional Java, due to lack of parallelism,
 * the use of streams and iterators is not really useful and brings too much baggage which
 * might be used incorrectly.
 *
 * @param <T> node type (self-referential so traversals are typed to the concrete subclass)
 */
public abstract class Node<T extends Node<T>> {

    private final Location location;
    private final List<T> children;

    public Node(List<T> children) {
        this(Location.EMPTY, children);
    }

    public Node(Location location, List<T> children) {
        // null locations are normalized to EMPTY so location() never returns null
        this.location = (location != null ? location : Location.EMPTY);
        this.children = children;
    }

    public Location location() {
        return location;
    }

    public List<T> children() {
        return children;
    }

    /** Pre-order traversal: visits this node, then recursively each child. */
    @SuppressWarnings("unchecked")
    public void forEachDown(Consumer<? super T> action) {
        action.accept((T) this);
        children().forEach(c -> c.forEachDown(action));
    }

    /** Pre-order traversal restricted to nodes that are instances of {@code typeToken}. */
    @SuppressWarnings("unchecked")
    public <E extends T> void forEachDown(Consumer<? super E> action, final Class<E> typeToken) {
        forEachDown(t -> {
            if (typeToken.isInstance(t)) {
                action.accept((E) t);
            }
        });
    }

    /** Post-order traversal: visits each child recursively, then this node. */
    @SuppressWarnings("unchecked")
    public void forEachUp(Consumer<? super T> action) {
        children().forEach(c -> c.forEachUp(action));
        action.accept((T) this);
    }

    /** Post-order traversal restricted to nodes that are instances of {@code typeToken}. */
    @SuppressWarnings("unchecked")
    public <E extends T> void forEachUp(Consumer<? super E> action, final Class<E> typeToken) {
        forEachUp(t -> {
            if (typeToken.isInstance(t)) {
                action.accept((E) t);
            }
        });
    }

    /** Applies {@code rule} to matching properties of this node only (no tree traversal). */
    public <E> void forEachPropertiesOnly(Consumer<? super E> rule, Class<E> typeToken) {
        forEachProperty(rule, typeToken);
    }

    /** Applies {@code rule} to matching properties of every node, pre-order. */
    public <E> void forEachPropertiesDown(Consumer<? super E> rule, Class<E> typeToken) {
        forEachDown(e -> e.forEachProperty(rule, typeToken));
    }

    /** Applies {@code rule} to matching properties of every node, post-order. */
    public <E> void forEachPropertiesUp(Consumer<? super E> rule, Class<E> typeToken) {
        forEachUp(e -> e.forEachProperty(rule, typeToken));
    }

    // properties are discovered reflectively through NodeUtils (constructor args + getters)
    @SuppressWarnings("unchecked")
    protected <E> void forEachProperty(Consumer<? super E> rule, Class<E> typeToken) {
        for (Object prop : NodeUtils.properties(this)) {
            // skip children (only properties are interesting)
            if (prop != children && !children.contains(prop) && typeToken.isInstance(prop)) {
                rule.accept((E) prop);
            }
        }
    }

    /** Returns true if this node or any descendant satisfies {@code predicate}. */
    @SuppressWarnings("unchecked")
    public boolean anyMatch(Predicate<? super T> predicate) {
        boolean result = predicate.test((T) this);
        if (!result) {
            for (T child : children) {
                if (child.anyMatch(predicate)) {
                    return true;
                }
            }
        }
        return result;
    }

    /** Collects (pre-order) every node satisfying {@code predicate}. */
    public List<T> collect(Predicate<? super T> predicate) {
        List<T> l = new ArrayList<>();
        forEachDown(n -> {
            if (predicate.test(n)) {
                l.add(n);
            }
        });
        return l.isEmpty() ? emptyList() : l;
    }

    /** Collects all leaves (nodes without children). */
    public List<T> collectLeaves() {
        return collect(n -> n.children().isEmpty());
    }

    // parse the list in pre-order and on match, skip the child/branch and move on to the next child/branch
    public List<T> collectFirstChildren(Predicate<? super T> predicate) {
        List<T> matches = new ArrayList<>();
        doCollectFirst(predicate, matches);
        return matches;
    }

    @SuppressWarnings("unchecked")
    protected void doCollectFirst(Predicate<? super T> predicate, List<T> matches) {
        T t = (T) this;
        if (predicate.test(t)) {
            matches.add(t);
        } else {
            for (T child : children()) {
                child.doCollectFirst(predicate, matches);
            }
        }
    }

    // TODO: maybe add a flatMap (need to double check the Stream bit)

    //
    // Transform methods
    //

    //
    // transform the node itself and its children
    //

    /**
     * Pre-order transformation: applies {@code rule} to this node first, then to
     * the (possibly replaced) node's children. Returns the same instance when
     * nothing changed.
     */
    @SuppressWarnings("unchecked")
    public T transformDown(Function<? super T, ? extends T> rule) {
        T root = rule.apply((T) this);
        // keep the original instance when the rule produced an equal node
        Node<T> node = this.equals(root) ? this : root;

        return node.transformChildren(child -> child.transformDown(rule));
    }

    @SuppressWarnings("unchecked")
    public <E extends T> T transformDown(Function<E, ? extends T> rule, final Class<E> typeToken) {
        // type filtering function
        return transformDown((t) -> (typeToken.isInstance(t) ? rule.apply((E) t) : t));
    }

    /**
     * Post-order transformation: transforms children first, then applies
     * {@code rule} to the resulting node.
     */
    @SuppressWarnings("unchecked")
    public T transformUp(Function<? super T, ? extends T> rule) {
        T transformed = transformChildren(child -> child.transformUp(rule));
        T node = this.equals(transformed) ? (T) this : transformed;
        return rule.apply(node);
    }

    @SuppressWarnings("unchecked")
    public <E extends T> T transformUp(Function<E, ? extends T> rule, final Class<E> typeToken) {
        // type filtering function
        return transformUp((t) -> (typeToken.isInstance(t) ? rule.apply((E) t) : t));
    }

    // applies the traversal to each child; a new node is built only if at least one child changed
    @SuppressWarnings("unchecked")
    protected T transformChildren(Function<T, ? extends T> traversalOperation) {
        boolean childrenChanged = false;

        // stream() could be used but the code is just as complicated without any advantages
        // further more, it would include bring in all the associated stream/collector object creation even though in
        // most cases the immediate tree would be quite small (0,1,2 elements)
        List<T> transformedChildren = new ArrayList<>(children().size());

        for (T child : children) {
            T next = traversalOperation.apply(child);
            if (!child.equals(next)) {
                childrenChanged = true;
            }
            else {
                // use the initial value
                next = child;
            }
            transformedChildren.add(next);
        }

        return (childrenChanged ? replaceChildren(transformedChildren) : (T) this);
    }

    /** Builds a copy of this node with the given children, via reflective re-construction. */
    public T replaceChildren(List<T> newChildren) {
        return NodeUtils.copyTree(this, newChildren);
    }

    //
    // transform the node properties and use the tree only for navigation
    //

    public <E> T transformPropertiesOnly(Function<E, ? extends E> rule, Class<E> typeToken) {
        return transformNodeProps(rule, typeToken);
    }

    public <E> T transformPropertiesDown(Function<E, ? extends E> rule, Class<E> typeToken) {
        return transformDown(t -> t.transformNodeProps(rule, typeToken));
    }

    public <E> T transformPropertiesUp(Function<E, ? extends E> rule, Class<E> typeToken) {
        return transformUp(t -> t.transformNodeProps(rule, typeToken));
    }

    // rewrites matching constructor properties; clones the node only when a property changed
    @SuppressWarnings("unchecked")
    protected <E> T transformNodeProps(Function<E, ? extends E> rule, Class<E> typeToken) {
        Object[] props = NodeUtils.properties(this);
        boolean changed = false;

        for (int i = 0; i < props.length; i++) {
            Object prop = props[i];
            // skip children (only properties are interesting)
            if (prop != children && !children.contains(prop) && typeToken.isInstance(prop)) {
                Object transformed = rule.apply((E) prop);
                if (!prop.equals(transformed)) {
                    changed = true;
                    props[i] = transformed;
                }
            }
        }

        return changed ? NodeUtils.cloneNode(this, props) : (T) this;
    }


    // NOTE(review): equality is class + children only; location (and, at this level,
    // other properties) are deliberately excluded - subclasses presumably extend this.
    @Override
    public int hashCode() {
        return Objects.hash(children);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }

        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }

        Node<?> other = (Node<?>) obj;
        return Objects.equals(children(), other.children());
    }

    /** Simple class name; used as the node label when rendering the tree. */
    public String nodeName() {
        return getClass().getSimpleName();
    }

    /** Single-line rendering of this node: name plus its non-child properties. */
    public String nodeString() {
        return NodeUtils.nodeString(this);
    }

    @Override
    public String toString() {
        return NodeUtils.toString(this);
    }
}
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.tree;

import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.util.Check;

import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Parameter;
import java.util.BitSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;

import static java.lang.String.format;

/**
 * Reflection-based helpers for {@link Node} trees: cloning nodes with new
 * children/properties (by replaying their constructors) and rendering trees
 * as strings. Relies on the convention that every constructor argument is
 * exposed through a same-named, no-arg public getter.
 */
public abstract class NodeUtils {

    /**
     * Cached reflective description of a node class: its widest public
     * constructor, the param-name-to-getter mapping, and the position of the
     * "children" parameter (-1 if absent).
     */
    public static class NodeInfo {
        public final Constructor<?> ctr;
        public final Map<String, Method> params;
        private final int childrenIndex;

        NodeInfo(Constructor<?> ctr, Map<String, Method> params, int childrenIndex) {
            this.ctr = ctr;
            this.params = params;
            this.childrenIndex = childrenIndex;
        }
    };

    // property excluded from string rendering
    private static final String TO_STRING_IGNORE_PROP = "location";
    // rendering limits: max properties shown and max characters per line
    private static final int TO_STRING_MAX_PROP = 10;
    private static final int TO_STRING_MAX_WIDTH = 110;

    // NOTE(review): plain LinkedHashMap guarded by nothing - assumes single-threaded
    // access during plan compilation; confirm if nodes are built concurrently.
    private static final Map<Class<?>, NodeInfo> CACHE = new LinkedHashMap<>();

    // make a modified copy of a tree node by replacing its children.
    // to do so it instantiates the class with the new values assuming it will 'replay' the creation.
    // as any child might be also a field the method uses the following convention:
    //
    // 1. the children are created through constructor alone
    // 2. any children referenced through fields are also present in the children list
    // 3. the list of children is created through the constructor
    // 4. all the constructor arguments are available on the given instance through public methods using the same name.
    //
    // As an example:
    //
    // class Add extends TreeNode {
    //     private Literal left;
    //     private Literal right;
    //
    //     public Add(Literal left, Literal right) {
    //         this.left = left;
    //         this.right = right;
    //     }
    //
    //     public Literal left() { return left; }
    //     public Literal right() { return right; }
    // }
    static <T extends Node<T>> T copyTree(Node<T> tree, List<T> newChildren) {
        Check.notNull(tree, "Non-null tree expected");

        // basic sanity check
        List<T> currentChildren = tree.children();
        Check.isTrue(currentChildren.size() == newChildren.size(),
                "Cannot make copy; expected %s children but received %s", currentChildren.size(), newChildren.size());

        NodeInfo info = info(tree.getClass());
        Object[] props = properties(tree, info);

        // for each parameter, look in the list of children to find it
        // if it's missing, it's added as is, otherwise it gets picked up from the new ones
        for (int i = 0; i < props.length; i++) {
            Object property = props[i];

            // in the rare case (UnresolvedFunction) the children are specified, copy them directly in the constructor
            if (i == info.childrenIndex) {
                props[i] = newChildren;
            }
            // check children only if needed
            else if (property instanceof Node) {
                // as the same instances are inside the children, an identity check is done instead of the usual equals
                for (int childIndex = 0; childIndex < currentChildren.size(); childIndex++) {
                    T child = currentChildren.get(childIndex);
                    // replace old property with the new one
                    if (property == child) {
                        props[i] = newChildren.get(childIndex);
                        // skip the rest of the children, if there are duplicates, will find them on their turn
                        break;
                    }
                }
            }
        }

        return cloneNode(tree, props);
    }


    /** Re-invokes the node's constructor with the given property values. */
    @SuppressWarnings("unchecked")
    static <T extends Node<T>> T cloneNode(Node<T> tree, Object[] props) {
        NodeInfo treeNodeInfo = info(tree.getClass());

        // finally invoke the constructor and return the new copy
        try {
            return (T) treeNodeInfo.ctr.newInstance(props);
        } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException ex) {
            throw new SqlIllegalArgumentException("Cannot call constructor %s to copy tree node",
                    treeNodeInfo.ctr.toGenericString(), ex);
        }
    }

    /**
     * Discovers (and caches) the reflective info of a node class: picks the
     * constructor with the most parameters and maps each parameter name to its
     * same-named getter. Requires parameter names in the bytecode
     * (javac -parameters or debug info).
     */
    @SuppressWarnings("rawtypes")
    public static NodeInfo info(Class<?> clazz) {
        NodeInfo treeNodeInfo = CACHE.get(clazz);

        // perform discovery (and cache it)
        if (treeNodeInfo == null) {
            Constructor[] constructors = clazz.getConstructors();
            Check.isTrue(!CollectionUtils.isEmpty(constructors), "No public constructors found for class %s", clazz);

            // find the longest constructor
            Constructor ctr = null;
            int maxParameterCount = -1;
            for (Constructor constructor : constructors) {
                if (ctr == null || maxParameterCount < constructor.getParameterCount()) {
                    ctr = constructor;
                    maxParameterCount = constructor.getParameterCount();
                }
            }

            int childrenIndex = -1;

            Map<String, Method> params = new LinkedHashMap<>(ctr.getParameterCount());

            // find each argument in the ctr and find its relevant method/getter
            Parameter[] parameters = ctr.getParameters();
            for (int paramIndex = 0; paramIndex < parameters.length; paramIndex++) {
                Parameter param = parameters[paramIndex];
                Check.isTrue(param.isNamePresent(),
                        "Can't find constructor parameter names for [%s]. Is class debug information available?",
                        clazz.toGenericString());
                String paramName = param.getName();

                if (paramName.equals("children")) {
                    childrenIndex = paramIndex;
                }
                // find getter for it
                Method getter = null;
                try {
                    getter = clazz.getMethod(paramName);
                } catch (NoSuchMethodException nsme) {
                    throw new SqlIllegalArgumentException(
                            "class [%s] expected to have method [%s] for retrieving constructor arguments; none found",
                            clazz.getName(), paramName);
                }

                // validate return type
                Class<?> expected = param.getType();
                Class<?> found = getter.getReturnType();
                // found == Object if we're dealing with generics
                Check.isTrue(found == Object.class || expected.isAssignableFrom(found),
                        "Constructor param [%s] in class [%s] has type [%s] but found getter [%s]",
                        paramName, clazz, expected, getter.toGenericString());

                params.put(paramName, getter);
            }

            treeNodeInfo = new NodeInfo(ctr, params, childrenIndex);
            CACHE.put(clazz, treeNodeInfo);
        }

        return treeNodeInfo;
    }

    /** Returns the node's constructor properties as an ordered name-to-value map. */
    public static Map<String, Object> propertiesMap(Node<?> tree) {
        NodeInfo info = info(tree.getClass());
        Object[] results = properties(tree, info);

        Map<String, Object> props = new LinkedHashMap<>(results.length);

        int index = 0;
        for (String name : info.params.keySet()) {
            props.put(name, results[index++]);
        }
        return props;
    }

    static Object[] properties(Node<?> tree) {
        return properties(tree, info(tree.getClass()));
    }

    // minor optimization to avoid double map lookup inside this class
    private static Object[] properties(Node<?> tree, NodeInfo info) {
        Object[] props = new Object[info.params.size()];
        int copyIndex = 0;

        for (Entry<String, Method> param : info.params.entrySet()) {
            Method getter = param.getValue();
            Object result;
            try {
                result = getter.invoke(tree);
            } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException ex) {
                throw new SqlIllegalArgumentException("Cannot invoke method [%s]", getter.toGenericString(), ex);
            }

            props[copyIndex++] = result;
        }

        return props;
    }

    /**
     * Renders the node's properties as a comma-separated string, truncated at
     * {@link #TO_STRING_MAX_PROP} entries / {@link #TO_STRING_MAX_WIDTH} chars
     * per line; {@code skipIfChild} omits properties that are also children.
     */
    public static String propertiesToString(Node<?> tree, boolean skipIfChild) {
        StringBuilder sb = new StringBuilder();

        NodeInfo info = info(tree.getClass());
        Set<String> keySet = info.params.keySet();
        Object[] properties = properties(tree, info);

        List<?> children = tree.children();
        // eliminate children (they are rendered as part of the tree)
        int maxProperties = TO_STRING_MAX_PROP;
        int maxWidth = 0;
        Iterator<String> nameIterator = keySet.iterator();
        boolean needsComma = false;

        for (int i = 0; i < properties.length; i++) {
            Object object = properties[i];
            String propertyName = nameIterator.next();
            // consider a property if it is not ignored AND
            // it's not a child (optional)
            if (!TO_STRING_IGNORE_PROP.equals(propertyName) && !(skipIfChild && children.contains(object))) {
                if (maxProperties-- < 0) {
                    sb.append(format(Locale.ROOT, "...%s fields not shown", properties.length - TO_STRING_MAX_PROP));
                    break;
                }

                if (needsComma) {
                    sb.append(",");
                }
                String stringValue = Objects.toString(object);
                // wrap overly long values onto the next line
                if (maxWidth + stringValue.length() > TO_STRING_MAX_WIDTH) {
                    int cutoff = Math.max(0, TO_STRING_MAX_WIDTH - maxWidth);
                    sb.append(stringValue.substring(0, cutoff));
                    sb.append("\n");
                    stringValue = stringValue.substring(cutoff);
                    maxWidth = 0;
                }
                maxWidth += stringValue.length();
                sb.append(stringValue);

                needsComma = true;
            }
        }

        return sb.toString();
    }

    /** Single-line rendering: {@code NodeName[prop1,prop2,...]}. */
    static String nodeString(Node<?> treeNode) {
        StringBuilder sb = new StringBuilder();
        sb.append(treeNode.nodeName());
        sb.append("[");
        sb.append(propertiesToString(treeNode, true));
        sb.append("]");
        return sb.toString();
    }

    static String toString(Node<?> treeNode) {
        return treeString(treeNode, new StringBuilder(), 0, new BitSet());
    }

    /**
     * Recursively renders the tree with ASCII branch art ("|_" / "\_");
     * {@code hasParentPerDepth} records, per depth, whether more siblings
     * follow (so the vertical connector is drawn).
     */
    static <T extends Node<T>> String treeString(Node<T> treeNode, StringBuilder sb, int depth, BitSet hasParentPerDepth) {
        if (depth > 0) {
            // draw children
            for (int column = 0; column < depth; column++) {
                if (hasParentPerDepth.get(column)) {
                    sb.append("|");
                    // if not the last elder, adding padding (since each column has two chars ("|_" or "\_")
                    if (column < depth - 1) {
                        sb.append(" ");
                    }
                }
                else {
                    // if the child has no parent (elder on the previous level), it means its the last sibling
                    sb.append((column == depth - 1) ? "\\" : "  ");
                }
            }

            sb.append("_");
        }

        if (treeNode == null) {
            sb.append("null");
            return sb.toString();
        }

        // TreeNode by name (to allow nodes to override their expression)
        sb.append(treeNode.nodeString());

        List<T> children = treeNode.children();
        if (!children.isEmpty()) {
            sb.append("\n");
        }
        for (int i = 0; i < children.size(); i++) {
            T t = children.get(i);
            hasParentPerDepth.set(depth, i < children.size() - 1);
            treeString(t, sb, depth + 1, hasParentPerDepth);
            if (i < children.size() - 1) {
                sb.append("\n");
            }
        }
        return sb.toString();
    }

    /** Side-by-side textual diff of two trees' string renderings. */
    public static <A extends Node<A>, B extends Node<B>> String diffString(A left, B right) {
        return diffString(left.toString(), right.toString());
    }

    /**
     * Compares two multi-line strings line by line, marking each pair with
     * " = " (equal) or " ! " (different).
     */
    public static String diffString(String left, String right) {
        // break the strings into lines
        // then compare each line
        String[] leftSplit = left.split("\\n");
        String[] rightSplit = right.split("\\n");

        // find max - we could use streams but autoboxing is not cool
        int leftMaxPadding = 0;
        for (String string : leftSplit) {
            leftMaxPadding = Math.max(string.length(), leftMaxPadding);
        }

        // try to allocate the buffer - 5 represents the column comparison chars
        StringBuilder sb = new StringBuilder(left.length() + right.length() + Math.max(left.length(), right.length()) * 3);

        boolean leftAvailable = true, rightAvailable = true;
        for (int leftIndex = 0, rightIndex = 0; leftAvailable || rightAvailable; leftIndex++, rightIndex++) {
            String leftRow = "", rightRow = leftRow;
            if (leftIndex < leftSplit.length) {
                leftRow = leftSplit[leftIndex];
            }
            else {
                leftAvailable = false;
            }
            sb.append(leftRow);
            // pad the left column so the comparison markers line up
            for (int i = leftRow.length(); i < leftMaxPadding; i++) {
                sb.append(" ");
            }
            // right side still available
            if (rightIndex < rightSplit.length) {
                rightRow = rightSplit[rightIndex];
            }
            else {
                rightAvailable = false;
            }
            if (leftAvailable || rightAvailable) {
                sb.append(leftRow.equals(rightRow) ? " = " : " ! ");
                sb.append(rightRow);
                sb.append("\n");
            }
        }
        return sb.toString();
    }
}
+ */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; +import java.util.Objects; + +abstract class AbstractDataType implements DataType { + + private final JDBCType sqlType; + private final boolean hasDocValues; + + AbstractDataType(JDBCType sqlType, boolean hasDocValues) { + this.sqlType = sqlType; + this.hasDocValues = hasDocValues; + } + + @Override + public boolean hasDocValues() { + return hasDocValues; + } + + @Override + public boolean isPrimitive() { + return true; + } + + @Override + public JDBCType sqlType() { + return sqlType; + } + + @Override + public String toString() { + return esName(); + } + + @Override + public int hashCode() { + return esName().hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + AbstractDataType other = (AbstractDataType) obj; + return Objects.equals(esName(), other.esName()); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/ArrayType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/ArrayType.java new file mode 100644 index 00000000000..2abd118e755 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/ArrayType.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; +import java.util.Objects; + +public class ArrayType implements DataType { + + private final DataType type; + private final int dimension; + + public ArrayType(DataType type, int dimension) { + this.type = type; + this.dimension = dimension; + } + + public DataType type() { + return type; + } + + public int dimension() { + return dimension; + } + + @Override + public String esName() { + return "array"; + } + + @Override + public JDBCType sqlType() { + return JDBCType.ARRAY; + } + + @Override + public int precision() { + return type.precision(); + } + + @Override + public boolean isInteger() { + return false; + } + + @Override + public boolean isRational() { + return false; + } + + @Override + public boolean isPrimitive() { + return false; + } + + @Override + public boolean hasDocValues() { + return type.hasDocValues(); + } + + @Override + public int hashCode() { + return Objects.hash(type, dimension); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ArrayType other = (ArrayType) obj; + return Objects.equals(dimension, other.dimension) && Objects.equals(type, other.type); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/BinaryType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/BinaryType.java new file mode 100644 index 00000000000..cf9bcb4b19e --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/BinaryType.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; + +public class BinaryType extends AbstractDataType { + + BinaryType(boolean docValues) { + super(JDBCType.VARBINARY, docValues); + } + + @Override + public String esName() { + return "binary"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/BooleanType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/BooleanType.java new file mode 100644 index 00000000000..eed7e0636a5 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/BooleanType.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; + +public class BooleanType extends AbstractDataType { + + BooleanType(boolean docValues) { + super(JDBCType.BOOLEAN, docValues); + } + + @Override + public String esName() { + return "boolean"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/ByteType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/ByteType.java new file mode 100644 index 00000000000..f531e8745d1 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/ByteType.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; + +public class ByteType extends AbstractDataType { + + ByteType(boolean docValues) { + super(JDBCType.TINYINT, docValues); + } + + @Override + public String esName() { + return "byte"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/CompoundDataType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/CompoundDataType.java new file mode 100644 index 00000000000..431b007d759 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/CompoundDataType.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; +import java.util.Map; + +public interface CompoundDataType extends DataType { + + @Override + default JDBCType sqlType() { + return JDBCType.STRUCT; + } + + @Override + default int precision() { + return 0; + } + + @Override + default boolean isInteger() { + return false; + } + + @Override + default boolean isRational() { + return false; + } + + @Override + default boolean isPrimitive() { + return false; + } + + @Override + default boolean hasDocValues() { + return false; + } + + Map properties(); + +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java new file mode 100644 index 00000000000..89c79133ddf --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; + +public interface DataType { + + String esName(); + + default String sqlName() { + return sqlType().name(); + } + + JDBCType sqlType(); + + boolean hasDocValues(); + + default Object defaultValue() { + return null; + } + + default int size() { + return JdbcUtils.size(sqlType()); + } + + default int precision() { + return JdbcUtils.precision(sqlType()); + } + + default int scale() { + return JdbcUtils.scale(sqlType()); + } + + default int displaySize() { + return JdbcUtils.displaySize(sqlType()); + } + + default boolean isSigned() { + return JdbcUtils.isSigned(sqlType()); + } + + default boolean isInteger() { + return JdbcUtils.isInteger(sqlType()); + } + + default boolean isRational() { + return JdbcUtils.isRational(sqlType()); + } + + default boolean isNumeric() { + return isInteger() || isRational(); + } + + default boolean isComplex() { + return !isPrimitive(); + } + + boolean isPrimitive(); + + default boolean same(DataType other) { + return getClass() == other.getClass(); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java new file mode 100644 index 00000000000..754949c6ef8 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java @@ -0,0 +1,393 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.common.Booleans; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.util.Locale; +import java.util.function.DoubleFunction; +import java.util.function.Function; +import java.util.function.LongFunction; + +/** + * Conversions from one data type to another. + * This class throws {@link SqlIllegalArgumentException} to differentiate between validation + * errors inside SQL as opposed to the rest of ES. + */ +public abstract class DataTypeConversion { + + private static final DateTimeFormatter UTC_DATE_FORMATTER = ISODateTimeFormat.dateTimeNoMillis().withZoneUTC(); + + public static DataType commonType(DataType left, DataType right) { + if (left.same(right)) { + return left; + } + if (nullable(left)) { + return right; + } + if (nullable(right)) { + return left; + } + if (left.isNumeric() && right.isNumeric()) { + // if one is int + if (left.isInteger()) { + // promote the highest int + if (right.isInteger()) { + return left.size() > right.size() ? left : right; + } + // promote the rational + return right; + } + // try the other side + if (right.isInteger()) { + return left; + } + // promote the highest rational + return left.size() > right.size() ?
left : right; + } + if (left instanceof StringType) { + if (right.isNumeric()) { + return right; + } + } + if (right instanceof StringType) { + if (left.isNumeric()) { + return left; + } + } + // none found + return null; + } + + public static boolean nullable(DataType from) { + return from instanceof NullType; + } + + public static boolean canConvert(DataType from, DataType to) { // TODO it'd be cleaner and more right to fetch the conversion + // only primitives are supported so far + if (from.isComplex() || to.isComplex()) { + return false; + } + + if (from.getClass() == to.getClass()) { + return true; + } + if (from instanceof NullType) { + return true; + } + + // anything can be converted to String + if (to instanceof StringType) { + return true; + } + // also anything can be converted into a bool + if (to instanceof BooleanType) { + return true; + } + + // numeric conversion + if ((from instanceof StringType || from instanceof BooleanType || from instanceof DateType || from.isNumeric()) && to.isNumeric()) { + return true; + } + // date conversion + if ((from instanceof DateType || from instanceof StringType || from.isNumeric()) && to instanceof DateType) { + return true; + } + + return false; + } + + /** + * Get the conversion from one type to another. 
+ */ + public static Conversion conversionFor(DataType from, DataType to) { + if (to instanceof StringType) { + return conversionToString(from); + } + if (to instanceof LongType) { + return conversionToLong(from); + } + if (to instanceof IntegerType) { + return conversionToInt(from); + } + if (to instanceof ShortType) { + return conversionToShort(from); + } + if (to instanceof ByteType) { + return conversionToByte(from); + } + if (to instanceof FloatType) { + return conversionToFloat(from); + } + if (to instanceof DoubleType) { + return conversionToDouble(from); + } + if (to instanceof DateType) { + return conversionToDate(from); + } + if (to instanceof BooleanType) { + return conversionToBoolean(from); + } + throw new SqlIllegalArgumentException("cannot convert from [" + from + "] to [" + to + "]"); + } + + private static Conversion conversionToString(DataType from) { + if (from instanceof DateType) { + return Conversion.DATE_TO_STRING; + } + return Conversion.OTHER_TO_STRING; + } + + private static Conversion conversionToLong(DataType from) { + if (from.isRational()) { + return Conversion.RATIONAL_TO_LONG; + } + if (from.isInteger()) { + return Conversion.INTEGER_TO_LONG; + } + if (from instanceof BooleanType) { + return Conversion.BOOL_TO_INT; // We emit an int here which is ok because of Java's casting rules + } + if (from instanceof StringType) { + return Conversion.STRING_TO_LONG; + } + throw new SqlIllegalArgumentException("cannot convert from [" + from + "] to [Long]"); + } + + private static Conversion conversionToInt(DataType from) { + if (from.isRational()) { + return Conversion.RATIONAL_TO_INT; + } + if (from.isInteger()) { + return Conversion.INTEGER_TO_INT; + } + if (from instanceof BooleanType) { + return Conversion.BOOL_TO_INT; + } + if (from instanceof StringType) { + return Conversion.STRING_TO_INT; + } + throw new SqlIllegalArgumentException("cannot convert from [" + from + "] to [Integer]"); + } + + private static Conversion 
conversionToShort(DataType from) { + if (from.isRational()) { + return Conversion.RATIONAL_TO_SHORT; + } + if (from.isInteger()) { + return Conversion.INTEGER_TO_SHORT; + } + if (from instanceof BooleanType) { + return Conversion.BOOL_TO_SHORT; + } + if (from instanceof StringType) { + return Conversion.STRING_TO_SHORT; + } + throw new SqlIllegalArgumentException("cannot convert [" + from + "] to [Short]"); + } + + private static Conversion conversionToByte(DataType from) { + if (from.isRational()) { + return Conversion.RATIONAL_TO_BYTE; + } + if (from.isInteger()) { + return Conversion.INTEGER_TO_BYTE; + } + if (from instanceof BooleanType) { + return Conversion.BOOL_TO_BYTE; + } + if (from instanceof StringType) { + return Conversion.STRING_TO_BYTE; + } + throw new SqlIllegalArgumentException("cannot convert [" + from + "] to [Byte]"); + } + + private static Conversion conversionToFloat(DataType from) { + if (from.isRational()) { + return Conversion.RATIONAL_TO_FLOAT; + } + if (from.isInteger()) { + return Conversion.INTEGER_TO_FLOAT; + } + if (from instanceof BooleanType) { + return Conversion.BOOL_TO_FLOAT; + } + if (from instanceof StringType) { + return Conversion.STRING_TO_FLOAT; + } + throw new SqlIllegalArgumentException("cannot convert [" + from + "] to [Float]"); + } + + private static Conversion conversionToDouble(DataType from) { + if (from.isRational()) { + return Conversion.RATIONAL_TO_DOUBLE; + } + if (from.isInteger()) { + return Conversion.INTEGER_TO_DOUBLE; + } + if (from instanceof BooleanType) { + return Conversion.BOOL_TO_DOUBLE; + } + if (from instanceof StringType) { + return Conversion.STRING_TO_DOUBLE; + } + throw new SqlIllegalArgumentException("cannot convert [" + from + "] to [Double]"); + } + + private static Conversion conversionToDate(DataType from) { + if (from.isRational()) { + return Conversion.RATIONAL_TO_LONG; + } + if (from.isInteger()) { + return Conversion.INTEGER_TO_LONG; + } + if (from instanceof BooleanType) { + return 
Conversion.BOOL_TO_INT; // We emit an int here which is ok because of Java's casting rules + } + if (from instanceof StringType) { + return Conversion.STRING_TO_DATE; + } + throw new SqlIllegalArgumentException("cannot convert [" + from + "] to [Date]"); + } + + private static Conversion conversionToBoolean(DataType from) { + if (from.isNumeric()) { + return Conversion.NUMERIC_TO_BOOLEAN; + } + if (from instanceof StringType) { + return Conversion.STRING_TO_BOOLEAN; + } + throw new SqlIllegalArgumentException("cannot convert [" + from + "] to [Boolean]"); + } + + public static byte safeToByte(long x) { + if (x > Byte.MAX_VALUE || x < Byte.MIN_VALUE) { + throw new SqlIllegalArgumentException("Numeric %s out of byte range", Long.toString(x)); + } + return (byte) x; + } + + public static short safeToShort(long x) { + if (x > Short.MAX_VALUE || x < Short.MIN_VALUE) { + throw new SqlIllegalArgumentException("Numeric %s out of short range", Long.toString(x)); + } + return (short) x; + } + + public static int safeToInt(long x) { + if (x > Integer.MAX_VALUE || x < Integer.MIN_VALUE) { + throw new SqlIllegalArgumentException("numeric %s out of int range", Long.toString(x)); + } + return (int) x; + } + + public static long safeToLong(double x) { + if (x > Long.MAX_VALUE || x < Long.MIN_VALUE) { + throw new SqlIllegalArgumentException("[" + x + "] out of [Long] range"); + } + return Math.round(x); + } + + public static boolean convertToBoolean(String val) { + String lowVal = val.toLowerCase(Locale.ROOT); + if (Booleans.isBoolean(lowVal) == false) { + throw new SqlIllegalArgumentException("cannot cast [" + val + "] to [Boolean]"); + } + return Booleans.parseBoolean(lowVal); + } + public static Object convert(Object value, DataType dataType) { + DataType detectedType = DataTypes.fromJava(value); + if (detectedType.equals(dataType)) { + return value; + } + return conversionFor(detectedType, dataType).convert(value); + } + + /** + * Reference to a data type conversion that can be
serialized. Note that the position in the enum + * is important because it is used for serialization. + */ + public enum Conversion { + DATE_TO_STRING(fromLong(UTC_DATE_FORMATTER::print)), + OTHER_TO_STRING(String::valueOf), + RATIONAL_TO_LONG(fromDouble(DataTypeConversion::safeToLong)), + INTEGER_TO_LONG(fromLong(value -> value)), + STRING_TO_LONG(fromString(Long::valueOf, "Long")), + RATIONAL_TO_INT(fromDouble(value -> safeToInt(safeToLong(value)))), + INTEGER_TO_INT(fromLong(DataTypeConversion::safeToInt)), + BOOL_TO_INT(fromBool(value -> value ? 1 : 0)), + STRING_TO_INT(fromString(Integer::valueOf, "Int")), + RATIONAL_TO_SHORT(fromDouble(value -> safeToShort(safeToLong(value)))), + INTEGER_TO_SHORT(fromLong(DataTypeConversion::safeToShort)), + BOOL_TO_SHORT(fromBool(value -> value ? (short) 1 : (short) 0)), + STRING_TO_SHORT(fromString(Short::valueOf, "Short")), + RATIONAL_TO_BYTE(fromDouble(value -> safeToByte(safeToLong(value)))), + INTEGER_TO_BYTE(fromLong(DataTypeConversion::safeToByte)), + BOOL_TO_BYTE(fromBool(value -> value ? (byte) 1 : (byte) 0)), + STRING_TO_BYTE(fromString(Byte::valueOf, "Byte")), + // TODO floating point conversions are lossy but conversions to integer conversions are not. Are we ok with that? + RATIONAL_TO_FLOAT(fromDouble(value -> (float) value)), + INTEGER_TO_FLOAT(fromLong(value -> (float) value)), + BOOL_TO_FLOAT(fromBool(value -> value ? 1f : 0f)), + STRING_TO_FLOAT(fromString(Float::valueOf, "Float")), + RATIONAL_TO_DOUBLE(fromDouble(value -> value)), + INTEGER_TO_DOUBLE(fromLong(Double::valueOf)), + BOOL_TO_DOUBLE(fromBool(value -> value ? 
1d: 0d)), + STRING_TO_DOUBLE(fromString(Double::valueOf, "Double")), + STRING_TO_DATE(fromString(UTC_DATE_FORMATTER::parseMillis, "Date")), + NUMERIC_TO_BOOLEAN(fromLong(value -> value != 0)), + STRING_TO_BOOLEAN(fromString(DataTypeConversion::convertToBoolean, "Boolean")), + ; + + private final Function converter; + + Conversion(Function converter) { + this.converter = converter; + } + + private static Function fromDouble(DoubleFunction converter) { + return (Object l) -> converter.apply(((Number) l).doubleValue()); + } + + private static Function fromLong(LongFunction converter) { + return (Object l) -> converter.apply(((Number) l).longValue()); + } + + private static Function fromString(Function converter, String to) { + return (Object value) -> { + try { + return converter.apply(value.toString()); + } catch (NumberFormatException e) { + throw new SqlIllegalArgumentException("cannot cast [%s] to [%s]", value, to, e); + } catch (IllegalArgumentException e) { + throw new SqlIllegalArgumentException("cannot cast [%s] to [%s]:%s", value, to, e.getMessage(), e); + } + }; + } + + private static Function fromBool(Function converter) { + return (Object l) -> converter.apply(((Boolean) l)); + } + + public Object convert(Object l) { + if (l == null) { + return null; + } + return converter.apply(l); + } + } + + public static DataType asInteger(DataType dataType) { + if (!dataType.isNumeric()) { + return dataType; + } + + return dataType.isInteger() ? dataType : DataTypes.LONG; + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java new file mode 100644 index 00000000000..9dc894e47db --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.joda.time.DateTime; + +import java.sql.JDBCType; +import java.util.LinkedHashMap; +import java.util.Map; + +public abstract class DataTypes { + + public static final DataType NULL = new NullType(); + public static final DataType BOOLEAN = new BooleanType(true); + public static final DataType BYTE = new ByteType(true); + public static final DataType SHORT = new ShortType(true); + public static final DataType INTEGER = new IntegerType(true); + public static final DataType LONG = new LongType(true); + public static final DataType DOUBLE = new DoubleType(true); + public static final DataType FLOAT = new FloatType(true); + public static final DataType HALF_FLOAT = new HalfFloatType(true); + public static final DataType IP_TYPE = new IpType(true); + public static final DataType KEYWORD = KeywordType.DEFAULT; + public static final DataType TEXT = new TextType(); + + public static final DataType GEO_POINT = new GeoPointType(); + public static final DataType DATE = DateType.DEFAULT; + + public static final DataType BINARY = new BinaryType(true); + public static final DataType UNKNOWN = new UnknownDataType(); + + private static final Map ES_PRIMITIVES_DEFAULT = new LinkedHashMap<>(); + private static final Map ES_PRIMITIVES_NO_DOC_VALUES = new LinkedHashMap<>(); + private static final Map JDBC_TO_TYPES = new LinkedHashMap<>(); + + static { + initDefault(NULL); + initDefault(BOOLEAN); + initDefault(BYTE); + initDefault(SHORT); + initDefault(INTEGER); + initDefault(LONG); + initDefault(DOUBLE); + initDefault(FLOAT); + initDefault(HALF_FLOAT); + initDefault(IP_TYPE); + // text and keyword are handled separately + initDefault(BINARY); + initDefault(UNKNOWN); + + //init(GEO_POINT); + + for (DataType type : ES_PRIMITIVES_DEFAULT.values()) { + 
JDBC_TO_TYPES.put(type.sqlType(), type); + } + + initNoDocValues(NULL); + initNoDocValues(new BooleanType(false)); + initNoDocValues(new ByteType(false)); + initNoDocValues(new ShortType(false)); + initNoDocValues(new IntegerType(false)); + initNoDocValues(new LongType(false)); + initNoDocValues(new DoubleType(false)); + initNoDocValues(new FloatType(false)); + initNoDocValues(new HalfFloatType(false)); + initNoDocValues(new IpType(false)); + initNoDocValues(new BinaryType(false)); + initNoDocValues(UNKNOWN); + } + + private static void initDefault(DataType type) { + ES_PRIMITIVES_DEFAULT.put(type.esName(), type); + } + + private static void initNoDocValues(DataType type) { + ES_PRIMITIVES_NO_DOC_VALUES.put(type.esName(), type); + } + + public static DataType fromJava(Object value) { + if (value == null) { + return NULL; + } + if (value instanceof Integer) { + return INTEGER; + } + if (value instanceof Long) { + return LONG; + } + if (value instanceof Boolean) { + return BOOLEAN; + } + if (value instanceof Double) { + return DOUBLE; + } + if (value instanceof Float) { + return FLOAT; + } + if (value instanceof Byte) { + return BYTE; + } + if (value instanceof Short) { + return SHORT; + } + if (value instanceof DateTime) { + return DATE; + } + if (value instanceof String) { + return KEYWORD; + } + throw new SqlIllegalArgumentException("No idea what's the DataType for %s", value.getClass()); + } + + public static DataType from(JDBCType type) { + return JDBC_TO_TYPES.get(type); + } + + public static DataType fromEsName(String typeString, boolean docValuesEnabled) { + return docValuesEnabled ? 
ES_PRIMITIVES_DEFAULT.get(typeString) : ES_PRIMITIVES_NO_DOC_VALUES.get(typeString); + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DateType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DateType.java new file mode 100644 index 00000000000..4d5a313422c --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DateType.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.common.util.CollectionUtils; + +import java.sql.JDBCType; +import java.util.Arrays; +import java.util.List; + +public class DateType extends AbstractDataType { + + public static final List DEFAULT_FORMAT = Arrays.asList("strict_date_optional_time", "epoch_millis"); + public static final DateType DEFAULT = new DateType(true); + + private final List formats; + + DateType(boolean docValues, String... formats) { + /* Since we normalize timestamps to UTC for storage and do not keep + * the origination zone information we are technically + * `TIMESTAMP WITHOUT TIME ZONE` or just `TIMESTAMP`, or, in Oracle + * parlance, `TIMESTAMP WITH LOCAL TIME ZONE`. + * `TIMESTAMP WITH TIME ZONE` implies that we store the original + * time zone of the event. Confusingly, PostgreSQL's + * `TIMESTAMP WITH TIME ZONE` type does not store original time zone, + * unlike H2 and Oracle, *but* it is aware of the session's time zone + * so it is preferred. But it is *weird*. As bad as it feels not to + * be like PostgreSQL, we are going to not be like PostgreSQL here + * and return TIMESTAMP so we more closely conform with H2 and + * (shudder) Oracle.
*/ + super(JDBCType.TIMESTAMP, docValues); + this.formats = CollectionUtils.isEmpty(formats) ? DEFAULT_FORMAT : Arrays.asList(formats); + } + + @Override + public String esName() { + return "date"; + } + + @Override + public int precision() { + // same as Long + // TODO: based this on format string + return 19; + } + + public List formats() { + return formats; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DoubleType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DoubleType.java new file mode 100644 index 00000000000..0126a5c8ba1 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/DoubleType.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; + +public class DoubleType extends AbstractDataType { + + DoubleType(boolean docValues) { + super(JDBCType.DOUBLE, docValues); + } + + @Override + public String esName() { + return "double"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/FloatType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/FloatType.java new file mode 100644 index 00000000000..f14045b1651 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/FloatType.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; + +public class FloatType extends AbstractDataType { + + FloatType(boolean docValues) { + super(JDBCType.REAL, docValues); + } + + @Override + public String esName() { + return "float"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/GeoPointType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/GeoPointType.java new file mode 100644 index 00000000000..e93b232e700 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/GeoPointType.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; + +public class GeoPointType extends AbstractDataType { + + GeoPointType() { + super(JDBCType.NULL, false); + } + + @Override + public String esName() { + return "geo-point"; + } + + @Override + public JDBCType sqlType() { + throw new UnsupportedOperationException("need to determine actual format"); + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/HalfFloatType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/HalfFloatType.java new file mode 100644 index 00000000000..c4227864f93 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/HalfFloatType.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; + +public class HalfFloatType extends AbstractDataType { + + HalfFloatType(boolean docValues) { + super(JDBCType.FLOAT, docValues); + } + + @Override + public String esName() { + return "half_float"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/IntegerType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/IntegerType.java new file mode 100644 index 00000000000..82710f1e652 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/IntegerType.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; + +public class IntegerType extends AbstractDataType { + + IntegerType(boolean docValues) { + super(JDBCType.INTEGER, docValues); + } + + @Override + public String esName() { + return "integer"; + } +} diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/IpType.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/IpType.java new file mode 100644 index 00000000000..7bbfbf5b27e --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/type/IpType.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/IpType.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.sql.JDBCType;

/**
 * SQL representation of the Elasticsearch {@code ip} field type, exposed
 * to JDBC as a {@link JDBCType#VARCHAR} wide enough for any address.
 */
public class IpType extends AbstractDataType {

    IpType(boolean docValues) {
        super(JDBCType.VARCHAR, docValues);
    }

    @Override
    public String esName() {
        return "ip";
    }

    @Override
    public int precision() {
        // longest textual form of an IPv6 address
        return 39;
    }
}

// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/JdbcUtils.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.sql.JDBCType;

/**
 * Static metadata lookups keyed by {@link JDBCType}: signedness, scale,
 * precision, display size, rational/integer classification and byte size.
 */
abstract class JdbcUtils {

    /** Whether the JDBC type represents a signed numeric value. */
    static boolean isSigned(JDBCType type) {
        switch (type) {
            case BIGINT:
            case DECIMAL:
            case DOUBLE:
            case INTEGER:
            case SMALLINT:
            case FLOAT:
            case REAL:
            case NUMERIC:
            case TINYINT:
                return true;
            default:
                return false;
        }
    }

    /** Number of digits to the right of the decimal point; 0 for exact types. */
    static int scale(JDBCType type) {
        switch (type) {
            case REAL: return 7;
            case FLOAT:
            case DOUBLE: return 16;
            default: return 0;
        }
    }

    /** Maximum number of significant digits (or characters) for the type; -1 when unknown. */
    static int precision(JDBCType type) {
        switch (type) {
            case NULL: return 0;
            case BOOLEAN: return 1;
            case TINYINT: return 3;
            case SMALLINT: return 5;
            case INTEGER: return 10;
            case BIGINT: return 19;
            // 24 bits of mantissa - 24*log10(2) =~ 7 (7.22)
            case REAL: return 7;
            // 53 bits of mantissa ~ 16 (15.95) decimal digits (53*log10(2))
            case FLOAT:
            case DOUBLE: return 16;
            case VARBINARY:
            case VARCHAR:
                return Integer.MAX_VALUE;
            // NOTE(review): displaySize() has no TIME_WITH_TIMEZONE case, so this
            // currently resolves to its default of 0 — confirm that is intended
            case TIME_WITH_TIMEZONE: return displaySize(type);
            default:
                return -1;
        }
    }

    /** Suggested column display width in characters; 0 when not applicable. */
    static int displaySize(JDBCType type) {
        switch (type) {
            case NULL: return 0;
            case BOOLEAN: return 1;
            case TINYINT: return 3;
            case SMALLINT: return 6;
            case INTEGER: return 11;
            case BIGINT: return 20;
            case REAL: return 15;
            case FLOAT:
            case DOUBLE: return 25;
            case VARCHAR:
            case VARBINARY: return 0;
            case TIMESTAMP: return 20;
            default:
                return 0;
        }
    }

    /** Whether the type holds fractional (floating point / decimal) values. */
    static boolean isRational(JDBCType type) {
        switch (type) {
            case REAL:
            case DOUBLE:
            case FLOAT:
            case DECIMAL:
            case NUMERIC:
                return true;
            default:
                return false;
        }
    }

    /** Whether the type holds whole-number values. */
    static boolean isInteger(JDBCType type) {
        switch (type) {
            case TINYINT:
            case SMALLINT:
            case INTEGER:
            case BIGINT:
                return true;
            default:
                return false;
        }
    }

    /** Storage size in bytes; Integer.MAX_VALUE for variable-length types, -1 when unknown. */
    static int size(JDBCType type) {
        switch (type) {
            case NULL: return 0;
            case BOOLEAN: return 1;
            case TINYINT: return Byte.BYTES;
            case SMALLINT: return Short.BYTES;
            case INTEGER: return Integer.BYTES;
            case TIMESTAMP:
            case BIGINT: return Long.BYTES;
            case REAL: return Float.BYTES;
            case FLOAT:
            case DOUBLE: return Double.BYTES;
            case VARCHAR:
            case VARBINARY: return Integer.MAX_VALUE;
            default:
                return -1;
        }
    }
}
// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/KeywordType.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.util.Map;
import java.util.Objects;

import static java.util.Collections.emptyMap;

/**
 * SQL representation of the Elasticsearch {@code keyword} field type.
 * Precision reflects the mapping's {@code ignore_above} length; a shared
 * {@link #DEFAULT} instance covers the common configuration.
 */
public class KeywordType extends StringType {

    // mirrors the default `ignore_above` applied by ES keyword mappings
    static final int DEFAULT_LENGTH = 256;
    static final KeywordType DEFAULT = new KeywordType(true, DEFAULT_LENGTH, emptyMap());

    private final int length;

    KeywordType(boolean docValues, int length, Map<String, DataType> fields) {
        super(docValues, fields);
        this.length = length;
    }

    @Override
    public String esName() {
        return "keyword";
    }

    @Override
    public int precision() {
        return length;
    }

    /** Returns the shared {@link #DEFAULT} when the configuration matches it, otherwise a new instance. */
    static DataType from(boolean docValues, int length, Map<String, DataType> fields) {
        return docValues && length == DEFAULT_LENGTH && fields.isEmpty() ? DEFAULT : new KeywordType(docValues, length, fields);
    }

    @Override
    public int hashCode() {
        return Objects.hash(length, hasDocValues(), fields());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        KeywordType other = (KeywordType) obj;
        // compare primitives directly instead of boxing through Objects.equals
        return hasDocValues() == other.hasDocValues()
                && length == other.length
                && Objects.equals(fields(), other.fields());
    }
}
// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/LongType.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.sql.JDBCType;

/**
 * SQL representation of the Elasticsearch {@code long} field type,
 * mapped onto {@link JDBCType#BIGINT}.
 */
public class LongType extends AbstractDataType {

    LongType(boolean docValues) {
        super(JDBCType.BIGINT, docValues);
    }

    @Override
    public String esName() {
        return "long";
    }
}

// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/NestedType.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.util.Map;
import java.util.Objects;

/**
 * SQL representation of the Elasticsearch {@code nested} field type:
 * a compound type carrying the data types of its sub-fields.
 */
public class NestedType implements CompoundDataType {

    private final Map<String, DataType> properties;

    public NestedType(Map<String, DataType> properties) {
        this.properties = properties;
    }

    public Map<String, DataType> properties() {
        return properties;
    }

    @Override
    public String esName() {
        return "nested";
    }

    @Override
    public int hashCode() {
        return Objects.hash(properties);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return Objects.equals(properties, ((NestedType) obj).properties);
    }

    @Override
    public String toString() {
        return getClass().getSimpleName() + "[" + esName() + "|" + sqlName() + "]=" + properties;
    }
}
// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/NullType.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.sql.JDBCType;

/**
 * SQL representation of the absent/{@code null} type,
 * mapped onto {@link JDBCType#NULL}.
 */
public class NullType extends AbstractDataType {

    NullType() {
        super(JDBCType.NULL, true);
    }

    @Override
    public String esName() {
        return "null";
    }
}
// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/ObjectType.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.util.Map;
import java.util.Objects;

import static java.util.Collections.emptyMap;

/**
 * SQL representation of the Elasticsearch {@code object} field type:
 * a compound type exposing the data types of its sub-fields.
 */
public class ObjectType implements CompoundDataType {

    /** Shared instance for objects without any declared properties. */
    public static final ObjectType EMPTY = new ObjectType(emptyMap());

    private final Map<String, DataType> properties;

    public ObjectType(Map<String, DataType> properties) {
        this.properties = properties;
    }

    public Map<String, DataType> properties() {
        return properties;
    }

    @Override
    public String esName() {
        return "object";
    }

    @Override
    public int hashCode() {
        return Objects.hash(properties);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return Objects.equals(properties, ((ObjectType) obj).properties);
    }
}
// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/ScaledFloatType.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.sql.JDBCType;

/**
 * SQL representation of the Elasticsearch {@code scaled_float} field type
 * (a long scaled by a fixed factor), surfaced as {@link JDBCType#FLOAT}.
 */
public class ScaledFloatType extends AbstractDataType {

    public ScaledFloatType(boolean docValues) {
        super(JDBCType.FLOAT, docValues);
    }

    @Override
    public String esName() {
        return "scaled_float";
    }

    @Override
    public int precision() {
        // backed by a long, hence the same number of digits
        return 19;
    }
}

// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/Schema.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

import org.elasticsearch.xpack.sql.util.Check;

import static java.util.Collections.emptyList;

/**
 * Ordered, positionally-indexed pairing of column names with their
 * {@link DataType}s, iterable as {@link Entry} records.
 */
public class Schema implements Iterable<Schema.Entry> {

    /** A single (name, type) column of the schema. */
    public interface Entry {
        String name();
        DataType type();
    }

    static class DefaultEntry implements Entry {
        private final String name;
        private final DataType type;

        DefaultEntry(String name, DataType type) {
            this.name = name;
            this.type = type;
        }

        @Override
        public String name() {
            return name;
        }

        @Override
        public DataType type() {
            return type;
        }
    }

    public static final Schema EMPTY = new Schema(emptyList(), emptyList());

    private final List<String> names;
    private final List<DataType> types;

    /**
     * @throws org.elasticsearch.xpack.sql.SqlIllegalArgumentException if the two lists differ in size
     */
    public Schema(List<String> names, List<DataType> types) {
        Check.isTrue(names.size() == types.size(), "Different # of names %s vs types %s", names, types);
        this.types = types;
        this.names = names;
    }

    public List<String> names() {
        return names;
    }

    public List<DataType> types() {
        return types;
    }

    public int size() {
        return names.size();
    }

    public Entry get(int i) {
        return new DefaultEntry(names.get(i), types.get(i));
    }

    /** Type of the named column, or {@code null} if the name is unknown. */
    public DataType type(String name) {
        int indexOf = names.indexOf(name);
        if (indexOf < 0) {
            return null;
        }
        return types.get(indexOf);
    }

    @Override
    public Iterator<Entry> iterator() {
        return new Iterator<Entry>() {
            private final int size = size();
            private int pos = -1;

            @Override
            public boolean hasNext() {
                return pos < size - 1;
            }

            @Override
            public Entry next() {
                // BUG FIX: the previous `if (pos++ >= size)` check let the first
                // call past the end reach get(size) and throw
                // IndexOutOfBoundsException instead of NoSuchElementException,
                // violating the Iterator contract.
                if (!hasNext()) {
                    throw new NoSuchElementException();
                }
                return get(++pos);
            }
        };
    }

    public Stream<Entry> stream() {
        return StreamSupport.stream(spliterator(), false);
    }

    @Override
    public Spliterator<Entry> spliterator() {
        return Spliterators.spliterator(iterator(), size(), 0);
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("[");
        for (int i = 0; i < names.size(); i++) {
            if (i > 0) {
                sb.append(",");
            }
            sb.append(names.get(i));
            sb.append(":");
            sb.append(types.get(i).esName());
        }
        sb.append("]");
        return sb.toString();
    }
}
// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/ShortType.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.sql.JDBCType;

/**
 * SQL representation of the Elasticsearch {@code short} field type,
 * mapped onto {@link JDBCType#SMALLINT}.
 */
public class ShortType extends AbstractDataType {

    ShortType(boolean docValues) {
        super(JDBCType.SMALLINT, docValues);
    }

    @Override
    public String esName() {
        return "short";
    }
}

// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/StringType.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.sql.JDBCType;
import java.util.LinkedHashMap;
import java.util.Map;

import static java.util.Collections.emptyMap;

/**
 * Base class for the textual field types ({@code text}, {@code keyword}).
 * Tracks the sub-fields of the mapping and, when the field itself lacks
 * doc values, the subset of sub-fields that do have them.
 */
public abstract class StringType implements DataType {

    private final boolean docValue;
    private final Map<String, DataType> fields;
    private final Map<String, DataType> docValueFields;

    StringType(boolean docValue, Map<String, DataType> fields) {
        this.docValue = docValue;
        this.fields = fields;

        // docValueFields only matter when the field itself has no doc values;
        // NOTE(review): when docValue is true the sub-fields are skipped even if
        // they carry doc values — presumably the parent is preferred; confirm.
        if (docValue || fields.isEmpty()) {
            docValueFields = emptyMap();
        } else {
            Map<String, DataType> withDocValues = new LinkedHashMap<>();
            for (Map.Entry<String, DataType> field : fields.entrySet()) {
                if (field.getValue().hasDocValues()) {
                    withDocValues.put(field.getKey(), field.getValue());
                }
            }
            docValueFields = withDocValues;
        }
    }

    @Override
    public JDBCType sqlType() {
        return JDBCType.VARCHAR;
    }

    @Override
    public boolean hasDocValues() {
        return docValue;
    }

    public Map<String, DataType> fields() {
        return fields;
    }

    public Map<String, DataType> docValueFields() {
        return docValueFields;
    }

    @Override
    public boolean isPrimitive() {
        // a string field with sub-fields is treated as compound
        return fields.isEmpty();
    }

    @Override
    public String toString() {
        return esName();
    }
}

// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/TextType.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.util.Map;

import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;

/**
 * SQL representation of the Elasticsearch {@code text} field type.
 * Text fields never have doc values; {@code fielddata} tracks whether
 * in-memory field data was enabled in the mapping.
 */
public class TextType extends StringType {

    // the common mapping: no fielddata, a single default `keyword` sub-field
    private static final TextType DEFAULT = new TextType(false, singletonMap("keyword", KeywordType.DEFAULT));

    private final boolean fieldData;

    TextType() {
        this(false, emptyMap());
    }

    TextType(boolean fieldData, Map<String, DataType> fields) {
        super(false, fields);
        this.fieldData = fieldData;
    }

    public boolean hasFieldData() {
        return fieldData;
    }

    @Override
    public String esName() {
        return "text";
    }

    /** Returns the shared {@link #DEFAULT} when the configuration matches it, otherwise a new instance. */
    static DataType from(boolean fieldData, Map<String, DataType> fields) {
        return DEFAULT.fieldData == fieldData && DEFAULT.fields().equals(fields) ? DEFAULT : new TextType(fieldData, fields);
    }
}
// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/TokenCountType.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.sql.JDBCType;

/**
 * SQL representation of the Elasticsearch {@code token_count} field type,
 * surfaced as {@link JDBCType#INTEGER}.
 */
public class TokenCountType extends AbstractDataType {

    TokenCountType(boolean docValues) {
        super(JDBCType.INTEGER, docValues);
    }

    @Override
    public String esName() {
        return "token_count";
    }

    // NOTE(review): both classifications return false although the JDBC type
    // is INTEGER — presumably to keep token counts out of numeric expressions;
    // confirm this is intentional.
    @Override
    public boolean isInteger() {
        return false;
    }

    @Override
    public boolean isRational() {
        return false;
    }
}

// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/Types.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.xpack.sql.analysis.index.MappingException;

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;

import static java.lang.Math.floor;
import static java.lang.Math.log10;
import static java.lang.Math.round;
import static java.util.Collections.emptyMap;

/**
 * Translates an Elasticsearch mapping (as a nested {@code Map} pulled from
 * the mapping API response) into the SQL {@link DataType} hierarchy.
 */
public abstract class Types {

    /**
     * Entry point: converts the {@code properties} section of a mapping
     * into a name-to-type map, preserving field order.
     */
    @SuppressWarnings("unchecked")
    public static Map<String, DataType> fromEs(Map<String, Object> asMap) {
        Map<String, Object> props = (Map<String, Object>) asMap.get("properties");
        return props == null || props.isEmpty() ? emptyMap() : startWalking(props);
    }

    private static Map<String, DataType> startWalking(Map<String, Object> mapping) {
        if (mapping == null) {
            return emptyMap();
        }
        Map<String, DataType> translated = new LinkedHashMap<>();
        for (Entry<String, Object> entry : mapping.entrySet()) {
            walkMapping(entry.getKey(), entry.getValue(), translated);
        }
        return translated;
    }

    /**
     * Translates a single field definition and stores it under {@code name}.
     * Fields whose {@code type} entry is absent or not a string are silently
     * skipped (plain object mappings).
     *
     * @throws MappingException if the entry is not a map or the type is unparseable
     */
    @SuppressWarnings("unchecked")
    private static void walkMapping(String name, Object value, Map<String, DataType> mapping) {
        // object type - only root or nested docs supported
        if (value instanceof Map) {
            Map<String, Object> content = (Map<String, Object>) value;

            Object type = content.get("type");
            if (type instanceof String) {
                String st = type.toString();
                if (isNested(st)) {
                    mapping.put(name, new NestedType(fromEs(content)));
                } else if (isPrimitive(st)) {
                    // check dates first to account for the format
                    mapping.put(name, createPrimitiveType(st, content));
                } else {
                    throw new MappingException("Don't know how to parse entry %s in map %s", type, content);
                }
            }
            // non-string/absent type: plain object, ignored
        } else {
            throw new MappingException("Don't know how to parse mapping %s", value);
        }
    }

    private static DataType createPrimitiveType(String typeString, Map<String, Object> content) {
        // doc_values is available on most types, so read it regardless
        boolean docValues = boolSetting(content.get("doc_values"), true);

        switch (typeString) {
            case "date": {
                Object fmt = content.get("format");
                if (fmt != null) {
                    return new DateType(docValues, Strings.split(fmt.toString(), "||"));
                }
                return docValues ? DateType.DEFAULT : new DateType(false);
            }
            case "text": {
                boolean fieldData = boolSetting(content.get("fielddata"), false);
                return TextType.from(fieldData, subFields(content));
            }
            case "keyword": {
                int length = intSetting(content.get("ignore_above"), KeywordType.DEFAULT_LENGTH);
                return KeywordType.from(docValues, length, subFields(content));
            }
            default:
                return DataTypes.fromEsName(typeString, docValues);
        }
    }

    /** Translates the optional {@code fields} multi-field section, if present. */
    @SuppressWarnings("unchecked")
    private static Map<String, DataType> subFields(Map<String, Object> content) {
        Object value = content.get("fields");
        return value instanceof Map ? startWalking((Map<String, Object>) value) : emptyMap();
    }

    private static boolean boolSetting(Object value, boolean defaultValue) {
        return value == null ? defaultValue : Booleans.parseBoolean(value.toString(), defaultValue);
    }

    private static int intSetting(Object value, int defaultValue) {
        return value == null ? defaultValue : Integer.parseInt(value.toString());
    }

    private static boolean isPrimitive(String string) {
        return !isNested(string);
    }

    private static boolean isNested(String type) {
        return "nested".equals(type);
    }

    /** Number of decimal digits of {@code number}'s absolute value. */
    static int precision(long number) {
        // Long.MIN_VALUE has no positive counterpart; clamp to MAX_VALUE
        long abs = number == Long.MIN_VALUE ? Long.MAX_VALUE : number < 0 ? -number : number;
        return (int) round(floor(log10(abs))) + 1;
    }
}
// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/type/UnknownDataType.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.type;

import java.sql.JDBCType;

/**
 * Fallback SQL representation for Elasticsearch field types that have no
 * dedicated mapping; surfaced as {@link JDBCType#OTHER} and treated as
 * neither primitive nor complex.
 */
public class UnknownDataType extends AbstractDataType {

    UnknownDataType() {
        super(JDBCType.OTHER, false);
    }

    @Override
    public String esName() {
        return "unknown";
    }

    @Override
    public boolean isComplex() {
        return false;
    }

    @Override
    public boolean isPrimitive() {
        return false;
    }
}

// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/util/Check.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.util;

import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;

/**
 * Utility class used for checking various conditions at runtime, inside SQL (hence the specific exception) with
 * minimum amount of code
 */
public abstract class Check {

    /** @throws SqlIllegalArgumentException with the formatted message if {@code expression} is false */
    public static void isTrue(boolean expression, String message, Object... values) {
        if (!expression) {
            throw new SqlIllegalArgumentException(message, values);
        }
    }

    /** @throws SqlIllegalArgumentException with the plain message if {@code expression} is false */
    public static void isTrue(boolean expression, String message) {
        if (!expression) {
            throw new SqlIllegalArgumentException(message);
        }
    }

    /** @throws SqlIllegalArgumentException with the plain message if {@code object} is null */
    public static void notNull(Object object, String message) {
        if (object == null) {
            throw new SqlIllegalArgumentException(message);
        }
    }

    /** @throws SqlIllegalArgumentException with the formatted message if {@code object} is null */
    public static void notNull(Object object, String message, Object... values) {
        if (object == null) {
            throw new SqlIllegalArgumentException(message, values);
        }
    }
}

// ===== sql/server/src/main/java/org/elasticsearch/xpack/sql/util/CollectionUtils.java =====
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.util;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;

import static java.util.Collections.emptyList;

/**
 * List/collection concatenation helpers. {@code combine} returns one of its
 * inputs unchanged when the other is empty, otherwise a fresh list.
 */
public abstract class CollectionUtils {

    public static boolean isEmpty(Collection<?> col) {
        return col == null || col.isEmpty();
    }

    /**
     * Concatenates two lists. When either is empty the other is returned
     * as-is (no copy), so callers must not assume the result is mutable.
     */
    @SuppressWarnings("unchecked")
    public static <T> List<T> combine(List<? extends T> left, List<? extends T> right) {
        if (right.isEmpty()) {
            return (List<T>) left;
        }
        if (left.isEmpty()) {
            return (List<T>) right;
        }
        // both lists are non-empty past the early returns, so the previous
        // per-list isEmpty() guards here were dead code and have been removed
        List<T> list = new ArrayList<>(left.size() + right.size());
        list.addAll(left);
        list.addAll(right);
        return list;
    }

    /** Concatenates any number of collections into a fresh list. */
    @SafeVarargs
    @SuppressWarnings("varargs")
    public static <T> List<T> combine(Collection<? extends T>... collections) {
        if (org.elasticsearch.common.util.CollectionUtils.isEmpty(collections)) {
            return emptyList();
        }

        List<T> list = new ArrayList<>();
        for (Collection<? extends T> col : collections) {
            // typically AttributeSet which ends up iterating anyway plus creating a redundant array
            if (col instanceof Set) {
                for (T t : col) {
                    list.add(t);
                }
            } else {
                list.addAll(col);
            }
        }
        return list;
    }

    /** Concatenates a collection with trailing varargs entries into a fresh list. */
    @SafeVarargs
    @SuppressWarnings("varargs")
    public static <T> List<T> combine(Collection<T> left, T... entries) {
        List<T> list = new ArrayList<>(left.size() + entries.length);
        if (!left.isEmpty()) {
            list.addAll(left);
        }
        if (entries.length > 0) {
            Collections.addAll(list, entries);
        }
        return list;
    }
}
+ */ +package org.elasticsearch.xpack.sql.util; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicInteger; + +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.tree.NodeUtils; + +// use the awesome http://mdaines.github.io/viz.js/ to visualize and play around with the various options +public abstract class Graphviz { + + private static final int NODE_LABEL_INDENT = 12; + private static final int CLUSTER_INDENT = 2; + private static final int INDENT = 1; + + + public static String dot(String name, Node root) { + StringBuilder sb = new StringBuilder(); + // name + sb.append("digraph G { " + + "rankdir=BT; \n" + + "label=\"" + name + "\"; \n" + + "node[shape=plaintext, color=azure1];\n " + + "edge[color=black,arrowsize=0.5];\n"); + handleNode(sb, root, new AtomicInteger(0), INDENT, true); + sb.append("}"); + return sb.toString(); + } + + public static String dot(Map> clusters, boolean drawSubTrees) { + AtomicInteger nodeCounter = new AtomicInteger(0); + + StringBuilder sb = new StringBuilder(); + // name + sb.append("digraph G { " + + "rankdir=BT;\n " + + "node[shape=plaintext, color=azure1];\n " + + "edge[color=black];\n " + + "graph[compound=true];\n\n"); + + + int clusterNodeStart = 1; + int clusterId = 0; + + StringBuilder clusterEdges = new StringBuilder(); + + for (Entry> entry : clusters.entrySet()) { + indent(sb, INDENT); + // draw cluster + sb.append("subgraph cluster"); + sb.append(++clusterId); + sb.append(" {\n"); + indent(sb, CLUSTER_INDENT); + sb.append("color=blue;\n"); + indent(sb, CLUSTER_INDENT); + sb.append("label="); + sb.append(quoteGraphviz(entry.getKey())); + sb.append(";\n\n"); + + /* to help align the clusters, add an invisible node (that could + * otherwise be used for labeling but it consumes too much space) + * used for alignment */ + indent(sb, 
CLUSTER_INDENT); + sb.append("c" + clusterId); + sb.append("[style=invis]\n"); + // add edge to the first node in the cluster + indent(sb, CLUSTER_INDENT); + sb.append("node" + (nodeCounter.get() + 1)); + sb.append(" -> "); + sb.append("c" + clusterId); + sb.append(" [style=invis];\n"); + + handleNode(sb, entry.getValue(), nodeCounter, CLUSTER_INDENT, drawSubTrees); + + int clusterNodeStop = nodeCounter.get(); + + indent(sb, INDENT); + sb.append("}\n"); + + // connect cluster only if there are at least two + if (clusterId > 1) { + indent(clusterEdges, INDENT); + clusterEdges.append("node" + clusterNodeStart); + clusterEdges.append(" -> "); + clusterEdges.append("node" + clusterNodeStop); + clusterEdges.append("[ltail=cluster"); + clusterEdges.append(clusterId - 1); + clusterEdges.append(" lhead=cluster"); + clusterEdges.append(clusterId); + clusterEdges.append("];\n"); + } + clusterNodeStart = clusterNodeStop; + } + + sb.append("\n"); + + // connecting the clusters arranges them in a weird position + // so don't + //sb.append(clusterEdges.toString()); + + // align the cluster by requiring the invisible nodes in each cluster to be of the same rank + indent(sb, INDENT); + sb.append("{ rank=same"); + for (int i = 1; i <= clusterId; i++) { + sb.append(" c" + i); + } + sb.append(" };\n}"); + + return sb.toString(); + } + + private static void handleNode(StringBuilder output, Node n, AtomicInteger nodeId, int currentIndent, boolean drawSubTrees) { + // each node has its own id + int thisId = nodeId.incrementAndGet(); + + // first determine node info + StringBuilder nodeInfo = new StringBuilder(); + nodeInfo.append("\n"); + indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); + nodeInfo.append("\n"); + indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); + nodeInfo.append("\n"); + indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); + + Map props = NodeUtils.propertiesMap(n); + Map parsed = new LinkedHashMap<>(props.size()); + List> subTrees = new ArrayList<>(); + + for 
(Entry entry : props.entrySet()) { + Object v = entry.getValue(); + + // skip null values, children and location + if (v != null && !n.children().contains(v) && !"location".equals(entry.getKey())) { + if (v instanceof Collection) { + Collection c = (Collection) v; + StringBuilder colS = new StringBuilder(); + for (Object o : c) { + if (drawSubTrees && isAnotherTree(o)) { + subTrees.add((Node) o); + } + else { + colS.append(o); + colS.append("\n"); + } + } + if (colS.length() > 0) { + parsed.put(entry.getKey(), colS.toString()); + } + } + else { + if (drawSubTrees && isAnotherTree(entry.getValue())) { + subTrees.add((Node) entry.getValue()); + } + else { + parsed.put(entry.getKey(), entry.getValue().toString()); + } + } + } + } + + // remove the field name if only one prop is specified + if (parsed.size() == 1) { + nodeInfo.append("\n"); + indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); + } + // add the name and include a border + else { + for (Entry entry : parsed.entrySet()) { + nodeInfo.append("\n"); + indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); + } + } + + nodeInfo.append("
" + + n.nodeName() + + "
"); + nodeInfo.append(escapeHtml(parsed.values().iterator().next())); + nodeInfo.append("
"); + nodeInfo.append(entry.getKey()); + nodeInfo.append(""); + nodeInfo.append(escapeHtml(entry.getValue())); + nodeInfo.append("
\n"); + + // check any subtrees + if (!subTrees.isEmpty()) { + // write nested trees + output.append("subgraph cluster_" + thisId + " {"); + output.append("style=filled; color=white; fillcolor=azure2; label=\"\";\n"); + } + + // write node info + indent(output, currentIndent); + output.append("node"); + output.append(thisId); + output.append("[label="); + output.append(quoteGraphviz(nodeInfo.toString())); + output.append("];\n"); + + if (!subTrees.isEmpty()) { + indent(output, currentIndent + INDENT); + output.append("node[shape=ellipse, color=black]\n"); + + for (Node node : subTrees) { + indent(output, currentIndent + INDENT); + drawNodeTree(output, node, "st_" + thisId + "_", 0); + } + + output.append("\n}\n"); + } + + indent(output, currentIndent + 1); + //output.append("{ rankdir=LR; rank=same; \n"); + int prevId = -1; + // handle children + for (Node c : n.children()) { + // the child will always have the next id + int childId = nodeId.get() + 1; + handleNode(output, c, nodeId, currentIndent + INDENT, drawSubTrees); + indent(output, currentIndent + 1); + output.append("node"); + output.append(childId); + output.append(" -> "); + output.append("node"); + output.append(thisId); + output.append(";\n"); + + // add invisible connection between children for ordering + if (prevId != -1) { + indent(output, currentIndent + 1); + output.append("node"); + output.append(prevId); + output.append(" -> "); + output.append("node"); + output.append(childId); + output.append(";\n"); + } + prevId = childId; + } + indent(output, currentIndent); + //output.append("}\n"); + } + + private static void drawNodeTree(StringBuilder sb, Node node, String prefix, int counter) { + String nodeName = prefix + counter; + prefix = nodeName; + + // draw node + drawNode(sb, node, nodeName); + // then draw all children nodes and connections between them to be on the same level + sb.append("{ rankdir=LR; rank=same;\n"); + int prevId = -1; + int saveId = counter; + for (Node child : 
node.children()) { + int currId = ++counter; + drawNode(sb, child, prefix + currId); + if (prevId > -1) { + sb.append(prefix + prevId + " -> " + prefix + currId + " [style=invis];\n"); + } + prevId = currId; + } + sb.append("}\n"); + + // now draw connections to the parent + for (int i = saveId; i < counter; i++) { + sb.append(prefix + (i + 1) + " -> " + nodeName + ";\n"); + } + + // draw the child + counter = saveId; + for (Node child : node.children()) { + drawNodeTree(sb, child, prefix, ++counter); + } + } + + private static void drawNode(StringBuilder sb, Node node, String nodeName) { + if (node.children().isEmpty()) { + sb.append(nodeName + " [label=\"" + node.toString() + "\"];\n"); + } + else { + sb.append(nodeName + " [label=\"" + node.nodeName() + "\"];\n"); + } + } + + private static boolean isAnotherTree(Object value) { + if (value instanceof Node) { + Node n = (Node) value; + // create a subgraph + if (n.children().size() > 0) { + return true; + } + } + return false; + } + + private static String escapeHtml(Object value) { + return String.valueOf(value) + .replace("&", "&") + .replace("\"", """) + .replace("'", "'") + .replace("<", "<") + .replace(">", ">") + .replace("\n", "
"); + } + + private static String quoteGraphviz(String value) { + if (value.contains("<")) { + return "<" + value + ">"; + } + + return "\"" + value + "\""; + } + + private static String escapeGraphviz(String value) { + return value + .replace("<", "\\<") + .replace(">", "\\>") + .replace("\"", "\\\""); + } + + private static void indent(StringBuilder sb, int indent) { + for (int i = 0; i < indent; i++) { + sb.append(" "); + } + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/util/ReflectionUtils.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/util/ReflectionUtils.java new file mode 100644 index 00000000000..51a2a4d45d8 --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/util/ReflectionUtils.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.util; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import java.lang.reflect.GenericArrayType; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.lang.reflect.WildcardType; +import java.util.Arrays; + +public class ReflectionUtils { + + @SuppressWarnings("unchecked") + public static Class detectType(Type t) { + if (t instanceof Class) { + return (Class) t; + } + if (t instanceof ParameterizedType) { + Type[] typeArguments = ((ParameterizedType) t).getActualTypeArguments(); + if (typeArguments.length != 1) { + throw new SqlIllegalArgumentException("Unexpected number of type arguments %s for %s", Arrays.toString(typeArguments), t); + } + + return detectType(typeArguments[0]); + } + if (t instanceof WildcardType) { + WildcardType wt = (WildcardType) t; + if (wt.getLowerBounds().length == 1) { + return detectType(wt.getLowerBounds()[0]); + } + Type[] upperBounds = wt.getUpperBounds(); + + if (upperBounds.length != 1) { + throw new SqlIllegalArgumentException("Unexpected number of upper bounds %s for %s", Arrays.toString(upperBounds), t); + } + + return detectType(upperBounds[0]); + } + if (t instanceof GenericArrayType) { + return detectType(((GenericArrayType) t).getGenericComponentType()); + } + + throw new SqlIllegalArgumentException("Unrecognized type %s", t); + } + + @SuppressWarnings("unchecked") + public static Class detectSuperTypeForRuleLike(Class c) { + Class clazz = c; + for (Type type = clazz.getGenericSuperclass(); clazz != Object.class; type = clazz.getGenericSuperclass()) { + if (type instanceof ParameterizedType) { + Type[] typeArguments = ((ParameterizedType) type).getActualTypeArguments(); + if (typeArguments.length != 2 && typeArguments.length != 1) { + throw new SqlIllegalArgumentException("Unexpected number of type arguments %s for %s", Arrays.toString(typeArguments), + c); + } + + return (Class) typeArguments[0]; + } + clazz = 
clazz.getSuperclass(); + } + throw new SqlIllegalArgumentException("Unexpected class structure for class %s", c); + } + + // remove packaging from the name - strategy used for naming rules by default + public static String ruleLikeNaming(Class c) { + String className = c.getName(); + int parentPackage = className.lastIndexOf("."); + if (parentPackage > 0) { + int grandParentPackage = className.substring(0, parentPackage).lastIndexOf("."); + return (grandParentPackage > 0 ? className.substring(grandParentPackage + 1) : className.substring(parentPackage)); + } + else { + return className; + } + } +} \ No newline at end of file diff --git a/sql/server/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java b/sql/server/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java new file mode 100644 index 00000000000..4b70bd2f4cc --- /dev/null +++ b/sql/server/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java @@ -0,0 +1,206 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.util; + +import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.search.builder.SearchSourceBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.regex.Pattern; + +import static java.util.stream.Collectors.joining; +import static java.util.stream.Collectors.toList; + +public abstract class StringUtils { + + public static final String EMPTY = ""; + public static final String NEW_LINE = "\n"; + + private static final int TO_STRING_LIMIT = 52; + + public static String limitedToString(Object o) { + String s = String.valueOf(o); + return s.length() > TO_STRING_LIMIT ? s.substring(0, TO_STRING_LIMIT).concat("...") : s; + } + + public static String limitedToString(Collection c) { + Iterator it = c.iterator(); + if (!it.hasNext()) + return "[]"; + + // ..] + StringBuilder sb = new StringBuilder(TO_STRING_LIMIT + 4); + sb.append('['); + for (;;) { + E e = it.next(); + String next = e == c ? 
"(this Collection)" : String.valueOf(e); + if (next.length() + sb.length() > TO_STRING_LIMIT) { + sb.append(next.substring(0, Math.max(0, TO_STRING_LIMIT - sb.length()))); + sb.append('.').append('.').append(']'); + return sb.toString(); + } + else { + sb.append(next); + } + if (!it.hasNext()) + return sb.append(']').toString(); + sb.append(',').append(' '); + } + } + + public static String concatWithDot(List strings) { + if (strings == null || strings.isEmpty()) { + return EMPTY; + } + return strings.stream().collect(joining(".")); + } + + //CamelCase to camel_case + public static String camelCaseToUnderscore(String string) { + if (!Strings.hasText(string)) { + return EMPTY; + } + StringBuilder sb = new StringBuilder(); + String s = string.trim(); + + boolean previousCharWasUp = false; + for (int i = 0; i < s.length(); i++) { + char ch = s.charAt(i); + if (Character.isAlphabetic(ch)) { + if (Character.isUpperCase(ch)) { + if (i > 0 && !previousCharWasUp) { + sb.append("_"); + } + previousCharWasUp = true; + } + else { + previousCharWasUp = (ch == '_'); + } + } + else { + previousCharWasUp = true; + } + sb.append(ch); + } + return sb.toString().toUpperCase(Locale.ROOT); + } + + public static String nullAsEmpty(String string) { + return string == null ? EMPTY : string; + } + + // % -> * + // _ -> . + // consider \ as an escaping char + public static String sqlToJavaPattern(CharSequence sqlPattern, char escapeChar, boolean shouldEscape) { + StringBuilder regex = new StringBuilder(sqlPattern.length() + 4); + + boolean escaped = false; + regex.append('^'); + for (int i = 0; i < sqlPattern.length(); i++) { + char curr = sqlPattern.charAt(i); + if (shouldEscape && !escaped && (curr == escapeChar)) { + escaped = true; + regex.append(curr); + } + else { + switch (curr) { + case '%': + regex.append(escaped ? "%" : ".*"); + escaped = false; + break; + case '_': + regex.append(escaped ? 
"_" : "."); + escaped = false; + break; + default: + // escape special regex characters + switch (curr) { + case '\\': + case '^': + case '$': + case '.': + case '*': + case '?': + case '+': + case '|': + case '(': + case ')': + case '[': + case ']': + case '{': + case '}': + if (!escaped) { + regex.append('\\'); + } + } + + regex.append(curr); + escaped = false; + } + } + } + regex.append('$'); + + return regex.toString(); + } + + //TODO: likely this needs to be changed to probably its own indexNameResolver + public static String jdbcToEsPattern(String sqlPattern) { + if (Strings.hasText(sqlPattern)) { + // the index might include a type - since we only support only support one type per index, remove the type + int dotIndex = sqlPattern.indexOf("."); + if (dotIndex >= 0) { + sqlPattern = sqlPattern.substring(0, dotIndex); + } + return sqlPattern.replace('%', '*').replace('_', '*'); + } + return EMPTY; + } + + public static String sqlToJavaPattern(CharSequence sqlPattern) { + return sqlToJavaPattern(sqlPattern, '\\', true); + } + + public static Pattern likeRegex(String likePattern) { + return Pattern.compile(sqlToJavaPattern(likePattern)); + } + + public static String toString(SearchSourceBuilder source) { + try (XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true)) { + source.toXContent(builder, ToXContent.EMPTY_PARAMS); + return builder.string(); + } catch (IOException e) { + throw new RuntimeException("error rendering", e); + } + } + + public static List findSimilar(String match, Iterable potentialMatches) { + LevensteinDistance ld = new LevensteinDistance(); + List> scoredMatches = new ArrayList<>(); + for (String potentialMatch : potentialMatches) { + float distance = ld.getDistance(match, potentialMatch); + if (distance >= 0.5f) { + scoredMatches.add(new Tuple<>(distance, potentialMatch)); + } + } + CollectionUtil.timSort(scoredMatches, (a,b) -> b.v1().compareTo(a.v1())); + return scoredMatches.stream() + .map(a -> 
a.v2()) + .collect(toList()); + } +} \ No newline at end of file diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/SqlTestUtils.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/SqlTestUtils.java new file mode 100644 index 00000000000..ff6f0db3f6e --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/SqlTestUtils.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; + +import java.util.Random; + +public final class SqlTestUtils { + + private SqlTestUtils() { + + } + + /** + * Returns a random QueryBuilder or null + */ + public static QueryBuilder randomFilterOrNull(Random random) { + final QueryBuilder randomFilter; + if (random.nextBoolean()) { + randomFilter = randomFilter(random); + } else { + randomFilter = null; + } + return randomFilter; + } + + /** + * Returns a random QueryBuilder + */ + public static QueryBuilder randomFilter(Random random) { + return new RangeQueryBuilder(RandomStrings.randomAsciiLettersOfLength(random, 10)) + .gt(random.nextInt()); + } + +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzerTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzerTests.java new file mode 100644 index 00000000000..7fe1698a066 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzerTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.analysis.analyzer.PreAnalyzer.PreAnalysis; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.is; + +public class PreAnalyzerTests extends ESTestCase { + + private SqlParser parser = new SqlParser(); + private PreAnalyzer preAnalyzer = new PreAnalyzer(); + + public void testBasicIndex() { + LogicalPlan plan = parser.createStatement("SELECT * FROM index"); + PreAnalysis result = preAnalyzer.preAnalyze(plan); + assertThat(plan.preAnalyzed(), is(true)); + assertThat(result.indices, contains("index")); + } + + public void testQuotedIndex() { + LogicalPlan plan = parser.createStatement("SELECT * FROM \"aaa\""); + PreAnalysis result = preAnalyzer.preAnalyze(plan); + assertThat(plan.preAnalyzed(), is(true)); + assertThat(result.indices, contains("aaa")); + } + + public void testComplicatedQuery() { + LogicalPlan plan = parser.createStatement("SELECT MAX(a) FROM aaa WHERE d > 10 GROUP BY b HAVING AVG(c) ORDER BY e ASC"); + PreAnalysis result = preAnalyzer.preAnalyze(plan); + assertThat(plan.preAnalyzed(), is(true)); + assertThat(result.indices, contains("aaa")); + } +} \ No newline at end of file diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java new file mode 100644 index 00000000000..da34477f924 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.sql.analysis.AnalysisException; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.GetIndexResult; +import org.elasticsearch.xpack.sql.expression.function.DefaultFunctionRegistry; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.session.TestingSqlSession; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.junit.After; +import org.junit.Before; + +import java.util.LinkedHashMap; +import java.util.Map; + +@TestLogging("org.elasticsearch.xpack.sql:TRACE") +public class VerifierErrorMessagesTests extends ESTestCase { + + private SqlParser parser; + private GetIndexResult getIndexResult; + private FunctionRegistry functionRegistry; + private Analyzer analyzer; + + public VerifierErrorMessagesTests() { + parser = new SqlParser(); + functionRegistry = new DefaultFunctionRegistry(); + + Map mapping = new LinkedHashMap<>(); + mapping.put("bool", DataTypes.BOOLEAN); + mapping.put("int", DataTypes.INTEGER); + mapping.put("text", DataTypes.TEXT); + mapping.put("keyword", DataTypes.KEYWORD); + EsIndex test = new EsIndex("test", mapping); + getIndexResult = GetIndexResult.valid(test); + analyzer = new Analyzer(functionRegistry); + } + + @Before + public void setupContext() { + TestingSqlSession.setCurrentContext(TestingSqlSession.ctx(getIndexResult)); + } + + @After + public void disposeContext() { + TestingSqlSession.removeCurrentContext(); + } + 
+ private String verify(String sql) { + AnalysisException e = expectThrows(AnalysisException.class, () -> analyzer.analyze(parser.createStatement(sql), true)); + assertTrue(e.getMessage().startsWith("Found ")); + String header = "Found 1 problem(s)\nline "; + return e.getMessage().substring(header.length()); + } + + public void testMissingIndex() { + TestingSqlSession.removeCurrentContext(); + TestingSqlSession.setCurrentContext(TestingSqlSession.ctx(GetIndexResult.notFound("missing"))); + assertEquals("1:17: Unknown index [missing]", verify("SELECT foo FROM missing")); + } + + public void testMissingColumn() { + assertEquals("1:8: Unknown column [xxx]", verify("SELECT xxx FROM test")); + } + + public void testMisspelledColumn() { + assertEquals("1:8: Unknown column [txt], did you mean [text]?", verify("SELECT txt FROM test")); + } + + public void testFunctionOverMissingField() { + assertEquals("1:12: Unknown column [xxx]", verify("SELECT ABS(xxx) FROM test")); + } + + public void testMissingFunction() { + assertEquals("1:8: Unknown function [ZAZ]", verify("SELECT ZAZ(bool) FROM test")); + } + + public void testMisspelledFunction() { + assertEquals("1:8: Unknown function [COONT], did you mean [COUNT]?", verify("SELECT COONT(bool) FROM test")); + } + + public void testMissingColumnInGroupBy() { + assertEquals("1:41: Unknown column [xxx]", verify("SELECT * FROM test GROUP BY DAY_OF_YEAR(xxx)")); + } + + public void testFilterOnUnknownColumn() { + assertEquals("1:26: Unknown column [xxx]", verify("SELECT * FROM test WHERE xxx = 1")); + } + + public void testMissingColumnInOrderBy() { + // xxx offset is that of the order by field + assertEquals("1:29: Unknown column [xxx]", verify("SELECT * FROM test ORDER BY xxx")); + } + + public void testMissingColumnFunctionInOrderBy() { + // xxx offset is that of the order by field + assertEquals("1:41: Unknown column [xxx]", verify("SELECT * FROM test ORDER BY DAY_oF_YEAR(xxx)")); + } + + + public void testMultipleColumns() { + 
// xxx offset is that of the order by field + assertEquals("1:43: Unknown column [xxx]\nline 1:8: Unknown column [xxx]", + verify("SELECT xxx FROM test GROUP BY DAY_oF_YEAR(xxx)")); + } + + // GROUP BY + public void testGroupBySelectNonGrouped() { + assertEquals("1:8: Cannot use non-grouped column [text], expected [int]", + verify("SELECT text, int FROM test GROUP BY int")); + } + + public void testGroupByOrderByNonGrouped() { + assertEquals("1:50: Cannot order by non-grouped column [bool], expected [text]", + verify("SELECT MAX(int) FROM test GROUP BY text ORDER BY bool")); + } + + public void testGroupByOrderByScalarOverNonGrouped() { + assertEquals("1:50: Cannot order by non-grouped column [bool], expected [text]", + verify("SELECT MAX(int) FROM test GROUP BY text ORDER BY ABS(bool)")); + } + + public void testGroupByHavingNonGrouped() { + assertEquals("1:48: Cannot filter by non-grouped column [int], expected [text]", + verify("SELECT AVG(int) FROM test GROUP BY text HAVING int > 10")); + } + + public void testGroupByAggregate() { + assertEquals("1:36: Cannot use an aggregate [AVG] for grouping", + verify("SELECT AVG(int) FROM test GROUP BY AVG(int)")); + } + + public void testGroupByScalarFunctionWithAggOnTarget() { + assertEquals("1:31: Cannot use an aggregate [AVG] for grouping", + verify("SELECT int FROM test GROUP BY AVG(int) + 2")); + } +} \ No newline at end of file diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java new file mode 100644 index 00000000000..41ba50599cb --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.mockito.ArgumentCaptor; + +import java.util.Collections; + +import static org.elasticsearch.action.support.PlainActionFuture.newFuture; +import static org.hamcrest.Matchers.hasSize; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; + +public class CursorTests extends ESTestCase { + + public void testEmptyCursorClearCursor() { + Client clientMock = mock(Client.class); + Cursor cursor = Cursor.EMPTY; + PlainActionFuture future = newFuture(); + cursor.clear(Configuration.DEFAULT, clientMock, future); + assertFalse(future.actionGet()); + verifyZeroInteractions(clientMock); + } + + @SuppressWarnings("unchecked") + public void testScrollCursorClearCursor() { + Client clientMock = mock(Client.class); + ActionListener listenerMock = mock(ActionListener.class); + String cursorString = randomAlphaOfLength(10); + Cursor cursor = new ScrollCursor(cursorString, Collections.emptyList(), randomInt()); + + cursor.clear(Configuration.DEFAULT, clientMock, listenerMock); + + ArgumentCaptor request = ArgumentCaptor.forClass(ClearScrollRequest.class); + verify(clientMock).clearScroll(request.capture(), any(ActionListener.class)); + assertEquals(Collections.singletonList(cursorString), request.getValue().getScrollIds()); + verifyZeroInteractions(listenerMock); + } + +} diff --git 
a/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursorTests.java new file mode 100644 index 00000000000..4982e9fa953 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursorTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.execution.search.extractor.ConstantExtractorTests; +import org.elasticsearch.xpack.sql.execution.search.extractor.DocValueExtractorTests; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractors; +import org.elasticsearch.xpack.sql.execution.search.extractor.InnerHitExtractorTests; +import org.elasticsearch.xpack.sql.execution.search.extractor.ProcessingHitExtractorTests; +import org.elasticsearch.xpack.sql.execution.search.extractor.SourceExtractorTests; +import org.elasticsearch.xpack.sql.session.Cursor; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +public class ScrollCursorTests extends AbstractWireSerializingTestCase { + public static ScrollCursor randomScrollCursor() { + int extractorsSize = between(1, 20); + List extractors = new ArrayList<>(extractorsSize); + for (int i = 0; i < extractorsSize; i++) { + extractors.add(randomHitExtractor(0)); + } + return new 
ScrollCursor(randomAlphaOfLength(5), extractors, randomIntBetween(10, 1024)); + } + + static HitExtractor randomHitExtractor(int depth) { + List> options = new ArrayList<>(); + if (depth < 5) { + options.add(() -> ProcessingHitExtractorTests.randomProcessingHitExtractor(depth)); + } + options.add(ConstantExtractorTests::randomConstantExtractor); + options.add(DocValueExtractorTests::randomDocValueExtractor); + options.add(InnerHitExtractorTests::randomInnerHitExtractor); + options.add(SourceExtractorTests::randomSourceExtractor); + return randomFrom(options).get(); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(HitExtractors.getNamedWriteables()); + } + + @Override + protected ScrollCursor createTestInstance() { + return randomScrollCursor(); + } + + @Override + protected Reader instanceReader() { + return ScrollCursor::new; + } + + @Override + protected ScrollCursor copyInstance(ScrollCursor instance, Version version) throws IOException { + /* Randomly chose between internal protocol round trip and String based + * round trips used to toXContent. */ + if (randomBoolean()) { + return super.copyInstance(instance, version); + } + return (ScrollCursor)Cursor.decodeFromString(Cursor.encodeToString(version, instance)); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java new file mode 100644 index 00000000000..bd3b37e2fcc --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.ConstantScoreQueryBuilder; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.Operator; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByColumnAgg; +import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; +import org.elasticsearch.xpack.sql.querydsl.query.MatchQuery; +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; + +public class SourceGeneratorTests extends ESTestCase { + + public void testNoQueryNoFilter() { + QueryContainer container = new QueryContainer(); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); + assertNull(sourceBuilder.query()); + } + + public void testQueryNoFilter() { + QueryContainer container = new QueryContainer().with(new MatchQuery(Location.EMPTY, "foo", "bar")); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); + assertEquals(new MatchQueryBuilder("foo", "bar").operator(Operator.AND), sourceBuilder.query()); + } + + public void testNoQueryFilter() { + QueryContainer container = new QueryContainer(); + QueryBuilder filter = new MatchQueryBuilder("bar", "baz"); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, filter, randomIntBetween(1, 10)); + assertEquals(new ConstantScoreQueryBuilder(new 
MatchQueryBuilder("bar", "baz")), sourceBuilder.query()); + } + + public void testQueryFilter() { + QueryContainer container = new QueryContainer().with(new MatchQuery(Location.EMPTY, "foo", "bar")); + QueryBuilder filter = new MatchQueryBuilder("bar", "baz"); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, filter, randomIntBetween(1, 10)); + assertEquals(new BoolQueryBuilder().must(new MatchQueryBuilder("foo", "bar").operator(Operator.AND)) + .filter(new MatchQueryBuilder("bar", "baz")), sourceBuilder.query()); + } + + public void testLimit() { + Aggs aggs = new Aggs(emptyList(), emptyList(), singletonList(new GroupByColumnAgg("1", "", "field"))); + QueryContainer container = new QueryContainer().withLimit(10).with(aggs); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); + Builder aggBuilder = sourceBuilder.aggregations(); + assertEquals(1, aggBuilder.count()); + TermsAggregationBuilder termsBuilder = (TermsAggregationBuilder) aggBuilder.getAggregatorFactories().get(0); + assertEquals(10, termsBuilder.size()); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ConstantExtractorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ConstantExtractorTests.java new file mode 100644 index 00000000000..7abd45c033e --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ConstantExtractorTests.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.function.Supplier; + +public class ConstantExtractorTests extends AbstractWireSerializingTestCase { + public static ConstantExtractor randomConstantExtractor() { + return new ConstantExtractor(randomValidConstant()); + } + + private static Object randomValidConstant() { + @SuppressWarnings("unchecked") + Supplier valueSupplier = randomFrom( + () -> randomInt(), + () -> randomDouble(), + () -> randomAlphaOfLengthBetween(1, 140)); + return valueSupplier.get(); + } + + @Override + protected ConstantExtractor createTestInstance() { + return randomConstantExtractor(); + } + + @Override + protected Reader instanceReader() { + return ConstantExtractor::new; + } + + @Override + protected ConstantExtractor mutateInstance(ConstantExtractor instance) throws IOException { + return new ConstantExtractor(instance.get(null) + "mutated"); + } + + public void testGet() { + Object expected = randomValidConstant(); + int times = between(1, 1000); + for (int i = 0; i < times; i++) { + assertSame(expected, new ConstantExtractor(expected).get(null)); + } + } + + public void testToString() { + assertEquals("^foo", new ConstantExtractor("foo").toString()); + assertEquals("^42", new ConstantExtractor("42").toString()); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/DocValueExtractorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/DocValueExtractorTests.java new file mode 100644 index 00000000000..0a1a3e430e9 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/DocValueExtractorTests.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static java.util.Collections.singletonMap; + +public class DocValueExtractorTests extends AbstractWireSerializingTestCase { + public static DocValueExtractor randomDocValueExtractor() { + return new DocValueExtractor(randomAlphaOfLength(5)); + } + + @Override + protected DocValueExtractor createTestInstance() { + return randomDocValueExtractor(); + } + + @Override + protected Reader instanceReader() { + return DocValueExtractor::new; + } + + @Override + protected DocValueExtractor mutateInstance(DocValueExtractor instance) throws IOException { + return new DocValueExtractor(instance.toString().substring(1) + "mutated"); + } + + public void testGet() { + String fieldName = randomAlphaOfLength(5); + DocValueExtractor extractor = new DocValueExtractor(fieldName); + + int times = between(1, 1000); + for (int i = 0; i < times; i++) { + List documentFieldValues = new ArrayList<>(); + documentFieldValues.add(new Object()); + if (randomBoolean()) { + documentFieldValues.add(new Object()); + } + SearchHit hit = new SearchHit(1); + DocumentField field = new DocumentField(fieldName, documentFieldValues); + hit.fields(singletonMap(fieldName, field)); + assertEquals(documentFieldValues.get(0), extractor.get(hit)); + } + } + + public void testToString() { + assertEquals("%incoming_links", new DocValueExtractor("incoming_links").toString()); + } +} diff --git 
a/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/InnerHitExtractorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/InnerHitExtractorTests.java new file mode 100644 index 00000000000..b05cb8710fd --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/InnerHitExtractorTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; + +public class InnerHitExtractorTests extends AbstractWireSerializingTestCase { + public static InnerHitExtractor randomInnerHitExtractor() { + return new InnerHitExtractor(randomAlphaOfLength(5), randomAlphaOfLength(5), randomBoolean()); + } + + @Override + protected InnerHitExtractor createTestInstance() { + return randomInnerHitExtractor(); + } + + @Override + protected Reader instanceReader() { + return InnerHitExtractor::new; + } + + @Override + protected InnerHitExtractor mutateInstance(InnerHitExtractor instance) throws IOException { + return new InnerHitExtractor(instance.hitName() + "mutated", instance.fieldName(), true); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/3082") + public void testGet() throws IOException { + fail("implement after we're sure of the InnerHitExtractor's implementation"); + } + + public void testToString() { + assertEquals("field@hit", new InnerHitExtractor("hit", "field", true).toString()); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ProcessingHitExtractorTests.java
b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ProcessingHitExtractorTests.java new file mode 100644 index 00000000000..cda06186c62 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ProcessingHitExtractorTests.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.CastProcessorTests; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessorTests; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathFunctionProcessorTests; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessorTests; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.HitExtractorProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.MatrixFieldProcessorTests; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import 
java.util.function.Supplier; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; + +public class ProcessingHitExtractorTests extends AbstractWireSerializingTestCase { + public static ComputingHitExtractor randomProcessingHitExtractor(int depth) { + return new ComputingHitExtractor(randomProcessor(depth)); + } + + public static Processor randomProcessor(int depth) { + List> options = new ArrayList<>(); + if (depth < 5) { + options.add(() -> ChainingProcessorTests.randomComposeProcessor(depth)); + } + options.add(CastProcessorTests::randomCastProcessor); + options.add(DateTimeProcessorTests::randomDateTimeProcessor); + options.add(MathFunctionProcessorTests::randomMathFunctionProcessor); + options.add(MatrixFieldProcessorTests::randomMatrixFieldProcessor); + return randomFrom(options).get(); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(HitExtractors.getNamedWriteables()); + } + + @Override + protected ComputingHitExtractor createTestInstance() { + return randomProcessingHitExtractor(0); + } + + @Override + protected Reader instanceReader() { + return ComputingHitExtractor::new; + } + + @Override + protected ComputingHitExtractor mutateInstance(ComputingHitExtractor instance) throws IOException { + return new ComputingHitExtractor( + randomValueOtherThan(instance.processor(), () -> randomProcessor(0))); + } + + public void testGet() { + String fieldName = randomAlphaOfLength(5); + ChainingProcessor extractor = new ChainingProcessor(new HitExtractorProcessor(new DocValueExtractor(fieldName)), new MathProcessor(MathOperation.LOG)); + + int times = between(1, 1000); + for (int i = 0; i < times; i++) { + double value = randomDouble(); + double expected = Math.log(value); + SearchHit hit = new SearchHit(1); + DocumentField field = new DocumentField(fieldName, singletonList(value)); + hit.fields(singletonMap(fieldName, field)); + assertEquals(expected,
extractor.process(hit)); + } + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/SourceExtractorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/SourceExtractorTests.java new file mode 100644 index 00000000000..2341c86e2b9 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/SourceExtractorTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.function.Supplier; + +public class SourceExtractorTests extends AbstractWireSerializingTestCase { + public static SourceExtractor randomSourceExtractor() { + return new SourceExtractor(randomAlphaOfLength(5)); + } + + @Override + protected SourceExtractor createTestInstance() { + return randomSourceExtractor(); + } + + @Override + protected Reader instanceReader() { + return SourceExtractor::new; + } + + @Override + protected SourceExtractor mutateInstance(SourceExtractor instance) throws IOException { + return new SourceExtractor(instance.toString().substring(1) + "mutated"); + } + + public void testGet() throws IOException { + String fieldName = randomAlphaOfLength(5); + SourceExtractor extractor = new SourceExtractor(fieldName); + + int times = between(1, 1000); + for (int i = 0; i < times; i++) { + /* We use values 
that are parsed from json as "equal" to make the + * test simpler. */ + @SuppressWarnings("unchecked") + Supplier valueSupplier = randomFrom( + () -> randomAlphaOfLength(5), + () -> randomInt(), + () -> randomDouble()); + Object value = valueSupplier.get(); + SearchHit hit = new SearchHit(1); + XContentBuilder source = JsonXContent.contentBuilder(); + source.startObject(); { + source.field(fieldName, value); + if (randomBoolean()) { + source.field(fieldName + "_random_junk", value + "_random_junk"); + } + } + source.endObject(); + BytesReference sourceRef = source.bytes(); + hit.sourceRef(sourceRef); + assertEquals(value, extractor.get(hit)); + } + } + + public void testToString() { + assertEquals("#name", new SourceExtractor("name").toString()); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/AttributeMapTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/AttributeMapTests.java new file mode 100644 index 00000000000..fa85ca9cbff --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/AttributeMapTests.java @@ -0,0 +1,181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import static java.util.stream.Collectors.toList; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +public class AttributeMapTests extends ESTestCase { + + private static Attribute a(String name) { + return new UnresolvedAttribute(Location.EMPTY, name); + } + + private static AttributeMap threeMap() { + Map map = new LinkedHashMap<>(); + map.put(a("one"), "one"); + map.put(a("two"), "two"); + map.put(a("three"), "three"); + + return new AttributeMap<>(map); + } + + public void testEmptyConstructor() { + AttributeMap m = new AttributeMap<>(); + assertThat(m.size(), is(0)); + assertThat(m.isEmpty(), is(true)); + } + + public void testMapConstructor() { + Map map = new LinkedHashMap<>(); + map.put(a("one"), "one"); + map.put(a("two"), "two"); + map.put(a("three"), "three"); + + AttributeMap m = new AttributeMap<>(map); + assertThat(m.size(), is(3)); + assertThat(m.isEmpty(), is(false)); + + Attribute one = m.keySet().iterator().next(); + assertThat(m.containsKey(one), is(true)); + assertThat(m.containsKey(a("one")), is(false)); + assertThat(m.containsValue("one"), is(true)); + assertThat(m.containsValue("on"), is(false)); + assertThat(m.attributeNames(), contains("one", "two", "three")); + assertThat(m.values(), contains(map.values().toArray())); + + // defensive copying + map.put(a("four"), "four"); + assertThat(m.size(), is(3)); + assertThat(m.isEmpty(), is(false)); + } + + public void testSingleItemConstructor() { + Attribute one = a("one"); + AttributeMap m = new 
AttributeMap<>(one, "one"); + assertThat(m.size(), is(1)); + assertThat(m.isEmpty(), is(false)); + + assertThat(m.containsKey(one), is(true)); + assertThat(m.containsKey(a("one")), is(false)); + assertThat(m.containsValue("one"), is(true)); + assertThat(m.containsValue("on"), is(false)); + } + + public void testSubstract() { + AttributeMap m = threeMap(); + AttributeMap mo = new AttributeMap<>(m.keySet().iterator().next(), "one"); + AttributeMap empty = new AttributeMap<>(); + + assertThat(m.substract(empty), is(m)); + assertThat(m.substract(m), is(empty)); + assertThat(mo.substract(m), is(empty)); + + AttributeMap substract = m.substract(mo); + + assertThat(substract.size(), is(2)); + assertThat(substract.attributeNames(), contains("two", "three")); + } + + public void testIntersect() { + AttributeMap m = threeMap(); + AttributeMap mo = new AttributeMap<>(m.keySet().iterator().next(), "one"); + AttributeMap empty = new AttributeMap<>(); + + assertThat(m.intersect(empty), is(empty)); + assertThat(m.intersect(m), is(m)); + assertThat(mo.intersect(m), is(mo)); + } + + public void testSubsetOf() { + AttributeMap m = threeMap(); + AttributeMap mo = new AttributeMap<>(m.keySet().iterator().next(), "one"); + AttributeMap empty = new AttributeMap<>(); + + assertThat(m.subsetOf(empty), is(false)); + assertThat(m.subsetOf(m), is(true)); + assertThat(mo.subsetOf(m), is(true)); + + assertThat(empty.subsetOf(m), is(true)); + assertThat(mo.subsetOf(m), is(true)); + } + + public void testKeySet() { + Attribute one = a("one"); + Attribute two = a("two"); + Attribute three = a("three"); + + Map map = new LinkedHashMap<>(); + map.put(one, "one"); + map.put(two, "two"); + map.put(three, "three"); + + Set keySet = new AttributeMap<>(map).keySet(); + assertThat(keySet, contains(one, two, three)); + + // toObject + Object[] array = keySet.toArray(); + + assertThat(array, arrayWithSize(3)); + assertThat(array, arrayContaining(one, two, three)); + } + + public void testValues() { + 
AttributeMap m = threeMap(); + Collection values = m.values(); + + assertThat(values, hasSize(3)); + assertThat(values, contains("one", "two", "three")); + } + + public void testEntrySet() { + Attribute one = a("one"); + Attribute two = a("two"); + Attribute three = a("three"); + + Map map = new LinkedHashMap<>(); + map.put(one, "one"); + map.put(two, "two"); + map.put(three, "three"); + + Set> set = new AttributeMap<>(map).entrySet(); + + assertThat(set, hasSize(3)); + + List keys = set.stream().map(Map.Entry::getKey).collect(toList()); + List values = set.stream().map(Map.Entry::getValue).collect(toList()); + + assertThat(keys, hasSize(3)); + + + assertThat(values, hasSize(3)); + assertThat(values, contains("one", "two", "three")); + } + + public void testForEach() { + AttributeMap m = threeMap(); + + Map collect = new LinkedHashMap<>(); + m.forEach(collect::put); + AttributeMap copy = new AttributeMap<>(collect); + + assertThat(m, is(copy)); + } +} \ No newline at end of file diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/QuotingTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/QuotingTests.java new file mode 100644 index 00000000000..9026041b6da --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/QuotingTests.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.parser.ParsingException; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; + + +public class QuotingTests extends ESTestCase { + + private static UnresolvedAttribute from(String s) { + return new UnresolvedAttribute(Location.EMPTY, s); + } + + public void testBasicString() { + String s = "someField"; + UnresolvedAttribute ua = from(s); + assertThat(ua.name(), equalTo(s)); + assertThat(ua.qualifiedName(), equalTo(s)); + assertThat(ua.qualifier(), nullValue()); + } + + public void testSingleQuoteLiteral() { + String name = "@timestamp"; + Expression exp = new SqlParser().createExpression("'" + name + "'"); + assertThat(exp, instanceOf(Literal.class)); + Literal l = (Literal) exp; + assertThat(l.value(), equalTo(name)); + } + + public void testMultiSingleQuotedLiteral() { + String first = "bucket"; + String second = "head"; + Expression exp = new SqlParser().createExpression(String.format(Locale.ROOT, "'%s' '%s'", first, second)); + assertThat(exp, instanceOf(Literal.class)); + Literal l = (Literal) exp; + assertThat(l.value(), equalTo(first + second)); + } + + public void testQuotedAttribute() { + String quote = "\""; + String name = "@timestamp"; + Expression exp = new SqlParser().createExpression(quote + name + quote); + assertThat(exp, instanceOf(UnresolvedAttribute.class)); + UnresolvedAttribute ua = (UnresolvedAttribute) exp; + assertThat(ua.name(), equalTo(name)); + 
assertThat(ua.qualifiedName(), equalTo(name)); + assertThat(ua.qualifier(), nullValue()); + } + + public void testBackQuotedAttribute() { + String quote = "`"; + String name = "@timestamp"; + ParsingException ex = expectThrows(ParsingException.class, () -> new SqlParser().createExpression(quote + name + quote)); + assertThat(ex.getMessage(), equalTo("line 1:1: backquoted indetifiers not supported; please use double quotes instead")); + } + + public void testQuotedAttributeAndQualifier() { + String quote = "\""; + String qualifier = "table"; + String name = "@timestamp"; + Expression exp = new SqlParser().createExpression(quote + qualifier + quote + "." + quote + name + quote); + assertThat(exp, instanceOf(UnresolvedAttribute.class)); + UnresolvedAttribute ua = (UnresolvedAttribute) exp; + assertThat(ua.name(), equalTo(name)); + assertThat(ua.qualifiedName(), equalTo(qualifier + "." + name)); + assertThat(ua.qualifier(), equalTo(qualifier)); + } + + + public void testBackQuotedAttributeAndQualifier() { + String quote = "`"; + String qualifier = "table"; + String name = "@timestamp"; + ParsingException ex = expectThrows(ParsingException.class, () -> new SqlParser().createExpression(quote + qualifier + quote + "." 
+ quote + name + quote)); + assertThat(ex.getMessage(), equalTo("line 1:1: backquoted indetifiers not supported; please use double quotes instead")); + } + + public void testGreedyQuoting() { + LogicalPlan plan = new SqlParser().createStatement("SELECT * FROM \"table\" ORDER BY \"field\""); + final List plans = new ArrayList<>(); + plan.forEachDown(plans::add); + assertThat(plans, hasSize(4)); + assertThat(plans.get(1), instanceOf(OrderBy.class)); + } +} \ No newline at end of file diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessorTests.java new file mode 100644 index 00000000000..831978705d0 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessorTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.type.DataTypeConversion.Conversion; + +import java.io.IOException; + +public class CastProcessorTests extends AbstractWireSerializingTestCase { + public static CastProcessor randomCastProcessor() { + return new CastProcessor(randomFrom(Conversion.values())); + } + + @Override + protected CastProcessor createTestInstance() { + return randomCastProcessor(); + } + + @Override + protected Reader instanceReader() { + return CastProcessor::new; + } + + @Override + protected CastProcessor mutateInstance(CastProcessor instance) throws IOException { + return new CastProcessor(randomValueOtherThan(instance.converter(), () -> randomFrom(Conversion.values()))); + } + + public void testApply() { + { + CastProcessor proc = new CastProcessor(Conversion.STRING_TO_INT); + assertEquals(null, proc.process(null)); + assertEquals(1, proc.process("1")); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> proc.process("1.2")); + assertEquals("cannot cast [1.2] to [Int]", e.getMessage()); + } + { + CastProcessor proc = new CastProcessor(Conversion.BOOL_TO_INT); + assertEquals(null, proc.process(null)); + assertEquals(1, proc.process(true)); + assertEquals(0, proc.process(false)); + } + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Processors.getNamedWriteables()); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessorTests.java new file 
mode 100644 index 00000000000..25e46d1774b --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessorTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ConstantProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import static org.elasticsearch.xpack.sql.tree.Location.EMPTY; + +public class BinaryArithmeticProcessorTests extends AbstractWireSerializingTestCase { + public static BinaryArithmeticProcessor randomProcessor() { + return new BinaryArithmeticProcessor( + new ConstantProcessor(randomLong()), + new ConstantProcessor(randomLong()), + randomFrom(BinaryArithmeticProcessor.BinaryArithmeticOperation.values())); + } + + @Override + protected BinaryArithmeticProcessor createTestInstance() { + return randomProcessor(); + } + + @Override + protected Reader instanceReader() { + return BinaryArithmeticProcessor::new; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Processors.getNamedWriteables()); + } + + public void testAdd() { + BinaryArithmeticProcessor ba = new Add(EMPTY, l(7), 
l(3)).makeProcessorDefinition().asProcessor(); + assertEquals(10, ba.process(null)); + } + + public void testSub() { + BinaryArithmeticProcessor ba = new Sub(EMPTY, l(7), l(3)).makeProcessorDefinition().asProcessor(); + assertEquals(4, ba.process(null)); + } + + public void testMul() { + BinaryArithmeticProcessor ba = new Mul(EMPTY, l(7), l(3)).makeProcessorDefinition().asProcessor(); + assertEquals(21, ba.process(null)); + } + + public void testDiv() { + BinaryArithmeticProcessor ba = new Div(EMPTY, l(7), l(3)).makeProcessorDefinition().asProcessor(); + assertEquals(2, ((Number) ba.process(null)).longValue()); + ba = new Div(EMPTY, l((double) 7), l(3)).makeProcessorDefinition().asProcessor(); + assertEquals(2.33, ((Number) ba.process(null)).doubleValue(), 0.01d); + } + + public void testMod() { + BinaryArithmeticProcessor ba = new Mod(EMPTY, l(7), l(3)).makeProcessorDefinition().asProcessor(); + assertEquals(1, ba.process(null)); + } + + public void testNegate() { + Processor ba = new Neg(EMPTY, l(7)).asProcessorDefinition().asProcessor(); + assertEquals(-7, ba.process(null)); + } + + // ((3*2+4)/2-2)%2 + public void testTree() { + Expression mul = new Mul(EMPTY, l(3), l(2)); + Expression add = new Add(EMPTY, mul, l(4)); + Expression div = new Div(EMPTY, add, l(2)); + Expression sub = new Sub(EMPTY, div, l(2)); + Mod mod = new Mod(EMPTY, sub, l(2)); + + Processor proc = mod.makeProcessorDefinition().asProcessor(); + assertEquals(1, proc.process(null)); + } + + private static Literal l(Object value) { + return Literal.of(EMPTY, value); + } +} \ No newline at end of file diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessorTests.java new file mode 100644 index 00000000000..70ccad04087 --- /dev/null +++ 
b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessorTests.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; + +public class DateTimeProcessorTests extends AbstractWireSerializingTestCase { + public static DateTimeProcessor randomDateTimeProcessor() { + return new DateTimeProcessor(randomFrom(DateTimeExtractor.values()), DateTimeZone.UTC); + } + + @Override + protected DateTimeProcessor createTestInstance() { + return randomDateTimeProcessor(); + } + + @Override + protected Reader instanceReader() { + return DateTimeProcessor::new; + } + + @Override + protected DateTimeProcessor mutateInstance(DateTimeProcessor instance) throws IOException { + return new DateTimeProcessor(randomValueOtherThan(instance.extractor(), () -> randomFrom(DateTimeExtractor.values())), DateTimeZone.UTC); + } + + public void testApply() { + DateTimeProcessor proc = new DateTimeProcessor(DateTimeExtractor.YEAR, DateTimeZone.UTC); + assertEquals(1970, proc.process(0L)); + assertEquals(1970, proc.process(new DateTime(0L, DateTimeZone.UTC))); + assertEquals(2017, proc.process(new DateTime(2017, 01, 02, 10, 10, DateTimeZone.UTC))); + + proc = new DateTimeProcessor(DateTimeExtractor.DAY_OF_MONTH, DateTimeZone.UTC); + assertEquals(1, proc.process(0L)); + assertEquals(1, proc.process(new DateTime(0L, DateTimeZone.UTC))); + 
assertEquals(2, proc.process(new DateTime(2017, 01, 02, 10, 10, DateTimeZone.UTC))); + assertEquals(31, proc.process(new DateTime(2017, 01, 31, 10, 10, DateTimeZone.UTC))); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYearTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYearTests.java new file mode 100644 index 00000000000..bf246ee7662 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYearTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.type.DateType; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +public class DayOfYearTests extends ESTestCase { + private static final DateTimeZone UTC = DateTimeZone.UTC; + + public void testAsColumnProcessor() { + assertEquals(1, extract(dateTime(0), UTC)); + assertEquals(1, extract(dateTime(0), DateTimeZone.forID("+01:00"))); + assertEquals(365, extract(dateTime(0), DateTimeZone.forID("-01:00"))); + } + + private DateTime dateTime(long millisSinceEpoch) { + return new DateTime(millisSinceEpoch, UTC); + } + + private Object extract(Object value, DateTimeZone timeZone) { + return build(value, timeZone).asProcessorDefinition().asProcessor().process(value); + } + + private DayOfYear build(Object value, DateTimeZone timeZone) { + return new DayOfYear(null, new Literal(null, value, DateType.DEFAULT), timeZone); + } +} diff --git 
a/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunctionProcessorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunctionProcessorTests.java new file mode 100644 index 00000000000..886531f6f94 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunctionProcessorTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; + +import java.io.IOException; + +public class MathFunctionProcessorTests extends AbstractWireSerializingTestCase { + public static MathProcessor randomMathFunctionProcessor() { + return new MathProcessor(randomFrom(MathOperation.values())); + } + + @Override + protected MathProcessor createTestInstance() { + return randomMathFunctionProcessor(); + } + + @Override + protected Reader instanceReader() { + return MathProcessor::new; + } + + @Override + protected MathProcessor mutateInstance(MathProcessor instance) throws IOException { + return new MathProcessor(randomValueOtherThan(instance.processor(), () -> randomFrom(MathOperation.values()))); + } + + public void testApply() { + MathProcessor proc = new MathProcessor(MathOperation.E); + assertEquals(Math.E, proc.process(null)); + assertEquals(Math.E, proc.process("cat")); + assertEquals(Math.E, proc.process(Math.PI)); + + proc = new MathProcessor(MathOperation.SQRT); + assertEquals(2.0, (double) proc.process(4), 0); + assertEquals(3.0, (double) 
proc.process(9d), 0); + assertEquals(1.77, (double) proc.process(3.14), 0.01); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ChainingProcessorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ChainingProcessorTests.java new file mode 100644 index 00000000000..ca261478a7c --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ChainingProcessorTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessor; + +import java.io.IOException; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.sql.execution.search.extractor.ProcessingHitExtractorTests.randomProcessor; + +public class ChainingProcessorTests extends AbstractWireSerializingTestCase { + public static ChainingProcessor randomComposeProcessor(int depth) { + return new ChainingProcessor(randomProcessor(depth + 1), randomProcessor(depth + 1)); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Processors.getNamedWriteables()); + } + + @Override + protected ChainingProcessor createTestInstance() { + return randomComposeProcessor(0); + } + + @Override + protected Reader 
instanceReader() { + return ChainingProcessor::new; + } + + @Override + protected ChainingProcessor mutateInstance(ChainingProcessor instance) throws IOException { + @SuppressWarnings("unchecked") + Supplier supplier = randomFrom( + () -> new ChainingProcessor( + instance.first(), randomValueOtherThan(instance.second(), () -> randomProcessor(0))), + () -> new ChainingProcessor( + randomValueOtherThan(instance.first(), () -> randomProcessor(0)), instance.second())); + return supplier.get(); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ConstantProcessorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ConstantProcessorTests.java new file mode 100644 index 00000000000..b26cf8edc1c --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ConstantProcessorTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; + +public class ConstantProcessorTests extends AbstractWireSerializingTestCase { + public static ConstantProcessor randomConstantProcessor() { + return new ConstantProcessor(randomAlphaOfLength(5)); + } + + @Override + protected ConstantProcessor createTestInstance() { + return randomConstantProcessor(); + } + + @Override + protected Reader instanceReader() { + return ConstantProcessor::new; + } + + @Override + protected ConstantProcessor mutateInstance(ConstantProcessor instance) throws IOException { + return new ConstantProcessor(randomValueOtherThan(instance.process(null), () -> randomAlphaOfLength(5))); + } + + public void testApply() { + ConstantProcessor proc = new ConstantProcessor("test"); + assertEquals("test", proc.process(null)); + assertEquals("test", proc.process("cat")); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/MatrixFieldProcessorTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/MatrixFieldProcessorTests.java new file mode 100644 index 00000000000..6763c41eb3c --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/MatrixFieldProcessorTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; + +import static java.util.Collections.singletonMap; + +public class MatrixFieldProcessorTests extends AbstractWireSerializingTestCase { + public static MatrixFieldProcessor randomMatrixFieldProcessor() { + return new MatrixFieldProcessor(randomAlphaOfLength(5)); + } + + @Override + protected MatrixFieldProcessor createTestInstance() { + return randomMatrixFieldProcessor(); + } + + @Override + protected Reader instanceReader() { + return MatrixFieldProcessor::new; + } + + @Override + protected MatrixFieldProcessor mutateInstance(MatrixFieldProcessor instance) throws IOException { + return new MatrixFieldProcessor(randomValueOtherThan(instance.key(), () -> randomAlphaOfLength(5))); + } + + public void testApply() { + MatrixFieldProcessor proc = new MatrixFieldProcessor("test"); + assertEquals(null, proc.process(null)); + assertEquals("cat", proc.process("cat")); + assertEquals(null, proc.process(singletonMap("foo", "cat"))); + assertEquals("cat", proc.process(singletonMap("test", "cat"))); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/planner/VerifierErrorMessagesTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/planner/VerifierErrorMessagesTests.java new file mode 100644 index 00000000000..a5666217878 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/planner/VerifierErrorMessagesTests.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.GetIndexResult; +import org.elasticsearch.xpack.sql.expression.function.DefaultFunctionRegistry; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.optimizer.Optimizer; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.session.TestingSqlSession; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.junit.After; +import org.junit.Before; + +import java.util.LinkedHashMap; +import java.util.Map; + +public class VerifierErrorMessagesTests extends ESTestCase { + + private SqlParser parser; + private FunctionRegistry functionRegistry; + private GetIndexResult getIndexResult; + private Analyzer analyzer; + private Optimizer optimizer; + private Planner planner; + + public VerifierErrorMessagesTests() { + parser = new SqlParser(); + functionRegistry = new DefaultFunctionRegistry(); + + Map mapping = new LinkedHashMap<>(); + mapping.put("bool", DataTypes.BOOLEAN); + mapping.put("int", DataTypes.INTEGER); + mapping.put("text", DataTypes.TEXT); + mapping.put("keyword", DataTypes.KEYWORD); + EsIndex test = new EsIndex("test", mapping); + getIndexResult = GetIndexResult.valid(test); + analyzer = new Analyzer(functionRegistry); + optimizer = new Optimizer(); + planner = new Planner(); + + } + + @Before + public void setupContext() { + TestingSqlSession.setCurrentContext(TestingSqlSession.ctx(getIndexResult)); + } + + @After + public void disposeContext() { + TestingSqlSession.removeCurrentContext(); + } + + private String verify(String sql) { + PlanningException e = expectThrows(PlanningException.class, + () -> 
planner.mapPlan(optimizer.optimize(analyzer.analyze(parser.createStatement(sql), true)), true)); + assertTrue(e.getMessage().startsWith("Found ")); + String header = "Found 1 problem(s)\nline "; + return e.getMessage().substring(header.length()); + } + + + public void testMultiGroupBy() { + // TODO: location needs to be updated after merging extend-having + assertEquals("1:32: Currently, only a single expression can be used with GROUP BY; please select one of [bool, keyword]", + verify("SELECT bool FROM test GROUP BY bool, keyword")); + } +} \ No newline at end of file diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java new file mode 100644 index 00000000000..f51c93bc4f9 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse.ColumnInfo; +import org.elasticsearch.xpack.sql.session.Cursor; + +import java.sql.JDBCType; +import java.util.Arrays; + +import static org.hamcrest.Matchers.arrayWithSize; + +public class CliFormatterTests extends ESTestCase { + private final SqlResponse firstResponse = new SqlResponse(Cursor.EMPTY, 10, 5, + Arrays.asList( + new ColumnInfo("foo", "string", JDBCType.VARCHAR, 0), + new ColumnInfo("bar", "long", JDBCType.BIGINT, 15), + new ColumnInfo("15charwidename!", "double", JDBCType.DOUBLE, 25), + new ColumnInfo("superduperwidename!!!", "double", JDBCType.DOUBLE, 25), + new ColumnInfo("baz", "keyword", JDBCType.VARCHAR, 0)), + Arrays.asList( + Arrays.asList("15charwidedata!", 1, 6.888, 12, "rabbit"), + Arrays.asList("dog", 1.7976931348623157E308, 123124.888, 9912, "goat"))); + private final CliFormatter formatter = new CliFormatter(firstResponse); + + /** + * Tests for {@link CliFormatter#formatWithHeader(SqlResponse)}, values + * of exactly the minimum column size, column names of exactly + * the minimum column size, column headers longer than the + * minimum column size, and values longer than the minimum + * column size. 
+ */ + public void testFormatWithHeader() { + String[] result = formatter.formatWithHeader(firstResponse).split("\n"); + assertThat(result, arrayWithSize(4)); + assertEquals(" foo | bar |15charwidename!|superduperwidename!!!| baz ", result[0]); + assertEquals("---------------+----------------------+---------------+---------------------+---------------", result[1]); + assertEquals("15charwidedata!|1 |6.888 |12 |rabbit ", result[2]); + assertEquals("dog |1.7976931348623157E308|123124.888 |9912 |goat ", result[3]); + } + + /** + * Tests for {@link CliFormatter#formatWithoutHeader(SqlResponse)} and + * truncation of long columns. + */ + public void testFormatWithoutHeader() { + String[] result = formatter.formatWithoutHeader(new SqlResponse(Cursor.EMPTY, 10, 5, null, + Arrays.asList( + Arrays.asList("ohnotruncateddata", 4, 1, 77, "wombat"), + Arrays.asList("dog", 2, 123124.888, 9912, "goat")))).split("\n"); + assertThat(result, arrayWithSize(2)); + assertEquals("ohnotruncatedd~|4 |1 |77 |wombat ", result[0]); + assertEquals("dog |2 |123124.888 |9912 |goat ", result[1]); + } + + /** + * Ensure that our estimates are perfect in at least some cases. + */ + public void testEstimateSize() { + assertEquals(formatter.formatWithHeader(firstResponse).length(), + formatter.estimateSize(firstResponse.rows().size() + 2)); + assertEquals(formatter.formatWithoutHeader(firstResponse).length(), + formatter.estimateSize(firstResponse.rows().size())); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java new file mode 100644 index 00000000000..5193a797018 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils.MutateFunction; +import org.elasticsearch.xpack.sql.session.Cursor; + +import static org.elasticsearch.xpack.sql.execution.search.ScrollCursorTests.randomScrollCursor; + +public class SqlClearCursorRequestTests extends AbstractStreamableTestCase { + + @Override + protected SqlClearCursorAction.Request createTestInstance() { + return new SqlClearCursorAction.Request(randomScrollCursor()); + } + + @Override + protected SqlClearCursorAction.Request createBlankInstance() { + return new SqlClearCursorAction.Request(); + } + + @Override + @SuppressWarnings("unchecked") + protected MutateFunction getMutateFunction() { + return request -> getCopyFunction().copy(request).setCursor(randomScrollCursor()); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Cursor.getNamedWriteables()); + } + +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java new file mode 100644 index 00000000000..a72b0901426 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils.MutateFunction; + +public class SqlClearCursorResponseTests extends AbstractStreamableTestCase { + + @Override + protected SqlClearCursorAction.Response createTestInstance() { + return new SqlClearCursorAction.Response(randomBoolean()); + } + + @Override + protected SqlClearCursorAction.Response createBlankInstance() { + return new SqlClearCursorAction.Response(); + } + + @Override + protected MutateFunction getMutateFunction() { + return response -> getCopyFunction().copy(response).setSucceeded(response.isSucceeded() == false); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlPluginTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlPluginTests.java new file mode 100644 index 00000000000..801ad86ea46 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlPluginTests.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; + +import static org.hamcrest.Matchers.empty; +import static org.mockito.Mockito.mock; + +public class SqlPluginTests extends ESTestCase { + + public void testSqlDisabled() { + SqlPlugin plugin = new SqlPlugin(false, new SqlLicenseChecker(() -> {}, () -> {})); + assertThat(plugin.createComponents(mock(Client.class)), empty()); + assertThat(plugin.getActions(), empty()); + assertThat(plugin.getRestHandlers(Settings.EMPTY, mock(RestController.class), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, new SettingsFilter(Settings.EMPTY, Collections.emptyList()), + mock(IndexNameExpressionResolver.class), () -> mock(DiscoveryNodes.class)), empty()); + } + +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestTests.java new file mode 100644 index 00000000000..ed90022da58 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils.MutateFunction; + +import java.util.Collections; + +import static org.elasticsearch.xpack.sql.SqlTestUtils.randomFilter; +import static org.elasticsearch.xpack.sql.SqlTestUtils.randomFilterOrNull; + +public class SqlTranslateRequestTests extends AbstractStreamableTestCase { + + @Override + protected SqlTranslateAction.Request createTestInstance() { + return new SqlTranslateAction.Request(randomAlphaOfLength(10), randomFilterOrNull(random()), randomDateTimeZone(), + between(1, Integer.MAX_VALUE), randomTV(), randomTV()); + } + + private TimeValue randomTV() { + return TimeValue.parseTimeValue(randomTimeValue(), null, "test"); + } + + @Override + protected SqlTranslateAction.Request createBlankInstance() { + return new SqlTranslateAction.Request(); + } + + @Override + @SuppressWarnings("unchecked") + protected MutateFunction getMutateFunction() { + return randomFrom( + request -> (SqlTranslateAction.Request) getCopyFunction().copy(request) + .query(randomValueOtherThan(request.query(), () -> randomAlphaOfLength(5))), + request -> (SqlTranslateAction.Request) getCopyFunction().copy(request) + .timeZone(randomValueOtherThan(request.timeZone(), ESTestCase::randomDateTimeZone)), + request -> (SqlTranslateAction.Request) getCopyFunction().copy(request) + .fetchSize(randomValueOtherThan(request.fetchSize(), () -> between(1, Integer.MAX_VALUE))), + request -> (SqlTranslateAction.Request) getCopyFunction().copy(request) + .requestTimeout(randomValueOtherThan(request.requestTimeout(), () -> randomTV())), + request -> (SqlTranslateAction.Request) 
getCopyFunction().copy(request) + .pageTimeout(randomValueOtherThan(request.pageTimeout(), () -> randomTV())), + request -> (SqlTranslateAction.Request) getCopyFunction().copy(request).filter(randomValueOtherThan(request.filter(), + () -> request.filter() == null ? randomFilter(random()) : randomFilterOrNull(random())))); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + // We need this for QueryBuilder serialization + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java new file mode 100644 index 00000000000..cedb5a97d71 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils.MutateFunction; + +public class SqlTranslateResponseTests extends AbstractStreamableTestCase { + + @Override + protected SqlTranslateAction.Response createTestInstance() { + SearchSourceBuilder s = new SearchSourceBuilder(); + if (randomBoolean()) { + long docValues = iterations(5, 10); + for (int i = 0; i < docValues; i++) { + s.docValueField(randomAlphaOfLength(10)); + } + } + + if (randomBoolean()) { + long sourceFields = iterations(5, 10); + for (int i = 0; i < sourceFields; i++) { + s.storedField(randomAlphaOfLength(10)); + } + } + + s.fetchSource(randomBoolean()).from(randomInt(256)).explain(randomBoolean()).size(randomInt(256)); + + return new SqlTranslateAction.Response(s); + } + + @Override + protected SqlTranslateAction.Response createBlankInstance() { + return new SqlTranslateAction.Response(); + } + + @Override + protected MutateFunction getMutateFunction() { + return response -> { + SqlTranslateAction.Response copy = getCopyFunction().copy(response); + copy.source().size(randomValueOtherThan(response.source().size(), () -> between(0, Integer.MAX_VALUE))); + return copy; + }; + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/sql/action/SqlRequestTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/sql/action/SqlRequestTests.java new file mode 100644 index 00000000000..9667b94b8db --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/sql/action/SqlRequestTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin.sql.action; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils.MutateFunction; +import org.elasticsearch.xpack.sql.plugin.SqlPlugin; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.xpack.sql.SqlTestUtils.randomFilter; +import static org.elasticsearch.xpack.sql.SqlTestUtils.randomFilterOrNull; +import static org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponseTests.randomCursor; + +public class SqlRequestTests extends AbstractStreamableTestCase { + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + List namedWriteables = new ArrayList<>(); + namedWriteables.addAll(searchModule.getNamedWriteables()); + namedWriteables.addAll(SqlPlugin.getNamedWriteables()); + return new NamedWriteableRegistry(namedWriteables); + } + + @Override + protected SqlRequest createTestInstance() { + return new SqlRequest(randomAlphaOfLength(10), randomFilterOrNull(random()), randomDateTimeZone(), + between(1, Integer.MAX_VALUE), randomTV(), randomTV(), randomCursor()); + } + + private TimeValue randomTV() { + return TimeValue.parseTimeValue(randomTimeValue(), null, "test"); + } + + @Override + protected SqlRequest createBlankInstance() { + return new SqlRequest(); + } + + @Override + @SuppressWarnings("unchecked") + protected MutateFunction getMutateFunction() { + return randomFrom( + request -> getCopyFunction().copy(request) + .cursor(randomValueOtherThan(request.cursor(), SqlResponseTests::randomCursor)), + request -> 
(SqlRequest) getCopyFunction().copy(request) + .query(randomValueOtherThan(request.query(), () -> randomAlphaOfLength(5))), + request -> (SqlRequest) getCopyFunction().copy(request) + .timeZone(randomValueOtherThan(request.timeZone(), ESTestCase::randomDateTimeZone)), + request -> (SqlRequest) getCopyFunction().copy(request) + .fetchSize(randomValueOtherThan(request.fetchSize(), () -> between(1, Integer.MAX_VALUE))), + request -> (SqlRequest) getCopyFunction().copy(request) + .requestTimeout(randomValueOtherThan(request.requestTimeout(), () -> randomTV())), + request -> (SqlRequest) getCopyFunction().copy(request) + .pageTimeout(randomValueOtherThan(request.pageTimeout(), () -> randomTV())), + request -> (SqlRequest) getCopyFunction().copy(request).filter(randomValueOtherThan(request.filter(), + () -> request.filter() == null ? randomFilter(random()) : randomFilterOrNull(random())))); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/sql/action/SqlResponseTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/sql/action/SqlResponseTests.java new file mode 100644 index 00000000000..bb51f5d5df0 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/plugin/sql/action/SqlResponseTests.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin.sql.action; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.sql.execution.search.ScrollCursorTests; +import org.elasticsearch.xpack.sql.plugin.CliFormatter; +import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor; +import org.elasticsearch.xpack.sql.plugin.JdbcCursor; +import org.elasticsearch.xpack.sql.plugin.SqlPlugin; +import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse.ColumnInfo; +import org.elasticsearch.xpack.sql.session.Cursor; + +import java.io.IOException; +import java.sql.JDBCType; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.hasSize; + +public class SqlResponseTests extends AbstractStreamableTestCase { + static Cursor randomCursor() { + return randomBoolean() ? 
Cursor.EMPTY : randomNonEmptyCursor(); + } + + static Cursor randomNonEmptyCursor() { + switch (randomIntBetween(0, 2)) { + case 0: + return ScrollCursorTests.randomScrollCursor(); + case 1: + int typeNum = randomIntBetween(0, 10); + List types = new ArrayList<>(); + for (int i = 0; i < typeNum; i++) { + types.add(randomFrom(JDBCType.values())); + } + return JdbcCursor.wrap(ScrollCursorTests.randomScrollCursor(), types); + case 2: + SqlResponse response = createRandomInstance(Cursor.EMPTY); + if (response.columns() != null && response.rows() != null) { + return CliFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), new CliFormatter(response)); + } else { + return ScrollCursorTests.randomScrollCursor(); + } + default: + throw new IllegalArgumentException("Unexpected random value "); + } + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(SqlPlugin.getNamedWriteables()); + } + + @Override + protected SqlResponse createTestInstance() { + return createRandomInstance(randomCursor()); + } + + private static SqlResponse createRandomInstance(Cursor cursor) { + int columnCount = between(1, 10); + + List columns = null; + if (randomBoolean()) { + columns = new ArrayList<>(columnCount); + for (int i = 0; i < columnCount; i++) { + columns.add(new ColumnInfo(randomAlphaOfLength(10), randomAlphaOfLength(10), randomFrom(JDBCType.values()), randomInt(25))); + } + } + + List> rows; + if (randomBoolean()) { + rows = Collections.emptyList(); + } else { + int rowCount = between(1, 10); + rows = new ArrayList<>(rowCount); + for (int r = 0; r < rowCount; r++) { + List row = new ArrayList<>(rowCount); + for (int c = 0; c < columnCount; c++) { + row.add(randomBoolean() ? 
randomAlphaOfLength(10) : randomInt()); + } + rows.add(row); + } + } + + return new SqlResponse(cursor, randomNonNegativeLong(), columnCount, columns, rows); + } + + @Override + protected SqlResponse createBlankInstance() { + return new SqlResponse(); + } + + public void testToXContent() throws IOException { + SqlResponse testInstance = createTestInstance(); + + XContentBuilder builder = testInstance.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS); + Map rootMap = XContentHelper.convertToMap(builder.bytes(), false, builder.contentType()).v2(); + + logger.info(builder.string()); + + assertEquals(testInstance.size(), rootMap.get("size")); + if (testInstance.columns() != null) { + List columns = (List) rootMap.get("columns"); + assertThat(columns, hasSize(testInstance.columns().size())); + for (int i = 0; i < columns.size(); i++) { + Map columnMap = (Map) columns.get(i); + ColumnInfo columnInfo = testInstance.columns().get(i); + assertEquals(columnInfo.name(), columnMap.get("name")); + assertEquals(columnInfo.esType(), columnMap.get("type")); + } + } else { + assertNull(rootMap.get("columns")); + } + + List rows = ((List) rootMap.get("rows")); + assertThat(rows, hasSize(testInstance.rows().size())); + for (int i = 0; i < rows.size(); i++) { + List row = (List) rows.get(i); + assertEquals(row, testInstance.rows().get(i)); + } + + if (testInstance.cursor() != Cursor.EMPTY) { + assertEquals(rootMap.get(SqlRequest.CURSOR.getPreferredName()), Cursor.encodeToString(Version.CURRENT, testInstance.cursor())); + } + } + + public void testVersionHandling() { + Cursor cursor = randomNonEmptyCursor(); + assertEquals(cursor, Cursor.decodeFromString(Cursor.encodeToString(Version.CURRENT, cursor))); + + Version nextMinorVersion = Version.fromId(Version.CURRENT.id + 10000); + + String encodedWithWrongVersion = Cursor.encodeToString(nextMinorVersion, cursor); + RuntimeException exception = expectThrows(RuntimeException.class, () -> { + 
Cursor.decodeFromString(encodedWithWrongVersion); + }); + + assertEquals(exception.getMessage(), "Unsupported scroll version " + nextMinorVersion); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/session/TestingSqlSession.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/session/TestingSqlSession.java new file mode 100644 index 00000000000..73dd90ef5a0 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/session/TestingSqlSession.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.analysis.index.GetIndexResult; +import org.elasticsearch.xpack.sql.session.SqlSession.SessionContext; + +public class TestingSqlSession { + + public static SessionContext ctx(GetIndexResult getIndexResult) { + Configuration cfg = new Configuration(ESTestCase.randomDateTimeZone(), ESTestCase.between(1, 100), + TimeValue.parseTimeValue(ESTestCase.randomPositiveTimeValue(), "test-random"), + TimeValue.parseTimeValue(ESTestCase.randomPositiveTimeValue(), "test-random"), null); + return new SessionContext(cfg, getIndexResult); + } + + public static void setCurrentContext(SessionContext ctx) { + assert SqlSession.CURRENT_CONTEXT.get() == null; + SqlSession.CURRENT_CONTEXT.set(ctx); + } + + public static void removeCurrentContext() { + SqlSession.CURRENT_CONTEXT.remove(); + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java new file mode 100644 index 00000000000..3dfd4cbef3a --- /dev/null +++ 
b/sql/server/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.type.DataTypeConversion.Conversion; + +public class DataTypeConversionTests extends ESTestCase { + public void testConversionToString() { + Conversion conversion = DataTypeConversion.conversionFor(new DoubleType(true), KeywordType.DEFAULT); + assertNull(conversion.convert(null)); + assertEquals("10.0", conversion.convert(10.0)); + + conversion = DataTypeConversion.conversionFor(new DateType(true), KeywordType.DEFAULT); + assertNull(conversion.convert(null)); + assertEquals("1970-01-01T00:00:00Z", conversion.convert(0)); + } + + /** + * Test conversion to a date or long. These are almost the same. + */ + public void testConversionToLongOrDate() { + DataType to = randomBoolean() ? 
new LongType(true) : new DateType(true); + { + Conversion conversion = DataTypeConversion.conversionFor(new DoubleType(true), to); + assertNull(conversion.convert(null)); + assertEquals(10L, conversion.convert(10.0)); + assertEquals(10L, conversion.convert(10.1)); + assertEquals(11L, conversion.convert(10.6)); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Double.MAX_VALUE)); + assertEquals("[" + Double.MAX_VALUE + "] out of [Long] range", e.getMessage()); + } + { + Conversion conversion = DataTypeConversion.conversionFor(new IntegerType(true), to); + assertNull(conversion.convert(null)); + assertEquals(10L, conversion.convert(10)); + assertEquals(-134L, conversion.convert(-134)); + } + { + Conversion conversion = DataTypeConversion.conversionFor(new BooleanType(true), to); + assertNull(conversion.convert(null)); + assertEquals(1, conversion.convert(true)); + assertEquals(0, conversion.convert(false)); + } + Conversion conversion = DataTypeConversion.conversionFor(KeywordType.DEFAULT, to); + assertNull(conversion.convert(null)); + if (to instanceof LongType) { + assertEquals(1L, conversion.convert("1")); + assertEquals(0L, conversion.convert("-0")); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); + assertEquals("cannot cast [0xff] to [Long]", e.getMessage()); + } else { + // TODO we'd like to be able to optionally parse millis here I think.... 
+ assertEquals(1000L, conversion.convert("1970-01-01T00:00:01Z")); + assertEquals(1483228800000L, conversion.convert("2017-01-01T00:00:00Z")); + assertEquals(18000000L, conversion.convert("1970-01-01T00:00:00-05:00")); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); + assertEquals("cannot cast [0xff] to [Date]:Invalid format: \"0xff\" is malformed at \"xff\"", e.getMessage()); + } + } + + public void testConversionToDouble() { + { + Conversion conversion = DataTypeConversion.conversionFor(new FloatType(true), new DoubleType(true)); + assertNull(conversion.convert(null)); + assertEquals(10.0, (double) conversion.convert(10.0f), 0.00001); + assertEquals(10.1, (double) conversion.convert(10.1f), 0.00001); + assertEquals(10.6, (double) conversion.convert(10.6f), 0.00001); + } + { + Conversion conversion = DataTypeConversion.conversionFor(new IntegerType(true), new DoubleType(true)); + assertNull(conversion.convert(null)); + assertEquals(10.0, (double) conversion.convert(10), 0.00001); + assertEquals(-134.0, (double) conversion.convert(-134), 0.00001); + } + { + Conversion conversion = DataTypeConversion.conversionFor(new BooleanType(true), new DoubleType(true)); + assertNull(conversion.convert(null)); + assertEquals(1.0, (double) conversion.convert(true), 0); + assertEquals(0.0, (double) conversion.convert(false), 0); + } + { + Conversion conversion = DataTypeConversion.conversionFor(KeywordType.DEFAULT, new DoubleType(true)); + assertNull(conversion.convert(null)); + assertEquals(1.0, (double) conversion.convert("1"), 0); + assertEquals(0.0, (double) conversion.convert("-0"), 0); + assertEquals(12.776, (double) conversion.convert("12.776"), 0.00001); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); + assertEquals("cannot cast [0xff] to [Double]", e.getMessage()); + } + } + + public void testConversionToBoolean() { + { + Conversion conversion = 
DataTypeConversion.conversionFor(new FloatType(true), new BooleanType(true)); + assertNull(conversion.convert(null)); + assertEquals(true, conversion.convert(10.0f)); + assertEquals(true, conversion.convert(-10.0f)); + assertEquals(false, conversion.convert(0.0f)); + } + { + Conversion conversion = DataTypeConversion.conversionFor(new IntegerType(true), new BooleanType(true)); + assertNull(conversion.convert(null)); + assertEquals(true, conversion.convert(10)); + assertEquals(true, conversion.convert(-10)); + assertEquals(false, conversion.convert(0)); + } + { + Conversion conversion = DataTypeConversion.conversionFor(new DoubleType(true), new BooleanType(true)); + assertNull(conversion.convert(null)); + assertEquals(true, conversion.convert(10.0)); + assertEquals(true, conversion.convert(-10.0)); + assertEquals(false, conversion.convert(0.0)); + } + { + Conversion conversion = DataTypeConversion.conversionFor(KeywordType.DEFAULT, new BooleanType(true)); + assertNull(conversion.convert(null)); + // We only handled upper and lower case true and false + assertEquals(true, conversion.convert("true")); + assertEquals(false, conversion.convert("false")); + assertEquals(true, conversion.convert("True")); + assertEquals(false, conversion.convert("fAlSe")); + // Everything else should fail + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("10")); + assertEquals("cannot cast [10] to [Boolean]", e.getMessage()); + e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("-1")); + assertEquals("cannot cast [-1] to [Boolean]", e.getMessage()); + e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0")); + assertEquals("cannot cast [0] to [Boolean]", e.getMessage()); + e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("blah")); + assertEquals("cannot cast [blah] to [Boolean]", e.getMessage()); + e = expectThrows(SqlIllegalArgumentException.class, () -> 
conversion.convert("Yes")); + assertEquals("cannot cast [Yes] to [Boolean]", e.getMessage()); + e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("nO")); + assertEquals("cannot cast [nO] to [Boolean]", e.getMessage()); + } + } +} diff --git a/sql/server/src/test/java/org/elasticsearch/xpack/sql/util/StringUtilsTests.java b/sql/server/src/test/java/org/elasticsearch/xpack/sql/util/StringUtilsTests.java new file mode 100644 index 00000000000..b3ebe244693 --- /dev/null +++ b/sql/server/src/test/java/org/elasticsearch/xpack/sql/util/StringUtilsTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.util; + +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.xpack.sql.util.StringUtils.sqlToJavaPattern; + +public class StringUtilsTests extends ESTestCase { + + public void testNoRegex() { + assertEquals("^fooBar$", sqlToJavaPattern("fooBar")); + } + + public void testEscapedJavaRegex() { + assertEquals("^\\.\\d$", sqlToJavaPattern("\\.\\d")); + } + + public void testSimpleSqlRegex1() { + assertEquals("^foo.bar$", sqlToJavaPattern("foo_bar")); + } + + public void testSimpleSqlRegex2() { + assertEquals("^foo.*bar$", sqlToJavaPattern("foo%bar")); + } + + public void testMultipleSqlRegexes() { + assertEquals("^foo.*bar.$", sqlToJavaPattern("foo%bar_")); + } + + public void testJavaRegexNoSqlRegex() { + assertEquals("^foo\\.\\*bar$", sqlToJavaPattern("foo.*bar")); + } + + public void testMultipleRegexAndSqlRegex() { + assertEquals("^foo\\.\\*bar\\..*$", sqlToJavaPattern("foo.*bar.%")); + } + + public void testComplicatedJavaRegex() { + assertEquals("^\\^\\[\\d\\]\\.\\*\\$$", sqlToJavaPattern("^[\\d].*$")); + } +} diff --git a/sql/shared-client/build.gradle 
b/sql/shared-client/build.gradle new file mode 100644 index 00000000000..5984238c887 --- /dev/null +++ b/sql/shared-client/build.gradle @@ -0,0 +1,36 @@ +description = 'Code shared between jdbc and cli' + +dependencies { + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" +} + +dependencyLicenses { + mapping from: /jackson-.*/, to: 'jackson' +} + +forbiddenApisMain { + // does not depend on core, so only jdk and http signatures should be checked + signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] +} + +forbiddenApisTest { + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +forbiddenPatterns { + exclude '**/*.keystore' +} + +// Allow for com.sun.net.httpserver.* usage for testing +eclipse { + classpath.file { + whenMerged { cp -> + def con = entries.find { e -> + e.kind == "con" && e.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") + } + con.accessRules.add(new org.gradle.plugins.ide.eclipse.model.AccessRule( + "accessible", "com/sun/net/httpserver/*")) + } + } +} diff --git a/sql/shared-client/licenses/jackson-LICENSE b/sql/shared-client/licenses/jackson-LICENSE new file mode 100644 index 00000000000..f5f45d26a49 --- /dev/null +++ b/sql/shared-client/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/sql/shared-client/licenses/jackson-NOTICE b/sql/shared-client/licenses/jackson-NOTICE new file mode 100644 index 00000000000..4c976b7b4cc --- /dev/null +++ b/sql/shared-client/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. 
+It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/sql/shared-client/licenses/jackson-core-2.8.10.jar.sha1 b/sql/shared-client/licenses/jackson-core-2.8.10.jar.sha1 new file mode 100644 index 00000000000..a322d371e26 --- /dev/null +++ b/sql/shared-client/licenses/jackson-core-2.8.10.jar.sha1 @@ -0,0 +1 @@ +eb21a035c66ad307e66ec8fce37f5d50fd62d039 \ No newline at end of file diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/Bytes.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/Bytes.java new file mode 100644 index 00000000000..0e0a3af5633 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/Bytes.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +public class Bytes { + + private final byte[] buf; + private final int size; + + public Bytes(byte[] buf, int size) { + this.buf = buf; + this.size = size; + } + + public byte[] bytes() { + return buf; + } + + public int size() { + return size; + } + + public byte[] copy() { + return Arrays.copyOf(buf, size); + } + + public String toString() { + return new String(buf, 0, size, StandardCharsets.UTF_8); + } +} diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedConsumer.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedConsumer.java new file mode 100644 index 00000000000..62258eebefa --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedConsumer.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.util.function.Consumer; + +/** + * A {@link Consumer}-like interface which allows throwing checked exceptions. + * Elasticsearch has one of these but we don't depend on Elasticsearch. + */ +@FunctionalInterface +public interface CheckedConsumer { + void accept(T t) throws E; +} diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedFunction.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedFunction.java new file mode 100644 index 00000000000..67e174ffd70 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedFunction.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.util.function.Function; + +/** + * A {@link Function}-like interface which allows throwing checked exceptions. + * Elasticsearch has one of these but we don't depend on Elasticsearch. + */ +@FunctionalInterface +public interface CheckedFunction { + R apply(T t) throws E; +} diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ClientException.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ClientException.java new file mode 100644 index 00000000000..0123fe73863 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ClientException.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.util.Locale; + +import static java.lang.String.format; + +public class ClientException extends RuntimeException { + public ClientException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + + public ClientException(String message) { + super(message); + } + + public ClientException(String message, Object... args) { // TODO remove these ctors + super(format(Locale.ROOT, message, args)); + } + + public ClientException(Throwable cause, String message, Object... 
args) { + super(format(Locale.ROOT, message, args), cause); + } + + public ClientException(Throwable cause) { + super(cause); + } +} diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ConnectionConfiguration.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ConnectionConfiguration.java new file mode 100644 index 00000000000..b4d64927ad7 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ConnectionConfiguration.java @@ -0,0 +1,227 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Enumeration; +import java.util.LinkedHashSet; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static java.util.Collections.emptyList; + +/** + * Common configuration class used for client. + * Uses a Properties object to be created (as clients would use strings to configure it). + * While this is convenient, it makes validation tricky (of both the names and values) and thus + * it's available only during construction. + * Some values might be updated later on in a typed fashion (dedicated method) in order + * to move away from the loose Strings... 
+ */ +public class ConnectionConfiguration { + + // Timeouts + + // 30s + public static final String CONNECT_TIMEOUT = "connect.timeout"; + private static final String CONNECT_TIMEOUT_DEFAULT = String.valueOf(TimeUnit.SECONDS.toMillis(30)); + + // 1m + public static final String NETWORK_TIMEOUT = "network.timeout"; + private static final String NETWORK_TIMEOUT_DEFAULT = String.valueOf(TimeUnit.MINUTES.toMillis(1)); + + // 90s + public static final String QUERY_TIMEOUT = "query.timeout"; + private static final String QUERY_TIMEOUT_DEFAULT = String.valueOf(TimeUnit.SECONDS.toMillis(90)); + + // 45s + public static final String PAGE_TIMEOUT = "page.timeout"; + private static final String PAGE_TIMEOUT_DEFAULT = String.valueOf(TimeUnit.SECONDS.toMillis(45)); + + public static final String PAGE_SIZE = "page.size"; + private static final String PAGE_SIZE_DEFAULT = "1000"; + + // Auth + + public static final String AUTH_USER = "user"; + // NB: this is password instead of pass since that's what JDBC DriverManager/tools use + public static final String AUTH_PASS = "password"; + + protected static final Set OPTION_NAMES = new LinkedHashSet<>( + Arrays.asList(CONNECT_TIMEOUT, NETWORK_TIMEOUT, QUERY_TIMEOUT, PAGE_TIMEOUT, PAGE_SIZE, AUTH_USER, AUTH_PASS)); + + static { + OPTION_NAMES.addAll(SslConfig.OPTION_NAMES); + OPTION_NAMES.addAll(ProxyConfig.OPTION_NAMES); + } + + // Base URI for all request + private final URI baseURI; + private final String connectionString; + // Proxy + + private long connectTimeout; + private long networkTimeout; + private long queryTimeout; + + private long pageTimeout; + private int pageSize; + + private final String user, pass; + + private final SslConfig sslConfig; + private final ProxyConfig proxyConfig; + + public ConnectionConfiguration(URI baseURI, String connectionString, Properties props) throws ClientException { + this.connectionString = connectionString; + Properties settings = props != null ? 
props : new Properties(); + + checkPropertyNames(settings, optionNames()); + + connectTimeout = parseValue(CONNECT_TIMEOUT, settings.getProperty(CONNECT_TIMEOUT, CONNECT_TIMEOUT_DEFAULT), Long::parseLong); + networkTimeout = parseValue(NETWORK_TIMEOUT, settings.getProperty(NETWORK_TIMEOUT, NETWORK_TIMEOUT_DEFAULT), Long::parseLong); + queryTimeout = parseValue(QUERY_TIMEOUT, settings.getProperty(QUERY_TIMEOUT, QUERY_TIMEOUT_DEFAULT), Long::parseLong); + // page + pageTimeout = parseValue(PAGE_TIMEOUT, settings.getProperty(PAGE_TIMEOUT, PAGE_TIMEOUT_DEFAULT), Long::parseLong); + pageSize = parseValue(PAGE_SIZE, settings.getProperty(PAGE_SIZE, PAGE_SIZE_DEFAULT), Integer::parseInt); + + // auth + user = settings.getProperty(AUTH_USER); + pass = settings.getProperty(AUTH_PASS); + + sslConfig = new SslConfig(settings); + proxyConfig = new ProxyConfig(settings); + + this.baseURI = normalizeSchema(baseURI, connectionString, sslConfig.isEnabled()); + } + + private static URI normalizeSchema(URI uri, String connectionString, boolean isSSLEnabled) { + // Make sure the protocol is correct + final String scheme; + if (isSSLEnabled) { + // It's ok to upgrade from http to https + scheme = "https"; + } else { + // Silently downgrading from https to http can cause security issues + if ("https".equals(uri.getScheme())) { + throw new ClientException("SSL is disabled"); + } + scheme = "http"; + } + try { + return new URI(scheme, null, uri.getHost(), uri.getPort(), uri.getPath(), uri.getQuery(), uri.getFragment()); + } catch (URISyntaxException ex) { + throw new ClientException("Cannot parse process baseURI [" + connectionString + "] " + ex.getMessage()); + } + } + + private Collection optionNames() { + Collection options = new ArrayList<>(OPTION_NAMES); + options.addAll(extraOptions()); + return options; + } + + protected Collection extraOptions() { + return emptyList(); + } + + private static void checkPropertyNames(Properties settings, Collection knownNames) throws ClientException 
{ + // validate specified properties to pick up typos and such + Enumeration pNames = settings.propertyNames(); + while (pNames.hasMoreElements()) { + String message = isKnownProperty(pNames.nextElement().toString(), knownNames); + if (message != null) { + throw new ClientException(message); + } + } + } + + private static String isKnownProperty(String propertyName, Collection knownOptions) { + if (knownOptions.contains(propertyName)) { + return null; + } + return "Unknown parameter [" + propertyName + "] ; did you mean " + StringUtils.findSimiliar(propertyName, knownOptions); + } + + protected T parseValue(String key, String value, Function parser) { + try { + return parser.apply(value); + } catch (Exception ex) { + throw new ClientException("Cannot parse property [" + key + "] with value [" + value + "]; " + ex.getMessage()); + } + } + + protected boolean isSSLEnabled() { + return sslConfig.isEnabled(); + } + + SslConfig sslConfig() { + return sslConfig; + } + + ProxyConfig proxyConfig() { + return proxyConfig; + } + + public void connectTimeout(long millis) { + connectTimeout = millis; + } + + public long connectTimeout() { + return connectTimeout; + } + + public void networkTimeout(long millis) { + networkTimeout = millis; + } + + public long networkTimeout() { + return networkTimeout; + } + + public void queryTimeout(long millis) { + queryTimeout = millis; + } + + public long queryTimeout() { + return queryTimeout; + } + + public long pageTimeout() { + return pageTimeout; + } + + public int pageSize() { + return pageSize; + } + + // auth + public String authUser() { + return user; + } + + public String authPass() { + return pass; + } + + public URI baseUri() { + return baseURI; + } + + /** + * Returns the original connections string + */ + public String connectionString() { + return connectionString; + } + +} diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/JreHttpUrlConnection.java 
b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/JreHttpUrlConnection.java new file mode 100644 index 00000000000..3bbd6fa9db5 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/JreHttpUrlConnection.java @@ -0,0 +1,299 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.io.Closeable; +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.HttpURLConnection; +import java.net.MalformedURLException; +import java.net.Proxy; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.sql.SQLClientInfoException; +import java.sql.SQLDataException; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLInvalidAuthorizationSpecException; +import java.sql.SQLRecoverableException; +import java.sql.SQLSyntaxErrorException; +import java.sql.SQLTimeoutException; +import java.util.Base64; +import java.util.function.Function; +import java.util.zip.GZIPInputStream; + +import javax.sql.rowset.serial.SerialException; + +import static java.util.Collections.emptyMap; + +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLSocketFactory; + +public class JreHttpUrlConnection implements Closeable { + /** + * State added to {@link SQLException}s when the server encounters an + * error. 
+ */ + public static final String SQL_STATE_BAD_SERVER = "bad_server"; + + public static R http(String path, String query, ConnectionConfiguration cfg, Function handler) { + final URI uriPath = cfg.baseUri().resolve(path); // update path if needed + final String uriQuery = query == null ? uriPath.getQuery() : query; // update query if needed + final URL url; + try { + url = new URI(uriPath.getScheme(), null, uriPath.getHost(), uriPath.getPort(), uriPath.getPath(), uriQuery, + uriPath.getFragment()).toURL(); + } catch (URISyntaxException | MalformedURLException ex) { + throw new ClientException(ex, "Cannot build url using base: [" + uriPath + "] query: [" + query + "] path: [" + path + "]"); + } + try (JreHttpUrlConnection con = new JreHttpUrlConnection(url, cfg)) { + return handler.apply(con); + } + } + + private boolean closed = false; + final HttpURLConnection con; + private final URL url; + private static final String GZIP = "gzip"; + + public JreHttpUrlConnection(URL url, ConnectionConfiguration cfg) throws ClientException { + this.url = url; + try { + // due to the way the URL API is designed, the proxy needs to be passed in first + Proxy p = cfg.proxyConfig().proxy(); + con = (HttpURLConnection) (p != null ? 
url.openConnection(p) : url.openConnection()); + } catch (IOException ex) { + throw new ClientException(ex, "Cannot setup connection to %s (%s)", url, ex.getMessage()); + } + + // the rest of the connection setup + setupConnection(cfg); + } + + private void setupConnection(ConnectionConfiguration cfg) { + // setup basic stuff first + + // timeouts + con.setConnectTimeout((int) cfg.connectTimeout()); + con.setReadTimeout((int) cfg.networkTimeout()); + + // disable content caching + con.setAllowUserInteraction(false); + con.setUseCaches(false); + + // HTTP params + // HttpURL adds this header by default, HttpS does not + // adding it here to be consistent + con.setRequestProperty("Accept-Charset", "UTF-8"); + //con.setRequestProperty("Accept-Encoding", GZIP); + + setupSSL(cfg); + setupBasicAuth(cfg); + } + + private void setupSSL(ConnectionConfiguration cfg) { + if (cfg.sslConfig().isEnabled()) { + HttpsURLConnection https = (HttpsURLConnection) con; + SSLSocketFactory factory = cfg.sslConfig().sslSocketFactory(); + AccessController.doPrivileged((PrivilegedAction) () -> { + https.setSSLSocketFactory(factory); + return null; + }); + } + } + + private void setupBasicAuth(ConnectionConfiguration cfg) { + if (StringUtils.hasText(cfg.authUser())) { + String basicValue = cfg.authUser() + ":" + cfg.authPass(); + String encoded = StringUtils.asUTFString(Base64.getEncoder().encode(StringUtils.toUTF(basicValue))); + con.setRequestProperty("Authorization", "Basic " + encoded); + } + } + + public boolean head() throws ClientException { + try { + con.setRequestMethod("HEAD"); + int responseCode = con.getResponseCode(); + return responseCode == HttpURLConnection.HTTP_OK; + } catch (IOException ex) { + throw new ClientException(ex, "Cannot HEAD address %s (%s)", url, ex.getMessage()); + } + } + + public ResponseOrException post( + CheckedConsumer doc, + CheckedFunction parser + ) throws ClientException { + try { + con.setRequestMethod("POST"); + con.setDoOutput(true); + 
con.setRequestProperty("Content-Type", "application/json"); + try (OutputStream out = con.getOutputStream()) { + doc.accept(new DataOutputStream(out)); + } + if (con.getResponseCode() < 300) { + try (InputStream stream = getStream(con, con.getInputStream())) { + return new ResponseOrException<>(parser.apply(new DataInputStream(stream))); + } + } + RemoteFailure failure; + try (InputStream stream = getStream(con, con.getErrorStream())) { + failure = RemoteFailure.parseFromResponse(stream); + } + if (con.getResponseCode() >= 500) { + /* + * Borrowing a page from the HTTP spec, we throw a "transient" + * exception if the server responded with a 500, not because + * we think that the application should retry, but because we + * think that the failure is not the fault of the application. + */ + return new ResponseOrException<>(new SQLException("Server encountered an error [" + + failure.reason() + "]. [" + failure.remoteTrace() + "]", SQL_STATE_BAD_SERVER)); + } + SqlExceptionType type = SqlExceptionType.fromRemoteFailureType(failure.type()); + if (type == null) { + return new ResponseOrException<>(new SQLException("Server sent bad type [" + + failure.type() + "]. Original type was [" + failure.reason() + "]. 
[" + + failure.remoteTrace() + "]", SQL_STATE_BAD_SERVER)); + } + return new ResponseOrException<>(type.asException(failure.reason())); + } catch (IOException ex) { + throw new ClientException(ex, "Cannot POST address %s (%s)", url, ex.getMessage()); + } + } + + public static class ResponseOrException { + private final R response; + private final SQLException exception; + + private ResponseOrException(R response) { + this.response = response; + this.exception = null; + } + + private ResponseOrException(SQLException exception) { + this.response = null; + this.exception = exception; + } + + public R getResponseOrThrowException() throws SQLException { + if (exception != null) { + throw exception; + } + assert response != null; + return response; + } + } + + private static InputStream getStream(HttpURLConnection con, InputStream stream) throws IOException { + if (GZIP.equals(con.getContentEncoding())) { + return new GZIPInputStream(stream); + } + return stream; + } + + public void connect() { + if (closed) { + throw new ClientException("Connection cannot be reused"); + } + try { + con.connect(); + } catch (IOException ex) { + throw new ClientException(ex, "Cannot open connection to %s (%s)", url, ex.getMessage()); + } + } + + @Override + public void close() { + if (!closed) { + closed = true; + + // consume streams + consumeStreams(); + } + } + + public void disconnect() { + try { + connect(); + } finally { + con.disconnect(); + } + } + + // http://docs.oracle.com/javase/7/docs/technotes/guides/net/http-keepalive.html + private void consumeStreams() { + try (InputStream in = con.getInputStream()) { + while (in != null && in.read() > -1) { + } + } catch (IOException ex) { + // ignore + } finally { + try (InputStream ein = con.getErrorStream()) { + while (ein != null && ein.read() > -1) { + } + } catch (IOException ex) { + // keep on ignoring + } + } + } + + /** + * Exception type. 
+ */ + public enum SqlExceptionType { + UNKNOWN(SQLException::new), + SERIAL(SerialException::new), + CLIENT_INFO(message -> new SQLClientInfoException(message, emptyMap())), + DATA(SQLDataException::new), + SYNTAX(SQLSyntaxErrorException::new), + RECOVERABLE(SQLRecoverableException::new), + TIMEOUT(SQLTimeoutException::new), + SECURITY(SQLInvalidAuthorizationSpecException::new), + NOT_SUPPORTED(SQLFeatureNotSupportedException::new); + + public static SqlExceptionType fromRemoteFailureType(String type) { + switch (type) { + case "analysis_exception": + case "resource_not_found_exception": + case "verification_exception": + return DATA; + case "planning_exception": + case "mapping_exception": + return NOT_SUPPORTED; + case "parsing_exception": + return SYNTAX; + case "security_exception": + return SECURITY; + case "timeout_exception": + return TIMEOUT; + default: + return null; + } + } + + private final Function toException; + + SqlExceptionType(Function toException) { + this.toException = toException; + } + + SQLException asException(String message) { + if (message == null) { + throw new IllegalArgumentException("[message] cannot be null"); + } + return toException.apply(message); + } + } +} diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ObjectUtils.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ObjectUtils.java new file mode 100644 index 00000000000..df924cdb375 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ObjectUtils.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.util.Arrays; +import java.util.Map; +import java.util.function.Function; + +import static java.util.stream.Collectors.toMap; + +public abstract class ObjectUtils { + + public static boolean isEmpty(int[] array) { + return (array == null || array.length == 0); + } + + public static boolean isEmpty(byte[] array) { + return (array == null || array.length == 0); + } + + public static boolean isEmpty(Object[] array) { + return (array == null || array.length == 0); + } + + public static > Map mapEnum(Class clazz, Function mapper) { + return Arrays.stream(clazz.getEnumConstants()).collect(toMap(mapper, Function.identity())); + } +} diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ProxyConfig.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ProxyConfig.java new file mode 100644 index 00000000000..6c8865fa061 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ProxyConfig.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.net.InetSocketAddress; +import java.net.Proxy; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.Properties; +import java.util.Set; + +class ProxyConfig { + + private static final String HTTP_PROXY = "proxy.http"; + private static final String HTTP_PROXY_DEFAULT = StringUtils.EMPTY; + private static final String SOCKS_PROXY = "proxy.socks"; + private static final String SOCKS_PROXY_DEFAULT = StringUtils.EMPTY; + + static final Set OPTION_NAMES = new LinkedHashSet<>(Arrays.asList(HTTP_PROXY, SOCKS_PROXY)); + + private final Proxy proxy; + + ProxyConfig(Properties settings) { + Proxy.Type type = null; + // try http first + Object[] address = host(settings.getProperty(HTTP_PROXY, HTTP_PROXY_DEFAULT), 80); + type = Proxy.Type.HTTP; + // nope, check socks + if (address == null) { + address = host(settings.getProperty(SOCKS_PROXY, SOCKS_PROXY_DEFAULT), 1080); + type = Proxy.Type.SOCKS; + } + proxy = address != null ? createProxy(type, address) : null; + } + + @SuppressForbidden(reason = "create the actual proxy") + private Proxy createProxy(Proxy.Type type, Object[] address) { + return new Proxy(type, new InetSocketAddress((String) address[0], (int) address[1])); + } + + boolean enabled() { + return proxy != null; + } + + Proxy proxy() { + return proxy; + } + + // returns hostname (string), port (int) + private static Object[] host(String address, int defaultPort) { + if (!StringUtils.hasText(address)) { + return null; + } + try { + URI uri = new URI(address); + Object[] results = { uri.getHost(), uri.getPort() > 0 ? 
uri.getPort() : defaultPort }; + return results; + } catch (URISyntaxException ex) { + throw new ClientException("Unrecognized address format %s", address); + } + } +} diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/RemoteFailure.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/RemoteFailure.java new file mode 100644 index 00000000000..1bf19847867 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/RemoteFailure.java @@ -0,0 +1,281 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; +import java.io.BufferedInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.Reader; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.emptyMap; + +/** + * A failure that happened on the remote server. + */ +public class RemoteFailure { + /** + * The maximum number of bytes before we no longer include the raw response if + * there is a catastrophic error parsing the remote failure. The actual value + * was chosen because it is ten times larger then a "normal" elasticsearch + * failure but not so big that we'll consume a ton of memory on huge errors. + * It will produce huge error messages but the user might + * want all that because it is probably being thrown by + * their proxy. 
+ */ + static final int MAX_RAW_RESPONSE = 512 * 1024; + + private static final JsonFactory JSON_FACTORY = new JsonFactory(); + static { + // Set up the factory similarly to how XContent does + JSON_FACTORY.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true); + JSON_FACTORY.configure(JsonParser.Feature.ALLOW_COMMENTS, true); + JSON_FACTORY.configure(JsonFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, true); + // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.core.json.UTF8JsonGenerator#close() method + JSON_FACTORY.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); + JSON_FACTORY.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, false); + // Don't close the stream because we might need to reset and reply it if there is an error. The caller closes the stream. + JSON_FACTORY.configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false); + } + + /** + * Parse a failure from the response. The stream is not closed when the parsing is complete. + * The caller must close it. + * @throws IOException if there is a catastrophic failure parsing the remote failure + */ + public static RemoteFailure parseFromResponse(InputStream stream) throws IOException { + // Mark so we can rewind to get the entire response in case we have to render an error. 
+ stream = new BufferedInputStream(stream); + stream.mark(MAX_RAW_RESPONSE); + JsonParser parser = null; + try { + parser = JSON_FACTORY.createParser(stream); + return parseResponseTopLevel(parser); + } catch (JsonParseException e) { + throw new IOException(parseErrorMessage(e.getOriginalMessage(), stream, parser), e); + } catch (IOException e) { + throw new IOException(parseErrorMessage(e.getMessage(), stream, parser), e); + } finally { + if (parser != null) { + parser.close(); + } + } + } + + private final String type; + private final String reason; + private final String remoteTrace; + private final Map headers; + private final RemoteFailure cause; + + RemoteFailure(String type, String reason, String remoteTrace, Map headers, RemoteFailure cause) { + this.type = type; + this.reason = reason; + this.remoteTrace = remoteTrace; + this.headers = headers; + this.cause = cause; + } + + public String type() { + return type; + } + + public String reason() { + return reason; + } + + /** + * Stack trace from Elasticsearch for the remote failure. Mostly just useful for debugging + * errors that happen to be bugs. + */ + public String remoteTrace() { + return remoteTrace; + } + + /** + * Headers sent by the remote failure. + */ + public Map headers() { + return headers; + } + + /** + * Cause of the remote failure. Mostly just useful for dbuegging errors that happen to be bugs. + */ + public RemoteFailure cause() { + return cause; + } + + private static RemoteFailure parseResponseTopLevel(JsonParser parser) throws IOException { + RemoteFailure exception = null; + + /* It'd be lovely to use the high level constructs that we have in core like ObjectParser + * but, alas, we aren't going to modularize those out any time soon. 
*/ + JsonToken token = parser.nextToken(); + if (token != JsonToken.START_OBJECT) { + throw new IllegalArgumentException("Expected error to start with [START_OBJECT] but started with [" + token + + "][" + parser.getText() + "]"); + } + String fieldName = null; + while ((token = parser.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + fieldName = parser.getCurrentName(); + } else { + switch (fieldName) { + case "error": + if (token != JsonToken.START_OBJECT) { + throw new IOException("Expected [error] to be an object but was [" + token + "][" + parser.getText() + "]"); + } + exception = parseFailure(parser); + continue; + case "status": + if (token != JsonToken.VALUE_NUMBER_INT) { + throw new IOException("Expected [status] to be a string but was [" + token + "][" + parser.getText() + "]"); + } + // Intentionally ignored + continue; + default: + throw new IOException("Expected one of [error, status] but got [" + fieldName + "][" + parser.getText() + "]"); + } + } + } + if (exception == null) { + throw new IOException("Expected [error] but didn't see it."); + } + return exception; + } + + private static RemoteFailure parseFailure(JsonParser parser) throws IOException { + String type = null; + String reason = null; + String remoteTrace = null; + Map headers = emptyMap(); + RemoteFailure cause = null; + + JsonToken token; + String fieldName = null; + while ((token = parser.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + fieldName = parser.getCurrentName(); + } else { + switch (fieldName) { + case "caused_by": + if (token != JsonToken.START_OBJECT) { + throw new IOException("Expected [caused_by] to be an object but was [" + token + "][" + parser.getText() + "]"); + } + cause = parseFailure(parser); + break; + case "header": + if (token != JsonToken.START_OBJECT) { + throw new IOException("Expected [header] to be an object but was [" + token + "][" + parser.getText() + "]"); + } + headers = 
parseHeaders(parser); + break; + case "reason": + switch (token) { + case VALUE_STRING: + reason = parser.getText(); + break; + case VALUE_NULL: + break; + default: + throw new IOException("Expected [reason] to be a string but was [" + token + "][" + parser.getText() + "]"); + } + break; + case "root_cause": + if (token != JsonToken.START_ARRAY) { + throw new IOException("Expected [root_cause] to be an array but was [" + token + "][" + parser.getText() + "]"); + } + parser.skipChildren(); // Intentionally ignored + break; + case "stack_trace": + if (token != JsonToken.VALUE_STRING) { + throw new IOException("Expected [stack_trace] to be a string but was [" + token + "][" + parser.getText() + "]"); + } + remoteTrace = parser.getText(); + break; + case "type": + if (token != JsonToken.VALUE_STRING) { + throw new IOException("Expected [type] to be a string but was [" + token + "][" + parser.getText() + "]"); + } + type = parser.getText(); + break; + default: + throw new IOException("Expected one of [caused_by, reason, root_cause, stack_trace, type] but got [" + + fieldName + "]"); + } + } + } + if (type == null) { + throw new IOException("expected [type] but didn't see it"); + } + if (remoteTrace == null) { + throw new IOException("expected [stack_trace] cannot but didn't see it"); + } + return new RemoteFailure(type, reason, remoteTrace, headers, cause); + } + + private static Map parseHeaders(JsonParser parser) throws IOException { + Map headers = new HashMap<>(); + + JsonToken token; + while ((token = parser.nextToken()) != JsonToken.END_OBJECT) { + if (token != JsonToken.FIELD_NAME) { + throw new IOException("expected header name but was [" + token + "][" + parser.getText() + "]"); + } + String name = parser.getText(); + token = parser.nextToken(); + if (token != JsonToken.VALUE_STRING) { + throw new IOException("expected header value but was [" + token + "][" + parser.getText() + "]"); + } + String value = parser.getText(); + headers.put(name, value); + } + + 
return headers; + } + + /** + * Build an error message from a parse failure. + */ + private static String parseErrorMessage(String message, InputStream stream, JsonParser parser) { + String responseMessage; + try { + try { + stream.reset(); + } catch (IOException e) { + // So far as I know, this is always caused by the response being too large + throw new IOException("Response too large", e); + } + try (Reader reader = new InputStreamReader(stream, StandardCharsets.UTF_8)) { + StringBuilder builder = new StringBuilder(); + builder.append("Response:\n"); + char[] buf = new char[512]; + int read; + while ((read = reader.read(buf)) != -1) { + builder.append(buf, 0, read); + } + responseMessage = builder.toString(); + } + } catch (IOException replayException) { + responseMessage = "Attempted to include response but failed because [" + replayException.getMessage() + "]."; + } + String parserLocation = ""; + if (parser != null) { + parserLocation = " at [line " + parser.getTokenLocation().getLineNr() + + " col " + parser.getTokenLocation().getColumnNr() + "]"; + } + return "Can't parse error from Elasticearch [" + message + "]" + parserLocation + ". " + responseMessage; + } +} diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/SslConfig.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/SslConfig.java new file mode 100644 index 00000000000..fb1b0dd7fd3 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/SslConfig.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.Objects; +import java.util.Properties; +import java.util.Set; + +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocketFactory; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; + +public class SslConfig { + + public static final String SSL = "ssl"; + private static final String SSL_DEFAULT = "false"; + + public static final String SSL_PROTOCOL = "ssl.protocol"; + private static final String SSL_PROTOCOL_DEFAULT = "TLS"; // SSL alternative + + public static final String SSL_KEYSTORE_LOCATION = "ssl.keystore.location"; + private static final String SSL_KEYSTORE_LOCATION_DEFAULT = ""; + + public static final String SSL_KEYSTORE_PASS = "ssl.keystore.pass"; + private static final String SSL_KEYSTORE_PASS_DEFAULT = ""; + + public static final String SSL_KEYSTORE_TYPE = "ssl.keystore.type"; + private static final String SSL_KEYSTORE_TYPE_DEFAULT = "JKS"; // PCKS12 + + public static final String SSL_TRUSTSTORE_LOCATION = "ssl.truststore.location"; + private static final String SSL_TRUSTSTORE_LOCATION_DEFAULT = ""; + + public static final String SSL_TRUSTSTORE_PASS = "ssl.truststore.pass"; + private static final String SSL_TRUSTSTORE_PASS_DEFAULT = ""; + + public static final String SSL_TRUSTSTORE_TYPE = "ssl.truststore.type"; + private static final String SSL_TRUSTSTORE_TYPE_DEFAULT = "JKS"; + + static final Set OPTION_NAMES = new LinkedHashSet<>(Arrays.asList(SSL, SSL_PROTOCOL, + SSL_KEYSTORE_LOCATION, SSL_KEYSTORE_PASS, SSL_KEYSTORE_TYPE, + 
SSL_TRUSTSTORE_LOCATION, SSL_TRUSTSTORE_PASS, SSL_TRUSTSTORE_TYPE)); + + private final boolean enabled; + private final String protocol, keystoreLocation, keystorePass, keystoreType; + private final String truststoreLocation, truststorePass, truststoreType; + + private final SSLContext sslContext; + + SslConfig(Properties settings) { + enabled = StringUtils.parseBoolean(settings.getProperty(SSL, SSL_DEFAULT)); + protocol = settings.getProperty(SSL_PROTOCOL, SSL_PROTOCOL_DEFAULT); + keystoreLocation = settings.getProperty(SSL_KEYSTORE_LOCATION, SSL_KEYSTORE_LOCATION_DEFAULT); + keystorePass = settings.getProperty(SSL_KEYSTORE_PASS, SSL_KEYSTORE_PASS_DEFAULT); + keystoreType = settings.getProperty(SSL_KEYSTORE_TYPE, SSL_KEYSTORE_TYPE_DEFAULT); + truststoreLocation = settings.getProperty(SSL_TRUSTSTORE_LOCATION, SSL_TRUSTSTORE_LOCATION_DEFAULT); + truststorePass = settings.getProperty(SSL_TRUSTSTORE_PASS, SSL_TRUSTSTORE_PASS_DEFAULT); + truststoreType = settings.getProperty(SSL_TRUSTSTORE_TYPE, SSL_TRUSTSTORE_TYPE_DEFAULT); + + sslContext = enabled ? createSSLContext() : null; + } + + // ssl + boolean isEnabled() { + return enabled; + } + + SSLSocketFactory sslSocketFactory() { + return sslContext.getSocketFactory(); + } + + private SSLContext createSSLContext() { + SSLContext ctx; + try { + ctx = SSLContext.getInstance(protocol); + ctx.init(loadKeyManagers(), loadTrustManagers(), null); + } catch (Exception ex) { + throw new ClientException(ex, "Failed to initialize SSL - %s", ex.getMessage()); + } + + return ctx; + } + + private KeyManager[] loadKeyManagers() throws GeneralSecurityException, IOException { + if (!StringUtils.hasText(keystoreLocation)) { + return null; + } + + char[] pass = (StringUtils.hasText(keystorePass) ? 
keystorePass.trim().toCharArray() : null); + KeyStore keyStore = loadKeyStore(keystoreLocation, pass, keystoreType); + KeyManagerFactory kmFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmFactory.init(keyStore, pass); + return kmFactory.getKeyManagers(); + } + + + private KeyStore loadKeyStore(String location, char[] pass, String keyStoreType) throws GeneralSecurityException, IOException { + KeyStore keyStore = KeyStore.getInstance(keyStoreType); + Path path = Paths.get(location); + + if (!Files.exists(path)) { + throw new ClientException( + "Expected to find keystore file at [%s] but was unable to. Make sure you have specified a valid URI.", location); + } + + try (InputStream in = Files.newInputStream(Paths.get(location), StandardOpenOption.READ)) { + keyStore.load(in, pass); + } catch (Exception ex) { + throw new ClientException(ex, "Cannot open keystore [%s] - %s", location, ex.getMessage()); + } finally { + + } + return keyStore; + } + + private TrustManager[] loadTrustManagers() throws GeneralSecurityException, IOException { + KeyStore keyStore = null; + + if (StringUtils.hasText(truststoreLocation)) { + char[] pass = (StringUtils.hasText(truststorePass) ? 
truststorePass.trim().toCharArray() : null); + keyStore = loadKeyStore(truststoreLocation, pass, truststoreType); + } + + TrustManagerFactory tmFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmFactory.init(keyStore); + return tmFactory.getTrustManagers(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + SslConfig other = (SslConfig) obj; + return Objects.equals(enabled, other.enabled) + && Objects.equals(protocol, other.protocol) + && Objects.equals(keystoreLocation, other.keystoreLocation) + && Objects.equals(keystorePass, other.keystorePass) + && Objects.equals(keystoreType, other.keystoreType) + && Objects.equals(truststoreLocation, other.truststoreLocation) + && Objects.equals(truststorePass, other.truststorePass) + && Objects.equals(truststoreType, other.truststoreType); + } + + public int hashCode() { + return getClass().hashCode(); + } +} diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/StringUtils.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/StringUtils.java new file mode 100644 index 00000000000..192c217be51 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/StringUtils.java @@ -0,0 +1,308 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.StringTokenizer; + +public abstract class StringUtils { + public static final String EMPTY = ""; + public static final String SLASH = "/"; + public static final String PATH_TOP = ".."; + public static final String PATH_CURRENT = "."; + public static final String DEFAULT_DELIMITER = ","; + + public static String nullAsEmpty(String string) { + return string == null ? EMPTY : string; + } + + public static boolean hasText(CharSequence sequence) { + if (!hasLength(sequence)) { + return false; + } + int length = sequence.length(); + for (int i = 0; i < length; i++) { + if (!Character.isWhitespace(sequence.charAt(i))) { + return true; + } + } + return false; + } + + public static boolean hasLength(CharSequence sequence) { + return (sequence != null && sequence.length() > 0); + } + + public static boolean isUpperCase(CharSequence sequence) { + for (int i = 0; i < sequence.length(); i++) { + if (Character.isLetter(sequence.charAt(i)) && !Character.isUpperCase(sequence.charAt(i))) { + return false; + } + } + return true; + } + + public static String[] splitToIndexAndType(String pattern) { + List tokens = tokenize(pattern, "."); + + String[] results = new String[2]; + if (tokens.size() == 2) { + results[0] = tokens.get(0); + results[1] = tokens.get(1); + } + else { + results[0] = nullAsEmpty(pattern); + results[1] = EMPTY; + } + + return results; + } + + public static List tokenize(String string) { + return tokenize(string, DEFAULT_DELIMITER); + } + + public static List tokenize(String string, String delimiters) { + return tokenize(string, delimiters, true, true); + } + + public static List tokenize(String string, String delimiters, boolean trimTokens, boolean ignoreEmptyTokens) { + if 
(!hasText(string)) { + return Collections.emptyList(); + } + StringTokenizer st = new StringTokenizer(string, delimiters); + List tokens = new ArrayList(); + while (st.hasMoreTokens()) { + String token = st.nextToken(); + if (trimTokens) { + token = token.trim(); + } + if (!ignoreEmptyTokens || token.length() > 0) { + tokens.add(token); + } + } + return tokens; + } + + public static String concatenate(Collection list) { + return concatenate(list, DEFAULT_DELIMITER); + } + + public static String concatenate(Collection list, String delimiter) { + if (list == null || list.isEmpty()) { + return EMPTY; + } + if (delimiter == null) { + delimiter = EMPTY; + } + StringBuilder sb = new StringBuilder(); + + for (Object object : list) { + sb.append(object.toString()); + sb.append(delimiter); + } + + sb.setLength(sb.length() - delimiter.length()); + return sb.toString(); + } + + public static String normalize(String path) { + if (path == null) { + return null; + } + String pathToUse = path.replace("\\", SLASH); + + int prefixIndex = pathToUse.indexOf(":"); + String prefix = ""; + if (prefixIndex != -1) { + prefix = pathToUse.substring(0, prefixIndex + 1); + if (prefix.contains(SLASH)) { + prefix = ""; + } + else { + pathToUse = pathToUse.substring(prefixIndex + 1); + } + } + if (pathToUse.startsWith(SLASH)) { + prefix = prefix + SLASH; + pathToUse = pathToUse.substring(1); + } + + List pathList = tokenize(pathToUse, SLASH); + List pathTokens = new LinkedList(); + int tops = 0; + + for (int i = pathList.size() - 1; i >= 0; i--) { + String element = pathList.get(i); + if (PATH_CURRENT.equals(element)) { + // current folder, ignore it + } + else if (PATH_TOP.equals(element)) { + // top folder, skip previous element + tops++; + } + else { + if (tops > 0) { + // should it be skipped? 
+ tops--; + } + else { + pathTokens.add(0, element); + } + } + } + + for (int i = 0; i < tops; i++) { + pathTokens.add(0, PATH_TOP); + } + + return prefix + concatenate(pathTokens, SLASH); + } + + public static String asUTFString(byte[] content) { + return asUTFString(content, 0, content.length); + } + + public static String asUTFString(byte[] content, int offset, int length) { + return (content == null || length == 0 ? EMPTY : new String(content, offset, length, StandardCharsets.UTF_8)); + } + + public static byte[] toUTF(String string) { + return string.getBytes(StandardCharsets.UTF_8); + } + + // Based on "Algorithms on Strings, Trees and Sequences by Dan Gusfield". + // returns -1 if the two strings are within the given threshold of each other, -1 otherwise + private static int levenshteinDistance(CharSequence one, CharSequence another, int threshold) { + int n = one.length(); + int m = another.length(); + + // if one string is empty, the edit distance is necessarily the length of the other + if (n == 0) { + return m <= threshold ? m : -1; + } + else if (m == 0) { + return n <= threshold ? 
n : -1; + } + + if (n > m) { + // swap the two strings to consume less memory + final CharSequence tmp = one; + one = another; + another = tmp; + n = m; + m = another.length(); + } + + int p[] = new int[n + 1]; // 'previous' cost array, horizontally + int d[] = new int[n + 1]; // cost array, horizontally + int _d[]; // placeholder to assist in swapping p and d + + // fill in starting table values + final int boundary = Math.min(n, threshold) + 1; + for (int i = 0; i < boundary; i++) { + p[i] = i; + } + + // these fills ensure that the value above the rightmost entry of our + // stripe will be ignored in following loop iterations + Arrays.fill(p, boundary, p.length, Integer.MAX_VALUE); + Arrays.fill(d, Integer.MAX_VALUE); + + for (int j = 1; j <= m; j++) { + final char t_j = another.charAt(j - 1); + d[0] = j; + + // compute stripe indices, constrain to array size + final int min = Math.max(1, j - threshold); + final int max = (j > Integer.MAX_VALUE - threshold) ? n : Math.min(n, j + threshold); + + // the stripe may lead off of the table if s and t are of different sizes + if (min > max) { + return -1; + } + + // ignore entry left of leftmost + if (min > 1) { + d[min - 1] = Integer.MAX_VALUE; + } + + // iterates through [min, max] in s + for (int i = min; i <= max; i++) { + if (one.charAt(i - 1) == t_j) { + // diagonally left and up + d[i] = p[i - 1]; + } + else { + // 1 + minimum of cell to the left, to the top, diagonally left and up + d[i] = 1 + Math.min(Math.min(d[i - 1], p[i]), p[i - 1]); + } + } + + // copy current distance counts to 'previous row' distance counts + _d = p; + p = d; + d = _d; + } + + // if p[n] is greater than the threshold, there's no guarantee on it being the correct + // distance + if (p[n] <= threshold) { + return p[n]; + } + return -1; + } + + public static List findSimiliar(CharSequence match, Collection potential) { + List list = new ArrayList(3); + + // 1 switches or 1 extra char + int maxDistance = 5; + + for (String string : 
potential) { + int dist = levenshteinDistance(match, string, maxDistance); + if (dist >= 0) { + if (dist < maxDistance) { + maxDistance = dist; + list.clear(); + list.add(string); + } + else if (dist == maxDistance) { + list.add(string); + } + } + } + + return list; + } + + public static boolean parseBoolean(String input) { + switch(input) { + case "true": return true; + case "false": return false; + default: throw new IllegalArgumentException("must be [true] or [false]"); + } + } + + public static String asHexString(byte[] content, int offset, int length) { + StringBuilder buf = new StringBuilder(); + for (int i = offset; i < length; i++) { + String hex = Integer.toHexString(0xFF & content[i]); + if (hex.length() == 1) { + buf.append('0'); + } + buf.append(hex); + } + return buf.toString(); + } + +} \ No newline at end of file diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/SuppressForbidden.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/SuppressForbidden.java new file mode 100644 index 00000000000..52b864edff4 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/SuppressForbidden.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotation to suppress forbidden-apis errors inside a whole class, a method, or a field. 
+ */ +@Retention(RetentionPolicy.CLASS) +@Target({ ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE }) +public @interface SuppressForbidden { + String reason(); +} diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/UriUtils.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/UriUtils.java new file mode 100644 index 00000000000..f8c2e73e6a0 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/UriUtils.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.net.URI; +import java.net.URISyntaxException; + +public final class UriUtils { + private UriUtils() { + + } + + /** + * Parses the URL provided by the user and + */ + public static URI parseURI(String connectionString, URI defaultURI) { + final URI uri = parseWithNoScheme(connectionString); + final String path = "".equals(uri.getPath()) ? defaultURI.getPath() : uri.getPath(); + final String query = uri.getQuery() == null ? defaultURI.getQuery() : uri.getQuery(); + final int port = uri.getPort() < 0 ? 
defaultURI.getPort() : uri.getPort(); + try { + return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), port, path, query, defaultURI.getFragment()); + } catch (URISyntaxException e) { + throw new IllegalArgumentException("Invalid connection configuration [" + connectionString + "]: " + e.getMessage(), e); + } + } + + private static URI parseWithNoScheme(String connectionString) { + URI uri; + // check if URI can be parsed correctly without adding scheme + // if the connection string is in format host:port or just host, the host is going to be null + // if the connection string contains IPv6 localhost [::1] the parsing will fail + URISyntaxException firstException = null; + try { + uri = new URI(connectionString); + if (uri.getHost() == null || uri.getScheme() == null) { + uri = null; + } + } catch (URISyntaxException e) { + firstException = e; + uri = null; + } + + if (uri == null) { + // We couldn't parse URI without adding scheme, let's try again with scheme this time + try { + return new URI("http://" + connectionString); + } catch (URISyntaxException e) { + IllegalArgumentException ie = + new IllegalArgumentException("Invalid connection configuration [" + connectionString + "]: " + e.getMessage(), e); + if (firstException != null) { + ie.addSuppressed(firstException); + } + throw ie; + } + } else { + // We managed to parse URI and all necessary pieces are present, let's make sure the scheme is correct + if ("http".equals(uri.getScheme()) == false && "https".equals(uri.getScheme()) == false) { + throw new IllegalArgumentException( + "Invalid connection configuration [" + connectionString + "]: Only http and https protocols are supported"); + } + return uri; + } + } + + /** + * Removes the query part of the URI + */ + public static URI removeQuery(URI uri, String connectionString, URI defaultURI) { + try { + return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), uri.getPath(), null, defaultURI.getFragment()); + } catch 
(URISyntaxException e) { + throw new IllegalArgumentException("Invalid connection configuration [" + connectionString + "]: " + e.getMessage(), e); + } + } +} diff --git a/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/Version.java b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/Version.java new file mode 100644 index 00000000000..f991aa3d792 --- /dev/null +++ b/sql/shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/Version.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.io.IOException; +import java.net.URL; +import java.util.Collections; +import java.util.Enumeration; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import java.util.jar.JarInputStream; +import java.util.jar.Manifest; + +public abstract class Version { + private static final String VER; + private static final String SHORT_HASH; + + private static final int VER_MAJ, VER_MIN, VER_REV; + + static int[] from(String ver) { + String[] parts = ver.split("[.-]"); + if (parts.length == 3 || parts.length == 4) { + return new int[] { Integer.parseInt(parts[0]), Integer.parseInt(parts[1]), Integer.parseInt(parts[2]) }; + } + else { + throw new Error("Detected Elasticsearch SQL jar but found invalid version " + ver); + } + } + + static { + // check classpath + String target = Version.class.getName().replace(".", "/").concat(".class"); + Enumeration res = null; + + try { + res = Version.class.getClassLoader().getResources(target); + } catch (IOException ex) { + throw new Error("Cannot detect Elasticsearch SQL jar; it typically indicates a deployment issue..."); + } + + if (res != null) { + List urls = Collections.list(res); 
+ Set normalized = new LinkedHashSet(); + + for (URL url : urls) { + normalized.add(StringUtils.normalize(url.toString())); + } + + int foundJars = 0; + if (normalized.size() > 1) { + StringBuilder sb = new StringBuilder( + "Multiple Elasticsearch SQL versions detected in the classpath; please use only one\n"); + for (String s : normalized) { + if (s.contains("jar:")) { + foundJars++; + sb.append(s.replace("!/" + target, "")); + sb.append("\n"); + } + } + if (foundJars > 1) { + throw new Error(sb.toString()); + } + } + } + + // This is similar to how Elasticsearch's Build class digs up its build information. + // Since version info is not critical, the parsing is lenient + URL url = Version.class.getProtectionDomain().getCodeSource().getLocation(); + String urlStr = url.toString(); + + int maj = 0, min = 0, rev = 0; + String ver = "Unknown"; + String hash = ver; + + if (urlStr.startsWith("file:/") && urlStr.endsWith("-SNAPSHOT.jar")) { + try (JarInputStream jar = new JarInputStream(url.openStream())) { + Manifest manifest = jar.getManifest(); + hash = manifest.getMainAttributes().getValue("Change"); + ver = manifest.getMainAttributes().getValue("X-Compile-Elasticsearch-Version"); + int[] vers = from(ver); + maj = vers[0]; + min = vers[1]; + rev = vers[2]; + } catch (Exception ex) { + throw new Error("Detected Elasticsearch SQL jar but cannot retrieve its version", ex); + } + } + VER_MAJ = maj; + VER_MIN = min; + VER_REV = rev; + VER = ver; + SHORT_HASH = hash; + } + + public static int versionMajor() { + return VER_MAJ; + } + + public static int versionMinor() { + return VER_MIN; + } + + public static int versionRevision() { + return VER_REV; + } + + public static String version() { + return "v" + versionNumber() + " [" + versionHash() + "]"; + } + + public static String versionNumber() { + return VER; + } + + public static String versionHash() { + return SHORT_HASH; + } + + public static int jdbcMajorVersion() { + return 4; + } + + public static int 
jdbcMinorVersion() { + return 2; + } +} diff --git a/sql/shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/RemoteFailureTests.java b/sql/shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/RemoteFailureTests.java new file mode 100644 index 00000000000..516cc56af85 --- /dev/null +++ b/sql/shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/RemoteFailureTests.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.util.Locale; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.startsWith; + +public class RemoteFailureTests extends ESTestCase { + public void testParseBasic() throws IOException { + RemoteFailure failure = parse("basic.json"); + assertEquals("illegal_argument_exception", failure.type()); + assertEquals("[sql/query] unknown field [test], parser not found", failure.reason()); + assertThat(failure.remoteTrace(), + containsString("at org.elasticsearch.common.xcontent.ObjectParser.getParser(ObjectParser.java:346)")); + assertNull(failure.cause()); + assertEquals(emptyMap(), failure.headers()); + } + + public void testParseNested() throws IOException { + RemoteFailure failure = parse("nested.json"); + assertEquals("parsing_exception", failure.type()); + assertEquals("line 1:1: no viable alternative at input 'test'", failure.reason()); + assertThat(failure.remoteTrace(), + containsString("at 
org.elasticsearch.xpack.sql.parser.SqlParser$1.syntaxError(SqlParser.java:151)")); + assertNotNull(failure.cause()); + + failure = failure.cause(); + assertEquals("no_viable_alt_exception", failure.type()); + assertEquals(null, failure.reason()); + assertThat(failure.remoteTrace(), + containsString("at org.antlr.v4.runtime.atn.ParserATNSimulator.noViableAlt(ParserATNSimulator.java:1886)")); + assertNull(failure.cause()); + assertEquals(emptyMap(), failure.headers()); + } + + public void testParseMissingAuth() throws IOException { + RemoteFailure failure = parse("missing_auth.json"); + assertEquals("security_exception", failure.type()); + assertEquals("missing authentication token for REST request [/?pretty&error_trace]", failure.reason()); + assertThat(failure.remoteTrace(), + containsString("DefaultAuthenticationFailureHandler.missingToken")); + assertNull(failure.cause()); + assertEquals(singletonMap("WWW-Authenticate", "Basic realm=\"security\" charset=\"UTF-8\""), + failure.headers()); + } + + public void testNoError() throws IOException { + IOException e = expectThrows(IOException.class, () -> parse("no_error.json")); + assertEquals( + "Can't parse error from Elasticearch [Expected [error] but didn't see it.] at [line 1 col 2]. Response:\n{}", + e.getMessage()); + } + + public void testBogusError() throws IOException { + IOException e = expectThrows(IOException.class, () -> parse("bogus_error.json")); + assertEquals( + "Can't parse error from Elasticearch [Expected [error] to be an object but was [VALUE_STRING][bogus]] " + + "at [line 1 col 12]. Response:\n" + + "{ \"error\": \"bogus\" }", + e.getMessage()); + } + + public void testNoStack() throws IOException { + IOException e = expectThrows(IOException.class, () -> parse("no_stack.json")); + assertThat(e.getMessage(), + startsWith("Can't parse error from Elasticearch [expected [stack_trace] cannot but " + + "didn't see it] at [line 5 col 3]. 
Response:\n{")); + } + + public void testNoType() throws IOException { + IOException e = expectThrows(IOException.class, () -> parse("no_type.json")); + assertThat(e.getMessage(), + startsWith("Can't parse error from Elasticearch [expected [type] but didn't see it] at [line 5 col 3]. Response:\n{")); + } + + public void testInvalidJson() throws IOException { + IOException e = expectThrows(IOException.class, () -> parse("invalid_json.txt")); + assertEquals( + "Can't parse error from Elasticearch [Unrecognized token 'I': was expecting 'null', 'true', 'false' or NaN] " + + "at [line 1 col 1]. Response:\n" + + "I'm not json at all", + e.getMessage()); + } + + public void testExceptionBuildingParser() throws IOException { + IOException e = expectThrows(IOException.class, () -> RemoteFailure.parseFromResponse(new InputStream() { + @Override + public int read() throws IOException { + throw new IOException("Testing error"); + } + })); + assertEquals( + "Can't parse error from Elasticearch [Testing error]. Attempted to include response but failed because [Testing error].", + e.getMessage()); + } + + public void testTotalGarbage() throws IOException { + IOException e = expectThrows(IOException.class, () -> + RemoteFailure.parseFromResponse(new BytesArray(new byte[] { + (byte) 0xEF, (byte) 0xBB, (byte) 0xBF, // The UTF-8 BOM + (byte) 0xFF // An invalid UTF-8 character + }).streamInput())); + assertThat(e.getMessage(), + startsWith("Can't parse error from Elasticearch [Unrecognized token 'ÿ': " + + "was expecting ('true', 'false' or 'null')] at [line 1 col 1]. 
Response:\n")); + } + + public void testTooBig() throws IOException { + StringBuilder tooBig = new StringBuilder(RemoteFailure.MAX_RAW_RESPONSE); + tooBig.append("{\n"); + tooBig.append("\"error\" : {\n"); + tooBig.append(" \"type\" : \"illegal_argument_exception\",\n"); + tooBig.append(" \"reason\" : \"something\",\n"); + tooBig.append(" \"header\" : {\n"); + int i = 0; + while (tooBig.length() < RemoteFailure.MAX_RAW_RESPONSE) { + tooBig.append(" \"").append(String.format(Locale.ROOT, "%04d", i++)) + .append("\" : \"lots and lots and lots and lots and lots of words\",\n"); + } + tooBig.append(" \"end\" : \"lots and lots and lots and lots and lots of words\"\n"); + tooBig.append(" }\n"); + tooBig.append("}\n"); + IOException e = expectThrows(IOException.class, () -> + RemoteFailure.parseFromResponse(new BytesArray(tooBig.toString()).streamInput())); + assertEquals( + "Can't parse error from Elasticearch [expected [stack_trace] cannot but didn't see it] " + + "at [line 7951 col 1]. Attempted to include response but failed because [Response too large].", + e.getMessage()); + } + + private RemoteFailure parse(String fileName) throws IOException { + try (InputStream in = Files.newInputStream(getDataPath("/remote_failure/" + fileName))) { + return RemoteFailure.parseFromResponse(in); + } + } +} diff --git a/sql/shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/StringUtilsTests.java b/sql/shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/StringUtilsTests.java new file mode 100644 index 00000000000..b758d361ab9 --- /dev/null +++ b/sql/shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/StringUtilsTests.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.client.shared; + +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.xpack.sql.client.shared.StringUtils.nullAsEmpty; + +public class StringUtilsTests extends ESTestCase { + public void testNullAsEmpty() { + assertEquals("", nullAsEmpty(null)); + assertEquals("", nullAsEmpty("")); + String rando = randomRealisticUnicodeOfCodepointLength(5); + assertEquals(rando, nullAsEmpty(rando)); + } +} diff --git a/sql/shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/UriUtilsTests.java b/sql/shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/UriUtilsTests.java new file mode 100644 index 00000000000..f75b20d0f0d --- /dev/null +++ b/sql/shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/UriUtilsTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.client.shared; + +import org.elasticsearch.test.ESTestCase; + +import java.net.URI; + +import static org.elasticsearch.xpack.sql.client.shared.UriUtils.parseURI; +import static org.elasticsearch.xpack.sql.client.shared.UriUtils.removeQuery; + +public class UriUtilsTests extends ESTestCase { + + public static URI DEFAULT_URI = URI.create("http://localhost:9200/"); + + public void testHostAndPort() throws Exception { + assertEquals(URI.create("http://server:9200/"), parseURI("server:9200", DEFAULT_URI)); + } + + public void testJustHost() throws Exception { + assertEquals(URI.create("http://server:9200/"), parseURI("server", DEFAULT_URI)); + } + + public void testHttpWithPort() throws Exception { + assertEquals(URI.create("http://server:9201/"), parseURI("http://server:9201", DEFAULT_URI)); + } + + public void testHttpsWithPort() throws Exception { + assertEquals(URI.create("https://server:9201/"), parseURI("https://server:9201", DEFAULT_URI)); + } + + public void testHttpNoPort() throws Exception { + assertEquals(URI.create("https://server:9200/"), parseURI("https://server", DEFAULT_URI)); + } + + public void testLocalhostV6() throws Exception { + assertEquals(URI.create("http://[::1]:51082/"), parseURI("[::1]:51082", DEFAULT_URI)); + } + + public void testHttpsWithUser() throws Exception { + assertEquals(URI.create("https://user@server:9200/"), parseURI("https://user@server", DEFAULT_URI)); + } + + public void testUserPassHost() throws Exception { + assertEquals(URI.create("http://user:password@server:9200/"), parseURI("user:password@server", DEFAULT_URI)); + } + + public void testHttpPath() throws Exception { + assertEquals(URI.create("https://server:9201/some_path"), parseURI("https://server:9201/some_path", DEFAULT_URI)); + } + + public void testHttpQuery() throws Exception { + assertEquals(URI.create("https://server:9201/?query"), parseURI("https://server:9201/?query", DEFAULT_URI)); + } + + public void 
testUnsupportedProtocol() throws Exception { + assertEquals( + "Invalid connection configuration [ftp://server:9201/]: Only http and https protocols are supported", + expectThrows(IllegalArgumentException.class, () -> parseURI("ftp://server:9201/", DEFAULT_URI)).getMessage() + ); + } + + public void testMalformed() throws Exception { + assertEquals( + "Invalid connection configuration []: Expected authority at index 7: http://", + expectThrows(IllegalArgumentException.class, () -> parseURI("", DEFAULT_URI)).getMessage() + ); + } + + public void testRemoveQuery() throws Exception { + assertEquals(URI.create("http://server:9100"), + removeQuery(URI.create("http://server:9100?query"), "http://server:9100?query", DEFAULT_URI)); + } + + public void testRemoveQueryTrailingSlash() throws Exception { + assertEquals(URI.create("http://server:9100/"), + removeQuery(URI.create("http://server:9100/?query"), "http://server:9100/?query", DEFAULT_URI)); + } + + public void testRemoveQueryNoQuery() throws Exception { + assertEquals(URI.create("http://server:9100"), + removeQuery(URI.create("http://server:9100"), "http://server:9100", DEFAULT_URI)); + } +} diff --git a/sql/shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/VersionTests.java b/sql/shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/VersionTests.java new file mode 100644 index 00000000000..f5fa466b986 --- /dev/null +++ b/sql/shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/VersionTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.client.shared; + +import org.elasticsearch.test.ESTestCase; + +public class VersionTests extends ESTestCase { + public void test70Version() { + int[] ver = Version.from("7.0.0-alpha"); + assertEquals(7, ver[0]); + assertEquals(0, ver[1]); + assertEquals(0, ver[2]); + } + + public void test712Version() { + int[] ver = Version.from("7.1.2"); + assertEquals(7, ver[0]); + assertEquals(1, ver[1]); + assertEquals(2, ver[2]); + } + + public void testCurrent() { + int[] ver = Version.from(org.elasticsearch.Version.CURRENT.toString()); + assertEquals(org.elasticsearch.Version.CURRENT.major, ver[0]); + assertEquals(org.elasticsearch.Version.CURRENT.minor, ver[1]); + assertEquals(org.elasticsearch.Version.CURRENT.revision, ver[2]); + } + + public void testInvalidVersion() { + Error err = expectThrows(Error.class, () -> Version.from("7.1")); + assertEquals("Detected Elasticsearch SQL jar but found invalid version 7.1", err.getMessage()); + } +} diff --git a/sql/shared-client/src/test/resources/remote_failure/basic.json b/sql/shared-client/src/test/resources/remote_failure/basic.json new file mode 100644 index 00000000000..2e2e157fa0b --- /dev/null +++ b/sql/shared-client/src/test/resources/remote_failure/basic.json @@ -0,0 +1,15 @@ +{ + "error" : { + "root_cause" : [ + { + "type" : "illegal_argument_exception", + "reason" : "[sql/query] unknown field [test], parser not found", + "stack_trace" : "[[sql/query] unknown field [test], parser not found]; nested: IllegalArgumentException[[sql/query] unknown field [test], parser not found];\n\tat org.elasticsearch.ElasticsearchException.guessRootCauses(ElasticsearchException.java:618)\n\tat org.elasticsearch.ElasticsearchException.generateFailureXContent(ElasticsearchException.java:563)\n\tat org.elasticsearch.rest.BytesRestResponse.build(BytesRestResponse.java:138)\n\tat org.elasticsearch.rest.BytesRestResponse.(BytesRestResponse.java:96)\n\tat 
org.elasticsearch.rest.BytesRestResponse.(BytesRestResponse.java:91)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:243)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:72)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat 
io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: java.lang.IllegalArgumentException: [sql/query] unknown field [test], parser not found\n\tat org.elasticsearch.common.xcontent.ObjectParser.getParser(ObjectParser.java:346)\n\tat org.elasticsearch.common.xcontent.ObjectParser.parse(ObjectParser.java:159)\n\tat org.elasticsearch.common.xcontent.ObjectParser.apply(ObjectParser.java:183)\n\tat org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction.prepareRequest(RestSqlAction.java:47)\n\tat org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:80)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\t... 
45 more\n" + } + ], + "type" : "illegal_argument_exception", + "reason" : "[sql/query] unknown field [test], parser not found", + "stack_trace" : "java.lang.IllegalArgumentException: [sql/query] unknown field [test], parser not found\n\tat org.elasticsearch.common.xcontent.ObjectParser.getParser(ObjectParser.java:346)\n\tat org.elasticsearch.common.xcontent.ObjectParser.parse(ObjectParser.java:159)\n\tat org.elasticsearch.common.xcontent.ObjectParser.apply(ObjectParser.java:183)\n\tat org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction.prepareRequest(RestSqlAction.java:47)\n\tat org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:80)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:72)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\n" + }, + "status" : 400 +} diff --git a/sql/shared-client/src/test/resources/remote_failure/bogus_error.json b/sql/shared-client/src/test/resources/remote_failure/bogus_error.json new file mode 100644 index 00000000000..f79361cec1c --- /dev/null +++ b/sql/shared-client/src/test/resources/remote_failure/bogus_error.json @@ -0,0 +1 @@ +{ "error": "bogus" } \ No newline at end of file diff --git a/sql/shared-client/src/test/resources/remote_failure/invalid_json.txt 
b/sql/shared-client/src/test/resources/remote_failure/invalid_json.txt new file mode 100644 index 00000000000..e7da6bcf1ab --- /dev/null +++ b/sql/shared-client/src/test/resources/remote_failure/invalid_json.txt @@ -0,0 +1 @@ +I'm not json at all \ No newline at end of file diff --git a/sql/shared-client/src/test/resources/remote_failure/missing_auth.json b/sql/shared-client/src/test/resources/remote_failure/missing_auth.json new file mode 100644 index 00000000000..3d2927f85d6 --- /dev/null +++ b/sql/shared-client/src/test/resources/remote_failure/missing_auth.json @@ -0,0 +1,21 @@ +{ + "error" : { + "root_cause" : [ + { + "type" : "security_exception", + "reason" : "missing authentication token for REST request [/?pretty&error_trace]", + "header" : { + "WWW-Authenticate" : "Basic realm=\"security\" charset=\"UTF-8\"" + }, + "stack_trace" : "ElasticsearchSecurityException[missing authentication token for REST request [/?pretty&error_trace]]\n\tat org.elasticsearch.xpack.security.support.Exceptions.authenticationError(Exceptions.java:36)\n\tat org.elasticsearch.xpack.security.authc.DefaultAuthenticationFailureHandler.missingToken(DefaultAuthenticationFailureHandler.java:69)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$AuditableRestRequest.anonymousAccessDenied(AuthenticationService.java:603)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$handleNullToken$17(AuthenticationService.java:357)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.handleNullToken(AuthenticationService.java:362)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.consumeToken(AuthenticationService.java:277)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$extractToken$7(AuthenticationService.java:249)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.extractToken(AuthenticationService.java:266)\n\tat 
org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$null$0(AuthenticationService.java:201)\n\tat org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:59)\n\tat org.elasticsearch.xpack.security.authc.TokenService.getAndValidateToken(TokenService.java:230)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$authenticateAsync$2(AuthenticationService.java:197)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$lookForExistingAuthentication$4(AuthenticationService.java:228)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lookForExistingAuthentication(AuthenticationService.java:239)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.authenticateAsync(AuthenticationService.java:193)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.access$000(AuthenticationService.java:147)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService.authenticate(AuthenticationService.java:99)\n\tat org.elasticsearch.xpack.security.rest.SecurityRestFilter.handleRequest(SecurityRestFilter.java:69)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:80)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat 
io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\n" + } + ], + "type" : "security_exception", + "reason" : "missing authentication token for REST request [/?pretty&error_trace]", + "header" : { + "WWW-Authenticate" : "Basic realm=\"security\" charset=\"UTF-8\"" + }, + "stack_trace" : "ElasticsearchSecurityException[missing authentication token for REST request [/?pretty&error_trace]]\n\tat org.elasticsearch.xpack.security.support.Exceptions.authenticationError(Exceptions.java:36)\n\tat org.elasticsearch.xpack.security.authc.DefaultAuthenticationFailureHandler.missingToken(DefaultAuthenticationFailureHandler.java:69)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$AuditableRestRequest.anonymousAccessDenied(AuthenticationService.java:603)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$handleNullToken$17(AuthenticationService.java:357)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.handleNullToken(AuthenticationService.java:362)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.consumeToken(AuthenticationService.java:277)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$extractToken$7(AuthenticationService.java:249)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.extractToken(AuthenticationService.java:266)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$null$0(AuthenticationService.java:201)\n\tat org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:59)\n\tat org.elasticsearch.xpack.security.authc.TokenService.getAndValidateToken(TokenService.java:230)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$authenticateAsync$2(AuthenticationService.java:197)\n\tat 
org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$lookForExistingAuthentication$4(AuthenticationService.java:228)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lookForExistingAuthentication(AuthenticationService.java:239)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.authenticateAsync(AuthenticationService.java:193)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.access$000(AuthenticationService.java:147)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService.authenticate(AuthenticationService.java:99)\n\tat org.elasticsearch.xpack.security.rest.SecurityRestFilter.handleRequest(SecurityRestFilter.java:69)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:80)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\n" + }, + "status" : 401 +} diff --git a/sql/shared-client/src/test/resources/remote_failure/nested.json b/sql/shared-client/src/test/resources/remote_failure/nested.json new file mode 100644 index 00000000000..1b8d0cd02c7 --- /dev/null +++ b/sql/shared-client/src/test/resources/remote_failure/nested.json @@ -0,0 +1,21 @@ +{ + "error" : { + "root_cause" : [ + { + 
"type" : "parsing_exception", + "reason" : "line 1:1: no viable alternative at input 'test'", + "stack_trace" : "ParsingException[line 1:1: no viable alternative at input 'test']; nested: NoViableAltException;\n\tat org.elasticsearch.xpack.sql.parser.SqlParser$1.syntaxError(SqlParser.java:151)\n\tat org.antlr.v4.runtime.ProxyErrorListener.syntaxError(ProxyErrorListener.java:65)\n\tat org.antlr.v4.runtime.Parser.notifyErrorListeners(Parser.java:566)\n\tat org.antlr.v4.runtime.DefaultErrorStrategy.reportNoViableAlternative(DefaultErrorStrategy.java:308)\n\tat org.antlr.v4.runtime.DefaultErrorStrategy.reportError(DefaultErrorStrategy.java:145)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.statement(SqlBaseParser.java:726)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.singleStatement(SqlBaseParser.java:173)\n\tat org.elasticsearch.xpack.sql.parser.SqlParser.invokeParser(SqlParser.java:81)\n\tat org.elasticsearch.xpack.sql.parser.SqlParser.createStatement(SqlParser.java:43)\n\tat org.elasticsearch.xpack.sql.session.SqlSession.parse(SqlSession.java:121)\n\tat org.elasticsearch.xpack.sql.session.SqlSession.executable(SqlSession.java:144)\n\tat org.elasticsearch.xpack.sql.execution.PlanExecutor.sql(PlanExecutor.java:89)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.operation(TransportSqlAction.java:71)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.doExecute(TransportSqlAction.java:60)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.doExecute(TransportSqlAction.java:41)\n\tat org.elasticsearch.action.support.TransportAction.doExecute(TransportAction.java:143)\n\tat org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:167)\n\tat org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:139)\n\tat org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:81)\n\tat 
org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83)\n\tat org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction.lambda$prepareRequest$0(RestSqlAction.java:50)\n\tat org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:97)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:72)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: org.antlr.v4.runtime.NoViableAltException\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.noViableAlt(ParserATNSimulator.java:1886)\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.execATN(ParserATNSimulator.java:486)\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.adaptivePredict(ParserATNSimulator.java:412)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.statement(SqlBaseParser.java:477)\n\t... 
62 more\n" + } + ], + "type" : "parsing_exception", + "reason" : "line 1:1: no viable alternative at input 'test'", + "caused_by" : { + "type" : "no_viable_alt_exception", + "reason" : null, + "stack_trace" : "org.antlr.v4.runtime.NoViableAltException\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.noViableAlt(ParserATNSimulator.java:1886)\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.execATN(ParserATNSimulator.java:486)\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.adaptivePredict(ParserATNSimulator.java:412)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.statement(SqlBaseParser.java:477)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.singleStatement(SqlBaseParser.java:173)\n\tat org.elasticsearch.xpack.sql.parser.SqlParser.invokeParser(SqlParser.java:81)\n\tat org.elasticsearch.xpack.sql.parser.SqlParser.createStatement(SqlParser.java:43)\n\tat org.elasticsearch.xpack.sql.session.SqlSession.parse(SqlSession.java:121)\n\tat org.elasticsearch.xpack.sql.session.SqlSession.executable(SqlSession.java:144)\n\tat org.elasticsearch.xpack.sql.execution.PlanExecutor.sql(PlanExecutor.java:89)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.operation(TransportSqlAction.java:71)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.doExecute(TransportSqlAction.java:60)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.doExecute(TransportSqlAction.java:41)\n\tat org.elasticsearch.action.support.TransportAction.doExecute(TransportAction.java:143)\n\tat org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:167)\n\tat org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:139)\n\tat org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:81)\n\tat org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83)\n\tat 
org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction.lambda$prepareRequest$0(RestSqlAction.java:50)\n\tat org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:97)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:72)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat 
io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\n" + }, + "stack_trace" : "ParsingException[line 1:1: no viable alternative at input 'test']; nested: NoViableAltException;\n\tat org.elasticsearch.xpack.sql.parser.SqlParser$1.syntaxError(SqlParser.java:151)\n\tat org.antlr.v4.runtime.ProxyErrorListener.syntaxError(ProxyErrorListener.java:65)\n\tat org.antlr.v4.runtime.Parser.notifyErrorListeners(Parser.java:566)\n\tat org.antlr.v4.runtime.DefaultErrorStrategy.reportNoViableAlternative(DefaultErrorStrategy.java:308)\n\tat org.antlr.v4.runtime.DefaultErrorStrategy.reportError(DefaultErrorStrategy.java:145)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.statement(SqlBaseParser.java:726)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.singleStatement(SqlBaseParser.java:173)\n\tat org.elasticsearch.xpack.sql.parser.SqlParser.invokeParser(SqlParser.java:81)\n\tat org.elasticsearch.xpack.sql.parser.SqlParser.createStatement(SqlParser.java:43)\n\tat org.elasticsearch.xpack.sql.session.SqlSession.parse(SqlSession.java:121)\n\tat 
org.elasticsearch.xpack.sql.session.SqlSession.executable(SqlSession.java:144)\n\tat org.elasticsearch.xpack.sql.execution.PlanExecutor.sql(PlanExecutor.java:89)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.operation(TransportSqlAction.java:71)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.doExecute(TransportSqlAction.java:60)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.doExecute(TransportSqlAction.java:41)\n\tat org.elasticsearch.action.support.TransportAction.doExecute(TransportAction.java:143)\n\tat org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:167)\n\tat org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:139)\n\tat org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:81)\n\tat org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83)\n\tat org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction.lambda$prepareRequest$0(RestSqlAction.java:50)\n\tat org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:97)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:72)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat 
io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: org.antlr.v4.runtime.NoViableAltException\n\tat 
org.antlr.v4.runtime.atn.ParserATNSimulator.noViableAlt(ParserATNSimulator.java:1886)\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.execATN(ParserATNSimulator.java:486)\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.adaptivePredict(ParserATNSimulator.java:412)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.statement(SqlBaseParser.java:477)\n\t... 62 more\n" + }, + "status" : 500 +} + \ No newline at end of file diff --git a/sql/shared-client/src/test/resources/remote_failure/no_error.json b/sql/shared-client/src/test/resources/remote_failure/no_error.json new file mode 100644 index 00000000000..9e26dfeeb6e --- /dev/null +++ b/sql/shared-client/src/test/resources/remote_failure/no_error.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/sql/shared-client/src/test/resources/remote_failure/no_stack.json b/sql/shared-client/src/test/resources/remote_failure/no_stack.json new file mode 100644 index 00000000000..2302729a988 --- /dev/null +++ b/sql/shared-client/src/test/resources/remote_failure/no_stack.json @@ -0,0 +1,6 @@ +{ + "error" : { + "type" : "illegal_argument_exception", + "reason" : "[sql/query] unknown field [test], parser not found" + } +} diff --git a/sql/shared-client/src/test/resources/remote_failure/no_type.json b/sql/shared-client/src/test/resources/remote_failure/no_type.json new file mode 100644 index 00000000000..fe453fa3d86 --- /dev/null +++ b/sql/shared-client/src/test/resources/remote_failure/no_type.json @@ -0,0 +1,6 @@ +{ + "error" : { + "reason" : "[sql/query] unknown field [test], parser not found", + "stack_trace" : "java.lang.IllegalArgumentException: [sql/query] unknown field [test], parser not found\n\tat org.elasticsearch.common.xcontent.ObjectParser.getParser(ObjectParser.java:346)\n\tat org.elasticsearch.common.xcontent.ObjectParser.parse(ObjectParser.java:159)\n\tat org.elasticsearch.common.xcontent.ObjectParser.apply(ObjectParser.java:183)\n\tat 
org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction.prepareRequest(RestSqlAction.java:47)\n\tat org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:80)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:72)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat 
io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\n" + } +} diff --git a/sql/shared-client/src/test/resources/ssl/client.keystore b/sql/shared-client/src/test/resources/ssl/client.keystore new file mode 100644 index 00000000000..07d44636234 Binary files /dev/null and b/sql/shared-client/src/test/resources/ssl/client.keystore differ diff --git a/sql/shared-client/src/test/resources/ssl/readme.txt b/sql/shared-client/src/test/resources/ssl/readme.txt new file mode 100644 index 00000000000..769aa43abf0 --- /dev/null +++ b/sql/shared-client/src/test/resources/ssl/readme.txt @@ -0,0 +1,13 @@ +# setup of the SSL files + +# generate keys for server and client +$ keytool -v -genkey -keyalg rsa -alias server -keypass password -keystore server.keystore -storepass password -validity 99999 -ext SAN=dns:localhost,ip:127.0.0.1 +$ keytool -v -genkey -keyalg rsa -alias client -keypass password -keystore client.keystore -storepass password -validity 99999 -ext SAN=dns:localhost,ip:127.0.0.1 + +# generate certificates +$ keytool -v -export -alias server -file server.crt 
-keystore server.keystore -storepass password +$ keytool -v -export -alias client -file client.crt -keystore client.keystore -storepass password + +# import the client cert into the server keystore and vice-versa +$ keytool -v -importcert -alias client -file client.crt -keystore server.keystore -storepass password +$ keytool -v -importcert -alias server -file server.crt -keystore client.keystore -storepass password \ No newline at end of file diff --git a/sql/shared-client/src/test/resources/ssl/server.keystore b/sql/shared-client/src/test/resources/ssl/server.keystore new file mode 100644 index 00000000000..3a2e80d77bb Binary files /dev/null and b/sql/shared-client/src/test/resources/ssl/server.keystore differ diff --git a/sql/shared-proto/build.gradle b/sql/shared-proto/build.gradle new file mode 100644 index 00000000000..b1be23ac1a7 --- /dev/null +++ b/sql/shared-proto/build.gradle @@ -0,0 +1,10 @@ +description = 'Request and response objects shared by the cli and jdbc and the server.' + +dependencies { + testCompile project(':x-pack-elasticsearch:sql:test-utils') +} + +forbiddenApisMain { + // does not depend on core, so only jdk and http signatures should be checked + signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractInfoRequest.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractInfoRequest.java new file mode 100644 index 00000000000..f70793ae464 --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractInfoRequest.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.io.IOException; +import java.util.Objects; + +/** + * Request general information about the server. + */ +public abstract class AbstractInfoRequest extends Request { + public final String jvmVersion, jvmVendor, jvmClassPath, osName, osVersion; + + /** + * Build the info request containing information about the current JVM. + */ + protected AbstractInfoRequest() { + jvmVersion = System.getProperty("java.version", ""); + jvmVendor = System.getProperty("java.vendor", ""); + jvmClassPath = System.getProperty("java.class.path", ""); + osName = System.getProperty("os.name", ""); + osVersion = System.getProperty("os.version", ""); + } + + protected AbstractInfoRequest(String jvmVersion, String jvmVendor, String jvmClassPath, String osName, String osVersion) { + this.jvmVersion = jvmVersion; + this.jvmVendor = jvmVendor; + this.jvmClassPath = jvmClassPath; + this.osName = osName; + this.osVersion = osVersion; + } + + protected AbstractInfoRequest(SqlDataInput in) throws IOException { + jvmVersion = in.readUTF(); + jvmVendor = in.readUTF(); + jvmClassPath = in.readUTF(); + osName = in.readUTF(); + osVersion = in.readUTF(); + } + + @Override + public final void writeTo(SqlDataOutput out) throws IOException { + out.writeUTF(jvmVersion); + out.writeUTF(jvmVendor); + out.writeUTF(jvmClassPath); + out.writeUTF(osName); + out.writeUTF(osVersion); + } + + @Override + protected final String toStringBody() { + return "jvm=[version=[" + jvmVersion + + "] vendor=[" + jvmVendor + + "] classPath=[" + jvmClassPath + + "]] os=[name=[" + osName + + "] version=[" + osVersion + "]]"; + } + + @Override + public final boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + AbstractInfoRequest other = (AbstractInfoRequest) obj; + return Objects.equals(jvmVersion, other.jvmVersion) + && Objects.equals(jvmVendor, other.jvmVendor) + && Objects.equals(jvmClassPath, 
other.jvmClassPath) + && Objects.equals(osName, other.osName) + && Objects.equals(osVersion, other.osVersion); + } + + @Override + public final int hashCode() { + return Objects.hash(jvmVersion, jvmVendor, jvmClassPath, osName, osVersion); + } +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractInfoResponse.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractInfoResponse.java new file mode 100644 index 00000000000..bc4141adf3c --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractInfoResponse.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.io.DataInput; +import java.io.IOException; +import java.util.Objects; + +/** + * General information about the server. 
+ */ +public abstract class AbstractInfoResponse extends Response { + public final String node, cluster, versionString, versionHash, versionDate; + public final int majorVersion, minorVersion; + + protected AbstractInfoResponse(String nodeName, String clusterName, byte versionMajor, byte versionMinor, String version, + String versionHash, String versionDate) { + this.node = nodeName; + this.cluster = clusterName; + this.versionString = version; + this.versionHash = versionHash; + this.versionDate = versionDate; + + this.majorVersion = versionMajor; + this.minorVersion = versionMinor; + } + + protected AbstractInfoResponse(Request request, DataInput in) throws IOException { + node = in.readUTF(); + cluster = in.readUTF(); + majorVersion = in.readByte(); + minorVersion = in.readByte(); + versionString = in.readUTF(); + versionHash = in.readUTF(); + versionDate = in.readUTF(); + } + + @Override + protected final void writeTo(SqlDataOutput out) throws IOException { + out.writeUTF(node); + out.writeUTF(cluster); + out.writeByte(majorVersion); + out.writeByte(minorVersion); + out.writeUTF(versionString); + out.writeUTF(versionHash); + out.writeUTF(versionDate); + } + + @Override + protected final String toStringBody() { + return "node=[" + node + + "] cluster=[" + cluster + + "] version=[" + versionString + + "]/[major=[" + majorVersion + + "] minor=[" + minorVersion + + "] hash=[" + versionHash + + "] date=[" + versionDate + "]]"; + } + + @Override + public final boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + AbstractInfoResponse other = (AbstractInfoResponse) obj; + return Objects.equals(node, other.node) + && Objects.equals(cluster, other.cluster) + && Objects.equals(majorVersion, other.majorVersion) + && Objects.equals(minorVersion, other.minorVersion) + && Objects.equals(versionString, other.versionString) + && Objects.equals(versionHash, other.versionHash) + && Objects.equals(versionDate, other.versionDate); 
+ } + + @Override + public final int hashCode() { + return Objects.hash(node, cluster, majorVersion, minorVersion, versionString, versionHash, versionDate); + } +} \ No newline at end of file diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractProto.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractProto.java new file mode 100644 index 00000000000..50e3924b233 --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractProto.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * Base implementation for the binary protocol for the CLI and JDBC. + * All backwards compatibility is done on the server side using the + * version number sent in the header. + */ +public abstract class AbstractProto { + private static final int MAGIC_NUMBER = 0x0C0DEC110; + public static final int CURRENT_VERSION = 000_000_001; + + public void writeRequest(Request request, DataOutput out) throws IOException { + writeHeader(CURRENT_VERSION, out); + request.requestType().writeTo(out); + request.writeTo(new SqlDataOutput(out, CURRENT_VERSION)); + } + + public SqlDataInput clientStream(DataInput in) throws IOException { + int clientVersion = readHeader(in); + if (clientVersion > CURRENT_VERSION) { + throw new IOException("Unknown client version [" + clientVersion + "]. 
Always upgrade client last."); + } + return new SqlDataInput(in, clientVersion); + } + + public Request readRequest(SqlDataInput in) throws IOException { + return readRequestType(in).reader().read(in); + } + + public Request readRequest(DataInput in) throws IOException { + SqlDataInput client = clientStream(in); + return readRequest(client); + } + + public void writeResponse(Response response, int clientVersion, DataOutput out) throws IOException { + writeHeader(clientVersion, out); + response.responseType().writeTo(out); + response.writeTo(new SqlDataOutput(out, clientVersion)); + } + + public Response readResponse(Request request, DataInput in) throws IOException { + int version = readHeader(in); + if (version != CURRENT_VERSION) { + throw new IOException("Response version [" + version + "] does not match client version [" + + CURRENT_VERSION + "]. Server is busted."); + } + // TODO why do I need the response type at all? Just a byte for err/exception/normal, then get response type from request. + Response response = readResponseType(in).reader().read(request, new SqlDataInput(in, version)); + if (response.requestType() != request.requestType()) { + throw new IOException("Expected request type to be [" + request.requestType() + + "] but was [" + response.requestType() + "]. 
Server is busted."); + } + return response; + } + + protected abstract RequestType readRequestType(DataInput in) throws IOException; + protected abstract ResponseType readResponseType(DataInput in) throws IOException; + @FunctionalInterface + protected interface RequestReader { + Request read(SqlDataInput in) throws IOException; + } + protected interface RequestType { + void writeTo(DataOutput out) throws IOException; + RequestReader reader(); + } + @FunctionalInterface + protected interface ResponseReader { + Response read(Request request, SqlDataInput in) throws IOException; + } + protected interface ResponseType { + void writeTo(DataOutput out) throws IOException; + ResponseReader reader(); + } + + private static void writeHeader(int clientVersion, DataOutput out) throws IOException { + out.writeInt(MAGIC_NUMBER); + out.writeInt(clientVersion); + } + + /** + * Read the protocol header. + * @return the version + * @throws IOException if there is an underlying {@linkplain IOException} or if the protocol is malformed + */ + private static int readHeader(DataInput in) throws IOException { + int magic = in.readInt(); + if (magic != MAGIC_NUMBER) { + throw new IOException("Unknown protocol magic number [" + Integer.toHexString(magic) + "]"); + } + int version = in.readInt(); + return version; + } +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryCloseRequest.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryCloseRequest.java new file mode 100644 index 00000000000..b2e7f895373 --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryCloseRequest.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.io.IOException; +import java.util.Objects; + +public abstract class AbstractQueryCloseRequest extends Request { + public final String cursor; + + protected AbstractQueryCloseRequest(String cursor) { + if (cursor == null) { + throw new IllegalArgumentException("[cursor] must not be null"); + } + this.cursor = cursor; + } + + protected AbstractQueryCloseRequest(SqlDataInput in) throws IOException { + this.cursor = in.readUTF(); + } + + @Override + public void writeTo(SqlDataOutput out) throws IOException { + out.writeUTF(cursor); + } + + @Override + protected String toStringBody() { + return cursor; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + AbstractQueryCloseRequest other = (AbstractQueryCloseRequest) obj; + return Objects.equals(cursor, other.cursor); + } + + @Override + public int hashCode() { + return Objects.hash(cursor); + } +} + diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryCloseResponse.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryCloseResponse.java new file mode 100644 index 00000000000..479f2d8b25b --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryCloseResponse.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.io.DataInput; +import java.io.IOException; +import java.util.Objects; + +/** + * Superclass for responses both for {@link AbstractQueryInitRequest} + * and {@link AbstractQueryPageRequest}. 
+ */ +public abstract class AbstractQueryCloseResponse extends Response { + private final boolean succeeded; + + protected AbstractQueryCloseResponse(boolean succeeded) { + this.succeeded = succeeded; + } + + protected AbstractQueryCloseResponse(Request request, DataInput in) throws IOException { + succeeded = in.readBoolean(); + } + + @Override + protected void writeTo(SqlDataOutput out) throws IOException { + out.writeBoolean(succeeded); + } + + /** + * True if the cursor was really closed + */ + public boolean succeeded() { + return succeeded; + } + + @Override + protected String toStringBody() { + return Boolean.toString(succeeded); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + AbstractQueryCloseResponse other = (AbstractQueryCloseResponse) obj; + return succeeded == other.succeeded; + } + + @Override + public int hashCode() { + return Objects.hash(succeeded); + } +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryInitRequest.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryInitRequest.java new file mode 100644 index 00000000000..34108571bac --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryInitRequest.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.io.IOException; +import java.util.Objects; +import java.util.TimeZone; + +public abstract class AbstractQueryInitRequest extends Request { + /** + * Global choice for the default fetch size. 
+ */ + public static final int DEFAULT_FETCH_SIZE = 1000; + + public final String query; + public final int fetchSize; + public final TimeZone timeZone; + public final TimeoutInfo timeout; + + protected AbstractQueryInitRequest(String query, int fetchSize, TimeZone timeZone, TimeoutInfo timeout) { + this.query = query; + this.fetchSize = fetchSize; + this.timeZone = timeZone; + this.timeout = timeout; + } + + protected AbstractQueryInitRequest(SqlDataInput in) throws IOException { + query = in.readUTF(); + fetchSize = in.readInt(); + timeZone = TimeZone.getTimeZone(in.readUTF()); + timeout = new TimeoutInfo(in); + } + + @Override + public void writeTo(SqlDataOutput out) throws IOException { + out.writeUTF(query); + out.writeInt(fetchSize); + out.writeUTF(timeZone.getID()); + timeout.writeTo(out); + } + + @Override + protected String toStringBody() { + StringBuilder b = new StringBuilder(); + b.append("query=[").append(query).append(']'); + if (false == timeZone.getID().equals("UTC")) { + b.append(" timeZone=[").append(timeZone.getID()).append(']'); + } + return b.toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + AbstractQueryInitRequest other = (AbstractQueryInitRequest) obj; + return fetchSize == other.fetchSize + && Objects.equals(query, other.query) + && Objects.equals(timeout, other.timeout) + && Objects.equals(timeZone.getID(), other.timeZone.getID()); + } + + @Override + public int hashCode() { + return Objects.hash(fetchSize, query, timeout, timeZone.getID().hashCode()); + } + +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryPageRequest.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryPageRequest.java new file mode 100644 index 00000000000..cf4ce9c2968 --- /dev/null +++ 
b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryPageRequest.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.io.IOException; +import java.util.Objects; + +public abstract class AbstractQueryPageRequest extends Request { + public final String cursor; + public final TimeoutInfo timeout; + + protected AbstractQueryPageRequest(String cursor, TimeoutInfo timeout) { + if (cursor == null) { + throw new IllegalArgumentException("[cursor] must not be null"); + } + if (timeout == null) { + throw new IllegalArgumentException("[timeout] must not be null"); + } + this.cursor = cursor; + this.timeout = timeout; + } + + protected AbstractQueryPageRequest(SqlDataInput in) throws IOException { + this.cursor = in.readUTF(); + this.timeout = new TimeoutInfo(in); + } + + @Override + public void writeTo(SqlDataOutput out) throws IOException { + out.writeUTF(cursor); + timeout.writeTo(out); + } + + @Override + protected String toStringBody() { + return cursor; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + AbstractQueryPageRequest other = (AbstractQueryPageRequest) obj; + return Objects.equals(cursor, other.cursor) + && timeout.equals(other.timeout); + } + + @Override + public int hashCode() { + return Objects.hash(cursor, timeout); + } +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryResponse.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryResponse.java new file mode 100644 index 00000000000..f1ba90400dc --- /dev/null +++ 
b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/AbstractQueryResponse.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.io.DataInput; +import java.io.IOException; +import java.util.Objects; + +/** + * Superclass for responses both for {@link AbstractQueryInitRequest} + * and {@link AbstractQueryPageRequest}. + */ +public abstract class AbstractQueryResponse extends Response { + private final long tookNanos; + private final String cursor; + + protected AbstractQueryResponse(long tookNanos, String cursor) { + if (cursor == null) { + throw new IllegalArgumentException("cursor must not be null"); + } + this.tookNanos = tookNanos; + this.cursor = cursor; + } + + protected AbstractQueryResponse(Request request, DataInput in) throws IOException { + tookNanos = in.readLong(); + cursor = in.readUTF(); + } + + @Override + protected void writeTo(SqlDataOutput out) throws IOException { + out.writeLong(tookNanos); + out.writeUTF(cursor); + } + + /** + * How long the request took on the server as measured by + * {@link System#nanoTime()}. + */ + public long tookNanos() { + return tookNanos; + } + + /** + * Cursor for fetching the next page. If it has {@code length = 0} + * then there is no next page. 
+ */ + public String cursor() { + return cursor; + } + + @Override + protected String toStringBody() { + StringBuilder b = new StringBuilder(); + b.append("tookNanos=[").append(tookNanos); + b.append("] cursor=["); + b.append(cursor); + b.append("]"); + return b.toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + AbstractQueryResponse other = (AbstractQueryResponse) obj; + return tookNanos == other.tookNanos + && Objects.equals(cursor, other.cursor); + } + + @Override + public int hashCode() { + return Objects.hash(tookNanos, cursor); + } +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/Nullable.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/Nullable.java new file mode 100644 index 00000000000..e640798fcb8 --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/Nullable.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * The presence of this annotation on a method parameter indicates that + * {@code null} is an acceptable value for that parameter. It should not be + * used for parameters of primitive types. 
+ */ +@Documented +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.PARAMETER, ElementType.FIELD, ElementType.METHOD}) +public @interface Nullable { +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/ProtoUtil.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/ProtoUtil.java new file mode 100644 index 00000000000..eb143ee1d8e --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/ProtoUtil.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.io.DataInput; +import java.io.IOException; + +public class ProtoUtil { + private static final int MAX_ARRAY_SIZE = 5 * 1024 * 1024 * 1024; + + public static int readArraySize(DataInput in) throws IOException { + int length = in.readInt(); + if (length > MAX_ARRAY_SIZE) { + throw new IOException("array size unbelievably long [" + length + "]"); + } + return length; + } +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/Request.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/Request.java new file mode 100644 index 00000000000..7bac068f9d2 --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/Request.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto.RequestType; + +import java.io.DataOutput; +import java.io.IOException; + +public abstract class Request { + @Override + public final String toString() { + return getClass().getSimpleName() + "<" + toStringBody() + ">"; + } + + /** + * Write this request to the {@link DataOutput}. Implementers should + * be kind and stick this right under the ctor that reads the response. + */ + protected abstract void writeTo(SqlDataOutput out) throws IOException; + + /** + * Body to go into the {@link #toString()} result. + */ + protected abstract String toStringBody(); + + /** + * Type of this request. + */ + public abstract RequestType requestType(); + + /* + * Must properly implement {@linkplain #equals(Object)} for + * round trip testing. + */ + @Override + public abstract boolean equals(Object obj); + + /* + * Must properly implement {@linkplain #hashCode()} for + * round trip testing. + */ + @Override + public abstract int hashCode(); +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/Response.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/Response.java new file mode 100644 index 00000000000..05d15c4adab --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/Response.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto.RequestType; +import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto.ResponseType; + +import java.io.DataOutput; +import java.io.IOException; + +public abstract class Response { + @Override + public final String toString() { + return getClass().getSimpleName() + "<" + toStringBody() + ">"; + } + + /** + * Write this response to the {@link DataOutput}. + */ + protected abstract void writeTo(SqlDataOutput out) throws IOException; + + /** + * Body to go into the {@link #toString()} result. + */ + protected abstract String toStringBody(); + + /** + * Type of the request for which this is the response. + */ + public abstract RequestType requestType(); + + /** + * Type of this response. + */ + public abstract ResponseType responseType(); + + /* + * Must properly implement {@linkplain #equals(Object)} for + * round trip testing. + */ + @Override + public abstract boolean equals(Object obj); + + /* + * Must properly implement {@linkplain #hashCode()} for + * round trip testing. + */ + @Override + public abstract int hashCode(); +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/SqlDataInput.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/SqlDataInput.java new file mode 100644 index 00000000000..14941f066cd --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/SqlDataInput.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.io.DataInput; +import java.io.IOException; + +/** + * {@linkplain DataInput} customized for SQL. It has: + *
+ * <ul>
+ * <li>{@link #version}. This allows us to add new fields
+ * to the protocol in a backwards compatible way by bumping
+ * the version number.</li>
+ * </ul>
+ */public final class SqlDataInput implements DataInput { + private final DataInput delegate; + private final int version; + + public SqlDataInput(DataInput delegate, int version) { + this.delegate = delegate; + this.version = version; + } + + /** + * Version of the protocol to use. When new fields are added + * to the protocol we bump the maximum version. Requests and + * responses use the minimum version understood by both the + * client and the server. + */ + public int version() { + return version; + } + + /** + * Override the built-in {@link DataInput#readUTF()} + * to support strings that need more than 65535 charcters. + */ + @Override + public String readUTF() throws IOException { + int splits = delegate.readInt(); + if (splits == 0) { + return delegate.readUTF(); + } + StringBuilder b = new StringBuilder(SqlDataOutput.WORST_CASE_SPLIT * splits); + for (int i = 0; i < splits; i++) { + b.append(delegate.readUTF()); + } + return b.toString(); + } + + @Override + public void readFully(byte[] b) throws IOException { + delegate.readFully(b); + } + + @Override + public void readFully(byte[] b, int off, int len) throws IOException { + delegate.readFully(b, off, len); + } + + @Override + public int skipBytes(int n) throws IOException { + return delegate.skipBytes(n); + } + + @Override + public boolean readBoolean() throws IOException { + return delegate.readBoolean(); + } + + @Override + public byte readByte() throws IOException { + return delegate.readByte(); + } + + @Override + public int readUnsignedByte() throws IOException { + return delegate.readUnsignedByte(); + } + + @Override + public short readShort() throws IOException { + return delegate.readShort(); + } + + @Override + public int readUnsignedShort() throws IOException { + return delegate.readUnsignedShort(); + } + + @Override + public char readChar() throws IOException { + return delegate.readChar(); + } + + @Override + public int readInt() throws IOException { + return delegate.readInt(); + } + + 
@Override + public long readLong() throws IOException { + return delegate.readLong(); + } + + @Override + public float readFloat() throws IOException { + return delegate.readFloat(); + } + + @Override + public double readDouble() throws IOException { + return delegate.readDouble(); + } + + @Override + public String readLine() throws IOException { + return delegate.readLine(); + } +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/SqlDataOutput.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/SqlDataOutput.java new file mode 100644 index 00000000000..1171c3c6c16 --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/SqlDataOutput.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.io.DataOutput; +import java.io.IOException; + +/** + * {@linkplain DataOutput} customized for SQL. It has: + *
+ * <ul>
+ * <li>{@link #version}. This allows us to add new fields
+ * to the protocol in a backwards compatible way by bumping
+ * the version number.</li>
+ * </ul>
+ */ +public final class SqlDataOutput implements DataOutput { + private final DataOutput delegate; + private final int version; + + public SqlDataOutput(DataOutput delegate, int version) { + this.delegate = delegate; + this.version = version; + } + + /** + * Version of the protocol to use. When new fields are added + * to the protocol we bump the maximum version. Requests and + * responses use the minimum version understood by both the + * client and the server. + */ + public int version() { + return version; + } + + /** + * The maximum size of a string to submit to {@link #delegate}'s + * {@link DataOutput#writeUTF(String)}. The {@code 65535} is the + * number of bytes that the string can be encoded to. The {@code 3} + * is the "worst case" for the number of bytes used to encode each + * char. + */ + static final int WORST_CASE_SPLIT = 65535 / 3; + /** + * Override the built-in {@link DataOutput#writeUTF(String)} + * to support strings that need more than 65535 charcters. + */ + @Override + public void writeUTF(String s) throws IOException { + int splits = s.length() / WORST_CASE_SPLIT + 1; + delegate.writeInt(splits); + + int start = 0; + while (true) { + int end = Math.min(s.length(), start + WORST_CASE_SPLIT); + delegate.writeUTF(s.substring(start, end)); + if (end == s.length()) { + break; + } + start = end; + } + } + + @Override + public void write(int b) throws IOException { + delegate.write(b); + } + + @Override + public void write(byte[] b) throws IOException { + delegate.write(b); + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + delegate.write(b, off, len); + } + + @Override + public void writeBoolean(boolean v) throws IOException { + delegate.writeBoolean(v); + } + + @Override + public void writeByte(int v) throws IOException { + delegate.writeByte(v); + } + + @Override + public void writeShort(int v) throws IOException { + delegate.writeShort(v); + } + + @Override + public void writeChar(int v) throws IOException 
{ + delegate.writeChar(v); + } + + @Override + public void writeInt(int v) throws IOException { + delegate.writeInt(v); + } + + @Override + public void writeLong(long v) throws IOException { + delegate.writeLong(v); + } + + @Override + public void writeFloat(float v) throws IOException { + delegate.writeFloat(v); + } + + @Override + public void writeDouble(double v) throws IOException { + delegate.writeDouble(v); + } + + @Override + public void writeBytes(String s) throws IOException { + delegate.writeBytes(s); + } + + @Override + public void writeChars(String s) throws IOException { + delegate.writeChars(s); + } +} diff --git a/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/TimeoutInfo.java b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/TimeoutInfo.java new file mode 100644 index 00000000000..af398929e23 --- /dev/null +++ b/sql/shared-proto/src/main/java/org/elasticsearch/xpack/sql/protocol/shared/TimeoutInfo.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +/** + * Common class handling timeouts. Due to the nature of JDBC, all timeout values are expressed as millis. 
+ * Contains + */ +public class TimeoutInfo { + + public static final long DEFAULT_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(90); + public static final long DEFAULT_PAGE_TIMEOUT = TimeUnit.SECONDS.toMillis(45); + + // client time - millis since epoch of when the client made the request + // request timeout - how long the client is willing to wait for the server to process its request + // page timeout - how long retrieving the next page (of the query) should take (this is used to scroll across pages) + public final long clientTime, requestTimeout, pageTimeout; + + public TimeoutInfo(long clientTime, long timeout, long requestTimeout) { + this.clientTime = clientTime; + this.requestTimeout = timeout; + this.pageTimeout = requestTimeout; + } + + TimeoutInfo(DataInput in) throws IOException { + clientTime = in.readLong(); + requestTimeout = in.readLong(); + pageTimeout = in.readLong(); + } + + void writeTo(DataOutput out) throws IOException { + out.writeLong(clientTime); + out.writeLong(requestTimeout); + out.writeLong(pageTimeout); + } + + @Override + public String toString() { + return "client=" + clientTime + ",request=" + requestTimeout + ",page=" + pageTimeout; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + TimeoutInfo other = (TimeoutInfo) obj; + return clientTime == other.clientTime + && requestTimeout == other.requestTimeout + && pageTimeout == other.pageTimeout; + } + + @Override + public int hashCode() { + return Objects.hash(clientTime, requestTimeout, pageTimeout); + } +} diff --git a/sql/shared-proto/src/test/java/org/elasticsearch/xpack/sql/protocol/shared/SqlDataInputOutputTests.java b/sql/shared-proto/src/test/java/org/elasticsearch/xpack/sql/protocol/shared/SqlDataInputOutputTests.java new file mode 100644 index 00000000000..7e438c0ee7e --- /dev/null +++ b/sql/shared-proto/src/test/java/org/elasticsearch/xpack/sql/protocol/shared/SqlDataInputOutputTests.java @@ -0,0 
+1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import org.apache.http.client.entity.DeflateInputStream; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +public class SqlDataInputOutputTests extends ESTestCase { + public void testSmallString() throws IOException { + assertRoundTripString("t"); + assertRoundTripString("test"); + assertRoundTripString(randomAlphaOfLengthBetween(500, 1000)); + } + + public void testLargeAscii() throws IOException { + assertRoundTripString(randomAlphaOfLengthBetween(65535, 655350)); + } + + public void testUnicode() throws IOException { + assertRoundTripString(randomRealisticUnicodeOfLengthBetween(65535 / 3, 65535)); + assertRoundTripString(randomRealisticUnicodeOfLengthBetween(65535, 655350)); + } + + /** + * Round trip a string using {@link SqlDataOutput#writeUTF(String)} + * and {@link SqlDataInput#readUTF()}. 
+ */ + private void assertRoundTripString(String string) throws IOException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + SqlDataOutput sout = new SqlDataOutput(new DataOutputStream(out), 0); + sout.writeUTF(string); + try (StreamInput in = out.bytes().streamInput()) { + SqlDataInput sin = new SqlDataInput(new DataInputStream(in), 0); + assertEquals(string, sin.readUTF()); + } + } + } +} diff --git a/sql/shared-proto/src/test/java/org/elasticsearch/xpack/sql/protocol/shared/TimeoutInfoTests.java b/sql/shared-proto/src/test/java/org/elasticsearch/xpack/sql/protocol/shared/TimeoutInfoTests.java new file mode 100644 index 00000000000..6e6fb3da044 --- /dev/null +++ b/sql/shared-proto/src/test/java/org/elasticsearch/xpack/sql/protocol/shared/TimeoutInfoTests.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.protocol.shared; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.assertRoundTrip; + +public class TimeoutInfoTests extends ESTestCase { + static TimeoutInfo randomTimeoutInfo() { + return new TimeoutInfo(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); + } + + public void testRoundTrip() throws IOException { + assertRoundTrip(randomTimeoutInfo(), TimeoutInfo::writeTo, TimeoutInfo::new); + } +} diff --git a/sql/test-utils/build.gradle b/sql/test-utils/build.gradle new file mode 100644 index 00000000000..c61676532ce --- /dev/null +++ b/sql/test-utils/build.gradle @@ -0,0 +1,25 @@ +apply plugin: 'elasticsearch.build' + +description = 'Shared test utilities for jdbc and cli protocol projects' + +dependencies { + compile "junit:junit:${versions.junit}" + compile "org.hamcrest:hamcrest-all:${versions.hamcrest}" + compile "org.elasticsearch.test:framework:${versions.elasticsearch}" +} + +thirdPartyAudit.excludes = [ + // Referneced by the test:framework but not used + 'org.apache.tools.ant.BuildException', + 'org.apache.tools.ant.DirectoryScanner', + 'org.apache.tools.ant.Task', + 'org.apache.tools.ant.types.FileSet', + 'org.easymock.EasyMock', + 'org.easymock.IArgumentMatcher', + 'org.jmock.core.Constraint', +] + +/* Elasticsearch traditionally disables this for test utilities because it is + * hard to configure and (hopefully) much less important for tests than + * production code. 
*/ +dependencyLicenses.enabled = false diff --git a/sql/test-utils/src/main/java/org/elasticsearch/xpack/sql/test/RoundTripTestUtils.java b/sql/test-utils/src/main/java/org/elasticsearch/xpack/sql/test/RoundTripTestUtils.java new file mode 100644 index 00000000000..62b7fc1da5b --- /dev/null +++ b/sql/test-utils/src/main/java/org/elasticsearch/xpack/sql/test/RoundTripTestUtils.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.test; + +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.CheckedFunction; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.InputStream; + +import static org.junit.Assert.assertEquals; + +/** + * Base class for testing round trips across the serialization protocol. 
+ */ +public abstract class RoundTripTestUtils { + private RoundTripTestUtils () { + // Only static utilities here + } + + public static void assertRoundTrip(T example, CheckedBiConsumer writeTo, + CheckedFunction readFrom) throws IOException { + T once = roundTrip(example, writeTo, readFrom); + assertEquals(example, once); + T twice = roundTrip(once, writeTo, readFrom); + assertEquals(example, twice); + assertEquals(once, twice); + } + + public static T roundTrip(T example, CheckedBiConsumer writeTo, + CheckedFunction readFrom) throws IOException { + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + writeTo.accept(example, new DataOutputStream(out)); + try (InputStream in = new ByteArrayInputStream(out.toByteArray())) { + T decoded = readFrom.apply(new DataInputStream(in)); + assertEquals("should have emptied the stream", 0, in.available()); + return decoded; + } + } + } +} diff --git a/sql/test-utils/src/test/java/org/elasticsearch/xpack/sql/test/RoundTripTestUtilsTests.java b/sql/test-utils/src/test/java/org/elasticsearch/xpack/sql/test/RoundTripTestUtilsTests.java new file mode 100644 index 00000000000..08c8bad918a --- /dev/null +++ b/sql/test-utils/src/test/java/org/elasticsearch/xpack/sql/test/RoundTripTestUtilsTests.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.test; + +import org.elasticsearch.test.ESTestCase; + +import java.io.DataInput; +import java.io.IOException; + +import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.assertRoundTrip; +import static org.hamcrest.Matchers.startsWith; + +public class RoundTripTestUtilsTests extends ESTestCase { + public void testAssertRoundTrip() throws IOException { + // Should pass + assertRoundTrip(randomAlphaOfLength(5), (str, out) -> out.writeUTF(str), DataInput::readUTF); + + // Should fail because we have trailing stuff + AssertionError e = expectThrows(AssertionError.class, () -> assertRoundTrip(randomAlphaOfLength(5), (str, out) -> { + out.writeUTF(str); + out.writeInt(randomInt()); + }, DataInput::readUTF)); + assertEquals("should have emptied the stream expected:<0> but was:<4>", e.getMessage()); + + // Should fail because we read the wrong string + e = expectThrows(AssertionError.class, () -> assertRoundTrip(randomAlphaOfLength(5), + (str, out) -> out.writeUTF(str), in -> in.readUTF() + "wrong")); + assertThat(e.getMessage(), startsWith("expected:<")); + } +} diff --git a/test/sql-cli-fixture/build.gradle b/test/sql-cli-fixture/build.gradle new file mode 100644 index 00000000000..dcf745c89a2 --- /dev/null +++ b/test/sql-cli-fixture/build.gradle @@ -0,0 +1,14 @@ +apply plugin: 'elasticsearch.build' + +forbiddenApisMain { + // does not depend on core, so only jdk and http signatures should be checked + signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] +} + +thirdPartyAudit.enabled = false +licenseHeaders.enabled = false +test.enabled = false +jarHell.enabled = false +// Not published so no need to assemble +tasks.remove(assemble) +build.dependsOn.remove('assemble') diff --git a/test/sql-cli-fixture/src/main/java/org/elasticsearch/xpack/sql/cli/fixture/CliFixture.java b/test/sql-cli-fixture/src/main/java/org/elasticsearch/xpack/sql/cli/fixture/CliFixture.java new file mode 100644 index 
00000000000..c2ed5c0808e --- /dev/null +++ b/test/sql-cli-fixture/src/main/java/org/elasticsearch/xpack/sql/cli/fixture/CliFixture.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.fixture; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.lang.management.ManagementFactory; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; + +import static java.util.Collections.singleton; + +public class CliFixture { + public static void main(String[] args) throws IOException, InterruptedException { + if (args.length < 2) { + throw new IllegalArgumentException("usage: "); + } + Path dir = Paths.get(args[0]); + Path cliJar = Paths.get(args[1]); + int port = 0; + if (args.length > 2) { + port = Integer.parseInt(args[2]); + } + if (false == Files.exists(cliJar)) { + throw new IllegalArgumentException(cliJar + " doesn't exist"); + } + if (false == Files.isRegularFile(cliJar)) { + throw new IllegalArgumentException(cliJar + " is not a regular file"); + } + String javaExec = "java"; + boolean isWindows = System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("win"); + + if (isWindows) { + javaExec += ".exe"; + } + Path javaExecutable = Paths.get(System.getProperty("java.home"), "bin", javaExec); + if (false == Files.exists(javaExecutable)) { + throw new 
IllegalArgumentException(javaExec + " doesn't exist"); + } + if (false == Files.isExecutable(javaExecutable)) { + throw new IllegalArgumentException(javaExec + " isn't executable"); + } + + try (ServerSocket server = new ServerSocket()) { + server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), port)); + // write pid file + Path tmp = Files.createTempFile(dir, null, null); + String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0]; + Files.write(tmp, Collections.singleton(pid)); + Files.move(tmp, dir.resolve("pid"), StandardCopyOption.ATOMIC_MOVE); + + // write port file + tmp = Files.createTempFile(dir, null, null); + InetSocketAddress bound = (InetSocketAddress) server.getLocalSocketAddress(); + if (bound.getAddress() instanceof Inet6Address) { + Files.write(tmp, singleton("[" + bound.getHostString() + "]:" + bound.getPort())); + } + else { + Files.write(tmp, singleton(bound.getHostString() + ":" + bound.getPort())); + } + Files.move(tmp, dir.resolve("ports"), StandardCopyOption.ATOMIC_MOVE); + + boolean run = true; + // Run forever until killed + while (run) { + try { + println("accepting on localhost:" + server.getLocalPort()); + Socket s = server.accept(); + String line = new BufferedReader(new InputStreamReader(s.getInputStream(), StandardCharsets.UTF_8)).readLine(); + if (line == null || line.isEmpty()) { + continue; + } + List command = new ArrayList<>(); + command.add(javaExecutable.toString()); + command.add("-Dcli.debug=true"); + // command.add("-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000"); + // Force a specific terminal type so we have consistent responses for testing. 
+ command.add("-Dorg.jline.terminal.type=xterm-256color"); + // Disable terminal types that won't work with stdin isn't actually a tty + command.add("-Dorg.jline.terminal.jna=false"); + command.add("-Dorg.jline.terminal.jansi=false"); + command.add("-Dorg.jline.terminal.exec=false"); + command.add("-Dorg.jline.terminal.dumb=true"); + command.add("-jar"); + command.add(cliJar.toString()); + command.addAll(Arrays.asList(line.split(" "))); + ProcessBuilder cliBuilder = new ProcessBuilder(command); + // Clear the environment to drop JAVA_TOOLS which prints strange things on startup + cliBuilder.environment().clear(); + cliBuilder.redirectErrorStream(true); + Process process = cliBuilder.start(); + println("started " + command); + new Thread(() -> { + int i; + try { + while ((i = process.getInputStream().read()) != -1) { + s.getOutputStream().write(i); + s.getOutputStream().flush(); + } + } catch (IOException e) { + throw new RuntimeException("failed to copy from process to socket", e); + } finally { + process.destroyForcibly(); + } + }).start(); + new Thread(() -> { + int i; + try { + while ((i = s.getInputStream().read()) != -1) { + process.getOutputStream().write(i); + process.getOutputStream().flush(); + } + } catch (IOException e) { + throw new RuntimeException("failed to copy from socket to process", e); + } finally { + process.destroyForcibly(); + } + }).start(); + process.waitFor(); + } catch (IOException e) { + printStackTrace("error at the top level, continuing", e); + } + } + } + } + + @SuppressForbidden(reason = "cli application") + private static void println(String line) { + System.out.println(line); + } + + @SuppressForbidden(reason = "cli application") + private static void printStackTrace(String reason, Throwable t) { + System.err.println(reason); + t.printStackTrace(); + } +} diff --git a/test/sql-cli-fixture/src/main/java/org/elasticsearch/xpack/sql/cli/fixture/SuppressForbidden.java 
b/test/sql-cli-fixture/src/main/java/org/elasticsearch/xpack/sql/cli/fixture/SuppressForbidden.java new file mode 100644 index 00000000000..e2232d679dc --- /dev/null +++ b/test/sql-cli-fixture/src/main/java/org/elasticsearch/xpack/sql/cli/fixture/SuppressForbidden.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.fixture; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.CLASS) +@Target({ ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE }) +public @interface SuppressForbidden { + String reason(); +} \ No newline at end of file