Revert "Merge branch 'feature/sql'"

This reverts commit elastic/x-pack-elasticsearch@2b3f7697a5, reversing
changes made to elastic/x-pack-elasticsearch@b79f16673c.

We're backing out all of SQL.

Original commit: elastic/x-pack-elasticsearch@cc79e19911
This commit is contained in:
Costin Leau 2017-12-13 09:33:13 -05:00
parent 5a6cfafed2
commit 2e60e831c0
806 changed files with 39 additions and 65013 deletions

View File

@ -49,7 +49,7 @@ subprojects {
}
tasks.withType(LicenseHeadersTask.class) {
approvedLicenses = ['Elasticsearch Confidential', 'Generated']
approvedLicenses = ['Elasticsearch Confidential']
additionalLicense 'ESCON', 'Elasticsearch Confidential', 'ELASTICSEARCH CONFIDENTIAL'
}
ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-api:${version}": ':x-pack-elasticsearch:plugin']

View File

@ -5,12 +5,6 @@
<suppressions>
<!-- On Windows, Checkstyle matches files using \ path separator -->
<!-- These files are generated by ANTLR so its silly to hold them to our rules. -->
<suppress files="sql[/\\]server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]xpack[/\\]sql[/\\]parser[/\\]SqlBase(Base(Listener|Visitor)|Lexer|Listener|Parser|Visitor).java" checks="." />
<suppress files="sql[/\\]server[/\\].*.java" checks="LineLength" />
<suppress files="sql[/\\]server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]xpack[/\\]sql[/\\]expression[/\\].*.java" checks="EqualsHashCode" />
<suppress files="plugin[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]xpack[/\\]common[/\\]action[/\\]XPackDeleteByQueryAction.java" checks="LineLength" />
<suppress files="plugin[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]xpack[/\\]ml[/\\]action[/\\]StopDatafeedAction.java" checks="LineLength" />
<suppress files="plugin[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]xpack[/\\]ml[/\\]utils[/\\]DomainSplitFunction.java" checks="LineLength" />

View File

@ -57,20 +57,6 @@ case $key in
"-Dtests.badapples=true"
)
;;
smokeTestSql) # TODO remove this once we are ready to merge sql down
GRADLE_CLI_ARGS=(
"--info"
"-psql"
"check"
":x-pack-elasticsearch:plugin:precommit"
":x-pack-elasticsearch:qa:sql:check"
":x-pack-elasticsearch:qa:sql:multinode:check"
":x-pack-elasticsearch:qa:sql:no-security:check"
":x-pack-elasticsearch:qa:sql:security:check"
":x-pack-elasticsearch:qa:sql:security:no-ssl:check"
":x-pack-elasticsearch:qa:sql:security:ssl:check"
)
;;
releaseTest)
GRADLE_CLI_ARGS=(
"--info"
@ -149,9 +135,7 @@ if [ -z ${USE_EXISTING_ES:+x} ]; then
echo " -> using CI branch $BRANCH from elastic repo"
}
# pick_clone_target NOCOMMIT just use master for testing our feature branch. Do not merge this.....
GH_USER="elastic"
BRANCH="master"
pick_clone_target
DEPTH=1
if [ -n "$BUILD_METADATA" ]; then

View File

@ -199,77 +199,6 @@ setups['my_inactive_watch'] = '''
'''
setups['my_active_watch'] = setups['my_inactive_watch'].replace(
'active: false', 'active: true')
// Used by SQL because it looks SQL-ish
setups['library'] = '''
- do:
indices.create:
index: library
body:
settings:
number_of_shards: 1
number_of_replicas: 1
mappings:
book:
properties:
name:
type: keyword
author:
type: keyword
release_date:
type: date
page_count:
type: short
- do:
bulk:
index: library
type: book
refresh: true
body: |
{"index":{"_id": "Leviathan Wakes"}}
{"name": "Leviathan Wakes", "author": "James S.A. Corey", "release_date": "2011-06-02", "page_count": 561}
{"index":{"_id": "Hyperion"}}
{"name": "Hyperion", "author": "Dan Simmons", "release_date": "1989-05-26", "page_count": 482}
{"index":{"_id": "Dune"}}
{"name": "Dune", "author": "Frank Herbert", "release_date": "1965-06-01", "page_count": 604}
{"index":{"_id": "Consider Phlebas"}}
{"name": "Consider Phlebas", "author": "Iain M. Banks", "release_date": "1987-04-23", "page_count": 471}
{"index":{"_id": "Pandora's Star"}}
{"name": "Pandora's Star", "author": "Peter F. Hamilton", "release_date": "2004-03-02", "page_count": 768}
{"index":{"_id": "Revelation Space"}}
{"name": "Revelation Space", "author": "Alastair Reynolds", "release_date": "2000-03-15", "page_count": 585}
{"index":{"_id": "A Fire Upon the Deep"}}
{"name": "A Fire Upon the Deep", "author": "Vernor Vinge", "release_date": "1992-06-01", "page_count": 613}
{"index":{"_id": "Ender's Game"}}
{"name": "Ender's Game", "author": "Orson Scott Card", "release_date": "1985-06-01", "page_count": 324}
{"index":{"_id": "1984"}}
{"name": "1984", "author": "George Orwell", "release_date": "1985-06-01", "page_count": 328}
{"index":{"_id": "Fahrenheit 451"}}
{"name": "Fahrenheit 451", "author": "Ray Bradbury", "release_date": "1953-10-15", "page_count": 227}
{"index":{"_id": "Brave New World"}}
{"name": "Brave New World", "author": "Aldous Huxley", "release_date": "1932-06-01", "page_count": 268}
{"index":{"_id": "Foundation"}}
{"name": "Foundation", "author": "Isaac Asimov", "release_date": "1951-06-01", "page_count": 224}
{"index":{"_id": "The Giver"}}
{"name": "The Giver", "author": "Lois Lowry", "release_date": "1993-04-26", "page_count": 208}
{"index":{"_id": "Slaughterhouse-Five"}}
{"name": "Slaughterhouse-Five", "author": "Kurt Vonnegut", "release_date": "1969-06-01", "page_count": 275}
{"index":{"_id": "The Hitchhiker's Guide to the Galaxy"}}
{"name": "The Hitchhiker's Guide to the Galaxy", "author": "Douglas Adams", "release_date": "1979-10-12", "page_count": 180}
{"index":{"_id": "Snow Crash"}}
{"name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470}
{"index":{"_id": "Neuromancer"}}
{"name": "Neuromancer", "author": "William Gibson", "release_date": "1984-07-01", "page_count": 271}
{"index":{"_id": "The Handmaid's Tale"}}
{"name": "The Handmaid's Tale", "author": "Margaret Atwood", "release_date": "1985-06-01", "page_count": 311}
{"index":{"_id": "Starship Troopers"}}
{"name": "Starship Troopers", "author": "Robert A. Heinlein", "release_date": "1959-12-01", "page_count": 335}
{"index":{"_id": "The Left Hand of Darkness"}}
{"name": "The Left Hand of Darkness", "author": "Ursula K. Le Guin", "release_date": "1969-06-01", "page_count": 304}
{"index":{"_id": "The Moon is a Harsh Mistress"}}
{"name": "The Moon is a Harsh Mistress", "author": "Robert A. Heinlein", "release_date": "1966-04-01", "page_count": 288}
'''
setups['server_metrics_index'] = '''
- do:
indices.create:

View File

@ -23,9 +23,6 @@ include::release-notes/xpack-breaking.asciidoc[]
:edit_url:
include::{es-repo-dir}/reference/index-shared3.asciidoc[]
:edit_url!:
include::sql/index.asciidoc[]
:edit_url!:
include::monitoring/index.asciidoc[]

View File

@ -11,4 +11,3 @@ include::ml-settings.asciidoc[]
include::monitoring-settings.asciidoc[]
include::security-settings.asciidoc[]
include::notification-settings.asciidoc[]
include::sql-settings.asciidoc[]

View File

@ -1,17 +0,0 @@
[role="xpack"]
[[sql-settings]]
=== SQL Access Settings in Elasticsearch
++++
<titleabbrev>SQL Access Settings</titleabbrev>
++++
SQL Access is enabled by default when you install {xpack}. You can configure
these SQL Access settings in the `elasticsearch.yml` file.
[float]
[[general-sql-settings]]
==== General SQL Access Settings
`xpack.sql.enabled`::
Set to `false` to disable SQL Access on the node.

View File

@ -1,39 +0,0 @@
[role="xpack"]
[[sql-cli]]
== SQL CLI
The SQL CLI is a stand alone Java application for quick interaction
with X-Pack SQL. You can run it like this:
["source","bash",subs="attributes,callouts"]
--------------------------------------------------
$ java -jar cli-{version}.jar
--------------------------------------------------
You can pass the URL of the Elasticsearch instance to connect to as
the first parameter:
["source","bash",subs="attributes,callouts"]
--------------------------------------------------
$ java -jar cli-{version}.jar https://some.server:9200
--------------------------------------------------
The cli jar is entirely stand alone and can be moved wherever it is
needed.
Once the CLI is running you can use any <<sql-spec,query>> that
Elasticsearch supports:
[source,sqlcli]
--------------------------------------------------
sql> SELECT * FROM library WHERE page_count > 500 ORDER BY page_count DESC;
author | name | page_count
----------------------------+-----------------------+---------------
Victor Hugo |Les Misérables |1463
Miguel De Cervantes Saavedra|Don Quixote |1072
Miguel De Cervantes Saavedra|Don Quixote |1072
Herman Melville |Moby-Dick or, The Whale|720
Charles Dickens |Oliver Twist |608
--------------------------------------------------
// TODO it'd be lovely to be able to assert that this is correct but
// that is probably more work then it is worth right now.

View File

@ -1,4 +0,0 @@
include::sql-rest.asciidoc[]
include::sql-translate.asciidoc[]
include::sql-cli.asciidoc[]
include::sql-jdbc.asciidoc[]

View File

@ -1,48 +0,0 @@
[role="xpack"]
[[sql-jdbc]]
== SQL JDBC
Elasticsearch's SQL jdbc driver is a fully featured JDBC driver
for Elasticsearch. You can connect to it using the two APIs offered
by JDBC, namely `java.sql.Driver` and `DriverManager`:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{jdbc-tests}/JdbcIntegrationTestCase.java[connect-dm]
--------------------------------------------------
<1> The server and port on which Elasticsearch is listening for
HTTP traffic. The port is usually 9200.
<2> Properties for connecting to Elasticsearch. An empty `Properties`
instance is fine for unsecured Elasticsearch.
or `javax.sql.DataSource` through
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{jdbc-tests}/JdbcIntegrationTestCase.java[connect-ds]
--------------------------------------------------
<1> The server and port on which Elasticsearch is listening for
HTTP traffic. The port is usually 9200.
<2> Properties for connecting to Elasticsearch. An empty `Properties`
instance is fine for unsecured Elasticsearch.
Which one to use? Typically client applications that provide most
configuration parameters in the URL rely on the `DriverManager`-style
while `DataSource` is preferred when being _passed_ around since it can be
configured in one place and the consumer only has to call `getConnection`
without having to worry about any other parameters.
To connect to a secured Elasticsearch server the `Properties`
should look like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{security-tests}/JdbcSecurityIT.java[admin_properties]
--------------------------------------------------
Once you have the connection you can use it like any other JDBC
connection. For example:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{jdbc-tests}/SimpleExampleTestCase.java[simple_example]
--------------------------------------------------

View File

@ -1,186 +0,0 @@
[role="xpack"]
[[sql-rest]]
== SQL REST API
The SQL REST API accepts SQL in a JSON document, executes it,
and returns the results. For example:
[source,js]
--------------------------------------------------
POST /_xpack/sql
{
"query": "SELECT * FROM library ORDER BY page_count DESC LIMIT 5"
}
--------------------------------------------------
// CONSOLE
// TEST[setup:library]
Which returns:
[source,text]
--------------------------------------------------
author | name | page_count | release_date
-----------------+--------------------+---------------+---------------
Peter F. Hamilton|Pandora's Star |768 |1078185600000
Vernor Vinge |A Fire Upon the Deep|613 |707356800000
Frank Herbert |Dune |604 |-144720000000
Alastair Reynolds|Revelation Space |585 |953078400000
James S.A. Corey |Leviathan Wakes |561 |1306972800000
--------------------------------------------------
// TESTRESPONSE[_cat]
You can also choose to get results in a structured format by adding the `format` parameter. Currently supported formats:
- text (default)
- json
- smile
- yaml
- cbor
Alternatively you can set the Accept HTTP header to the appropriate media format.
All formats above are supported, the GET parameter takes precedence over the header.
[source,js]
--------------------------------------------------
POST /_xpack/sql?format=json
{
"query": "SELECT * FROM library ORDER BY page_count DESC",
"fetch_size": 5
}
--------------------------------------------------
// CONSOLE
// TEST[setup:library]
Which returns:
[source,js]
--------------------------------------------------
{
"columns": [
{"name": "author", "type": "keyword"},
{"name": "name", "type": "keyword"},
{"name": "page_count", "type": "short"},
{"name": "release_date", "type": "date"}
],
"size": 5,
"rows": [
["Peter F. Hamilton", "Pandora's Star", 768, 1078185600000],
["Vernor Vinge", "A Fire Upon the Deep", 613, 707356800000],
["Frank Herbert", "Dune", 604, -144720000000],
["Alastair Reynolds", "Revelation Space", 585, 953078400000],
["James S.A. Corey", "Leviathan Wakes", 561, 1306972800000]
],
"cursor": "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWWWdrRlVfSS1TbDYtcW9lc1FJNmlYdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl+v///w8="
}
--------------------------------------------------
// TESTRESPONSE[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWWWdrRlVfSS1TbDYtcW9lc1FJNmlYdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl\+v\/\/\/w8=/$body.cursor/]
You can continue to the next page by sending back the `cursor` field. In
case of text format the cursor is returned as `Cursor` http header.
[source,js]
--------------------------------------------------
POST /_xpack/sql?format=json
{
"cursor": "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8="
}
--------------------------------------------------
// CONSOLE
// TEST[continued]
// TEST[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f\/\/\/w8=/$body.cursor/]
Which looks like:
[source,js]
--------------------------------------------------
{
"size" : 5,
"rows" : [
["Dan Simmons", "Hyperion", 482, 612144000000],
["Iain M. Banks", "Consider Phlebas", 471, 546134400000],
["Neal Stephenson", "Snow Crash", 470, 707356800000],
["Robert A. Heinlein", "Starship Troopers", 335, -318297600000],
["George Orwell", "1984", 328, 486432000000]
],
"cursor" : "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWODRMaXBUaVlRN21iTlRyWHZWYUdrdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl9f///w8="
}
--------------------------------------------------
// TESTRESPONSE[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWODRMaXBUaVlRN21iTlRyWHZWYUdrdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl9f\/\/\/w8=/$body.cursor/]
Note that the `column` object is only part of the first page.
You've reached the last page when there is no `cursor` returned
in the results. Like Elasticsearch's <<search-request-scroll,scroll>>,
SQL may keep state in Elasticsearch to support the cursor. Unlike
scroll, receiving the last page is enough to guarantee that the
Elasticsearch state is cleared.
To clear the state earlier, you can use the clear cursor command:
[source,js]
--------------------------------------------------
POST /_xpack/sql/close
{
"cursor": "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8="
}
--------------------------------------------------
// CONSOLE
// TEST[continued]
// TEST[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f\/\/\/w8=/$body.cursor/]
Which will return the following:
[source,js]
--------------------------------------------------
{
"succeeded" : true
}
--------------------------------------------------
// TESTRESPONSE
[[sql-rest-filtering]]
You can filter the results that SQL will run on using a standard
Elasticsearch query DSL by specifying the query in the filter
parameter.
[source,js]
--------------------------------------------------
POST /_xpack/sql
{
"query": "SELECT * FROM library ORDER BY page_count DESC",
"filter": {
"range": {
"page_count": {
"gte" : 100,
"lte" : 200
}
}
},
"fetch_size": 5
}
--------------------------------------------------
// CONSOLE
// TEST[setup:library]
Which returns:
[source,text]
--------------------------------------------------
author | name | page_count | release_date
---------------+------------------------------------+---------------+---------------
Douglas Adams |The Hitchhiker's Guide to the Galaxy|180 |308534400000
--------------------------------------------------
// TESTRESPONSE[_cat]
[[sql-rest-fields]]
In addition to the `query` and `cursor` fields, the request can
contain `fetch_size` and `time_zone`. `fetch_size` is a hint for how
many results to return in each page. SQL might choose to return more
or fewer results though. `time_zone` is the time zone to use for date
functions and date parsing. `time_zone` defaults to `utc` and can take
any values documented
http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html[here].

View File

@ -1,48 +0,0 @@
[role="xpack"]
[[sql-translate]]
== SQL Translate API
The SQL Translate API accepts SQL in a JSON document and translates it
into native Elasticsearch queries. For example:
[source,js]
--------------------------------------------------
POST /_xpack/sql/translate
{
"query": "SELECT * FROM library ORDER BY page_count DESC",
"fetch_size": 10
}
--------------------------------------------------
// CONSOLE
// TEST[setup:library]
Which returns:
[source,js]
--------------------------------------------------
{
"size" : 10,
"docvalue_fields" : [
"author",
"name",
"page_count",
"release_date"
],
"sort" : [
{
"page_count" : {
"order" : "desc"
}
}
]
}
--------------------------------------------------
// TESTRESPONSE
Which is the request that SQL will run to provide the results.
In this case, SQL will use the <<search-request-scroll,scroll>>
API. If the result contained an aggregation then SQL would use
the normal <<search-request-body,search>> API.
The request body accepts all of the <<sql-rest-fields,fields>> that
the <<sql-rest,SQL REST API>> accepts except `cursor`.

View File

@ -1,12 +0,0 @@
[[sql-functions]]
== Functions and Operators
// logical operators
// comparison
// conversion
// math
// date time
// aggregate
// geospatial

View File

@ -1,34 +0,0 @@
[role="xpack"]
[[xpack-sql]]
= SQL Access
:sql-tests: {docdir}/../../qa/sql
:sql-specs: {sql-tests}/src/main/resources
:jdbc-tests: {sql-tests}/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc
:security-tests: {sql-tests}/security/src/test/java/org/elasticsearch/xpack/qa/sql/security
[partintro]
--
X-Pack includes a SQL feature to execute SQL against Elasticsearch
indices and return tabular results. There are four main components:
<<sql-rest,REST API>>::
Accepts SQL in a JSON document, executes it, and returns the
results.
<<sql-translate,Translate API>>::
Accepts SQL in a JSON document and translates it into a native
Elasticsearch query and returns that.
<<sql-cli,CLI>>::
Command line application that connects to Elasticsearch to execute
SQL and print tabular results.
<<sql-jdbc,JDBC>>::
A JDBC driver for Elasticsearch.
--
include::sql-overview.asciidoc[]
include::sql-getting-started.asciidoc[]
include::endpoints/sql-endpoints.asciidoc[]
include::functions/sql-functions.asciidoc[]
include::language/sql-language.asciidoc[]
:jdbc-tests!:

View File

@ -1,5 +0,0 @@
[[sql-data-types]]
=== Data Type and Mapping
List of data types in SQL and how they actually map to Elasticsearch.
Also mention the corner cases - multi-fields, names with dots, etc...

View File

@ -1,9 +0,0 @@
[[sql-spec]]
== SQL Language
This chapter describes the SQL syntax and data types supported in X-Pack.
As a general rule, the syntax tries to adhere as much as possible to ANSI SQL to make the transition seamless.
include::sql-data-types.asciidoc[]
include::sql-syntax.asciidoc[]
include::sql-reserved.asciidoc[]

View File

@ -1,78 +0,0 @@
[[sql-spec-reserved]]
=== Reserved Keywords
Table with reserved keywords that need to be quoted. Also provide an example to make it more obvious.
The following table lists all of the keywords that are reserved in X-Pack SQL,
along with their status in the SQL standard. These reserved keywords must
be quoted (using double quotes) in order to be used as an identifier, for example:
[source, sql]
----
SELECT "AS" FROM index
----
[cols="^,^,^",options="header"]
|===
|Keyword |SQL:2016 |SQL-92
|`ALL` |reserved |reserved
|`AND` |reserved |reserved
|`ANY` |reserved |reserved
|`AS` |reserved |reserved
|`ASC` |reserved |reserved
|`BETWEEN` |reserved |reserved
|`BY` |reserved |reserved
|`CAST` |reserved |reserved
|`COLUMN` |reserved |reserved
|`CROSS` |reserved |reserved
|`DESC` |reserved |reserved
|`DESCRIBE` |reserved |reserved
|`DISTINCT` |reserved |reserved
|`EXISTS` |reserved |reserved
|`EXPLAIN` |reserved |reserved
|`EXTRACT` |reserved |reserved
|`FALSE` |reserved |reserved
|`FOR` |reserved |reserved
|`FROM` |reserved |reserved
|`FULL` |reserved |reserved
|`GROUP` |reserved |reserved
|`GROUPING` |reserved |reserved
|`HAVING` |reserved |reserved
|`IN` |reserved |reserved
|`INNER` |reserved |reserved
|`INTEGER` |reserved |reserved
|`INTO` |reserved |reserved
|`IS` |reserved |reserved
|`JOIN` |reserved |reserved
|`LAST` |reserved |reserved
|`LEFT` |reserved |reserved
|`LIKE` |reserved |reserved
|`LIMIT` |reserved |reserved
|`MATCH` |reserved |reserved
|`NATURAL` |reserved |reserved
|`NO` |reserved |reserved
|`NOT` |reserved |reserved
|`NULL` |reserved |reserved
|`ON` |reserved |reserved
|`OPTION` |reserved |reserved
|`OR` |reserved |reserved
|`ORDER` |reserved |reserved
|`OUTER` |reserved |reserved
|`RIGHT` |reserved |reserved
|`SELECT` |reserved |reserved
|`SESSION` | |reserved
|`SET` |reserved |reserved
|`TABLE` |reserved |reserved
|`THEN` |reserved |reserved
|`TO` |reserved |reserved
|`TRUE` |reserved |reserved
|`USING` |reserved |reserved
|`WHEN` |reserved |reserved
|`WHERE` |reserved |reserved
|`WITH` |reserved |reserved
|===

View File

@ -1,11 +0,0 @@
[[sql-spec-syntax]]
=== SQL Statement Syntax
Big list of the entire syntax in SQL
Each entry might get its own file and code snippet
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-spec}/select.sql-spec[wildcardWithOrder]
--------------------------------------------------

View File

@ -1,5 +0,0 @@
[[sql-getting-started]]
== Getting Started with SQL
Basic chapter on using REST, CLI and JDBC to run some basic queries and return some results.
To keep it short, this chapter should be about adding 3-4 entries of data, then using each technology to get the results out.

View File

@ -1,4 +0,0 @@
[[sql-overview]]
== Overview
Overview of the different chapters in the SQL docs.

View File

@ -1,9 +0,0 @@
[[elasticsearch-sql-standalone]]
= Elasticsearch SQL Standalone
:es-repo-dir: {docdir}/../../../../../elasticsearch/docs
:edit_url:
include::{es-repo-dir}/reference/index-shared3.asciidoc[]
:edit_url!:
include::index.asciidoc[]

View File

@ -10,7 +10,6 @@ import java.nio.file.StandardCopyOption
group 'org.elasticsearch.plugin'
apply plugin: 'elasticsearch.esplugin'
esplugin {
name 'x-pack'
description 'Elasticsearch Expanded Pack Plugin'
@ -31,22 +30,11 @@ dependencyLicenses {
mapping from: /owasp-java-html-sanitizer.*/, to: 'owasp-java-html-sanitizer'
mapping from: /transport-netty.*/, to: 'elasticsearch'
mapping from: /elasticsearch-rest-client.*/, to: 'elasticsearch'
mapping from: /server.*/, to: 'elasticsearch'
mapping from: /jdbc-proto.*/, to: 'elasticsearch'
mapping from: /cli-proto.*/, to: 'elasticsearch'
mapping from: /shared-proto.*/, to: 'elasticsearch'
mapping from: /aggs-matrix-stats.*/, to: 'elasticsearch' //pulled in by sql:server
mapping from: /http.*/, to: 'httpclient' // pulled in by rest client
mapping from: /commons-.*/, to: 'commons' // pulled in by rest client
ignoreSha 'elasticsearch-rest-client'
ignoreSha 'transport-netty4'
ignoreSha 'tribe'
ignoreSha 'server'
ignoreSha 'jdbc-proto'
ignoreSha 'cli-proto'
ignoreSha 'shared-proto'
ignoreSha 'elasticsearch-rest-client-sniffer'
ignoreSha 'aggs-matrix-stats'
}
licenseHeaders {
@ -98,9 +86,6 @@ dependencies {
nativeBundle "org.elasticsearch.ml:ml-cpp:${project.version}@zip"
testCompile 'org.ini4j:ini4j:0.5.2'
// sql's server components and its transitive dependencies
compile project(':x-pack-elasticsearch:sql:server')
// common test deps
testCompile 'org.elasticsearch:securemock:1.2'
testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}"

View File

@ -1 +0,0 @@
2609e36f18f7e8d593cc1cddfb2ac776dc96b8e0

View File

@ -1,26 +0,0 @@
[The "BSD license"]
Copyright (c) 2015 Terence Parr, Sam Harwell
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -57,9 +57,6 @@ public class XPackLicenseState {
messages.put(XPackPlugin.UPGRADE, new String[] {
"Upgrade API is disabled"
});
messages.put(XPackPlugin.SQL, new String[] {
"SQL support is disabled"
});
EXPIRATION_MESSAGES = Collections.unmodifiableMap(messages);
}
@ -76,7 +73,6 @@ public class XPackLicenseState {
messages.put(XPackPlugin.GRAPH, XPackLicenseState::graphAcknowledgementMessages);
messages.put(XPackPlugin.MACHINE_LEARNING, XPackLicenseState::machineLearningAcknowledgementMessages);
messages.put(XPackPlugin.LOGSTASH, XPackLicenseState::logstashAcknowledgementMessages);
messages.put(XPackPlugin.SQL, XPackLicenseState::sqlAcknowledgementMessages);
ACKNOWLEDGMENT_MESSAGES = Collections.unmodifiableMap(messages);
}
@ -213,21 +209,6 @@ public class XPackLicenseState {
return Strings.EMPTY_ARRAY;
}
private static String[] sqlAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) {
switch (newMode) {
case BASIC:
case STANDARD:
case GOLD:
switch (currentMode) {
case TRIAL:
case PLATINUM:
return new String[] { "JDBC support will be disabled, but you can continue to use SQL CLI and REST endpoint" };
}
break;
}
return Strings.EMPTY_ARRAY;
}
/** A wrapper for the license mode and state, to allow atomically swapping. */
private static class Status {
@ -510,29 +491,4 @@ public class XPackLicenseState {
// Should work on all active licenses
return localStatus.active;
}
/**
* Determine if SQL support should be enabled.
* <p>
* SQL is available for all license types except {@link OperationMode#MISSING}
*/
public boolean isSqlAllowed() {
return status.active;
}
/**
* Determine if JDBC support should be enabled.
* <p>
* JDBC is available only in for {@link OperationMode#PLATINUM} and {@link OperationMode#TRIAL} licences
*/
public boolean isJdbcAllowed() {
// status is volatile
Status localStatus = status;
OperationMode operationMode = localStatus.mode;
boolean licensed = operationMode == OperationMode.TRIAL || operationMode == OperationMode.PLATINUM;
return licensed && localStatus.active;
}
}

View File

@ -41,7 +41,6 @@ import org.elasticsearch.index.IndexModule;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.ingest.Processor;
import org.elasticsearch.license.LicenseService;
import org.elasticsearch.license.LicenseUtils;
import org.elasticsearch.license.Licensing;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.plugins.ActionPlugin;
@ -86,8 +85,6 @@ import org.elasticsearch.xpack.rest.action.RestXPackUsageAction;
import org.elasticsearch.xpack.security.Security;
import org.elasticsearch.xpack.security.authc.AuthenticationService;
import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken;
import org.elasticsearch.xpack.sql.plugin.SqlLicenseChecker;
import org.elasticsearch.xpack.sql.plugin.SqlPlugin;
import org.elasticsearch.xpack.ssl.SSLConfigurationReloader;
import org.elasticsearch.xpack.ssl.SSLService;
import org.elasticsearch.xpack.upgrade.Upgrade;
@ -144,9 +141,6 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
/** Name constant for the upgrade feature. */
public static final String UPGRADE = "upgrade";
/** Name constant for the sql feature. */
public static final String SQL = "sql";
// inside of YAML settings we still use xpack so we do not have to handle issues with dashes
private static final String SETTINGS_NAME = "xpack";
@ -197,7 +191,6 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
protected Deprecation deprecation;
protected Upgrade upgrade;
protected SqlPlugin sql;
public XPackPlugin(
final Settings settings,
@ -217,19 +210,6 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
this.logstash = new Logstash(settings);
this.deprecation = new Deprecation();
this.upgrade = new Upgrade(settings);
// sql projects don't depend on x-pack and as a result we cannot pass XPackLicenseState object to SqlPlugin directly here
this.sql = new SqlPlugin(XPackSettings.SQL_ENABLED.get(settings), new SqlLicenseChecker(
() -> {
if (!licenseState.isSqlAllowed()) {
throw LicenseUtils.newComplianceException(XPackPlugin.SQL);
}
},
() -> {
if (!licenseState.isJdbcAllowed()) {
throw LicenseUtils.newComplianceException("jdbc");
}
})
);
// Check if the node is a transport client.
if (transportClientMode == false) {
this.extensionsService = new XPackExtensionsService(settings, resolveXPackExtensionsFile(env), getExtensions());
@ -299,11 +279,6 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
components.addAll(upgrade.createComponents(client, clusterService, threadPool, resourceWatcherService,
scriptService, xContentRegistry));
/* Note that we need *client*, not *internalClient* because client preserves the
* authenticated user while internalClient throws that user away and acts as the
* x-pack user. */
components.addAll(sql.createComponents(client));
PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(settings, tasksExecutors);
PersistentTasksClusterService persistentTasksClusterService = new PersistentTasksClusterService(settings, registry, clusterService);
components.add(persistentTasksClusterService);
@ -402,7 +377,6 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
actions.addAll(machineLearning.getActions());
actions.addAll(deprecation.getActions());
actions.addAll(upgrade.getActions());
actions.addAll(sql.getActions());
return actions;
}
@ -441,8 +415,6 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
indexNameExpressionResolver, nodesInCluster));
handlers.addAll(upgrade.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, settingsFilter,
indexNameExpressionResolver, nodesInCluster));
handlers.addAll(sql.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, settingsFilter,
indexNameExpressionResolver, nodesInCluster));
return handlers;
}
@ -459,7 +431,6 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
entries.addAll(machineLearning.getNamedWriteables());
entries.addAll(licensing.getNamedWriteables());
entries.addAll(Security.getNamedWriteables());
entries.addAll(SqlPlugin.getNamedWriteables());
entries.addAll(Monitoring.getNamedWriteables());
entries.addAll(Graph.getNamedWriteables());
return entries;

View File

@ -76,9 +76,6 @@ public class XPackSettings {
}
}, Setting.Property.NodeScope);
/** Setting for enabling or disabling sql. Defaults to true. */
public static final Setting<Boolean> SQL_ENABLED = Setting.boolSetting("xpack.sql.enabled", true, Setting.Property.NodeScope);
/*
* SSL settings. These are the settings that are specifically registered for SSL. Many are private as we do not explicitly use them
* but instead parse based on a prefix (eg *.ssl.*)
@ -141,7 +138,6 @@ public class XPackSettings {
settings.add(HTTP_SSL_ENABLED);
settings.add(RESERVED_REALM_ENABLED_SETTING);
settings.add(TOKEN_SERVICE_ENABLED_SETTING);
settings.add(SQL_ENABLED);
return Collections.unmodifiableList(settings);
}
}

View File

@ -5,6 +5,15 @@
*/
package org.elasticsearch.xpack.security.authz;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.CompositeIndicesRequest;
@ -64,23 +73,12 @@ import org.elasticsearch.xpack.security.user.SystemUser;
import org.elasticsearch.xpack.security.user.User;
import org.elasticsearch.xpack.security.user.XPackSecurityUser;
import org.elasticsearch.xpack.security.user.XPackUser;
import org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction;
import org.elasticsearch.xpack.sql.plugin.SqlTranslateAction;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
import static org.elasticsearch.xpack.security.Security.setting;
import static org.elasticsearch.xpack.security.support.Exceptions.authorizationError;
public class AuthorizationService extends AbstractComponent {
public static final Setting<Boolean> ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING =
Setting.boolSetting(setting("authc.anonymous.authz_exception"), true, Property.NodeScope);
public static final String INDICES_PERMISSIONS_KEY = "_indices_permissions";
@ -228,7 +226,7 @@ public class AuthorizationService extends AbstractComponent {
grant(authentication, action, request, permission.names());
return;
} else {
// we do this here in addition to the denial below since we might run into an assertion on scroll request below if we
// we do this here in addition to the denial below since we might run into an assertion on scroll requrest below if we
// don't have permission to read cross cluster but wrap a scroll request.
throw denial(authentication, action, request, permission.names());
}
@ -275,8 +273,7 @@ public class AuthorizationService extends AbstractComponent {
final MetaData metaData = clusterService.state().metaData();
final AuthorizedIndices authorizedIndices = new AuthorizedIndices(authentication.getUser(), permission, action, metaData);
final ResolvedIndices resolvedIndices = resolveIndexNames(authentication, action, request,
metaData, authorizedIndices, permission);
final ResolvedIndices resolvedIndices = resolveIndexNames(authentication, action, request, metaData, authorizedIndices, permission);
assert !resolvedIndices.isEmpty()
: "every indices request needs to have its indices set thus the resolved indices must not be empty";
@ -419,8 +416,8 @@ public class AuthorizationService extends AbstractComponent {
throw new IllegalArgumentException("No equivalent action for opType [" + docWriteRequest.opType() + "]");
}
private ResolvedIndices resolveIndexNames(Authentication authentication, String action, TransportRequest request,
MetaData metaData, AuthorizedIndices authorizedIndices, Role permission) {
private ResolvedIndices resolveIndexNames(Authentication authentication, String action, TransportRequest request, MetaData metaData,
AuthorizedIndices authorizedIndices, Role permission) {
try {
return indicesAndAliasesResolver.resolve(request, metaData, authorizedIndices);
} catch (Exception e) {
@ -481,9 +478,7 @@ public class AuthorizationService extends AbstractComponent {
action.equals("indices:data/read/mpercolate") ||
action.equals("indices:data/read/msearch/template") ||
action.equals("indices:data/read/search/template") ||
action.equals("indices:data/write/reindex") ||
action.equals(SqlAction.NAME) ||
action.equals(SqlTranslateAction.NAME);
action.equals("indices:data/write/reindex");
}
private static boolean isTranslatedToBulkAction(String action) {
@ -502,7 +497,6 @@ public class AuthorizationService extends AbstractComponent {
action.equals(SearchTransportService.QUERY_SCROLL_ACTION_NAME) ||
action.equals(SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME) ||
action.equals(ClearScrollAction.NAME) ||
action.equals(SqlClearCursorAction.NAME) ||
action.equals(SearchTransportService.CLEAR_SCROLL_CONTEXTS_ACTION_NAME);
}

View File

@ -53,22 +53,14 @@ public class XPackLicenseStateTests extends ESTestCase {
return randomFrom(OperationMode.values());
}
public static OperationMode randomTrialStandardGoldOrPlatinumMode() {
static OperationMode randomTrialStandardGoldOrPlatinumMode() {
return randomFrom(TRIAL, STANDARD, GOLD, PLATINUM);
}
public static OperationMode randomTrialOrPlatinumMode() {
static OperationMode randomTrialOrPlatinumMode() {
return randomFrom(TRIAL, PLATINUM);
}
public static OperationMode randomTrialBasicStandardGoldOrPlatinumMode() {
return randomFrom(TRIAL, BASIC, STANDARD, GOLD, PLATINUM);
}
public static OperationMode randomBasicStandardOrGold() {
return randomFrom(BASIC, STANDARD, GOLD);
}
public void testSecurityDefaults() {
XPackLicenseState licenseState = new XPackLicenseState();
assertThat(licenseState.isAuthAllowed(), is(true));
@ -318,83 +310,4 @@ public class XPackLicenseStateTests extends ESTestCase {
assertAllowed(PLATINUM, false, XPackLicenseState::isLogstashAllowed, false);
assertAllowed(STANDARD, false, XPackLicenseState::isLogstashAllowed, false);
}
public void testSqlDefaults() {
XPackLicenseState licenseState = new XPackLicenseState();
assertThat(licenseState.isSqlAllowed(), is(true));
assertThat(licenseState.isJdbcAllowed(), is(true));
}
public void testSqlBasic() {
XPackLicenseState licenseState = new XPackLicenseState();
licenseState.update(BASIC, true);
assertThat(licenseState.isSqlAllowed(), is(true));
assertThat(licenseState.isJdbcAllowed(), is(false));
}
public void testSqlBasicExpired() {
XPackLicenseState licenseState = new XPackLicenseState();
licenseState.update(BASIC, false);
assertThat(licenseState.isSqlAllowed(), is(false));
assertThat(licenseState.isJdbcAllowed(), is(false));
}
public void testSqlStandard() {
XPackLicenseState licenseState = new XPackLicenseState();
licenseState.update(STANDARD, true);
assertThat(licenseState.isSqlAllowed(), is(true));
assertThat(licenseState.isJdbcAllowed(), is(false));
}
public void testSqlStandardExpired() {
XPackLicenseState licenseState = new XPackLicenseState();
licenseState.update(STANDARD, false);
assertThat(licenseState.isSqlAllowed(), is(false));
assertThat(licenseState.isJdbcAllowed(), is(false));
}
public void testSqlGold() {
XPackLicenseState licenseState = new XPackLicenseState();
licenseState.update(GOLD, true);
assertThat(licenseState.isSqlAllowed(), is(true));
assertThat(licenseState.isJdbcAllowed(), is(false));
}
public void testSqlGoldExpired() {
XPackLicenseState licenseState = new XPackLicenseState();
licenseState.update(GOLD, false);
assertThat(licenseState.isSqlAllowed(), is(false));
assertThat(licenseState.isJdbcAllowed(), is(false));
}
public void testSqlPlatinum() {
XPackLicenseState licenseState = new XPackLicenseState();
licenseState.update(PLATINUM, true);
assertThat(licenseState.isSqlAllowed(), is(true));
assertThat(licenseState.isJdbcAllowed(), is(true));
}
public void testSqlPlatinumExpired() {
XPackLicenseState licenseState = new XPackLicenseState();
licenseState.update(PLATINUM, false);
assertThat(licenseState.isSqlAllowed(), is(false));
assertThat(licenseState.isJdbcAllowed(), is(false));
}
public void testSqlAckAnyToTrialOrPlatinum() {
assertAckMesssages(XPackPlugin.SQL, randomMode(), randomTrialOrPlatinumMode(), 0);
}
public void testSqlAckTrialOrPlatinumToNotTrialOrPlatinum() {
assertAckMesssages(XPackPlugin.SQL, randomTrialOrPlatinumMode(), randomBasicStandardOrGold(), 1);
}
}

View File

@ -5,6 +5,15 @@
*/
package org.elasticsearch.xpack.security.authz;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
@ -120,20 +129,9 @@ import org.elasticsearch.xpack.security.user.ElasticUser;
import org.elasticsearch.xpack.security.user.SystemUser;
import org.elasticsearch.xpack.security.user.User;
import org.elasticsearch.xpack.security.user.XPackUser;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlRequest;
import org.junit.Before;
import org.mockito.Mockito;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import static org.elasticsearch.test.SecurityTestsUtils.assertAuthenticationException;
import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationException;
import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationExceptionRunAs;
@ -308,17 +306,6 @@ public class AuthorizationServiceTests extends ESTestCase {
verifyNoMoreInteractions(auditTrail);
}
public void testUserWithNoRolesCannotSql() {
TransportRequest request = new SqlRequest();
User user = new User("test user");
mockEmptyMetaData();
assertThrowsAuthorizationException(
() -> authorize(createAuthentication(user), SqlAction.NAME, request),
SqlAction.NAME, "test user");
verify(auditTrail).accessDenied(user, SqlAction.NAME, request, Role.EMPTY.names());
verifyNoMoreInteractions(auditTrail);
}
/**
* Verifies that the behaviour tested in {@link #testUserWithNoRolesCanPerformRemoteSearch}
* does not work for requests that are not remote-index-capable.
@ -336,19 +323,13 @@ public class AuthorizationServiceTests extends ESTestCase {
}
public void testUnknownRoleCausesDenial() {
@SuppressWarnings("unchecked")
Tuple<String, TransportRequest> tuple = randomFrom(
new Tuple<>(SearchAction.NAME, new SearchRequest()),
new Tuple<>(IndicesExistsAction.NAME, new IndicesExistsRequest()),
new Tuple<>(SqlAction.NAME, new SqlRequest()));
String action = tuple.v1();
TransportRequest request = tuple.v2();
TransportRequest request = new SearchRequest();
User user = new User("test user", "non-existent-role");
mockEmptyMetaData();
assertThrowsAuthorizationException(
() -> authorize(createAuthentication(user), action, request),
action, "test user");
verify(auditTrail).accessDenied(user, action, request, Role.EMPTY.names());
() -> authorize(createAuthentication(user), "indices:a", request),
"indices:a", "test user");
verify(auditTrail).accessDenied(user, "indices:a", request, Role.EMPTY.names());
verifyNoMoreInteractions(auditTrail);
}
@ -367,22 +348,16 @@ public class AuthorizationServiceTests extends ESTestCase {
}
public void testThatRoleWithNoIndicesIsDenied() {
@SuppressWarnings("unchecked")
Tuple<String, TransportRequest> tuple = randomFrom(
new Tuple<>(SearchAction.NAME, new SearchRequest()),
new Tuple<>(IndicesExistsAction.NAME, new IndicesExistsRequest()),
new Tuple<>(SqlAction.NAME, new SqlRequest()));
String action = tuple.v1();
TransportRequest request = tuple.v2();
TransportRequest request = new IndicesExistsRequest("a");
User user = new User("test user", "no_indices");
RoleDescriptor role = new RoleDescriptor("a_role", null, null, null);
roleMap.put("no_indices", role);
mockEmptyMetaData();
assertThrowsAuthorizationException(
() -> authorize(createAuthentication(user), action, request),
action, "test user");
verify(auditTrail).accessDenied(user, action, request, new String[] { role.getName() });
() -> authorize(createAuthentication(user), "indices:a", request),
"indices:a", "test user");
verify(auditTrail).accessDenied(user, "indices:a", request, new String[] { role.getName() });
verifyNoMoreInteractions(auditTrail);
}
@ -457,8 +432,7 @@ public class AuthorizationServiceTests extends ESTestCase {
new String[] { role.getName() });
authorize(createAuthentication(user), SearchTransportService.QUERY_SCROLL_ACTION_NAME, request);
verify(auditTrail).accessGranted(user, SearchTransportService.QUERY_SCROLL_ACTION_NAME, request,
new String[] { role.getName() });
verify(auditTrail).accessGranted(user, SearchTransportService.QUERY_SCROLL_ACTION_NAME, request, new String[] { role.getName() });
authorize(createAuthentication(user), SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME, request);
verify(auditTrail).accessGranted(user, SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME, request,
@ -533,7 +507,6 @@ public class AuthorizationServiceTests extends ESTestCase {
assertThrowsAuthorizationException(
() -> authorize(createAuthentication(anonymousUser), "indices:a", request),
"indices:a", anonymousUser.principal());
verify(auditTrail).accessDenied(anonymousUser, "indices:a", request, new String[] { role.getName() });
verifyNoMoreInteractions(auditTrail);
verify(clusterService, times(1)).state();
@ -578,7 +551,7 @@ public class AuthorizationServiceTests extends ESTestCase {
() -> authorize(createAuthentication(user), GetIndexAction.NAME, request));
assertThat(nfe.getIndex(), is(notNullValue()));
assertThat(nfe.getIndex().getName(), is("not-an-index-*"));
verify(auditTrail).accessDenied(user, GetIndexAction.NAME, request, new String[]{ role.getName() });
verify(auditTrail).accessDenied(user, GetIndexAction.NAME, request, new String[] { role.getName() });
verifyNoMoreInteractions(auditTrail);
verify(clusterService).state();
verify(state, times(1)).metaData();
@ -728,7 +701,7 @@ public class AuthorizationServiceTests extends ESTestCase {
}
// we should allow waiting for the health of the index or any index if the user has this permission
TransportRequest request = new ClusterHealthRequest(SecurityLifecycleService.SECURITY_INDEX_NAME);
ClusterHealthRequest request = new ClusterHealthRequest(SecurityLifecycleService.SECURITY_INDEX_NAME);
authorize(createAuthentication(user), ClusterHealthAction.NAME, request);
verify(auditTrail).accessGranted(user, ClusterHealthAction.NAME, request, new String[] { role.getName() });
@ -736,7 +709,6 @@ public class AuthorizationServiceTests extends ESTestCase {
request = new ClusterHealthRequest(SecurityLifecycleService.SECURITY_INDEX_NAME, "foo", "bar");
authorize(createAuthentication(user), ClusterHealthAction.NAME, request);
verify(auditTrail).accessGranted(user, ClusterHealthAction.NAME, request, new String[] { role.getName() });
verifyNoMoreInteractions(auditTrail);
SearchRequest searchRequest = new SearchRequest("_all");
authorize(createAuthentication(user), SearchAction.NAME, searchRequest);

View File

@ -1,61 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql;
import org.elasticsearch.analysis.common.CommonAnalysisPlugin;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.ReindexPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.discovery.TestZenDiscovery;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.ml.MachineLearning;
import java.util.Arrays;
import java.util.Collection;
public abstract class AbstractSqlIntegTestCase extends ESIntegTestCase {
@Override
protected boolean ignoreExternalCluster() {
return true;
}
@Override
protected Settings nodeSettings(int nodeOrdinal) {
Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal));
settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false);
settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false);
settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false);
settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false);
settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false);
settings.put(MachineLearning.AUTODETECT_PROCESS.getKey(), false);
return settings.build();
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(XPackPlugin.class, CommonAnalysisPlugin.class, ReindexPlugin.class);
}
@Override
protected Collection<Class<? extends Plugin>> transportClientPlugins() {
return nodePlugins();
}
@Override
protected Settings transportClientSettings() {
// Plugin should be loaded on the transport client as well
return nodeSettings(0);
}
@Override
protected Collection<Class<? extends Plugin>> getMockPlugins() {
return Arrays.asList(TestZenDiscovery.TestPlugin.class, TestSeedPlugin.class);
}
}

View File

@ -1,48 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse.ColumnInfo;
import java.sql.JDBCType;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
public class SqlActionIT extends AbstractSqlIntegTestCase {
public void testSqlAction() throws Exception {
assertAcked(client().admin().indices().prepareCreate("test").get());
client().prepareBulk()
.add(new IndexRequest("test", "doc", "1").source("data", "bar", "count", 42))
.add(new IndexRequest("test", "doc", "2").source("data", "baz", "count", 43))
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.get();
ensureYellow("test");
boolean dataBeforeCount = randomBoolean();
String columns = dataBeforeCount ? "data, count" : "count, data";
SqlResponse response = client().prepareExecute(SqlAction.INSTANCE)
.query("SELECT " + columns + " FROM test ORDER BY count").get();
assertThat(response.size(), equalTo(2L));
assertThat(response.columns(), hasSize(2));
int dataIndex = dataBeforeCount ? 0 : 1;
int countIndex = dataBeforeCount ? 1 : 0;
assertEquals(new ColumnInfo("data", "text", JDBCType.VARCHAR, 0), response.columns().get(dataIndex));
assertEquals(new ColumnInfo("count", "long", JDBCType.BIGINT, 20), response.columns().get(countIndex));
assertThat(response.rows(), hasSize(2));
assertEquals("bar", response.rows().get(0).get(dataIndex));
assertEquals(42L, response.rows().get(0).get(countIndex));
assertEquals("baz", response.rows().get(1).get(dataIndex));
assertEquals(43L, response.rows().get(1).get(countIndex));
}
}

View File

@ -1,92 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction;
import org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction.Response;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse;
import org.elasticsearch.xpack.sql.session.Cursor;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
public class SqlClearCursorActionIT extends AbstractSqlIntegTestCase {
public void testSqlClearCursorAction() throws Exception {
assertAcked(client().admin().indices().prepareCreate("test").get());
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
int indexSize = randomIntBetween(100, 300);
logger.info("Indexing {} records", indexSize);
for (int i = 0; i < indexSize; i++) {
bulkRequestBuilder.add(new IndexRequest("test", "doc", "id" + i).source("data", "bar", "count", i));
}
bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
ensureYellow("test");
assertEquals(0, getNumberOfSearchContexts());
int fetchSize = randomIntBetween(5, 20);
logger.info("Fetching {} records at a time", fetchSize);
SqlResponse sqlResponse = client().prepareExecute(SqlAction.INSTANCE).query("SELECT * FROM test").fetchSize(fetchSize).get();
assertEquals(fetchSize, sqlResponse.size());
assertThat(getNumberOfSearchContexts(), greaterThan(0L));
assertThat(sqlResponse.cursor(), notNullValue());
assertThat(sqlResponse.cursor(), not(equalTo(Cursor.EMPTY)));
Response cleanCursorResponse = client().prepareExecute(SqlClearCursorAction.INSTANCE).cursor(sqlResponse.cursor()).get();
assertTrue(cleanCursorResponse.isSucceeded());
assertEquals(0, getNumberOfSearchContexts());
}
public void testAutoCursorCleanup() throws Exception {
assertAcked(client().admin().indices().prepareCreate("test").get());
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
int indexSize = randomIntBetween(100, 300);
logger.info("Indexing {} records", indexSize);
for (int i = 0; i < indexSize; i++) {
bulkRequestBuilder.add(new IndexRequest("test", "doc", "id" + i).source("data", "bar", "count", i));
}
bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
ensureYellow("test");
assertEquals(0, getNumberOfSearchContexts());
int fetchSize = randomIntBetween(5, 20);
logger.info("Fetching {} records at a time", fetchSize);
SqlResponse sqlResponse = client().prepareExecute(SqlAction.INSTANCE).query("SELECT * FROM test").fetchSize(fetchSize).get();
assertEquals(fetchSize, sqlResponse.size());
assertThat(getNumberOfSearchContexts(), greaterThan(0L));
assertThat(sqlResponse.cursor(), notNullValue());
assertThat(sqlResponse.cursor(), not(equalTo(Cursor.EMPTY)));
long fetched = sqlResponse.size();
do {
sqlResponse = client().prepareExecute(SqlAction.INSTANCE).cursor(sqlResponse.cursor()).get();
fetched += sqlResponse.size();
} while (sqlResponse.cursor().equals(Cursor.EMPTY) == false);
assertEquals(indexSize, fetched);
Response cleanCursorResponse = client().prepareExecute(SqlClearCursorAction.INSTANCE).cursor(sqlResponse.cursor()).get();
assertFalse(cleanCursorResponse.isSucceeded());
assertEquals(0, getNumberOfSearchContexts());
}
private long getNumberOfSearchContexts() {
return client().admin().indices().prepareStats("test").clear().setSearch(true).get()
.getIndex("test").getTotal().getSearch().getOpenContexts();
}
}

View File

@ -1,42 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction;
import static org.hamcrest.CoreMatchers.either;
import static org.hamcrest.CoreMatchers.startsWith;
public class SqlDisabledIT extends AbstractSqlIntegTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put(XPackSettings.SQL_ENABLED.getKey(), false)
.build();
}
@Override
protected Settings transportClientSettings() {
return Settings.builder()
.put(super.transportClientSettings())
.put(XPackSettings.SQL_ENABLED.getKey(), randomBoolean())
.build();
}
public void testSqlAction() throws Exception {
Throwable throwable = expectThrows(Throwable.class,
() -> client().prepareExecute(SqlAction.INSTANCE).query("SHOW tables").get());
assertThat(throwable.getMessage(),
either(startsWith("no proxy found for action")) // disabled on client
.or(startsWith("failed to find action")) // disabled on proxy client
.or(startsWith("No handler for action [indices:data/read/sql]"))); // disabled on server
}
}

View File

@ -1,206 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.license.AbstractLicensesIntegrationTestCase;
import org.elasticsearch.license.License;
import org.elasticsearch.license.License.OperationMode;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.transport.Netty4Plugin;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlResponse;
import org.elasticsearch.xpack.sql.plugin.SqlTranslateAction;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.hamcrest.Matchers;
import org.junit.Before;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Locale;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.license.XPackLicenseStateTests.randomBasicStandardOrGold;
import static org.elasticsearch.license.XPackLicenseStateTests.randomTrialBasicStandardGoldOrPlatinumMode;
import static org.elasticsearch.license.XPackLicenseStateTests.randomTrialOrPlatinumMode;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
public class SqlLicenseIT extends AbstractLicensesIntegrationTestCase {
@Override
protected boolean ignoreExternalCluster() {
return true;
}
@Before
public void resetLicensing() throws Exception {
enableJdbcLicensing();
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
// Add Netty so we can test JDBC licensing because only exists on the REST layer.
List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins());
plugins.add(Netty4Plugin.class);
return plugins;
}
@Override
protected Settings nodeSettings(int nodeOrdinal) {
// Enable http so we can test JDBC licensing because only exists on the REST layer.
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put(NetworkModule.HTTP_ENABLED.getKey(), true)
.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME)
.build();
}
private static OperationMode randomValidSqlLicenseType() {
return randomTrialBasicStandardGoldOrPlatinumMode();
}
private static OperationMode randomInvalidSqlLicenseType() {
return OperationMode.MISSING;
}
private static OperationMode randomValidJdbcLicenseType() {
return randomTrialOrPlatinumMode();
}
private static OperationMode randomInvalidJdbcLicenseType() {
return randomBasicStandardOrGold();
}
public void enableSqlLicensing() throws Exception {
updateLicensing(randomValidSqlLicenseType());
}
public void disableSqlLicensing() throws Exception {
updateLicensing(randomInvalidSqlLicenseType());
}
public void enableJdbcLicensing() throws Exception {
updateLicensing(randomValidJdbcLicenseType());
}
public void disableJdbcLicensing() throws Exception {
updateLicensing(randomInvalidJdbcLicenseType());
}
public void updateLicensing(OperationMode licenseOperationMode) throws Exception {
String licenseType = licenseOperationMode.name().toLowerCase(Locale.ROOT);
wipeAllLicenses();
if (licenseType.equals("missing")) {
putLicenseTombstone();
} else {
License license = org.elasticsearch.license.TestUtils.generateSignedLicense(licenseType, TimeValue.timeValueMinutes(1));
putLicense(license);
}
}
public void testSqlActionLicense() throws Exception {
setupTestIndex();
disableSqlLicensing();
ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
() -> client().prepareExecute(SqlAction.INSTANCE).query("SELECT * FROM test").get());
assertThat(e.getMessage(), equalTo("current license is non-compliant for [sql]"));
enableSqlLicensing();
SqlResponse response = client().prepareExecute(SqlAction.INSTANCE).query("SELECT * FROM test").get();
assertThat(response.size(), Matchers.equalTo(2L));
}
public void testSqlTranslateActionLicense() throws Exception {
setupTestIndex();
disableSqlLicensing();
ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
() -> client().prepareExecute(SqlTranslateAction.INSTANCE).query("SELECT * FROM test").get());
assertThat(e.getMessage(), equalTo("current license is non-compliant for [sql]"));
enableSqlLicensing();
SqlTranslateAction.Response response = client().prepareExecute(SqlTranslateAction.INSTANCE).query("SELECT * FROM test").get();
SearchSourceBuilder source = response.source();
assertThat(source.docValueFields(), Matchers.contains("count"));
FetchSourceContext fetchSource = source.fetchSource();
assertThat(fetchSource.includes(), Matchers.arrayContaining("data"));
}
public void testJdbcActionLicense() throws Exception {
setupTestIndex();
disableJdbcLicensing();
Request request = new MetaTableRequest("test");
ResponseException responseException = expectThrows(ResponseException.class, () -> jdbc(request));
assertThat(responseException.getMessage(), containsString("current license is non-compliant for [jdbc]"));
assertThat(responseException.getMessage(), containsString("security_exception"));
enableJdbcLicensing();
Response response = jdbc(request);
assertThat(response, instanceOf(MetaTableResponse.class));
}
private Response jdbc(Request request) throws IOException {
// Convert the request to the HTTP entity that JDBC uses
HttpEntity entity;
try (BytesStreamOutput bytes = new BytesStreamOutput()) {
DataOutput out = new DataOutputStream(bytes);
Proto.INSTANCE.writeRequest(request, out);
entity = new ByteArrayEntity(BytesRef.deepCopyOf(bytes.bytes().toBytesRef()).bytes, ContentType.APPLICATION_JSON);
}
// Execute
InputStream response = getRestClient().performRequest("POST", "/_xpack/sql/jdbc", emptyMap(), entity).getEntity().getContent();
// Deserialize bytes to response like JDBC does
try {
DataInput in = new DataInputStream(response);
return Proto.INSTANCE.readResponse(request, in);
} finally {
response.close();
}
}
// TODO test SqlGetIndicesAction. Skipping for now because of lack of serialization support.
private void setupTestIndex() {
assertAcked(client().admin().indices().prepareCreate("test").get());
client().prepareBulk()
.add(new IndexRequest("test", "doc", "1").source("data", "bar", "count", 42))
.add(new IndexRequest("test", "doc", "2").source("data", "baz", "count", 43))
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.get();
}
}

View File

@ -1,40 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.xpack.sql.plugin.SqlTranslateAction;
import static java.util.Collections.singletonList;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
/**
 * Integration test for {@code SqlTranslateAction}, which translates SQL into
 * an Elasticsearch search request without executing it.
 */
public class SqlTranslateActionIT extends AbstractSqlIntegTestCase {
    public void testSqlTranslateAction() throws Exception {
        assertAcked(client().admin().indices().prepareCreate("test").get());
        client().prepareBulk()
                .add(new IndexRequest("test", "doc", "1").source("data", "bar", "count", 42))
                .add(new IndexRequest("test", "doc", "2").source("data", "baz", "count", 43))
                .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
                .get();
        ensureYellow("test");
        // Randomize the projection order to show it doesn't affect the translation.
        boolean columnOrder = randomBoolean();
        String columns = columnOrder ? "data, count" : "count, data";
        SqlTranslateAction.Response response = client().prepareExecute(SqlTranslateAction.INSTANCE)
                .query("SELECT " + columns + " FROM test ORDER BY count").get();
        SearchSourceBuilder source = response.source();
        // "data" is fetched from _source, "count" from doc values, and the
        // ORDER BY clause becomes a sort on the translated request.
        FetchSourceContext fetch = source.fetchSource();
        assertEquals(true, fetch.fetchSource());
        assertArrayEquals(new String[] { "data" }, fetch.includes());
        assertEquals(singletonList("count"), source.docValueFields());
        assertEquals(singletonList(SortBuilders.fieldSort("count")), source.sorts());
    }
}

View File

@ -1,15 +0,0 @@
{
"xpack.sql.clear_cursor": {
"documentation": "Clear SQL cursor",
"methods": [ "POST"],
"url": {
"path": "/_xpack/sql/close",
"paths": [ "/_xpack/sql/close" ],
"parts": {}
},
"body": {
"description" : "Specify the cursor value in the `cursor` element to clear the cursor.",
"required" : true
}
}
}

View File

@ -1,21 +0,0 @@
{
"xpack.sql.query": {
"documentation": "Execute SQL",
"methods": [ "POST", "GET" ],
"url": {
"path": "/_xpack/sql",
"paths": [ "/_xpack/sql" ],
"parts": {},
"params": {
"format": {
"type" : "string",
"description" : "a short version of the Accept header, e.g. json, yaml"
}
}
},
"body": {
"description" : "Use the `query` element to start a query. Use the `cursor` element to continue a query.",
"required" : true
}
}
}

View File

@ -1,16 +0,0 @@
{
"xpack.sql.translate": {
"documentation": "Translate SQL into Elasticsearch queries",
"methods": [ "POST", "GET" ],
"url": {
"path": "/_xpack/sql/translate",
"paths": [ "/_xpack/sql/translate" ],
"parts": {},
"params": {}
},
"body": {
"description" : "Specify the query in the `query` element.",
"required" : true
}
}
}

View File

@ -1,120 +0,0 @@
---
# Index three documents shared by every test in this file.
setup:
  - do:
      bulk:
        refresh: true
        body:
          - index:
              _index: test
              _type: doc
              _id: 1
          - str: test1
            int: 1
          - index:
              _index: test
              _type: doc
              _id: 2
          - str: test2
            int: 2
          - index:
              _index: test
              _type: doc
              _id: 3
          - str: test3
            int: 3

---
"Execute some SQL":
  - do:
      xpack.sql.query:
        format: json
        body:
          query: "SELECT * FROM test ORDER BY int asc"
  # Rows come back in the requested sort order.
  - match: { columns.0.name: int }
  - match: { columns.1.name: str }
  - match: { rows.0.0: 1 }
  - match: { rows.0.1: test1 }
  - match: { rows.1.0: 2 }
  - match: { rows.1.1: test2 }
  - match: { rows.2.0: 3 }
  - match: { rows.2.1: test3 }

---
"Paging through results":
  # First page: two of the three rows plus a cursor for the next page.
  - do:
      xpack.sql.query:
        format: json
        body:
          query: "SELECT * FROM test ORDER BY int asc"
          fetch_size: 2
  - match: { columns.0.name: int }
  - match: { columns.1.name: str }
  - match: { rows.0.0: 1 }
  - match: { rows.0.1: test1 }
  - match: { rows.1.0: 2 }
  - match: { rows.1.1: test2 }
  - is_true: cursor
  - set: { cursor: cursor }
  # Second page: the remaining row; columns are only sent on the first page.
  - do:
      xpack.sql.query:
        format: json
        body:
          cursor: "$cursor"
  - match: { rows.0.0: 3 }
  - match: { rows.0.1: test3 }
  - is_false: columns
  - is_true: cursor
  - set: { cursor: cursor }
  # Final page: empty, and no further cursor is returned.
  - do:
      xpack.sql.query:
        format: json
        body:
          cursor: "$cursor"
  - is_false: columns
  - is_false: cursor
  - length: { rows: 0 }

---
"Getting textual representation":
  - do:
      xpack.sql.query:
        format: text
        body:
          query: "SELECT * FROM test ORDER BY int asc"
  # NOTE(review): whitespace in this pattern is written as \s, which suggests
  # the framework compiles multi-line regexes ignoring literal whitespace --
  # confirm against the REST test runner before relying on alignment here.
  - match:
      $body: |
        /^ \s+ int \s+ \| \s+ str \s+ \n
        ---------------\+---------------\n
        1 \s+ \|test1 \s+ \n
        2 \s+ \|test2 \s+ \n
        3 \s+ \|test3 \s+ \n
        $/

---
"Clean cursor":
  - do:
      xpack.sql.query:
        format: json
        body:
          query: "SELECT * FROM test ORDER BY int asc"
          fetch_size: 2
  - match: { columns.0.name: int }
  - match: { columns.1.name: str }
  - match: { rows.0.0: 1 }
  - match: { rows.0.1: test1 }
  - is_true: cursor
  - set: { cursor: cursor}
  - do:
      xpack.sql.clear_cursor:
        body:
          cursor: "$cursor"
  - match: { "succeeded": true }
  # Clearing the cursor must release the server-side search context.
  - do:
      indices.stats: { index: 'test' }
  - match: { indices.test.total.search.open_contexts: 0 }

View File

@ -1,29 +0,0 @@
---
"Translate SQL":
  - do:
      bulk:
        refresh: true
        body:
          - index:
              _index: test
              _type: doc
              _id: 1
          - str: test1
            int: 1
  - do:
      xpack.sql.translate:
        body:
          query: "SELECT * FROM test ORDER BY int asc"
  # The SQL is rewritten into a plain search request: "str" is fetched via
  # _source, "int" via doc values, and the ORDER BY becomes a sort.
  - match:
      $body:
        size: 1000
        _source:
          includes:
            - str
          excludes: []
        docvalue_fields:
          - int
        sort:
          - int:
              order: asc

View File

@ -276,23 +276,6 @@ public class FullClusterRestartIT extends ESRestTestCase {
}
}
    /**
     * SQL refuses to query an index that contains more than one mapping type.
     * Such an index can only be created before 6.0, so the old-cluster phase
     * builds it and the upgraded-cluster phase asserts the query is rejected.
     */
    public void testSqlFailsOnIndexWithTwoTypes() throws IOException {
        // TODO this isn't going to trigger until we backport to 6.1
        assumeTrue("It is only possible to build an index that sql doesn't like before 6.0.0",
                oldClusterVersion.before(Version.V_6_0_0_alpha1));
        if (runningAgainstOldCluster) {
            // Old-cluster phase: create the two-type index and stop here.
            client().performRequest("POST", "/testsqlfailsonindexwithtwotypes/type1", emptyMap(),
                    new StringEntity("{}", ContentType.APPLICATION_JSON));
            client().performRequest("POST", "/testsqlfailsonindexwithtwotypes/type2", emptyMap(),
                    new StringEntity("{}", ContentType.APPLICATION_JSON));
            return;
        }
        // Upgraded-cluster phase: the query must be rejected with a 400.
        ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest("POST", "/_xpack/sql", emptyMap(),
                new StringEntity("{\"query\":\"SELECT * FROM testsqlfailsonindexwithtwotypes\"}", ContentType.APPLICATION_JSON)));
        assertEquals(400, e.getResponse().getStatusLine().getStatusCode());
        assertThat(e.getMessage(), containsString("Invalid index testsqlfailsonindexwithtwotypes; contains more than one type"));
    }
private String loadWatch(String watch) throws IOException {
return StreamsUtils.copyToStringFromClasspath("/org/elasticsearch/xpack/restart/" + watch);
}

View File

@ -1,175 +0,0 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.elasticsearch.gradle.test.RunTask

description = 'Integration tests for SQL'
apply plugin: 'elasticsearch.build'

dependencies {
  compile "org.elasticsearch.test:framework:${versions.elasticsearch}"

  // JDBC testing dependencies
  if (false == isEclipse && false == isIdea) {
    // If we're not doing IDE stuff use the shadowed jar
    compile(project(path: ':x-pack-elasticsearch:sql:jdbc', configuration: 'shadow'))
  } else {
    /* If we're doing IDE stuff then use the project
     * dependency so the IDEs don't get confused. Transitive
     * deps are OK here too because this is the only time we
     * pull all of those deps in. We make sure to exclude them
     * below so they don't cause jar hell with the shadowed
     * jar. */
    compile(project(':x-pack-elasticsearch:sql:jdbc'))
  }
  compile "net.sourceforge.csvjdbc:csvjdbc:1.0.34"
  runtime "com.h2database:h2:1.4.194"
  // used for running debug tests
  runtime 'org.antlr:antlr4-runtime:4.5.3'

  // There are *no* CLI testing dependencies because we
  // fork a new CLI process when we need it.

  // Used to support embedded testing mode
  compile(project(':x-pack-elasticsearch:sql:server')) {
    transitive = false
  }
  compile(project(':x-pack-elasticsearch:sql:cli-proto')) {
    transitive = false
  }
  compile "org.elasticsearch.client:transport:${version}"
}

/* disable unit tests because these are all integration tests used
 * other qa projects. */
test.enabled = false
dependencyLicenses.enabled = false

// Allow for com.sun.net.httpserver.* usage for embedded mode
eclipse {
  classpath.file {
    whenMerged { cp ->
      def con = entries.find { e ->
        e.kind == "con" && e.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER")
      }
      con.accessRules.add(new org.gradle.plugins.ide.eclipse.model.AccessRule(
          "accessible", "com/sun/net/httpserver/*"))
    }
  }
}
forbiddenApisMain {
  bundledSignatures -= 'jdk-non-portable'
  bundledSignatures += 'jdk-internal'
}

// the main files are actually test files, so use the appropriate forbidden api sigs
forbiddenApisMain {
  signaturesURLs = [PrecommitTasks.getResource('/forbidden/es-all-signatures.txt'),
                    PrecommitTasks.getResource('/forbidden/es-test-signatures.txt')]
}

thirdPartyAudit.excludes = [
  // H2 dependencies that we don't actually use....
  'javax.servlet.ServletConfig',
  'javax.servlet.ServletContext',
  'javax.servlet.ServletContextEvent',
  'javax.servlet.ServletContextListener',
  'javax.servlet.ServletOutputStream',
  'javax.servlet.http.HttpServlet',
  'javax.servlet.http.HttpServletRequest',
  'javax.servlet.http.HttpServletResponse',
  'org.apache.lucene.document.Field$Index',
  'org.apache.lucene.queryParser.QueryParser',
  'org.osgi.framework.BundleActivator',
  'org.osgi.framework.BundleContext',
  'org.osgi.service.jdbc.DataSourceFactory',
  'org.slf4j.Logger',
  'org.slf4j.LoggerFactory',
]

// Shared wiring for the per-flavor qa subprojects (multinode, security, ...).
subprojects {
  apply plugin: 'elasticsearch.standalone-rest-test'
  configurations {
    cliFixture
  }
  dependencies {
    /* Since we're a standalone rest test we actually get transitive
     * dependencies but we don't really want them because they cause
     * all kinds of trouble with the jar hell checks. So we suppress
     * them explicitly for non-es projects. */
    testCompile(project(':x-pack-elasticsearch:qa:sql')) {
      transitive = false
    }
    testCompile "org.elasticsearch.test:framework:${versions.elasticsearch}"

    // JDBC testing dependencies
    testRuntime(project(':x-pack-elasticsearch:sql:jdbc')) {
      if (false == isEclipse && false == isIdea) {
        /* Skip the transitive dependencies of the server when outside
         * of an IDE because outside of an IDE we use the jdbc jar
         * which includes all the transitive dependencies *already*.
         * If we didn't skip these dependencies the jar hell checks
         * would fail. And we need the transitive dependencies to
         * run in embedded mode but only do that inside of an IDE. */
        transitive = false
      }
    }
    testRuntime("net.sourceforge.csvjdbc:csvjdbc:1.0.34") {
      transitive = false
    }
    testRuntime("com.h2database:h2:1.4.194") {
      transitive = false
    }
    testRuntime("org.antlr:antlr4-runtime:4.5.3") {
      transitive = false
    }
    cliFixture project(':x-pack-elasticsearch:test:sql-cli-fixture')

    // Used to support embedded testing mode
    testRuntime(project(':x-pack-elasticsearch:sql:server')) {
      transitive = false
    }
    testRuntime "org.elasticsearch.client:transport:${version}"
  }

  if (project.name != 'security') {
    // The security project just configures it subprojects
    apply plugin: 'elasticsearch.rest-test'

    // Forks the CLI in a separate JVM so the CLI tests can talk to it.
    task cliFixture(type: org.elasticsearch.gradle.test.AntFixture) {
      Project cli = project(':x-pack-elasticsearch:sql:cli')
      dependsOn project.configurations.cliFixture
      dependsOn cli.jar
      executable = new File(project.javaHome, 'bin/java')
      env 'CLASSPATH', "${ -> project.configurations.cliFixture.asPath }"
      args 'org.elasticsearch.xpack.sql.cli.fixture.CliFixture',
          baseDir, "${ -> cli.jar.outputs.files.singleFile}"
    }
    integTestCluster {
      distribution = 'zip'
      plugin project(':x-pack-elasticsearch:plugin').path
      setting 'xpack.monitoring.enabled', 'false'
      setting 'xpack.ml.enabled', 'false'
      setting 'xpack.watcher.enabled', 'false'
      setting 'script.max_compilations_rate', '1000/1m'
      dependsOn cliFixture
    }
    integTestRunner {
      systemProperty 'tests.cli.fixture', "${ -> cliFixture.addressAndPort }"
      finalizedBy cliFixture.stopTask
    }

    // `gradle run` variant of the cluster above for manual testing.
    task run(type: RunTask) {
      distribution = 'zip'
      plugin project(':x-pack-elasticsearch:plugin').path
      setting 'xpack.monitoring.enabled', 'false'
      setting 'xpack.ml.enabled', 'false'
      setting 'xpack.watcher.enabled', 'false'
      setting 'script.max_compilations_rate', '1000/1m'
      dependsOn cliFixture
    }
    run.finalizedBy cliFixture.stopTask
  }
}

View File

@ -1,4 +0,0 @@
// Two-node cluster with security disabled so the tests exercise plain SQL.
integTestCluster {
  numNodes = 2
  setting 'xpack.security.enabled', 'false'
}

View File

@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.cli.ErrorsTestCase;
/**
 * Runs the shared CLI error-reporting tests against a multi-node cluster.
 */
public class CliErrorsIT extends ErrorsTestCase {
}

View File

@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.cli.FetchSizeTestCase;
/**
 * Runs the shared CLI fetch-size tests against a multi-node cluster.
 */
public class CliFetchSizeIT extends FetchSizeTestCase {
}

View File

@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.cli.SelectTestCase;
/**
 * Runs the shared CLI SELECT tests against a multi-node cluster.
 */
public class CliSelectIT extends SelectTestCase {
}

View File

@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.cli.ShowTestCase;
/**
 * Runs the shared CLI SHOW tests against a multi-node cluster.
 */
public class CliShowIT extends ShowTestCase {
}

View File

@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.jdbc.ConnectionTestCase;
/**
 * Runs the shared JDBC connection tests against a multi-node cluster.
 */
public class JdbcConnectionIT extends ConnectionTestCase {
}

View File

@ -1,14 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.jdbc.CsvSpecTestCase;
/**
 * Runs the CSV-driven JDBC specification tests against a multi-node cluster.
 */
public class JdbcCsvSpecIT extends CsvSpecTestCase {
    public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) {
        super(fileName, groupName, testName, lineNumber, testCase);
    }
}

View File

@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.jdbc.DatabaseMetaDataTestCase;
/**
 * Runs the shared JDBC {@code DatabaseMetaData} tests against a multi-node cluster.
 */
public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase {
}

View File

@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.jdbc.ErrorsTestCase;
/**
 * Runs the shared JDBC error-reporting tests against a multi-node cluster.
 */
public class JdbcErrorsIT extends ErrorsTestCase {
}

View File

@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.jdbc.FetchSizeTestCase;
/**
 * Runs the shared JDBC fetch-size tests against a multi-node cluster.
 */
public class JdbcFetchSizeIT extends FetchSizeTestCase {
}

View File

@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.jdbc.ShowTablesTestCase;
/**
 * Runs the shared JDBC SHOW TABLES tests against a multi-node cluster.
 */
public class JdbcShowTablesIT extends ShowTablesTestCase {
}

View File

@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.jdbc.SimpleExampleTestCase;
/**
 * Runs the shared JDBC simple-example tests against a multi-node cluster.
 */
public class JdbcSimpleExampleIT extends SimpleExampleTestCase {
}

View File

@ -1,14 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase;
/**
 * Runs the SQL-spec-file-driven JDBC tests against a multi-node cluster.
 */
public class JdbcSqlSpecIT extends SqlSpecTestCase {
    public JdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) {
        super(fileName, groupName, testName, lineNumber, query);
    }
}

View File

@ -1,15 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase;
/**
 * Integration test for the rest sql action. The one that speaks json directly to a
 * user rather than to the JDBC driver or CLI. Runs against a multi-node cluster.
 */
public class RestSqlIT extends RestSqlTestCase {
}

View File

@ -1,116 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.multinode;
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.test.NotEqualMessageBuilder;
import org.elasticsearch.test.rest.ESRestTestCase;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.UnsupportedCharsetException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.columnInfo;
/**
 * Tests specific to multiple nodes.
 */
public class RestSqlMultinodeIT extends ESRestTestCase {
    /**
     * Tests count of index run across multiple nodes.
     */
    public void testIndexSpread() throws IOException {
        int documents = between(10, 100);
        createTestData(documents);
        assertCount(client(), documents);
    }

    /**
     * Tests count against index on a node that doesn't have any shards of the index.
     */
    public void testIndexOnWrongNode() throws IOException {
        HttpHost firstHost = getClusterHosts().get(0);
        String firstHostName = null;

        // Find the node name whose published http address matches the first host.
        String match = firstHost.getHostName() + ":" + firstHost.getPort();
        Map<String, Object> nodesInfo = responseToMap(client().performRequest("GET", "/_nodes"));
        @SuppressWarnings("unchecked")
        Map<String, Object> nodes = (Map<String, Object>) nodesInfo.get("nodes");
        for (Map.Entry<String, Object> node : nodes.entrySet()) {
            String name = node.getKey();
            Map<?, ?> nodeEntries = (Map<?, ?>) node.getValue();
            Map<?, ?> http = (Map<?, ?>) nodeEntries.get("http");
            List<?> boundAddress = (List<?>) http.get("bound_address");
            if (boundAddress.contains(match)) {
                firstHostName = name;
                break;
            }
        }
        assertNotNull("Didn't find first host among published addresses", firstHostName);

        // Keep every shard of the test index off of the first node...
        XContentBuilder index = JsonXContent.contentBuilder().prettyPrint().startObject();
        index.startObject("settings"); {
            index.field("routing.allocation.exclude._name", firstHostName);
        }
        index.endObject();
        index.endObject();
        client().performRequest("PUT", "/test", emptyMap(), new StringEntity(index.string(), ContentType.APPLICATION_JSON));
        int documents = between(10, 100);
        createTestData(documents);

        // ...then count through a client that only talks to that node.
        try (RestClient firstNodeClient = buildClient(restClientSettings(), new HttpHost[] {firstHost})) {
            assertCount(firstNodeClient, documents);
        }
    }

    /** Bulk indexes {@code documents} docs with three int fields into the {@code test} index. */
    private void createTestData(int documents) throws UnsupportedCharsetException, IOException {
        StringBuilder bulk = new StringBuilder();
        for (int i = 0; i < documents; i++) {
            int a = 3 * i;
            int b = a + 1;
            int c = b + 1;
            // Fixed: the action line was missing its outer closing brace which
            // makes the bulk action/metadata JSON malformed.
            bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n");
            bulk.append("{\"a\": " + a + ", \"b\": " + b + ", \"c\": " + c + "}\n");
        }
        client().performRequest("PUT", "/test/test/_bulk", singletonMap("refresh", "true"),
                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
    }

    /** Parses a REST response body into a map. */
    private Map<String, Object> responseToMap(Response response) throws IOException {
        try (InputStream content = response.getEntity().getContent()) {
            return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
        }
    }

    /** Asserts that {@code SELECT COUNT(*) FROM test} through {@code client} returns {@code count}. */
    private void assertCount(RestClient client, int count) throws IOException {
        Map<String, Object> expected = new HashMap<>();
        expected.put("columns", singletonList(columnInfo("COUNT(1)", "long")));
        expected.put("rows", singletonList(singletonList(count)));
        expected.put("size", 1);

        Map<String, Object> actual = responseToMap(client.performRequest("POST", "/_xpack/sql", singletonMap("format", "json"),
                new StringEntity("{\"query\": \"SELECT COUNT(*) FROM test\"}", ContentType.APPLICATION_JSON)));
        if (false == expected.equals(actual)) {
            NotEqualMessageBuilder message = new NotEqualMessageBuilder();
            message.compareMaps(actual, expected);
            fail("Response does not match:\n" + message.toString());
        }
    }
}

View File

@ -1,7 +0,0 @@
// Disable security for both the integration test cluster and `gradle run`.
integTestCluster {
  setting 'xpack.security.enabled', 'false'
}
run {
  setting 'xpack.security.enabled', 'false'
}

View File

@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.cli.ErrorsTestCase;
/**
 * Runs the shared CLI error-reporting tests against a cluster without security.
 */
public class CliErrorsIT extends ErrorsTestCase {
}

View File

@ -1,139 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.cli.CliIntegrationTestCase;
import java.io.IOException;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.startsWith;
/**
 * Tests the CLI {@code EXPLAIN} command, asserting the exact textual output
 * of each plan stage (parsed, analyzed, optimized, executable) line by line.
 */
public class CliExplainIT extends CliIntegrationTestCase {
    /** EXPLAIN of an unfiltered SELECT at every plan stage. */
    public void testExplainBasic() throws IOException {
        index("test", body -> body.field("test_field", "test_value"));

        assertThat(command("EXPLAIN (PLAN PARSED) SELECT * FROM test"), containsString("plan"));
        assertThat(readLine(), startsWith("----------"));
        assertThat(readLine(), startsWith("With[{}]"));
        assertThat(readLine(), startsWith("\\_Project[[?*]]"));
        assertThat(readLine(), startsWith(" \\_UnresolvedRelation[[index=test],null,Unknown index [test]]"));
        assertEquals("", readLine());

        // ANALYZED is the default plan type, hence the randomized prefix.
        assertThat(command("EXPLAIN " + (randomBoolean() ? "" : "(PLAN ANALYZED) ") + "SELECT * FROM test"), containsString("plan"));
        assertThat(readLine(), startsWith("----------"));
        assertThat(readLine(), startsWith("Project[[test_field{r}#"));
        assertThat(readLine(), startsWith("\\_SubQueryAlias[test]"));
        assertThat(readLine(), startsWith(" \\_EsRelation[test][test_field{r}#"));
        assertEquals("", readLine());

        assertThat(command("EXPLAIN (PLAN OPTIMIZED) SELECT * FROM test"), containsString("plan"));
        assertThat(readLine(), startsWith("----------"));
        assertThat(readLine(), startsWith("Project[[test_field{r}#"));
        assertThat(readLine(), startsWith("\\_EsRelation[test][test_field{r}#"));
        assertEquals("", readLine());

        // TODO in this case we should probably remove the source filtering entirely. Right? It costs but we don't need it.
        assertThat(command("EXPLAIN (PLAN EXECUTABLE) SELECT * FROM test"), containsString("plan"));
        assertThat(readLine(), startsWith("----------"));
        assertThat(readLine(), startsWith("EsQueryExec[test,{"));
        assertThat(readLine(), startsWith(" \"_source\" : {"));
        assertThat(readLine(), startsWith(" \"includes\" : ["));
        assertThat(readLine(), startsWith(" \"test_field\""));
        assertThat(readLine(), startsWith(" ],"));
        assertThat(readLine(), startsWith(" \"excludes\" : [ ]"));
        assertThat(readLine(), startsWith(" }"));
        assertThat(readLine(), startsWith("}]"));
        assertEquals("", readLine());
    }

    /** EXPLAIN of a SELECT with a WHERE clause; the filter becomes a term query. */
    public void testExplainWithWhere() throws IOException {
        index("test", body -> body.field("test_field", "test_value1").field("i", 1));
        index("test", body -> body.field("test_field", "test_value2").field("i", 2));

        assertThat(command("EXPLAIN (PLAN PARSED) SELECT * FROM test WHERE i = 2"), containsString("plan"));
        assertThat(readLine(), startsWith("----------"));
        assertThat(readLine(), startsWith("With[{}]"));
        assertThat(readLine(), startsWith("\\_Project[[?*]]"));
        assertThat(readLine(), startsWith(" \\_Filter[?i = 2]"));
        assertThat(readLine(), startsWith(" \\_UnresolvedRelation[[index=test],null,Unknown index [test]]"));
        assertEquals("", readLine());

        assertThat(command("EXPLAIN " + (randomBoolean() ? "" : "(PLAN ANALYZED) ") + "SELECT * FROM test WHERE i = 2"),
                containsString("plan"));
        assertThat(readLine(), startsWith("----------"));
        assertThat(readLine(), startsWith("Project[[i{r}#"));
        assertThat(readLine(), startsWith("\\_Filter[i{r}#"));
        assertThat(readLine(), startsWith(" \\_SubQueryAlias[test]"));
        assertThat(readLine(), startsWith(" \\_EsRelation[test][i{r}#"));
        assertEquals("", readLine());

        assertThat(command("EXPLAIN (PLAN OPTIMIZED) SELECT * FROM test WHERE i = 2"), containsString("plan"));
        assertThat(readLine(), startsWith("----------"));
        assertThat(readLine(), startsWith("Project[[i{r}#"));
        assertThat(readLine(), startsWith("\\_Filter[i{r}#"));
        assertThat(readLine(), startsWith(" \\_EsRelation[test][i{r}#"));
        assertEquals("", readLine());

        assertThat(command("EXPLAIN (PLAN EXECUTABLE) SELECT * FROM test WHERE i = 2"), containsString("plan"));
        assertThat(readLine(), startsWith("----------"));
        assertThat(readLine(), startsWith("EsQueryExec[test,{"));
        assertThat(readLine(), startsWith(" \"query\" : {"));
        assertThat(readLine(), startsWith(" \"term\" : {"));
        assertThat(readLine(), startsWith(" \"i\" : {"));
        assertThat(readLine(), startsWith(" \"value\" : 2,"));
        assertThat(readLine(), startsWith(" \"boost\" : 1.0"));
        assertThat(readLine(), startsWith(" }"));
        assertThat(readLine(), startsWith(" }"));
        assertThat(readLine(), startsWith(" },"));
        assertThat(readLine(), startsWith(" \"_source\" : {"));
        assertThat(readLine(), startsWith(" \"includes\" : ["));
        assertThat(readLine(), startsWith(" \"test_field\""));
        assertThat(readLine(), startsWith(" ],"));
        assertThat(readLine(), startsWith(" \"excludes\" : [ ]"));
        assertThat(readLine(), startsWith(" },"));
        assertThat(readLine(), startsWith(" \"docvalue_fields\" : ["));
        assertThat(readLine(), startsWith(" \"i\""));
        assertThat(readLine(), startsWith(" ]"));
        assertThat(readLine(), startsWith("}]"));
        assertEquals("", readLine());
    }

    /** EXPLAIN of COUNT(*); the executable plan fetches no documents at all. */
    public void testExplainWithCount() throws IOException {
        index("test", body -> body.field("test_field", "test_value1").field("i", 1));
        index("test", body -> body.field("test_field", "test_value2").field("i", 2));

        assertThat(command("EXPLAIN (PLAN PARSED) SELECT COUNT(*) FROM test"), containsString("plan"));
        assertThat(readLine(), startsWith("----------"));
        assertThat(readLine(), startsWith("With[{}]"));
        assertThat(readLine(), startsWith("\\_Project[[?COUNT(?*)]]"));
        assertThat(readLine(), startsWith(" \\_UnresolvedRelation[[index=test],null,Unknown index [test]]"));
        assertEquals("", readLine());

        assertThat(command("EXPLAIN " + (randomBoolean() ? "" : "(PLAN ANALYZED) ") + "SELECT COUNT(*) FROM test"),
                containsString("plan"));
        assertThat(readLine(), startsWith("----------"));
        assertThat(readLine(), startsWith("Aggregate[[],[COUNT(1)#"));
        assertThat(readLine(), startsWith("\\_SubQueryAlias[test]"));
        assertThat(readLine(), startsWith(" \\_EsRelation[test][i{r}#"));
        assertEquals("", readLine());

        assertThat(command("EXPLAIN (PLAN OPTIMIZED) SELECT COUNT(*) FROM test"), containsString("plan"));
        assertThat(readLine(), startsWith("----------"));
        assertThat(readLine(), startsWith("Aggregate[[],[COUNT(1)#"));
        assertThat(readLine(), startsWith("\\_EsRelation[test][i{r}#"));
        assertEquals("", readLine());

        assertThat(command("EXPLAIN (PLAN EXECUTABLE) SELECT COUNT(*) FROM test"), containsString("plan"));
        assertThat(readLine(), startsWith("----------"));
        assertThat(readLine(), startsWith("EsQueryExec[test,{"));
        assertThat(readLine(), startsWith(" \"size\" : 0,"));
        assertThat(readLine(), startsWith(" \"_source\" : false,"));
        assertThat(readLine(), startsWith(" \"stored_fields\" : \"_none_\""));
        assertThat(readLine(), startsWith("}]"));
        assertEquals("", readLine());
    }
}

View File

@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.cli.FetchSizeTestCase;
/**
 * Runs the shared CLI fetch-size tests against a cluster without security.
 */
public class CliFetchSizeIT extends FetchSizeTestCase {
}

View File

@ -1,11 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.cli.SelectTestCase;
/**
 * Runs the shared CLI SELECT tests against a cluster without security
 * enabled. All test methods are inherited from {@link SelectTestCase}.
 */
public class CliSelectIT extends SelectTestCase {
}

View File

@ -1,11 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.cli.ShowTestCase;
/**
 * Runs the shared CLI SHOW command tests against a cluster without security
 * enabled. All test methods are inherited from {@link ShowTestCase}.
 */
public class CliShowIT extends ShowTestCase {
}

View File

@ -1,11 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.jdbc.ConnectionTestCase;
/**
 * Runs the shared JDBC connection tests against a cluster without security
 * enabled. All test methods are inherited from {@link ConnectionTestCase}.
 */
public class JdbcConnectionIT extends ConnectionTestCase {
}

View File

@ -1,14 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.jdbc.CsvSpecTestCase;
/**
 * Runs the CSV spec tests over JDBC against a cluster without security
 * enabled. All test logic lives in {@link CsvSpecTestCase}.
 */
public class JdbcCsvSpecIT extends CsvSpecTestCase {
    // Constructor signature matches the parameterized superclass; presumably
    // invoked once per test case found in the CSV spec files — see superclass.
    public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) {
        super(fileName, groupName, testName, lineNumber, testCase);
    }
}

View File

@ -1,11 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.jdbc.DatabaseMetaDataTestCase;
/**
 * Runs the shared JDBC {@code DatabaseMetaData} tests against a cluster
 * without security enabled. Tests are inherited from {@link DatabaseMetaDataTestCase}.
 */
public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase {
}

View File

@ -1,11 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.jdbc.ErrorsTestCase;
/**
 * Runs the shared JDBC error-reporting tests against a cluster without
 * security enabled. All test methods are inherited from {@link ErrorsTestCase}.
 */
public class JdbcErrorsIT extends ErrorsTestCase {
}

View File

@ -1,11 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.jdbc.FetchSizeTestCase;
/**
 * Runs the shared JDBC fetch-size tests against a cluster without security
 * enabled. All test methods are inherited from {@link FetchSizeTestCase}.
 */
public class JdbcFetchSizeIT extends FetchSizeTestCase {
}

View File

@ -1,11 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.jdbc.ShowTablesTestCase;
/**
 * Runs the shared JDBC SHOW TABLES tests against a cluster without security
 * enabled. All test methods are inherited from {@link ShowTablesTestCase}.
 */
public class JdbcShowTablesIT extends ShowTablesTestCase {
}

View File

@ -1,11 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.jdbc.SimpleExampleTestCase;
/**
 * Runs the simple JDBC usage example tests against a cluster without security
 * enabled. All test methods are inherited from {@link SimpleExampleTestCase}.
 */
public class JdbcSimpleExampleIT extends SimpleExampleTestCase {
}

View File

@ -1,14 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase;
/**
 * Runs the SQL spec tests over JDBC against a cluster without security
 * enabled. All test logic lives in {@link SqlSpecTestCase}.
 */
public class JdbcSqlSpecIT extends SqlSpecTestCase {
    // Constructor signature matches the parameterized superclass; presumably
    // invoked once per query found in the SQL spec files — see superclass.
    public JdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) {
        super(fileName, groupName, testName, lineNumber, query);
    }
}

View File

@ -1,15 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.nosecurity;
import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase;
/**
 * Integration test for the rest sql action. The one that speaks json directly to a
 * user rather than to the JDBC driver or CLI. Runs against a cluster without
 * security enabled; all test methods are inherited from {@link RestSqlTestCase}.
 */
public class RestSqlIT extends RestSqlTestCase {
}

View File

@ -1,57 +0,0 @@
// Build script for the SQL security QA project. The subprojects (ssl / no-ssl
// variants) share this project's test sources and resources.
dependencies {
    // Test-only compile dependency on the x-pack plugin classes; transitive
    // dependencies are intentionally excluded.
    testCompile(project(path: ':x-pack-elasticsearch:plugin', configuration: 'runtime')) {
        transitive = false
    }
}
Project mainProject = project
subprojects {
    // Use resources from the parent project in subprojects
    sourceSets {
        test {
            java {
                srcDirs = ["${mainProject.projectDir}/src/test/java"]
            }
            resources {
                srcDirs = ["${mainProject.projectDir}/src/test/resources"]
            }
        }
    }
    dependencies {
        testCompile(project(path: ':x-pack-elasticsearch:plugin', configuration: 'runtime')) {
            transitive = false
        }
    }
    integTestCluster {
        // Setup auditing so we can use it in some tests
        setting 'xpack.security.audit.enabled', 'true'
        setting 'xpack.security.audit.outputs', 'logfile'
        // Setup roles used by tests
        extraConfigFile 'x-pack/roles.yml', '../roles.yml'
        /* Setup the one admin user that we run the tests as.
         * Tests use "run as" to get different users. */
        setupCommand 'setupUser#test_admin',
            'bin/x-pack/users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser'
        // Subprojects override the wait condition to work properly with security
    }
    integTestRunner {
        // Tests assert against the audit log file of the first (only) node.
        systemProperty 'tests.audit.logfile',
            "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_access.log"
    }
    run {
        // Setup auditing so we can use it in some tests
        setting 'xpack.security.audit.enabled', 'true'
        setting 'xpack.security.audit.outputs', 'logfile'
        // Setup roles used by tests
        extraConfigFile 'x-pack/roles.yml', '../roles.yml'
        /* Setup the one admin user that we run the tests as.
         * Tests use "run as" to get different users. */
        setupCommand 'setupUser#test_admin',
            'bin/x-pack/users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser'
    }
}

View File

@ -1,16 +0,0 @@
// Build script for the no-ssl variant of the SQL security QA project.
integTestRunner {
    // Tell the tests SSL is off so they connect over plain http.
    systemProperty 'tests.ssl.enabled', 'false'
}
integTestCluster {
    // With security enabled the default wait condition fails (it is
    // unauthenticated), so poll cluster health with the admin credentials.
    waitCondition = { node, ant ->
        File tmpFile = new File(node.cwd, 'wait.success')
        ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow",
                dest: tmpFile.toString(),
                username: 'test_admin',
                password: 'x-pack-test-password',
                ignoreerrors: true,
                retries: 10)
        return tmpFile.exists()
    }
}

View File

@ -1,60 +0,0 @@
# Security roles used by the SQL security integration tests. Each role grants
# the cluster privilege needed by JDBC metadata plus varying index access so
# tests can check that SQL respects index-, field-, and document-level security.

# Full read access to both test indices.
read_all:
  cluster:
    - "cluster:monitor/main" # Used by JDBC's MetaData
  indices:
    - names: test
      privileges: [read, "indices:admin/get"]
    - names: bort
      privileges: [read, "indices:admin/get"]
# Access only to an index the tests never create — queries should fail.
read_something_else:
  cluster:
    - "cluster:monitor/main" # Used by JDBC's MetaData
  indices:
    - names: something_that_isnt_test
      privileges: [read, "indices:admin/get"]
# Field-level security: only field "a" of "test" is visible.
read_test_a:
  cluster:
    - "cluster:monitor/main" # Used by JDBC's MetaData
  indices:
    - names: test
      privileges: [read, "indices:admin/get"]
      field_security:
        grant: [a]
# Field-level security: all fields of "test" except "c".
read_test_a_and_b:
  cluster:
    - "cluster:monitor/main" # Used by JDBC's MetaData
  indices:
    - names: test
      privileges: [read, "indices:admin/get"]
      field_security:
        grant: ["*"]
        except: [c]
# Document-level security: documents where c == 3 are hidden.
read_test_without_c_3:
  cluster:
    - "cluster:monitor/main" # Used by JDBC's MetaData
  indices:
    - names: test
      privileges: [read, "indices:admin/get"]
      query: |
        {
          "bool": {
            "must_not": [
              {
                "match": {
                  "c": 3
                }
              }
            ]
          }
        }
# Read access restricted to the "bort" index only.
read_bort:
  cluster:
    - "cluster:monitor/main" # Used by JDBC's MetaData
  indices:
    - names: bort
      privileges: [read, "indices:admin/get"]

View File

@ -1,27 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.cli.ErrorsTestCase;
import org.elasticsearch.xpack.qa.sql.cli.RemoteCli.SecurityConfig;
/**
 * Runs the shared CLI error-reporting tests against a security-enabled
 * cluster, connecting as the admin user (https when SSL is enabled).
 */
public class CliErrorsIT extends ErrorsTestCase {
    @Override
    protected Settings restClientSettings() {
        // Admin credentials (and SSL settings when enabled) for REST test setup.
        return RestSqlIT.securitySettings();
    }
    @Override
    protected String getProtocol() {
        return RestSqlIT.SSL_ENABLED ? "https" : "http";
    }
    @Override
    protected SecurityConfig securityConfig() {
        // CLI connects as the test_admin superuser.
        return CliSecurityIT.adminSecurityConfig();
    }
}

View File

@ -1,27 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.cli.FetchSizeTestCase;
import org.elasticsearch.xpack.qa.sql.cli.RemoteCli.SecurityConfig;
/**
 * Runs the shared CLI fetch-size tests against a security-enabled cluster,
 * connecting as the admin user (https when SSL is enabled).
 */
public class CliFetchSizeIT extends FetchSizeTestCase {
    @Override
    protected Settings restClientSettings() {
        // Admin credentials (and SSL settings when enabled) for REST test setup.
        return RestSqlIT.securitySettings();
    }
    @Override
    protected String getProtocol() {
        return RestSqlIT.SSL_ENABLED ? "https" : "http";
    }
    @Override
    protected SecurityConfig securityConfig() {
        // CLI connects as the test_admin superuser.
        return CliSecurityIT.adminSecurityConfig();
    }
}

View File

@ -1,171 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.xpack.qa.sql.cli.RemoteCli;
import org.elasticsearch.xpack.qa.sql.cli.RemoteCli.SecurityConfig;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.xpack.qa.sql.cli.CliIntegrationTestCase.elasticsearchAddress;
import static org.hamcrest.Matchers.both;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.startsWith;
/**
 * SQL security tests driven through the CLI: supplies CLI-based
 * {@code Actions} implementations to the shared {@code SqlSecurityTestCase}.
 * NOTE: several expected strings below contain terminal escape sequences
 * emitted by the CLI's colored output.
 */
public class CliSecurityIT extends SqlSecurityTestCase {
    /**
     * Security configuration for connecting the CLI as the {@code test_admin}
     * superuser, including the test keystore when SSL is enabled.
     */
    static SecurityConfig adminSecurityConfig() {
        String keystoreLocation;
        String keystorePassword;
        if (RestSqlIT.SSL_ENABLED) {
            Path keyStore;
            try {
                keyStore = PathUtils.get(RestSqlIT.class.getResource("/test-node.jks").toURI());
            } catch (URISyntaxException e) {
                throw new RuntimeException("exception while reading the store", e);
            }
            if (!Files.exists(keyStore)) {
                throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist.");
            }
            keystoreLocation = keyStore.toAbsolutePath().toString();
            keystorePassword = "keypass";
        } else {
            // Plain http: no keystore needed.
            keystoreLocation = null;
            keystorePassword = null;
        }
        return new SecurityConfig(RestSqlIT.SSL_ENABLED, "test_admin", "x-pack-test-password", keystoreLocation, keystorePassword);
    }
    /**
     * Perform security test actions using the CLI.
     */
    private static class CliActions implements Actions {
        // Config for a specific user; null means the admin user. Non-admin
        // users reuse the admin keystore but authenticate with "testpass".
        private SecurityConfig userSecurity(String user) {
            SecurityConfig admin = adminSecurityConfig();
            if (user == null) {
                return admin;
            }
            return new SecurityConfig(RestSqlIT.SSL_ENABLED, user, "testpass", admin.keystoreLocation(), admin.keystorePassword());
        }
        @Override
        public void queryWorksAsAdmin() throws Exception {
            try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, adminSecurityConfig())) {
                assertThat(cli.command("SELECT * FROM test ORDER BY a"), containsString("a | b | c"));
                assertEquals("---------------+---------------+---------------", cli.readLine());
                assertThat(cli.readLine(), containsString("1 |2 |3"));
                assertThat(cli.readLine(), containsString("4 |5 |6"));
                assertEquals("", cli.readLine());
            }
        }
        @Override
        public void expectMatchesAdmin(String adminSql, String user, String userSql) throws Exception {
            expectMatchesAdmin(adminSql, user, userSql, cli -> {});
        }
        @Override
        public void expectScrollMatchesAdmin(String adminSql, String user, String userSql) throws Exception {
            // Force scrolling by fetching one row at a time; the expected
            // strings contain the CLI's color escape sequences.
            expectMatchesAdmin(adminSql, user, userSql, cli -> {
                assertEquals("fetch size set to [90m1[0m", cli.command("fetch size = 1"));
                assertEquals("fetch separator set to \"[90m -- fetch sep -- [0m\"",
                    cli.command("fetch separator = \" -- fetch sep -- \""));
            });
        }
        /**
         * Runs {@code adminSql} as admin and {@code userSql} as {@code user},
         * asserting the CLI output is line-for-line identical. {@code customizer}
         * runs against each CLI session before the query (e.g. to set fetch size).
         */
        public void expectMatchesAdmin(String adminSql, String user, String userSql,
                CheckedConsumer<RemoteCli, Exception> customizer) throws Exception {
            List<String> adminResult = new ArrayList<>();
            try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, adminSecurityConfig())) {
                customizer.accept(cli);
                adminResult.add(cli.command(adminSql));
                String line;
                do {
                    line = cli.readLine();
                    adminResult.add(line);
                } while (false == (line.equals("[0m") || line.equals("")));
                adminResult.add(line);
            }
            Iterator<String> expected = adminResult.iterator();
            try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, userSecurity(user))) {
                customizer.accept(cli);
                assertTrue(expected.hasNext());
                assertEquals(expected.next(), cli.command(userSql));
                String line;
                do {
                    line = cli.readLine();
                    assertTrue(expected.hasNext());
                    assertEquals(expected.next(), line);
                } while (false == (line.equals("[0m") || line.equals("")));
                assertTrue(expected.hasNext());
                assertEquals(expected.next(), line);
                assertFalse(expected.hasNext());
            }
        }
        @Override
        public void expectDescribe(Map<String, String> columns, String user) throws Exception {
            try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, userSecurity(user))) {
                assertThat(cli.command("DESCRIBE test"), containsString("column | type"));
                assertEquals("---------------+---------------", cli.readLine());
                for (Map.Entry<String, String> column : columns.entrySet()) {
                    assertThat(cli.readLine(), both(startsWith(column.getKey())).and(containsString("|" + column.getValue())));
                }
                assertEquals("", cli.readLine());
            }
        }
        @Override
        public void expectShowTables(List<String> tables, String user) throws Exception {
            try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, userSecurity(user))) {
                assertThat(cli.command("SHOW TABLES"), containsString("table"));
                assertEquals("---------------", cli.readLine());
                for (String table : tables) {
                    assertThat(cli.readLine(), containsString(table));
                }
                assertEquals("", cli.readLine());
            }
        }
        @Override
        public void expectUnknownIndex(String user, String sql) throws Exception {
            try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, userSecurity(user))) {
                assertThat(cli.command(sql), containsString("Bad request"));
                assertThat(cli.readLine(), containsString("Unknown index"));
            }
        }
        @Override
        public void expectForbidden(String user, String sql) throws Exception {
            /*
             * Cause the CLI to skip its connection test on startup so we
             * can get a forbidden exception when we run the query.
             */
            try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), false, userSecurity(user))) {
                assertThat(cli.command(sql), containsString("is unauthorized for user [" + user + "]"));
            }
        }
        @Override
        public void expectUnknownColumn(String user, String sql, String column) throws Exception {
            try (RemoteCli cli = new RemoteCli(elasticsearchAddress(), true, userSecurity(user))) {
                assertThat(cli.command(sql), containsString("[1;31mBad request"));
                assertThat(cli.readLine(), containsString("Unknown column [" + column + "][1;23;31m][0m"));
            }
        }
    }
    public CliSecurityIT() {
        super(new CliActions());
    }
}

View File

@ -1,27 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.cli.RemoteCli.SecurityConfig;
import org.elasticsearch.xpack.qa.sql.cli.SelectTestCase;
/**
 * Runs the shared CLI SELECT tests against a security-enabled cluster,
 * connecting as the admin user (https when SSL is enabled).
 */
public class CliSelectIT extends SelectTestCase {
    @Override
    protected Settings restClientSettings() {
        // Admin credentials (and SSL settings when enabled) for REST test setup.
        return RestSqlIT.securitySettings();
    }
    @Override
    protected String getProtocol() {
        return RestSqlIT.SSL_ENABLED ? "https" : "http";
    }
    @Override
    protected SecurityConfig securityConfig() {
        // CLI connects as the test_admin superuser.
        return CliSecurityIT.adminSecurityConfig();
    }
}

View File

@ -1,27 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.cli.RemoteCli.SecurityConfig;
import org.elasticsearch.xpack.qa.sql.cli.ShowTestCase;
/**
 * Runs the shared CLI SHOW command tests against a security-enabled cluster,
 * connecting as the admin user (https when SSL is enabled).
 */
public class CliShowIT extends ShowTestCase {
    @Override
    protected Settings restClientSettings() {
        // Admin credentials (and SSL settings when enabled) for REST test setup.
        return RestSqlIT.securitySettings();
    }
    @Override
    protected String getProtocol() {
        return RestSqlIT.SSL_ENABLED ? "https" : "http";
    }
    @Override
    protected SecurityConfig securityConfig() {
        // CLI connects as the test_admin superuser.
        return CliSecurityIT.adminSecurityConfig();
    }
}

View File

@ -1,30 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.jdbc.ConnectionTestCase;
import java.util.Properties;
/**
 * Runs the shared JDBC connection tests against a security-enabled cluster,
 * connecting as the admin user (https when SSL is enabled).
 */
public class JdbcConnectionIT extends ConnectionTestCase {
    @Override
    protected Settings restClientSettings() {
        // Admin credentials (and SSL settings when enabled) for REST test setup.
        return RestSqlIT.securitySettings();
    }
    @Override
    protected String getProtocol() {
        return RestSqlIT.SSL_ENABLED ? "https" : "http";
    }
    @Override
    protected Properties connectionProperties() {
        // Layer the admin user/password (and SSL properties) on top of the
        // base JDBC connection properties.
        Properties properties = super.connectionProperties();
        properties.putAll(JdbcSecurityIT.adminProperties());
        return properties;
    }
}

View File

@ -1,34 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.jdbc.CsvSpecTestCase;
import java.util.Properties;
/**
 * Runs the CSV spec tests over JDBC against a security-enabled cluster,
 * connecting as the admin user (https when SSL is enabled).
 */
public class JdbcCsvSpecIT extends CsvSpecTestCase {
    // Constructor signature matches the parameterized superclass.
    public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) {
        super(fileName, groupName, testName, lineNumber, testCase);
    }
    @Override
    protected Settings restClientSettings() {
        // Admin credentials (and SSL settings when enabled) for REST test setup.
        return RestSqlIT.securitySettings();
    }
    @Override
    protected String getProtocol() {
        return RestSqlIT.SSL_ENABLED ? "https" : "http";
    }
    @Override
    protected Properties connectionProperties() {
        // Layer the admin user/password (and SSL properties) on top of the
        // base JDBC connection properties.
        Properties sp = super.connectionProperties();
        sp.putAll(JdbcSecurityIT.adminProperties());
        return sp;
    }
}

View File

@ -1,30 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.jdbc.DatabaseMetaDataTestCase;
import java.util.Properties;
/**
 * Runs the shared JDBC {@code DatabaseMetaData} tests against a
 * security-enabled cluster, connecting as the admin user (https when SSL is
 * enabled).
 */
public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase {
    @Override
    protected Settings restClientSettings() {
        // Admin credentials (and SSL settings when enabled) for REST test setup.
        return RestSqlIT.securitySettings();
    }
    @Override
    protected String getProtocol() {
        return RestSqlIT.SSL_ENABLED ? "https" : "http";
    }
    @Override
    protected Properties connectionProperties() {
        // Layer the admin user/password (and SSL properties) on top of the
        // base JDBC connection properties.
        Properties properties = super.connectionProperties();
        properties.putAll(JdbcSecurityIT.adminProperties());
        return properties;
    }
}

View File

@ -1,30 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.jdbc.ErrorsTestCase;
import java.util.Properties;
/**
 * Runs the shared JDBC error-reporting tests against a security-enabled
 * cluster, connecting as the admin user (https when SSL is enabled).
 */
public class JdbcErrorsIT extends ErrorsTestCase {
    @Override
    protected Settings restClientSettings() {
        // Admin credentials (and SSL settings when enabled) for REST test setup.
        return RestSqlIT.securitySettings();
    }
    @Override
    protected String getProtocol() {
        return RestSqlIT.SSL_ENABLED ? "https" : "http";
    }
    @Override
    protected Properties connectionProperties() {
        // Layer the admin user/password (and SSL properties) on top of the
        // base JDBC connection properties.
        Properties properties = super.connectionProperties();
        properties.putAll(JdbcSecurityIT.adminProperties());
        return properties;
    }
}

View File

@ -1,30 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.jdbc.FetchSizeTestCase;
import java.util.Properties;
/**
 * Runs the shared JDBC fetch-size tests against a security-enabled cluster,
 * connecting as the admin user (https when SSL is enabled).
 */
public class JdbcFetchSizeIT extends FetchSizeTestCase {
    @Override
    protected Settings restClientSettings() {
        // Admin credentials (and SSL settings when enabled) for REST test setup.
        return RestSqlIT.securitySettings();
    }
    @Override
    protected String getProtocol() {
        return RestSqlIT.SSL_ENABLED ? "https" : "http";
    }
    @Override
    protected Properties connectionProperties() {
        // Layer the admin user/password (and SSL properties) on top of the
        // base JDBC connection properties.
        Properties properties = super.connectionProperties();
        properties.putAll(JdbcSecurityIT.adminProperties());
        return properties;
    }
}

View File

@ -1,359 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.action.admin.indices.get.GetIndexAction;
import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.xpack.qa.sql.jdbc.LocalH2;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert.assertResultSets;
import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcIntegrationTestCase.elasticsearchAddress;
import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcIntegrationTestCase.randomKnownTimeZone;
import static org.elasticsearch.xpack.qa.sql.security.RestSqlIT.SSL_ENABLED;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
public class JdbcSecurityIT extends SqlSecurityTestCase {
static Properties adminProperties() {
// tag::admin_properties
Properties properties = new Properties();
properties.put("user", "test_admin");
properties.put("password", "x-pack-test-password");
// end::admin_properties
addSslPropertiesIfNeeded(properties);
return properties;
}
static Connection es(Properties properties) throws SQLException {
Properties props = new Properties();
props.put("timezone", randomKnownTimeZone());
props.putAll(properties);
String scheme = SSL_ENABLED ? "https" : "http";
return DriverManager.getConnection("jdbc:es://" + scheme + "://" + elasticsearchAddress(), props);
}
static Properties userProperties(String user) {
if (user == null) {
return adminProperties();
}
Properties prop = new Properties();
prop.put("user", user);
prop.put("password", "testpass");
addSslPropertiesIfNeeded(prop);
return prop;
}
private static void addSslPropertiesIfNeeded(Properties properties) {
if (false == SSL_ENABLED) {
return;
}
Path keyStore;
try {
keyStore = PathUtils.get(RestSqlIT.class.getResource("/test-node.jks").toURI());
} catch (URISyntaxException e) {
throw new RuntimeException("exception while reading the store", e);
}
if (!Files.exists(keyStore)) {
throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist.");
}
String keyStoreStr = keyStore.toAbsolutePath().toString();
properties.put("ssl", "true");
properties.put("ssl.keystore.location", keyStoreStr);
properties.put("ssl.keystore.pass", "keypass");
properties.put("ssl.truststore.location", keyStoreStr);
properties.put("ssl.truststore.pass", "keypass");
}
static void expectActionMatchesAdmin(CheckedFunction<Connection, ResultSet, SQLException> adminAction,
String user, CheckedFunction<Connection, ResultSet, SQLException> userAction) throws Exception {
try (Connection adminConnection = es(adminProperties());
Connection userConnection = es(userProperties(user))) {
assertResultSets(adminAction.apply(adminConnection), userAction.apply(userConnection));
}
}
static void expectForbidden(String user, CheckedConsumer<Connection, SQLException> action) throws Exception {
expectError(user, action, "is unauthorized for user [" + user + "]");
}
static void expectUnknownIndex(String user, CheckedConsumer<Connection, SQLException> action) throws Exception {
expectError(user, action, "Unknown index");
}
static void expectError(String user, CheckedConsumer<Connection, SQLException> action, String errorMessage) throws Exception {
SQLException e;
try (Connection connection = es(userProperties(user))) {
e = expectThrows(SQLException.class, () -> action.accept(connection));
}
assertThat(e.getMessage(), containsString(errorMessage));
}
static void expectActionThrowsUnknownColumn(String user,
CheckedConsumer<Connection, SQLException> action, String column) throws Exception {
SQLException e;
try (Connection connection = es(userProperties(user))) {
e = expectThrows(SQLException.class, () -> action.accept(connection));
}
assertThat(e.getMessage(), containsString("Unknown column [" + column + "]"));
}
private static class JdbcActions implements Actions {
@Override
public void queryWorksAsAdmin() throws Exception {
try (Connection h2 = LocalH2.anonymousDb();
Connection es = es(adminProperties())) {
h2.createStatement().executeUpdate("CREATE TABLE test (a BIGINT, b BIGINT, c BIGINT)");
h2.createStatement().executeUpdate("INSERT INTO test (a, b, c) VALUES (1, 2, 3), (4, 5, 6)");
ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM test ORDER BY a");
assertResultSets(expected, es.createStatement().executeQuery("SELECT * FROM test ORDER BY a"));
}
}
@Override
public void expectMatchesAdmin(String adminSql, String user, String userSql) throws Exception {
expectActionMatchesAdmin(
con -> con.createStatement().executeQuery(adminSql),
user,
con -> con.createStatement().executeQuery(userSql));
}
@Override
public void expectScrollMatchesAdmin(String adminSql, String user, String userSql) throws Exception {
expectActionMatchesAdmin(
con -> {
Statement st = con.createStatement();
st.setFetchSize(1);
return st.executeQuery(adminSql);
},
user,
con -> {
Statement st = con.createStatement();
st.setFetchSize(1);
return st.executeQuery(userSql);
});
}
@Override
public void expectDescribe(Map<String, String> columns, String user) throws Exception {
try (Connection h2 = LocalH2.anonymousDb();
Connection es = es(userProperties(user))) {
// h2 doesn't have the same sort of DESCRIBE that we have so we emulate it
h2.createStatement().executeUpdate("CREATE TABLE mock (column VARCHAR, type VARCHAR)");
if (columns.size() > 0) {
StringBuilder insert = new StringBuilder();
insert.append("INSERT INTO mock (column, type) VALUES ");
boolean first = true;
for (Map.Entry<String, String> column : columns.entrySet()) {
if (first) {
first = false;
} else {
insert.append(", ");
}
insert.append("('").append(column.getKey()).append("', '").append(column.getValue()).append("')");
}
h2.createStatement().executeUpdate(insert.toString());
}
ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock");
assertResultSets(expected, es.createStatement().executeQuery("DESCRIBE test"));
}
}
@Override
public void expectShowTables(List<String> tables, String user) throws Exception {
try (Connection h2 = LocalH2.anonymousDb();
Connection es = es(userProperties(user))) {
// h2 doesn't spit out the same columns we do so we emulate
h2.createStatement().executeUpdate("CREATE TABLE mock (table VARCHAR)");
StringBuilder insert = new StringBuilder();
insert.append("INSERT INTO mock (table) VALUES ");
boolean first = true;
for (String table : tables) {
if (first) {
first = false;
} else {
insert.append(", ");
}
insert.append("('").append(table).append("')");
}
h2.createStatement().executeUpdate(insert.toString());
ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock ORDER BY table");
assertResultSets(expected, es.createStatement().executeQuery("SHOW TABLES"));
}
}
@Override
public void expectForbidden(String user, String sql) throws Exception {
JdbcSecurityIT.expectForbidden(user, con -> con.createStatement().executeQuery(sql));
}
@Override
public void expectUnknownIndex(String user, String sql) throws Exception {
JdbcSecurityIT.expectUnknownIndex(user, con -> con.createStatement().executeQuery(sql));
}
@Override
public void expectUnknownColumn(String user, String sql, String column) throws Exception {
expectActionThrowsUnknownColumn(
user,
con -> con.createStatement().executeQuery(sql),
column);
}
}
public JdbcSecurityIT() {
super(new JdbcActions());
}
// Metadata methods only available to JDBC
public void testMetaDataGetTablesWithFullAccess() throws Exception {
createUser("full_access", "read_all");
expectActionMatchesAdmin(
con -> con.getMetaData().getTables("%", "%", "%", null),
"full_access",
con -> con.getMetaData().getTables("%", "%", "%", null));
new AuditLogAsserter()
.expect(true, GetIndexAction.NAME, "test_admin", contains("bort", "test"))
.expect(true, GetIndexAction.NAME, "full_access", contains("bort", "test"))
.assertLogs();
}
public void testMetaDataGetTablesWithNoAccess() throws Exception {
createUser("no_access", "read_nothing");
expectForbidden("no_access", con -> con.getMetaData().getTables("%", "%", "%", null));
new AuditLogAsserter()
// TODO figure out why this generates *no* logs
// .expect(false, GetIndexAction.NAME, "no_access", contains("bort", "test"))
.assertLogs();
}
public void testMetaDataGetTablesWithLimitedAccess() throws Exception {
createUser("read_bort", "read_bort");
expectActionMatchesAdmin(
con -> con.getMetaData().getTables("%", "%", "bort", null),
"read_bort",
con -> con.getMetaData().getTables("%", "%", "%", null));
new AuditLogAsserter()
.expect(true, GetIndexAction.NAME, "test_admin", contains("bort"))
.expect(true, GetIndexAction.NAME, "read_bort", contains("bort"))
.assertLogs();
}
public void testMetaDataGetTablesWithInAccessibleIndex() throws Exception {
createUser("read_bort", "read_bort");
expectActionMatchesAdmin(
con -> con.getMetaData().getTables("%", "%", "not_created", null),
"read_bort",
con -> con.getMetaData().getTables("%", "%", "test", null));
new AuditLogAsserter()
.expect(true, GetIndexAction.NAME, "test_admin", contains("*", "-*"))
.expect(true, GetIndexAction.NAME, "read_bort", contains("*", "-*"))
.assertLogs();
}
// Full read access sees the same getColumns results as the admin.
public void testMetaDataGetColumnsWorksAsFullAccess() throws Exception {
createUser("full_access", "read_all");
expectActionMatchesAdmin(
con -> con.getMetaData().getColumns("%", "%", "%", "%"),
"full_access",
con -> con.getMetaData().getColumns("%", "%", "%", "%"));
new AuditLogAsserter()
.expect(true, GetIndexAction.NAME, "test_admin", contains("bort", "test"))
.expect(true, GetIndexAction.NAME, "full_access", contains("bort", "test"))
.assertLogs();
}
// A user with no privileges is forbidden from listing columns.
public void testMetaDataGetColumnsWithNoAccess() throws Exception {
createUser("no_access", "read_nothing");
expectForbidden("no_access", con -> con.getMetaData().getColumns("%", "%", "%", "%"));
new AuditLogAsserter()
// TODO figure out why this generates *no* logs
// .expect(false, GetIndexAction.NAME, "no_access", contains("bort", "test"))
.assertLogs();
}
// Access to a different index yields the same empty result the admin gets
// for a table that doesn't exist.
public void testMetaDataGetColumnsWithWrongAccess() throws Exception {
createUser("wrong_access", "read_something_else");
expectActionMatchesAdmin(
con -> con.getMetaData().getColumns("%", "%", "not_created", "%"),
"wrong_access",
con -> con.getMetaData().getColumns("%", "%", "test", "%"));
new AuditLogAsserter()
.expect(true, GetIndexAction.NAME, "test_admin", contains("*", "-*"))
.expect(true, GetIndexAction.NAME, "wrong_access", contains("*", "-*"))
.assertLogs();
}
// Field-level security: a user granted only column "a" sees just that column
// for "%", matching the admin asking for "a" explicitly.
public void testMetaDataGetColumnsSingleFieldGranted() throws Exception {
createUser("only_a", "read_test_a");
expectActionMatchesAdmin(
con -> con.getMetaData().getColumns("%", "%", "test", "a"),
"only_a",
con -> con.getMetaData().getColumns("%", "%", "test", "%"));
new AuditLogAsserter()
.expect(true, GetIndexAction.NAME, "test_admin", contains("test"))
.expect(true, GetIndexAction.NAME, "only_a", contains("test"))
.assertLogs();
}
// Field-level security with an exception list: user "not_c" should see
// exactly columns "a" and "b" (column name is result-set column 4).
public void testMetaDataGetColumnsSingleFieldExcepted() throws Exception {
createUser("not_c", "read_test_a_and_b");
/* Since there is no easy way to get a result from the admin side with
 * both 'a' and 'b' we'll have to roll our own assertion here, but we
 * are intentionally much less restrictive than the tests elsewhere. */
try (Connection con = es(userProperties("not_c"))) {
ResultSet result = con.getMetaData().getColumns("%", "%", "test", "%");
assertTrue(result.next());
String columnName = result.getString(4);
assertEquals("a", columnName);
assertTrue(result.next());
columnName = result.getString(4);
assertEquals("b", columnName);
assertFalse(result.next());
}
new AuditLogAsserter()
.expect(true, GetIndexAction.NAME, "not_c", contains("test"))
.assertLogs();
}
// Document-level security doesn't hide columns: excluding documents with c=3
// still returns the full column list, same as the admin.
public void testMetaDataGetColumnsDocumentExcluded() throws Exception {
createUser("no_3s", "read_test_without_c_3");
expectActionMatchesAdmin(
con -> con.getMetaData().getColumns("%", "%", "test", "%"),
"no_3s",
con -> con.getMetaData().getColumns("%", "%", "test", "%"));
new AuditLogAsserter()
.expect(true, GetIndexAction.NAME, "test_admin", contains("test"))
.expect(true, GetIndexAction.NAME, "no_3s", contains("test"))
.assertLogs();
}
}

View File

@ -1,30 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.jdbc.ShowTablesTestCase;
import java.util.Properties;
/**
 * Runs the shared SHOW TABLES JDBC tests against a security-enabled cluster,
 * connecting with the admin credentials.
 */
public class JdbcShowTablesIT extends ShowTablesTestCase {
    @Override
    protected Settings restClientSettings() {
        // Reuse the admin credentials (and optional SSL config) from RestSqlIT.
        return RestSqlIT.securitySettings();
    }

    @Override
    protected String getProtocol() {
        if (RestSqlIT.SSL_ENABLED) {
            return "https";
        }
        return "http";
    }

    @Override
    protected Properties connectionProperties() {
        // Layer the admin credentials on top of the default driver properties.
        Properties properties = super.connectionProperties();
        properties.putAll(JdbcSecurityIT.adminProperties());
        return properties;
    }
}

View File

@ -1,30 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.jdbc.SimpleExampleTestCase;
import java.util.Properties;
/**
 * Runs the shared "simple example" JDBC tests against a security-enabled
 * cluster, connecting with the admin credentials.
 */
public class JdbcSimpleExampleIT extends SimpleExampleTestCase {
    @Override
    protected Settings restClientSettings() {
        // Reuse the admin credentials (and optional SSL config) from RestSqlIT.
        return RestSqlIT.securitySettings();
    }

    @Override
    protected String getProtocol() {
        if (RestSqlIT.SSL_ENABLED) {
            return "https";
        }
        return "http";
    }

    @Override
    protected Properties connectionProperties() {
        // Layer the admin credentials on top of the default driver properties.
        Properties props = super.connectionProperties();
        props.putAll(JdbcSecurityIT.adminProperties());
        return props;
    }
}

View File

@ -1,34 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase;
import java.util.Properties;
/**
 * Runs the parameterized SQL spec tests over JDBC against a security-enabled
 * cluster, connecting with the admin credentials.
 */
public class JdbcSqlSpecIT extends SqlSpecTestCase {
    public JdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) {
        super(fileName, groupName, testName, lineNumber, query);
    }

    @Override
    protected Settings restClientSettings() {
        // Reuse the admin credentials (and optional SSL config) from RestSqlIT.
        return RestSqlIT.securitySettings();
    }

    @Override
    protected String getProtocol() {
        if (RestSqlIT.SSL_ENABLED) {
            return "https";
        }
        return "http";
    }

    @Override
    protected Properties connectionProperties() {
        // Layer the admin credentials on top of the default driver properties.
        Properties props = super.connectionProperties();
        props.putAll(JdbcSecurityIT.adminProperties());
        return props;
    }
}

View File

@ -1,58 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.security;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase;
import static org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
/**
* Integration test for the rest sql action. The one that speaks json directly to a
* user rather than to the JDBC driver or CLI.
*/
public class RestSqlIT extends RestSqlTestCase {
    /** True when the test cluster was started with SSL, per the gradle-set system property. */
    static final boolean SSL_ENABLED = Booleans.parseBoolean(System.getProperty("tests.ssl.enabled"));

    /**
     * Client settings for talking to the secured cluster as {@code test_admin},
     * including the truststore when the tests run over SSL.
     */
    static Settings securitySettings() {
        String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray()));
        Settings.Builder builder = Settings.builder()
                .put(ThreadContext.PREFIX + ".Authorization", token);
        if (SSL_ENABLED) {
            Path keyStore;
            try {
                keyStore = PathUtils.get(RestSqlIT.class.getResource("/test-node.jks").toURI());
            } catch (URISyntaxException e) {
                throw new RuntimeException("exception while reading the store", e);
            }
            // "false ==" instead of "!" for consistency with the rest of this test suite.
            if (false == Files.exists(keyStore)) {
                throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist.");
            }
            builder.put(ESRestTestCase.TRUSTSTORE_PATH, keyStore)
                    .put(ESRestTestCase.TRUSTSTORE_PASSWORD, "keypass");
        }
        return builder.build();
    }

    @Override
    protected Settings restClientSettings() {
        return securitySettings();
    }

    @Override
    protected String getProtocol() {
        // No need to qualify SSL_ENABLED with the class name inside the class itself.
        return SSL_ENABLED ? "https" : "http";
    }
}

View File

@ -1,192 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.security;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.test.NotEqualMessageBuilder;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.columnInfo;
import static java.util.Collections.singletonList;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
public class RestSqlSecurityIT extends SqlSecurityTestCase {
    /**
     * {@link Actions} implementation that speaks to the REST SQL endpoint,
     * optionally impersonating a less privileged user via the
     * {@code es-security-runas-user} header (a {@code null} user means admin).
     */
    private static class RestActions implements Actions {
        @Override
        public void queryWorksAsAdmin() throws Exception {
            Map<String, Object> expected = new HashMap<>();
            expected.put("columns", Arrays.asList(
                    columnInfo("a", "long"),
                    columnInfo("b", "long"),
                    columnInfo("c", "long")));
            expected.put("rows", Arrays.asList(
                    Arrays.asList(1, 2, 3),
                    Arrays.asList(4, 5, 6)));
            expected.put("size", 2);
            assertResponse(expected, runSql(null, "SELECT * FROM test ORDER BY a"));
        }

        @Override
        public void expectMatchesAdmin(String adminSql, String user, String userSql) throws Exception {
            assertResponse(runSql(null, adminSql), runSql(user, userSql));
        }

        @Override
        public void expectScrollMatchesAdmin(String adminSql, String user, String userSql) throws Exception {
            Map<String, Object> adminResponse = runSql(null,
                    new StringEntity("{\"query\": \"" + adminSql + "\", \"fetch_size\": 1}", ContentType.APPLICATION_JSON));
            /* Fixed: the user-side request must run userSql. It previously ran
             * adminSql for both sides, so the user's query was never exercised. */
            Map<String, Object> otherResponse = runSql(user,
                    new StringEntity("{\"query\": \"" + userSql + "\", \"fetch_size\": 1}", ContentType.APPLICATION_JSON));

            String adminCursor = (String) adminResponse.remove("cursor");
            String otherCursor = (String) otherResponse.remove("cursor");
            assertNotNull(adminCursor);
            assertNotNull(otherCursor);
            assertResponse(adminResponse, otherResponse);
            // Scroll both cursors page by page until the admin cursor is exhausted;
            // the user's cursor must run out at the same time.
            while (true) {
                adminResponse = runSql(null, new StringEntity("{\"cursor\": \"" + adminCursor + "\"}", ContentType.APPLICATION_JSON));
                otherResponse = runSql(user, new StringEntity("{\"cursor\": \"" + otherCursor + "\"}", ContentType.APPLICATION_JSON));
                adminCursor = (String) adminResponse.remove("cursor");
                otherCursor = (String) otherResponse.remove("cursor");
                assertResponse(adminResponse, otherResponse);
                if (adminCursor == null) {
                    assertNull(otherCursor);
                    return;
                }
                assertNotNull(otherCursor);
            }
        }

        @Override
        public void expectDescribe(Map<String, String> columns, String user) throws Exception {
            Map<String, Object> expected = new HashMap<>(3);
            expected.put("columns", Arrays.asList(
                    columnInfo("column", "keyword"),
                    columnInfo("type", "keyword")));
            List<List<String>> rows = new ArrayList<>(columns.size());
            for (Map.Entry<String, String> column : columns.entrySet()) {
                rows.add(Arrays.asList(column.getKey(), column.getValue()));
            }
            expected.put("rows", rows);
            expected.put("size", columns.size());
            assertResponse(expected, runSql(user, "DESCRIBE test"));
        }

        @Override
        public void expectShowTables(List<String> tables, String user) throws Exception {
            Map<String, Object> expected = new HashMap<>();
            expected.put("columns", singletonList(columnInfo("table", "keyword")));
            List<List<String>> rows = new ArrayList<>();
            for (String table : tables) {
                rows.add(singletonList(table));
            }
            expected.put("rows", rows);
            expected.put("size", tables.size());
            assertResponse(expected, runSql(user, "SHOW TABLES"));
        }

        @Override
        public void expectForbidden(String user, String sql) {
            ResponseException e = expectThrows(ResponseException.class, () -> runSql(user, sql));
            assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403));
            assertThat(e.getMessage(), containsString("unauthorized"));
        }

        @Override
        public void expectUnknownIndex(String user, String sql) {
            ResponseException e = expectThrows(ResponseException.class, () -> runSql(user, sql));
            assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400));
            assertThat(e.getMessage(), containsString("Unknown index"));
        }

        @Override
        public void expectUnknownColumn(String user, String sql, String column) throws Exception {
            ResponseException e = expectThrows(ResponseException.class, () -> runSql(user, sql));
            assertThat(e.getMessage(), containsString("Unknown column [" + column + "]"));
        }

        private static Map<String, Object> runSql(@Nullable String asUser, String sql) throws IOException {
            return runSql(asUser, new StringEntity("{\"query\": \"" + sql + "\"}", ContentType.APPLICATION_JSON));
        }

        private static Map<String, Object> runSql(@Nullable String asUser, HttpEntity entity) throws IOException {
            Header[] headers = asUser == null ? new Header[0] : new Header[] {new BasicHeader("es-security-runas-user", asUser)};
            Response response = client().performRequest("POST", "/_xpack/sql", singletonMap("format", "json"), entity, headers);
            return toMap(response);
        }

        private static void assertResponse(Map<String, Object> expected, Map<String, Object> actual) {
            if (false == expected.equals(actual)) {
                NotEqualMessageBuilder message = new NotEqualMessageBuilder();
                message.compareMaps(actual, expected);
                fail("Response does not match:\n" + message.toString());
            }
        }

        private static Map<String, Object> toMap(Response response) throws IOException {
            try (InputStream content = response.getEntity().getContent()) {
                return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
            }
        }
    }

    public RestSqlSecurityIT() {
        super(new RestActions());
    }

    /**
     * Test the hijacking a scroll fails. This test is only implemented for
     * REST because it is the only API where it is simple to hijack a scroll.
     * It should exercise the same code as the other APIs but if we were truly
     * paranoid we'd hack together something to test the others as well.
     */
    public void testHijackScrollFails() throws Exception {
        createUser("full_access", "read_all");

        Map<String, Object> adminResponse = RestActions.runSql(null,
                new StringEntity("{\"query\": \"SELECT * FROM test\", \"fetch_size\": 1}", ContentType.APPLICATION_JSON));
        String cursor = (String) adminResponse.remove("cursor");
        assertNotNull(cursor);

        ResponseException e = expectThrows(ResponseException.class, () ->
                RestActions.runSql("full_access", new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON)));
        // TODO return a better error message for bad scrolls
        assertThat(e.getMessage(), containsString("No search context found for id"));
        assertEquals(404, e.getResponse().getStatusLine().getStatusCode());

        new AuditLogAsserter()
                .expectSqlCompositeAction("test_admin", "test")
                .expect(true, SQL_ACTION_NAME, "full_access", empty())
                // One scroll access denied per shard
                .expect(false, SQL_ACTION_NAME, "full_access", empty(), "InternalScrollSearchRequest")
                .expect(false, SQL_ACTION_NAME, "full_access", empty(), "InternalScrollSearchRequest")
                .expect(false, SQL_ACTION_NAME, "full_access", empty(), "InternalScrollSearchRequest")
                .expect(false, SQL_ACTION_NAME, "full_access", empty(), "InternalScrollSearchRequest")
                .expect(false, SQL_ACTION_NAME, "full_access", empty(), "InternalScrollSearchRequest")
                .assertLogs();
    }
}

View File

@ -1,619 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.security;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.lucene.util.SuppressForbidden;
import org.elasticsearch.SpecialPermission;
import org.elasticsearch.action.admin.indices.get.GetIndexAction;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.hamcrest.Matcher;
import org.junit.AfterClass;
import org.junit.Before;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.regex.Pattern;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.hasItems;
public abstract class SqlSecurityTestCase extends ESRestTestCase {
/**
* Actions taken by this test.
* <p>
* For methods that take {@code user} a {@code null} user means "use the admin".
*/
protected interface Actions {
/** Run a canonical query as the administrator and assert the expected results. */
void queryWorksAsAdmin() throws Exception;
/**
 * Assert that running some sql as a user returns the same result as running it as
 * the administrator.
 */
void expectMatchesAdmin(String adminSql, String user, String userSql) throws Exception;
/**
 * Same as {@link #expectMatchesAdmin(String, String, String)} but sets the scroll size
 * to 1 and completely scrolls the results.
 */
void expectScrollMatchesAdmin(String adminSql, String user, String userSql) throws Exception;
/** Assert that DESCRIBE run by {@code user} returns exactly {@code columns} (name to type). */
void expectDescribe(Map<String, String> columns, String user) throws Exception;
/** Assert that SHOW TABLES run by {@code user} returns exactly {@code tables}. */
void expectShowTables(List<String> tables, String user) throws Exception;
/** Assert that running {@code sql} as {@code user} is rejected as forbidden. */
void expectForbidden(String user, String sql) throws Exception;
/** Assert that running {@code sql} as {@code user} fails with an unknown index error. */
void expectUnknownIndex(String user, String sql) throws Exception;
/** Assert that running {@code sql} as {@code user} fails with an unknown column error naming {@code column}. */
void expectUnknownColumn(String user, String sql, String column) throws Exception;
}
protected static final String SQL_ACTION_NAME = "indices:data/read/sql";
/**
* Location of the audit log file. We could technically figure this out by reading the admin
* APIs but it isn't worth doing because we also have to give ourselves permission to read
* the file and that must be done by setting a system property and reading it in
* {@code plugin-security.policy}. So we may as well have gradle set the property.
*/
private static final Path AUDIT_LOG_FILE = lookupAuditLog();
@SuppressForbidden(reason="security doesn't work with mock filesystem")
private static Path lookupAuditLog() {
    // Gradle passes the audit log location down as a system property.
    String auditLogFileString = System.getProperty("tests.audit.logfile");
    if (auditLogFileString == null) {
        throw new IllegalStateException("tests.audit.logfile must be set to run this test. It is automatically "
            + "set by gradle. If you must set it yourself then it should be the absolute path to the audit "
            + "log file generated by running x-pack with audit logging enabled.");
    }
    return Paths.get(auditLogFileString);
}
private static boolean oneTimeSetup = false;
private static boolean auditFailure = false;
/**
* The actions taken by this test.
*/
private final Actions actions;
/**
* How much of the audit log was written before the test started.
*/
private long auditLogWrittenBeforeTestStart;
// Subclasses supply the protocol-specific (REST/JDBC/CLI) implementation of the actions.
public SqlSecurityTestCase(Actions actions) {
this.actions = actions;
}
/**
* All tests run as a an administrative user but use
* <code>es-security-runas-user</code> to become a less privileged user when needed.
*/
@Override
protected Settings restClientSettings() {
// Admin credentials plus optional SSL config shared with RestSqlIT.
return RestSqlIT.securitySettings();
}
@Override
protected boolean preserveIndicesUponCompletion() {
/* We can't wipe the cluster between tests because that nukes the audit
 * trail index which makes the auditing flaky. Instead we wipe all
 * indices after the entire class is finished. */
return true;
}
@Before
public void oneTimeSetup() throws Exception {
    if (oneTimeSetup) {
        /* Since we don't wipe the cluster between tests we only need to
         * write the test data once. */
        return;
    }
    StringBuilder bulk = new StringBuilder();
    /* Each bulk action line must be a complete JSON object followed by the
     * document on its own line. The action lines were previously missing
     * the closing brace of the outer object. */
    bulk.append("{\"index\":{\"_index\": \"test\", \"_type\": \"doc\", \"_id\":\"1\"}}\n");
    bulk.append("{\"a\": 1, \"b\": 2, \"c\": 3}\n");
    bulk.append("{\"index\":{\"_index\": \"test\", \"_type\": \"doc\", \"_id\":\"2\"}}\n");
    bulk.append("{\"a\": 4, \"b\": 5, \"c\": 6}\n");
    bulk.append("{\"index\":{\"_index\": \"bort\", \"_type\": \"doc\", \"_id\":\"1\"}}\n");
    bulk.append("{\"a\": \"test\"}\n");
    client().performRequest("PUT", "/_bulk", singletonMap("refresh", "true"),
            new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
    oneTimeSetup = true;
}
// Records how far the audit log had been written before the test so that
// assertions only look at log lines this test produced.
@Before
public void setInitialAuditLogOffset() {
SecurityManager sm = System.getSecurityManager();
if (sm != null) {
sm.checkPermission(new SpecialPermission());
}
// doPrivileged because only this test code, not its callers, is granted
// read access to the audit log file by the test security policy.
AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
if (false == Files.exists(AUDIT_LOG_FILE)) {
// Nothing written yet; start reading from the beginning.
auditLogWrittenBeforeTestStart = 0;
return null;
}
if (false == Files.isRegularFile(AUDIT_LOG_FILE)) {
throw new IllegalStateException("expected tests.audit.logfile [" + AUDIT_LOG_FILE + "]to be a plain file but wasn't");
}
try {
auditLogWrittenBeforeTestStart = Files.size(AUDIT_LOG_FILE);
} catch (IOException e) {
throw new RuntimeException(e);
}
return null;
});
}
// Deferred cleanup (see preserveIndicesUponCompletion): wipe all indices once
// the whole class is done and reset the static one-time-setup flags.
@AfterClass
public static void wipeIndicesAfterTests() throws IOException {
try {
adminClient().performRequest("DELETE", "*");
} catch (ResponseException e) {
// 404 here just means we had no indexes
if (e.getResponse().getStatusLine().getStatusCode() != 404) {
throw e;
}
} finally {
// Clear the static state so other subclasses can reuse it later
oneTimeSetup = false;
auditFailure = false;
}
}
@Override
protected String getProtocol() {
    // Talk https when the test cluster was started with SSL enabled.
    if (RestSqlIT.SSL_ENABLED) {
        return "https";
    }
    return "http";
}
// Baseline: the admin's query succeeds and produces the composite sql+get-index audit entries.
public void testQueryWorksAsAdmin() throws Exception {
actions.queryWorksAsAdmin();
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "test")
.assertLogs();
}
// Full read access returns the same results as the admin; both queries are audited.
public void testQueryWithFullAccess() throws Exception {
createUser("full_access", "read_all");
actions.expectMatchesAdmin("SELECT * FROM test ORDER BY a", "full_access", "SELECT * FROM test ORDER BY a");
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "test")
.expectSqlCompositeAction("full_access", "test")
.assertLogs();
}
// Scrolling with full access matches the admin; each scroll page adds one sql audit entry.
public void testScrollWithFullAccess() throws Exception {
createUser("full_access", "read_all");
actions.expectScrollMatchesAdmin("SELECT * FROM test ORDER BY a", "full_access", "SELECT * FROM test ORDER BY a");
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "test")
/* Scrolling doesn't have to access the index again, at least not through sql.
 * If we asserted query and scroll logs then we would see the scroll. */
.expect(true, SQL_ACTION_NAME, "test_admin", empty())
.expect(true, SQL_ACTION_NAME, "test_admin", empty())
.expectSqlCompositeAction("full_access", "test")
.expect(true, SQL_ACTION_NAME, "full_access", empty())
.expect(true, SQL_ACTION_NAME, "full_access", empty())
.assertLogs();
}
// No privileges at all: the query is forbidden and audited as access_denied.
public void testQueryNoAccess() throws Exception {
createUser("no_access", "read_nothing");
actions.expectForbidden("no_access", "SELECT * FROM test");
new AuditLogAsserter()
.expect(false, SQL_ACTION_NAME, "no_access", empty())
.assertLogs();
}
// Access to a different index: the query resolves no indices and fails with unknown index.
public void testQueryWrongAccess() throws Exception {
createUser("wrong_access", "read_something_else");
actions.expectUnknownIndex("wrong_access", "SELECT * FROM test");
new AuditLogAsserter()
//This user has permission to run sql queries so they are given preliminary authorization
.expect(true, SQL_ACTION_NAME, "wrong_access", empty())
//the following get index is granted too but against the no indices placeholder, as ignore_unavailable=true
.expect(true, GetIndexAction.NAME, "wrong_access", hasItems("*", "-*"))
.assertLogs();
}
// Field-level security: "SELECT *" for a user granted only "a" equals the admin's "SELECT a".
public void testQuerySingleFieldGranted() throws Exception {
createUser("only_a", "read_test_a");
actions.expectMatchesAdmin("SELECT a FROM test ORDER BY a", "only_a", "SELECT * FROM test ORDER BY a");
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "test")
.expectSqlCompositeAction("only_a", "test")
.assertLogs();
}
// Same as testQuerySingleFieldGranted but scrolled one row at a time.
public void testScrollWithSingleFieldGranted() throws Exception {
createUser("only_a", "read_test_a");
actions.expectScrollMatchesAdmin("SELECT a FROM test ORDER BY a", "only_a", "SELECT * FROM test ORDER BY a");
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "test")
/* Scrolling doesn't have to access the index again, at least not through sql.
 * If we asserted query and scroll logs then we would see the scroll. */
.expect(true, SQL_ACTION_NAME, "test_admin", empty())
.expect(true, SQL_ACTION_NAME, "test_admin", empty())
.expectSqlCompositeAction("only_a", "test")
.expect(true, SQL_ACTION_NAME, "only_a", empty())
.expect(true, SQL_ACTION_NAME, "only_a", empty())
.assertLogs();
}
// Explicitly selecting a field hidden by field-level security fails as an unknown column.
public void testQueryStringSingeFieldGrantedWrongRequested() throws Exception {
createUser("only_a", "read_test_a");
actions.expectUnknownColumn("only_a", "SELECT c FROM test", "c");
/* The user has permission to query the index but one of the
 * columns that they explicitly mention is hidden from them
 * by field level access control. This *looks* like a successful
 * query from the audit side because all the permissions checked
 * out but it failed in SQL because it couldn't compile the
 * query without the metadata for the missing field. */
new AuditLogAsserter()
.expectSqlCompositeAction("only_a", "test")
.assertLogs();
}
// Field-level security with an exception list: "SELECT *" equals the admin's "SELECT a, b".
public void testQuerySingleFieldExcepted() throws Exception {
createUser("not_c", "read_test_a_and_b");
actions.expectMatchesAdmin("SELECT a, b FROM test ORDER BY a", "not_c", "SELECT * FROM test ORDER BY a");
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "test")
.expectSqlCompositeAction("not_c", "test")
.assertLogs();
}
// Same as testQuerySingleFieldExcepted but scrolled one row at a time.
public void testScrollWithSingleFieldExcepted() throws Exception {
createUser("not_c", "read_test_a_and_b");
actions.expectScrollMatchesAdmin("SELECT a, b FROM test ORDER BY a", "not_c", "SELECT * FROM test ORDER BY a");
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "test")
/* Scrolling doesn't have to access the index again, at least not through sql.
 * If we asserted query and scroll logs then we would see the scroll. */
.expect(true, SQL_ACTION_NAME, "test_admin", empty())
.expect(true, SQL_ACTION_NAME, "test_admin", empty())
.expectSqlCompositeAction("not_c", "test")
.expect(true, SQL_ACTION_NAME, "not_c", empty())
.expect(true, SQL_ACTION_NAME, "not_c", empty())
.assertLogs();
}
// Selecting the excepted field "c" fails as an unknown column for this user.
public void testQuerySingleFieldExceptionedWrongRequested() throws Exception {
createUser("not_c", "read_test_a_and_b");
actions.expectUnknownColumn("not_c", "SELECT c FROM test", "c");
/* The user has permission to query the index but one of the
 * columns that they explicitly mention is hidden from them
 * by field level access control. This *looks* like a successful
 * query from the audit side because all the permissions checked
 * out but it failed in SQL because it couldn't compile the
 * query without the metadata for the missing field. */
new AuditLogAsserter()
.expectSqlCompositeAction("not_c", "test")
.assertLogs();
}
// Document-level security: the user's "SELECT *" equals the admin's query filtering out c=3.
public void testQueryDocumentExcluded() throws Exception {
createUser("no_3s", "read_test_without_c_3");
actions.expectMatchesAdmin("SELECT * FROM test WHERE c != 3 ORDER BY a", "no_3s", "SELECT * FROM test ORDER BY a");
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "test")
.expectSqlCompositeAction("no_3s", "test")
.assertLogs();
}
// The admin sees both test indices in SHOW TABLES.
public void testShowTablesWorksAsAdmin() throws Exception {
actions.expectShowTables(Arrays.asList("bort", "test"), null);
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "bort", "test")
.assertLogs();
}
// Full read access sees the same SHOW TABLES output as the admin.
public void testShowTablesWorksAsFullAccess() throws Exception {
createUser("full_access", "read_all");
actions.expectMatchesAdmin("SHOW TABLES", "full_access", "SHOW TABLES");
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "bort", "test")
.expectSqlCompositeAction("full_access", "bort", "test")
.assertLogs();
}
// No privileges: SHOW TABLES is forbidden and audited as access_denied.
public void testShowTablesWithNoAccess() throws Exception {
createUser("no_access", "read_nothing");
actions.expectForbidden("no_access", "SHOW TABLES");
new AuditLogAsserter()
.expect(false, SQL_ACTION_NAME, "no_access", empty())
.assertLogs();
}
// A user restricted to "bort" sees only it, matching the admin's LIKE 'bort'.
public void testShowTablesWithLimitedAccess() throws Exception {
createUser("read_bort", "read_bort");
actions.expectMatchesAdmin("SHOW TABLES LIKE 'bort'", "read_bort", "SHOW TABLES");
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "bort")
.expectSqlCompositeAction("read_bort", "bort")
.assertLogs();
}
// SHOW TABLES for an inaccessible index resolves to the "no indices" placeholder,
// same as asking for an index that doesn't exist.
public void testShowTablesWithLimitedAccessUnaccessableIndex() throws Exception {
createUser("read_bort", "read_bort");
actions.expectMatchesAdmin("SHOW TABLES LIKE 'not_created'", "read_bort", "SHOW TABLES LIKE 'test'");
new AuditLogAsserter()
.expect(true, SQL_ACTION_NAME, "test_admin", empty())
.expect(true, GetIndexAction.NAME, "test_admin", contains("*", "-*"))
.expect(true, SQL_ACTION_NAME, "read_bort", empty())
.expect(true, GetIndexAction.NAME, "read_bort", contains("*", "-*"))
.assertLogs();
}
// The admin's DESCRIBE returns all three BIGINT columns.
public void testDescribeWorksAsAdmin() throws Exception {
Map<String, String> expected = new TreeMap<>();
expected.put("a", "BIGINT");
expected.put("b", "BIGINT");
expected.put("c", "BIGINT");
actions.expectDescribe(expected, null);
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "test")
.assertLogs();
}
// Full read access sees the same DESCRIBE output as the admin.
public void testDescribeWorksAsFullAccess() throws Exception {
createUser("full_access", "read_all");
actions.expectMatchesAdmin("DESCRIBE test", "full_access", "DESCRIBE test");
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "test")
.expectSqlCompositeAction("full_access", "test")
.assertLogs();
}
// No privileges: DESCRIBE is forbidden and audited as access_denied.
public void testDescribeWithNoAccess() throws Exception {
createUser("no_access", "read_nothing");
actions.expectForbidden("no_access", "DESCRIBE test");
new AuditLogAsserter()
.expect(false, SQL_ACTION_NAME, "no_access", empty())
.assertLogs();
}
// Access to a different index: DESCRIBE returns no columns instead of failing.
public void testDescribeWithWrongAccess() throws Exception {
createUser("wrong_access", "read_something_else");
actions.expectDescribe(Collections.emptyMap(), "wrong_access");
new AuditLogAsserter()
//This user has permission to run sql queries so they are given preliminary authorization
.expect(true, SQL_ACTION_NAME, "wrong_access", empty())
//the following get index is granted too but against the no indices placeholder, as ignore_unavailable=true
.expect(true, GetIndexAction.NAME, "wrong_access", hasItems("*", "-*"))
.assertLogs();
}
// Field-level security: DESCRIBE shows only the single granted column.
public void testDescribeSingleFieldGranted() throws Exception {
createUser("only_a", "read_test_a");
actions.expectDescribe(singletonMap("a", "BIGINT"), "only_a");
new AuditLogAsserter()
.expectSqlCompositeAction("only_a", "test")
.assertLogs();
}
// Field-level security with an exception list: DESCRIBE hides the excepted column "c".
public void testDescribeSingleFieldExcepted() throws Exception {
createUser("not_c", "read_test_a_and_b");
Map<String, String> expected = new TreeMap<>();
expected.put("a", "BIGINT");
expected.put("b", "BIGINT");
actions.expectDescribe(expected, "not_c");
new AuditLogAsserter()
.expectSqlCompositeAction("not_c", "test")
.assertLogs();
}
// Document-level security doesn't hide columns: DESCRIBE matches the admin's output.
public void testDescribeDocumentExcluded() throws Exception {
createUser("no_3s", "read_test_without_c_3");
actions.expectMatchesAdmin("DESCRIBE test", "no_3s", "DESCRIBE test");
new AuditLogAsserter()
.expectSqlCompositeAction("test_admin", "test")
.expectSqlCompositeAction("no_3s", "test")
.assertLogs();
}
/**
 * Creates (or overwrites) a test user with the given role via the
 * security REST API. The password is fixed to {@code testpass}.
 */
protected static void createUser(String name, String role) throws IOException {
    XContentBuilder body = JsonXContent.contentBuilder().prettyPrint().startObject();
    body.field("password", "testpass");
    body.field("roles", role);
    body.endObject();
    StringEntity entity = new StringEntity(body.string(), ContentType.APPLICATION_JSON);
    client().performRequest("PUT", "/_xpack/security/user/" + name, emptyMap(), entity);
}
/**
 * Used to assert audit logs. Logs are asserted to match in any order because
 * we don't always scroll in the same order but each log checker must match a
 * single log and all logs must be matched.
 */
protected final class AuditLogAsserter {
    // One predicate per expected audit entry; assertLogs pairs each checker
    // with exactly one parsed log line and requires every line to be consumed.
    private final List<Function<Map<String, Object>, Boolean>> logCheckers = new ArrayList<>();

    /**
     * A SQL request fans out into the SQL action itself plus a GetIndex call,
     * so expect both granted entries for the given user and indices.
     */
    public AuditLogAsserter expectSqlCompositeAction(String user, String... indices) {
        expect(true, SQL_ACTION_NAME, user, empty());
        expect(true, GetIndexAction.NAME, user, hasItems(indices));
        return this;
    }

    /**
     * Expects an audit entry, deriving the logged request name from the action.
     * Only the SQL and GetIndex actions are supported here.
     */
    public AuditLogAsserter expect(boolean granted, String action, String principal,
            Matcher<? extends Iterable<? extends String>> indicesMatcher) {
        String request;
        switch (action) {
        case SQL_ACTION_NAME:
            request = "SqlRequest";
            break;
        case GetIndexAction.NAME:
            request = GetIndexRequest.class.getSimpleName();
            break;
        default:
            throw new IllegalArgumentException("Unknown action [" + action + "]");
        }
        return expect(granted, action, principal, indicesMatcher, request);
    }

    /**
     * Registers a checker matching one audit entry: granted/denied event type,
     * action, principal, indices and request name must all line up.
     */
    public AuditLogAsserter expect(boolean granted, String action, String principal,
            Matcher<? extends Iterable<? extends String>> indicesMatcher, String request) {
        String eventType = granted ? "access_granted" : "access_denied";
        logCheckers.add(m -> eventType.equals(m.get("event_type"))
            && action.equals(m.get("action"))
            && principal.equals(m.get("principal"))
            && indicesMatcher.matches(m.get("indices"))
            && request.equals(m.get("request"))
        );
        return this;
    }

    /**
     * Reads the audit log file written since the start of the test, parses the
     * relevant lines, and asserts that the registered checkers and the parsed
     * log entries match one-to-one (in any order).
     */
    public void assertLogs() throws Exception {
        assertFalse("Previous test had an audit-related failure. All subsequent audit related assertions are bogus because we can't "
            + "guarantee that we fully cleaned up after the last test.", auditFailure);
        try {
            assertBusy(() -> {
                SecurityManager sm = System.getSecurityManager();
                if (sm != null) {
                    sm.checkPermission(new SpecialPermission());
                }
                // Reading the audit file needs elevated privileges under the security manager.
                BufferedReader logReader = AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> {
                    try {
                        return Files.newBufferedReader(AUDIT_LOG_FILE, StandardCharsets.UTF_8);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                });
                // Skip whatever was already in the file before this test started.
                logReader.skip(auditLogWrittenBeforeTestStart);
                List<Map<String, Object>> logs = new ArrayList<>();
                String line;
                // Template pattern: PART becomes a bracketed capture group and
                // spaces become flexible whitespace, yielding the audit-line regex.
                Pattern logPattern = Pattern.compile(
                    ("PART PART PART origin_type=PART, origin_address=PART, "
                        + "principal=PART, (?:run_as_principal=PART, )?(?:run_by_principal=PART, )?"
                        + "roles=PART, action=\\[(.*?)\\], (?:indices=PART, )?request=PART")
                        .replace(" ", "\\s+").replace("PART", "\\[([^\\]]*)\\]"));
                // fail(logPattern.toString());
                while ((line = logReader.readLine()) != null) {
                    java.util.regex.Matcher m = logPattern.matcher(line);
                    if (false == m.matches()) {
                        throw new IllegalArgumentException("Unrecognized log: " + line);
                    }
                    int i = 1;
                    Map<String, Object> log = new HashMap<>();
                    /* We *could* parse the date but leaving it in the original format makes it
                     * easier to find the lines in the file that this log comes from. */
                    log.put("time", m.group(i++));
                    log.put("origin", m.group(i++));
                    String eventType = m.group(i++);
                    // Only grant/deny decisions matter for these assertions.
                    if (false == ("access_denied".equals(eventType) || "access_granted".equals(eventType))) {
                        continue;
                    }
                    log.put("event_type", eventType);
                    log.put("origin_type", m.group(i++));
                    log.put("origin_address", m.group(i++));
                    String principal = m.group(i++);
                    log.put("principal", principal);
                    log.put("run_as_principal", m.group(i++));
                    log.put("run_by_principal", m.group(i++));
                    log.put("roles", m.group(i++));
                    String action = m.group(i++);
                    if (false == (SQL_ACTION_NAME.equals(action) || GetIndexAction.NAME.equals(action))) {
                        //TODO we may want to extend this and the assertions to SearchAction.NAME as well
                        continue;
                    }
                    log.put("action", action);
                    // Use a sorted list for indices for consistent error reporting
                    List<String> indices = new ArrayList<>(Strings.tokenizeByCommaToSet(m.group(i++)));
                    Collections.sort(indices);
                    if ("test_admin".equals(principal)) {
                        /* Sometimes we accidentally sneak access to the security tables. This is fine, SQL
                         * drops them from the interface. So we might have access to them, but we don't show
                         * them. */
                        indices.remove(".security");
                        indices.remove(".security-6");
                    }
                    log.put("indices", indices);
                    log.put("request", m.group(i));
                    logs.add(log);
                }
                List<Map<String, Object>> allLogs = new ArrayList<>(logs);
                List<Integer> notMatching = new ArrayList<>();
                // Greedy one-to-one matching: each checker consumes the first log it matches.
                checker: for (int c = 0; c < logCheckers.size(); c++) {
                    Function<Map<String, Object>, Boolean> logChecker = logCheckers.get(c);
                    for (Iterator<Map<String, Object>> logsItr = logs.iterator(); logsItr.hasNext();) {
                        Map<String, Object> log = logsItr.next();
                        if (logChecker.apply(log)) {
                            logsItr.remove();
                            continue checker;
                        }
                    }
                    notMatching.add(c);
                }
                if (false == notMatching.isEmpty()) {
                    fail("Some checkers " + notMatching + " didn't match any logs. All logs:" + logsMessage(allLogs)
                        + "\nRemaining logs:" + logsMessage(logs));
                }
                if (false == logs.isEmpty()) {
                    fail("Not all logs matched. Unmatched logs:" + logsMessage(logs));
                }
            });
        } catch (AssertionError e) {
            // Poison subsequent tests: once log bookkeeping is off we can't trust later assertions.
            auditFailure = true;
            logger.warn("Failed to find an audit log. Skipping remaining tests in this class after this the missing audit"
                + "logs could turn up later.");
            throw e;
        }
    }

    // Renders logs one-per-line for failure messages; "none!" when empty.
    private String logsMessage(List<Map<String, Object>> logs) {
        if (logs.isEmpty()) {
            return " none!";
        }
        StringBuilder logsMessage = new StringBuilder();
        for (Map<String, Object> log : logs) {
            logsMessage.append('\n').append(log);
        }
        return logsMessage.toString();
    }
}
}

View File

@ -1,8 +0,0 @@
grant {
    // Needed to read the audit log file
    permission java.io.FilePermission "${tests.audit.logfile}", "read";
    // Required by the ssl subproject:
    // Required for the net client to setup ssl rather than use global ssl.
    permission java.lang.RuntimePermission "setFactory";
};

View File

@ -1,366 +0,0 @@
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.test.NodeInfo
import javax.net.ssl.HttpsURLConnection
import javax.net.ssl.KeyManagerFactory
import javax.net.ssl.SSLContext
import javax.net.ssl.TrustManagerFactory
import java.nio.charset.StandardCharsets
import java.security.KeyStore
import java.security.SecureRandom
// Tell the tests we're running with ssl enabled
integTestRunner {
    systemProperty 'tests.ssl.enabled', 'true'
}
// needed to be consistent with ssl host checking; SanEvaluator (below) lazily
// computes the subject-alternative-name extension from local interfaces
Object san = new SanEvaluator()
// location of generated keystores and certificates
File keystoreDir = new File(project.buildDir, 'keystore')
// Generate the node's keystore
File nodeKeystore = new File(keystoreDir, 'test-node.jks')
task createNodeKeyStore(type: LoggedExec) {
    doFirst {
        // Start from a clean slate: ensure the parent dir exists and any stale keystore is removed.
        if (nodeKeystore.parentFile.exists() == false) {
            nodeKeystore.parentFile.mkdirs()
        }
        if (nodeKeystore.exists()) {
            delete nodeKeystore
        }
    }
    executable = new File(project.javaHome, 'bin/keytool')
    // keytool prompts interactively for the distinguished-name fields; feed canned answers via stdin.
    standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8'))
    args '-genkey',
        '-alias', 'test-node',
        '-keystore', nodeKeystore,
        '-keyalg', 'RSA',
        '-keysize', '2048',
        '-validity', '712',
        '-dname', 'CN=smoke-test-plugins-ssl',
        '-keypass', 'keypass',
        '-storepass', 'keypass',
        '-ext', san
}
// Generate the client's keystore (mirrors createNodeKeyStore with a client alias)
File clientKeyStore = new File(keystoreDir, 'test-client.jks')
task createClientKeyStore(type: LoggedExec) {
    doFirst {
        // Start clean: create the parent dir and drop any stale keystore.
        if (clientKeyStore.parentFile.exists() == false) {
            clientKeyStore.parentFile.mkdirs()
        }
        if (clientKeyStore.exists()) {
            delete clientKeyStore
        }
    }
    executable = new File(project.javaHome, 'bin/keytool')
    // Canned answers for keytool's interactive distinguished-name prompts.
    standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8'))
    args '-genkey',
        '-alias', 'test-client',
        '-keystore', clientKeyStore,
        '-keyalg', 'RSA',
        '-keysize', '2048',
        '-validity', '712',
        '-dname', 'CN=smoke-test-plugins-ssl',
        '-keypass', 'keypass',
        '-storepass', 'keypass',
        '-ext', san
}
// Export the node's certificate so the client keystore can trust it
File nodeCertificate = new File(keystoreDir, 'test-node.cert')
task exportNodeCertificate(type: LoggedExec) {
    doFirst {
        // Ensure the output location exists and is empty before exporting.
        if (nodeCertificate.parentFile.exists() == false) {
            nodeCertificate.parentFile.mkdirs()
        }
        if (nodeCertificate.exists()) {
            delete nodeCertificate
        }
    }
    executable = new File(project.javaHome, 'bin/keytool')
    args '-export',
        '-alias', 'test-node',
        '-keystore', nodeKeystore,
        '-storepass', 'keypass',
        '-file', nodeCertificate
}
// Import the node certificate in the client's keystore so the client trusts the node
task importNodeCertificateInClientKeyStore(type: LoggedExec) {
    dependsOn exportNodeCertificate
    executable = new File(project.javaHome, 'bin/keytool')
    args '-import',
        '-alias', 'test-node',
        '-keystore', clientKeyStore,
        '-storepass', 'keypass',
        '-file', nodeCertificate,
        '-noprompt'
}
// Export the client's certificate so the node keystore can trust it
File clientCertificate = new File(keystoreDir, 'test-client.cert')
task exportClientCertificate(type: LoggedExec) {
    doFirst {
        // Ensure the output location exists and is empty before exporting.
        if (clientCertificate.parentFile.exists() == false) {
            clientCertificate.parentFile.mkdirs()
        }
        if (clientCertificate.exists()) {
            delete clientCertificate
        }
    }
    executable = new File(project.javaHome, 'bin/keytool')
    args '-export',
        '-alias', 'test-client',
        '-keystore', clientKeyStore,
        '-storepass', 'keypass',
        '-file', clientCertificate
}
// Import the client certificate in the node's keystore so the node trusts the client
task importClientCertificateInNodeKeyStore(type: LoggedExec) {
    dependsOn exportClientCertificate
    executable = new File(project.javaHome, 'bin/keytool')
    args '-import',
        '-alias', 'test-client',
        '-keystore', nodeKeystore,
        '-storepass', 'keypass',
        '-file', clientCertificate,
        '-noprompt'
}
// Generated certs are binary; exclude them from the forbidden-patterns text scan.
forbiddenPatterns {
    exclude '**/*.cert'
}
// Add keystores to test classpath: it expects it there
sourceSets.test.resources.srcDir(keystoreDir)
// Make sure all keystores/certs are generated before resources are processed
// or the integration cluster starts.
processTestResources.dependsOn(
    createNodeKeyStore, createClientKeyStore,
    importNodeCertificateInClientKeyStore, importClientCertificateInNodeKeyStore
)
integTestCluster.dependsOn(importClientCertificateInNodeKeyStore)
integTestCluster {
    // The setup that we actually want
    setting 'xpack.security.http.ssl.enabled', 'true'
    setting 'xpack.security.transport.ssl.enabled', 'true'
    // ceremony to set up ssl
    setting 'xpack.ssl.keystore.path', 'test-node.jks'
    keystoreSetting 'xpack.ssl.keystore.secure_password', 'keypass'
    // copy keystores into config/
    extraConfigFile nodeKeystore.name, nodeKeystore
    extraConfigFile clientKeyStore.name, clientKeyStore
    // Override the wait condition to work properly with security and SSL:
    // the default HTTP health check can't authenticate or speak TLS, so poll
    // cluster health over HTTPS with the client keystore and basic auth.
    waitCondition = { NodeInfo node, AntBuilder ant ->
        File tmpFile = new File(node.cwd, 'wait.success')
        KeyStore keyStore = KeyStore.getInstance("JKS");
        keyStore.load(clientKeyStore.newInputStream(), 'keypass'.toCharArray());
        KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
        kmf.init(keyStore, 'keypass'.toCharArray());
        TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(keyStore);
        SSLContext sslContext = SSLContext.getInstance("TLSv1.2");
        sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom());
        for (int i = 0; i < 10; i++) {
            // we use custom wait logic here for HTTPS
            HttpsURLConnection httpURLConnection = null;
            try {
                httpURLConnection = (HttpsURLConnection) new URL("https://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}&wait_for_status=yellow").openConnection();
                httpURLConnection.setSSLSocketFactory(sslContext.getSocketFactory());
                httpURLConnection.setRequestProperty("Authorization", "Basic " +
                    Base64.getEncoder().encodeToString("test_admin:x-pack-test-password".getBytes(StandardCharsets.UTF_8)));
                httpURLConnection.setRequestMethod("GET");
                httpURLConnection.connect();
                if (httpURLConnection.getResponseCode() == 200) {
                    // Success: write the health response to the marker file the harness polls for.
                    tmpFile.withWriter StandardCharsets.UTF_8.name(), {
                        it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name()))
                    }
                }
            } catch (IOException e) {
                if (i == 9) {
                    logger.error("final attempt of calling cluster health failed", e)
                } else {
                    logger.debug("failed to call cluster health", e)
                }
            } finally {
                if (httpURLConnection != null) {
                    httpURLConnection.disconnect();
                }
            }
            // did not start, so wait a bit before trying again
            Thread.sleep(500L);
        }
        return tmpFile.exists()
    }
}
/** A lazy evaluator to find the san to use for certificate generation. */
class SanEvaluator {
    // Cached SAN string; computed once on first toString() call.
    private static String san = null

    // Lazily computes and caches the SAN; synchronized so concurrent
    // task configuration doesn't compute it twice.
    String toString() {
        synchronized (SanEvaluator.class) {
            if (san == null) {
                san = getSubjectAlternativeNameString()
            }
        }
        return san
    }

    // Code stolen from NetworkUtils/InetAddresses/NetworkAddress to support SAN
    /** Return all interfaces (and subinterfaces) on the system */
    private static List<NetworkInterface> getInterfaces() throws SocketException {
        List<NetworkInterface> all = new ArrayList<>();
        addAllInterfaces(all, Collections.list(NetworkInterface.getNetworkInterfaces()));
        // Sort by interface index for a deterministic SAN string.
        Collections.sort(all, new Comparator<NetworkInterface>() {
            @Override
            public int compare(NetworkInterface left, NetworkInterface right) {
                return Integer.compare(left.getIndex(), right.getIndex());
            }
        });
        return all;
    }

    /** Helper for getInterfaces, recursively adds subinterfaces to {@code target} */
    private static void addAllInterfaces(List<NetworkInterface> target, List<NetworkInterface> level) {
        if (!level.isEmpty()) {
            target.addAll(level);
            for (NetworkInterface intf : level) {
                addAllInterfaces(target, Collections.list(intf.getSubInterfaces()));
            }
        }
    }

    // Builds the keytool "-ext san=..." value from every loopback address,
    // as "ip:..." entries plus "dns:..." entries where a hostname resolves.
    private static String getSubjectAlternativeNameString() {
        List<InetAddress> list = new ArrayList<>();
        for (NetworkInterface intf : getInterfaces()) {
            if (intf.isUp()) {
                // NOTE: some operating systems (e.g. BSD stack) assign a link local address to the loopback interface
                // while technically not a loopback address, some of these treat them as one (e.g. OS X "localhost") so we must too,
                // otherwise things just won't work out of box. So we include all addresses from loopback interfaces.
                for (InetAddress address : Collections.list(intf.getInetAddresses())) {
                    if (intf.isLoopback() || address.isLoopbackAddress()) {
                        list.add(address);
                    }
                }
            }
        }
        if (list.isEmpty()) {
            throw new IllegalArgumentException("no up-and-running loopback addresses found, got " + getInterfaces());
        }
        StringBuilder builder = new StringBuilder("san=");
        for (int i = 0; i < list.size(); i++) {
            InetAddress address = list.get(i);
            String hostAddress;
            if (address instanceof Inet6Address) {
                hostAddress = compressedIPV6Address((Inet6Address)address);
            } else {
                hostAddress = address.getHostAddress();
            }
            builder.append("ip:").append(hostAddress);
            String hostname = address.getHostName();
            // Only add a dns: entry when reverse lookup produced a real hostname.
            if (hostname.equals(address.getHostAddress()) == false) {
                builder.append(",dns:").append(hostname);
            }
            if (i != (list.size() - 1)) {
                builder.append(",");
            }
        }
        return builder.toString();
    }

    // Formats an IPv6 address with "::" compression of the longest zero run.
    private static String compressedIPV6Address(Inet6Address inet6Address) {
        byte[] bytes = inet6Address.getAddress();
        int[] hextets = new int[8];
        for (int i = 0; i < hextets.length; i++) {
            // Combine each byte pair into one unsigned 16-bit hextet.
            hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255;
        }
        compressLongestRunOfZeroes(hextets);
        return hextetsToIPv6String(hextets);
    }

    /**
     * Identify and mark the longest run of zeroes in an IPv6 address.
     *
     * <p>Only runs of two or more hextets are considered. In case of a tie, the
     * leftmost run wins. If a qualifying run is found, its hextets are replaced
     * by the sentinel value -1.
     *
     * @param hextets {@code int[]} mutable array of eight 16-bit hextets
     */
    private static void compressLongestRunOfZeroes(int[] hextets) {
        int bestRunStart = -1;
        int bestRunLength = -1;
        int runStart = -1;
        // Iterate one past the end so a trailing zero run is closed out.
        for (int i = 0; i < hextets.length + 1; i++) {
            if (i < hextets.length && hextets[i] == 0) {
                if (runStart < 0) {
                    runStart = i;
                }
            } else if (runStart >= 0) {
                int runLength = i - runStart;
                if (runLength > bestRunLength) {
                    bestRunStart = runStart;
                    bestRunLength = runLength;
                }
                runStart = -1;
            }
        }
        if (bestRunLength >= 2) {
            Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1);
        }
    }

    /**
     * Convert a list of hextets into a human-readable IPv6 address.
     *
     * <p>In order for "::" compression to work, the input should contain negative
     * sentinel values in place of the elided zeroes.
     *
     * @param hextets {@code int[]} array of eight 16-bit hextets, or -1s
     */
    private static String hextetsToIPv6String(int[] hextets) {
        /*
         * While scanning the array, handle these state transitions:
         * start->num => "num" start->gap => "::"
         * num->num => ":num" num->gap => "::"
         * gap->num => "num" gap->gap => ""
         */
        StringBuilder buf = new StringBuilder(39);
        boolean lastWasNumber = false;
        for (int i = 0; i < hextets.length; i++) {
            boolean thisIsNumber = hextets[i] >= 0;
            if (thisIsNumber) {
                if (lastWasNumber) {
                    buf.append(':');
                }
                buf.append(Integer.toHexString(hextets[i]));
            } else {
                if (i == 0 || lastWasNumber) {
                    buf.append("::");
                }
            }
            lastWasNumber = thisIsNumber;
        }
        return buf.toString();
    }
}

View File

@ -1,18 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql;
/**
 * Interface implemented once per SQL access method to ensure that we
 * test the same minimal set of error cases. Note that this does not
 * include security related failures, those are tracked in another test.
 */
public interface ErrorsTestCase {
    // Syntactically or semantically invalid SQL must produce a clear error.
    void testSelectInvalidSql() throws Exception;
    // Querying an index that does not exist must fail with an unknown-index error.
    void testSelectFromMissingIndex() throws Exception;
    // Selecting a column absent from the mapping must fail with an unknown-column error.
    void testSelectMissingField() throws Exception;
    // Calling an undefined function must fail with an unknown-function error.
    void testSelectMissingFunction() throws Exception;
}

View File

@ -1,146 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.cli;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.elasticsearch.xpack.qa.sql.cli.RemoteCli.SecurityConfig;
import org.elasticsearch.xpack.qa.sql.embed.CliHttpServer;
import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.rules.ExternalResource;
import java.io.IOException;
import java.net.InetAddress;
import java.security.AccessControlException;
import java.util.function.Supplier;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts;
/**
 * Base class for CLI integration tests: starts a remote CLI per test against
 * either an embedded SQL HTTP server or the Gradle-managed cluster.
 */
public abstract class CliIntegrationTestCase extends ESRestTestCase {
    /**
     * Should the HTTP server that serves SQL be embedded in the test
     * process (true) or should the JDBC driver connect to Elasticsearch
     * running at {@code tests.rest.cluster}. Note that to use embedded
     * HTTP you have to have Elasticsearch's transport protocol open on
     * port 9300 but the Elasticsearch running there does not need to have
     * the SQL plugin installed. Note also that embedded HTTP is faster
     * but is not canonical because it runs against a different HTTP server
     * then JDBC will use in production. Gradle always uses non-embedded.
     */
    private static final boolean EMBED_SQL = Booleans.parseBoolean(System.getProperty("tests.embed.sql", "false"));

    // ClassRule so the embedded server (when enabled) spans the whole class; null otherwise.
    @ClassRule
    public static final EmbeddedCliServer EMBEDDED = EMBED_SQL ? new EmbeddedCliServer() : null;
    // Supplier of the address the CLI should connect to, embedded or external.
    public static final Supplier<String> ES = EMBED_SQL ? EMBEDDED::address : CliIntegrationTestCase::elasticsearchAddress;

    /**
     * Read an address for Elasticsearch suitable for the CLI from the system properties.
     */
    public static String elasticsearchAddress() {
        String cluster = System.getProperty("tests.rest.cluster");
        // CLI only supports a single node at a time so we just give it one.
        return cluster.split(",")[0];
    }

    private RemoteCli cli;

    /**
     * Asks the CLI Fixture to start a CLI instance.
     */
    @Before
    public void startCli() throws IOException {
        cli = new RemoteCli(ES.get(), true, securityConfig());
    }

    // Closes the CLI and verifies no search contexts leaked from the test.
    @After
    public void orderlyShutdown() throws Exception {
        if (cli == null) {
            // failed to connect to the cli so there is nothing to do here
            return;
        }
        cli.close();
        assertNoSearchContexts();
    }

    /**
     * Override to add security configuration to the cli.
     */
    protected SecurityConfig securityConfig() {
        return null;
    }

    // Indexes a single refreshed document into {@code index} with id 1.
    protected void index(String index, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
        XContentBuilder builder = JsonXContent.contentBuilder().startObject();
        body.accept(builder);
        builder.endObject();
        HttpEntity doc = new StringEntity(builder.string(), ContentType.APPLICATION_JSON);
        client().performRequest("PUT", "/" + index + "/doc/1", singletonMap("refresh", "true"), doc);
    }

    // Sends a command to the CLI and returns its first response line.
    public String command(String command) throws IOException {
        return cli.command(command);
    }

    // Reads the next line of CLI output.
    public String readLine() throws IOException {
        return cli.readLine();
    }

    /**
     * Embedded CLI server that runs against a running Elasticsearch
     * server using the transport protocol.
     */
    private static class EmbeddedCliServer extends ExternalResource {
        private Client client;
        private CliHttpServer server;

        @Override
        @SuppressWarnings("resource")
        protected void before() throws Throwable {
            try {
                Settings settings = Settings.builder()
                    .put("client.transport.ignore_cluster_name", true)
                    .build();
                client = new PreBuiltTransportClient(settings)
                    .addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), 9300));
            } catch (ExceptionInInitializerError e) {
                // The transport client can't initialize under the security manager; surface a clearer error.
                if (e.getCause() instanceof AccessControlException) {
                    throw new RuntimeException(getClass().getSimpleName() + " is not available with the security manager", e);
                } else {
                    throw e;
                }
            }
            server = new CliHttpServer(client);
            // Port 0: let the OS pick a free port; address() reports the bound one.
            server.start(0);
        }

        @Override
        protected void after() {
            client.close();
            client = null;
            server.stop();
            server = null;
        }

        private String address() {
            return server.address().getAddress().getHostAddress() + ":" + server.address().getPort();
        }
    }
}

View File

@ -1,39 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.cli;
import java.io.IOException;
/**
 * Tests for error messages.
 */
public abstract class ErrorsTestCase extends CliIntegrationTestCase implements org.elasticsearch.xpack.qa.sql.ErrorsTestCase {
    // NOTE: the expected strings below embed raw ANSI color escape sequences
    // exactly as the CLI emits them; do not "clean them up".

    @Override
    public void testSelectInvalidSql() throws Exception {
        assertEquals("[1;31mBad request [[22;3;33mFound 1 problem(s)", command("SELECT * FRO"));
        assertEquals("line 1:8: Cannot determine columns for *[1;23;31m][0m", readLine());
    }

    @Override
    public void testSelectFromMissingIndex() throws IOException {
        assertEquals("[1;31mBad request [[22;3;33mFound 1 problem(s)", command("SELECT * FROM test"));
        assertEquals("line 1:15: Unknown index [test][1;23;31m][0m", readLine());
    }

    @Override
    public void testSelectMissingField() throws IOException {
        // Index a doc so the index exists but the selected column does not.
        index("test", body -> body.field("test", "test"));
        assertEquals("[1;31mBad request [[22;3;33mFound 1 problem(s)", command("SELECT missing FROM test"));
        assertEquals("line 1:8: Unknown column [missing][1;23;31m][0m", readLine());
    }

    @Override
    public void testSelectMissingFunction() throws Exception {
        index("test", body -> body.field("foo", 1));
        assertEquals("[1;31mBad request [[22;3;33mFound 1 problem(s)", command("SELECT missing(foo) FROM test"));
        assertEquals("line 1:8: Unknown function [missing][1;23;31m][0m", readLine());
    }
}

View File

@ -1,49 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.cli;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import java.io.IOException;
import static java.util.Collections.singletonMap;
import static org.hamcrest.Matchers.containsString;
/**
* Test for setting the fetch size.
*/
/**
 * Test for setting the fetch size.
 */
public abstract class FetchSizeTestCase extends CliIntegrationTestCase {
    // Indexes 20 docs, sets a fetch size of 4 with a visible separator, and
    // verifies the results arrive in 5 batches of 4 rows each.
    // NOTE: expected strings embed raw ANSI escape sequences from the CLI.
    public void testSelect() throws IOException {
        StringBuilder bulk = new StringBuilder();
        for (int i = 0; i < 20; i++) {
            bulk.append("{\"index\":{}}\n");
            bulk.append("{\"test_field\":" + i + "}\n");
        }
        client().performRequest("PUT", "/test/doc/_bulk", singletonMap("refresh", "true"),
            new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
        assertEquals("fetch size set to [90m4[0m", command("fetch size = 4"));
        assertEquals("fetch separator set to \"[90m -- fetch sep -- [0m\"", command("fetch separator = \" -- fetch sep -- \""));
        assertThat(command("SELECT * FROM test ORDER BY test_field ASC"), containsString("test_field"));
        assertThat(readLine(), containsString("----------"));
        int i = 0;
        while (i < 20) {
            // Each page holds exactly 4 rows followed by the separator line.
            assertThat(readLine(), containsString(Integer.toString(i++)));
            assertThat(readLine(), containsString(Integer.toString(i++)));
            assertThat(readLine(), containsString(Integer.toString(i++)));
            assertThat(readLine(), containsString(Integer.toString(i++)));
            assertThat(readLine(), containsString(" -- fetch sep -- "));
        }
        assertEquals("", readLine());
    }

    // Non-numeric, zero, negative and int-overflowing fetch sizes are all rejected.
    public void testInvalidFetchSize() throws IOException {
        assertEquals("[1;31mInvalid fetch size [[22;3;33mcat[1;23;31m][0m", command("fetch size = cat"));
        assertEquals("[1;31mInvalid fetch size [[22;3;33m0[1;23;31m]. Must be > 0.[0m", command("fetch size = 0"));
        assertEquals("[1;31mInvalid fetch size [[22;3;33m-1231[1;23;31m]. Must be > 0.[0m", command("fetch size = -1231"));
        assertEquals("[1;31mInvalid fetch size [[22;3;33m" + Long.MAX_VALUE + "[1;23;31m][0m", command("fetch size = " + Long.MAX_VALUE));
    }
}

Some files were not shown because too many files have changed in this diff Show More