SQL: Switch JDBC to REST protocol (elastic/x-pack-elasticsearch#3543)

Replaces binary serialization protocol in JDBC driver with the common REST protocol.

relates elastic/x-pack-elasticsearch#3419

Original commit: elastic/x-pack-elasticsearch@532c106658
This commit is contained in:
Igor Motov 2018-01-16 13:26:06 -05:00 committed by GitHub
parent ebbb49da18
commit 12d4f10faa
128 changed files with 1936 additions and 4319 deletions

View File

@ -194,7 +194,7 @@ indices that are available to the user execute:
--------------------------------------------------
POST /_xpack/sql/tables
{
"table_pattern": "lib*"
"table_pattern": "lib%"
}
--------------------------------------------------
// CONSOLE
@ -228,12 +228,14 @@ POST /_xpack/sql/columns
--------------------------------------------------
{
"columns": [
{"table": "library", "name": "author", "type": "text"},
{"table": "library", "name": "name", "type": "text"},
{"table": "library", "name": "page_count", "type": "short"},
{"table": "library", "name": "release_date", "type": "date"}
{"table": "library", "name": "author", "type": "text", "position": 1},
{"table": "library", "name": "name", "type": "text", "position": 2},
{"table": "library", "name": "page_count", "type": "short", "position": 3},
{"table": "library", "name": "release_date", "type": "date", "position": 4}
]
}
--------------------------------------------------
// TESTRESPONSE
The `position` is the column's position in the original table and won't match its position in the
returned array if the `column_pattern` filters out any columns.

View File

@ -32,9 +32,7 @@ dependencyLicenses {
mapping from: /transport-netty.*/, to: 'elasticsearch'
mapping from: /elasticsearch-rest-client.*/, to: 'elasticsearch'
mapping from: /server.*/, to: 'elasticsearch'
mapping from: /jdbc-proto.*/, to: 'elasticsearch'
mapping from: /rest-proto.*/, to: 'elasticsearch'
mapping from: /shared-proto.*/, to: 'elasticsearch'
mapping from: /aggs-matrix-stats.*/, to: 'elasticsearch' //pulled in by sql:server
mapping from: /http.*/, to: 'httpclient' // pulled in by rest client
mapping from: /commons-.*/, to: 'commons' // pulled in by rest client
@ -42,9 +40,7 @@ dependencyLicenses {
ignoreSha 'transport-netty4'
ignoreSha 'tribe'
ignoreSha 'server'
ignoreSha 'jdbc-proto'
ignoreSha 'rest-proto'
ignoreSha 'shared-proto'
ignoreSha 'elasticsearch-rest-client-sniffer'
ignoreSha 'aggs-matrix-stats'
ignoreSha 'x-pack-core'

View File

@ -8,14 +8,14 @@ package org.elasticsearch.xpack.sql;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest;
import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest.Mode;
import org.elasticsearch.xpack.sql.plugin.ColumnInfo;
import org.elasticsearch.xpack.sql.plugin.SqlQueryAction;
import org.elasticsearch.xpack.sql.plugin.MetaColumnInfo;
import org.elasticsearch.xpack.sql.plugin.SqlListColumnsAction;
import org.elasticsearch.xpack.sql.plugin.SqlListColumnsResponse;
import org.elasticsearch.xpack.sql.plugin.SqlListTablesAction;
import org.elasticsearch.xpack.sql.plugin.SqlListTablesResponse;
import org.elasticsearch.xpack.sql.plugin.SqlQueryAction;
import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse;
import java.io.IOException;
@ -25,6 +25,7 @@ import java.util.stream.Collectors;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.emptyCollectionOf;
import static org.hamcrest.Matchers.equalTo;
@ -72,7 +73,7 @@ public class SqlActionIT extends AbstractSqlIntegTestCase {
assertThat(tables, containsInAnyOrder("foo", "bar", "baz", "broken"));
response = client().prepareExecute(SqlListTablesAction.INSTANCE).pattern("b*").get();
response = client().prepareExecute(SqlListTablesAction.INSTANCE).pattern("b%").get();
tables = removeInternal(response.getTables());
assertThat(tables, hasSize(3));
assertThat(tables, containsInAnyOrder("bar", "baz", "broken"));
@ -95,11 +96,20 @@ public class SqlActionIT extends AbstractSqlIntegTestCase {
SqlListColumnsResponse response = client().prepareExecute(SqlListColumnsAction.INSTANCE)
.indexPattern("bar").columnPattern("").mode(Mode.JDBC).get();
List<ColumnInfo> columns = response.getColumns();
List<MetaColumnInfo> columns = response.getColumns();
assertThat(columns, hasSize(2));
assertThat(columns, containsInAnyOrder(
new ColumnInfo("bar", "str_field", "text", JDBCType.VARCHAR, 0),
new ColumnInfo("bar", "int_field", "integer", JDBCType.INTEGER, 11)
assertThat(columns, contains(
new MetaColumnInfo("bar", "int_field", "integer", JDBCType.INTEGER, 10, 1),
new MetaColumnInfo("bar", "str_field", "text", JDBCType.VARCHAR, Integer.MAX_VALUE, 2)
));
response = client().prepareExecute(SqlListColumnsAction.INSTANCE)
.indexPattern("bar").columnPattern("").mode(Mode.PLAIN).get();
columns = response.getColumns();
assertThat(columns, hasSize(2));
assertThat(columns, contains(
new MetaColumnInfo("bar", "int_field", "integer", null, 0, 1),
new MetaColumnInfo("bar", "str_field", "text", null, 0, 2)
));
}

View File

@ -5,15 +5,9 @@
*/
package org.elasticsearch.xpack.sql;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@ -24,39 +18,25 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.transport.Netty4Plugin;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto;
import org.elasticsearch.xpack.sql.plugin.SqlListColumnsAction;
import org.elasticsearch.xpack.sql.plugin.SqlListColumnsResponse;
import org.elasticsearch.xpack.sql.plugin.SqlQueryAction;
import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse;
import org.elasticsearch.xpack.sql.plugin.SqlTranslateAction;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.hamcrest.Matchers;
import org.junit.Before;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Locale;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.license.XPackLicenseStateTests.randomBasicStandardOrGold;
import static org.elasticsearch.license.XPackLicenseStateTests.randomTrialBasicStandardGoldOrPlatinumMode;
import static org.elasticsearch.license.XPackLicenseStateTests.randomTrialOrPlatinumMode;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
@ -207,42 +187,6 @@ public class SqlLicenseIT extends AbstractLicensesIntegrationTestCase {
assertThat(fetchSource.includes(), Matchers.arrayContaining("data"));
}
public void testJdbcActionLicense() throws Exception {
setupTestIndex();
disableJdbcLicensing();
Request request = new MetaTableRequest("test");
ResponseException responseException = expectThrows(ResponseException.class, () -> jdbc(request));
assertThat(responseException.getMessage(), containsString("current license is non-compliant for [jdbc]"));
assertThat(responseException.getMessage(), containsString("security_exception"));
enableJdbcLicensing();
Response response = jdbc(request);
assertThat(response, instanceOf(MetaTableResponse.class));
}
private Response jdbc(Request request) throws IOException {
// Convert the request to the HTTP entity that JDBC uses
HttpEntity entity;
try (BytesStreamOutput bytes = new BytesStreamOutput()) {
DataOutput out = new DataOutputStream(bytes);
Proto.INSTANCE.writeRequest(request, out);
entity = new ByteArrayEntity(BytesRef.deepCopyOf(bytes.bytes().toBytesRef()).bytes, ContentType.APPLICATION_JSON);
}
// Execute
InputStream response = getRestClient().performRequest("POST", "/_xpack/sql/jdbc", emptyMap(), entity).getEntity().getContent();
// Deserialize bytes to response like JDBC does
try {
DataInput in = new DataInputStream(response);
return Proto.INSTANCE.readResponse(request, in);
} finally {
response.close();
}
}
// TODO test SqlGetIndicesAction. Skipping for now because of lack of serialization support.
private void setupTestIndex() {

View File

@ -123,7 +123,7 @@ setup:
- do:
xpack.sql.tables:
body:
table_pattern: "t*"
table_pattern: "t%"
- length: { tables: 1 }
- match: { tables.0: 'test' }
@ -133,7 +133,7 @@ setup:
- do:
xpack.sql.columns:
body:
table_pattern: "t*"
table_pattern: "t%"
column_pattern: ""
- length: { columns: 2 }

View File

@ -7,19 +7,8 @@ apply plugin: 'elasticsearch.build'
dependencies {
compile "org.elasticsearch.test:framework:${versions.elasticsearch}"
// JDBC testing dependencies
if (false == isEclipse && false == isIdea) {
// If we're not doing IDE stuff use the shadowed jar
compile(project(path: ':x-pack-elasticsearch:sql:jdbc', configuration: 'shadow'))
} else {
/* If we're doing IDE stuff then use the project
* dependency so the IDEs don't get confused. Transitive
* deps are OK here too because this is the only time we
* pull all of those deps in. We make sure to exclude them
* below so they don't cause jar hell with the shadowed
* jar. */
compile(project(':x-pack-elasticsearch:sql:jdbc'))
}
// TODO: Restore shading when https://github.com/elastic/elasticsearch/pull/27955 gets in
compile(project(':x-pack-elasticsearch:sql:jdbc'))
compile "net.sourceforge.csvjdbc:csvjdbc:1.0.34"
runtime "com.h2database:h2:1.4.194"
// used for running debug tests
@ -36,6 +25,10 @@ dependencies {
transitive = false
}
compile "org.elasticsearch.client:transport:${version}"
// Needed by embedded server
compile project(path: ':modules:lang-painless', configuration: 'runtime')
}
/* disable unit tests because these are all integration tests used
@ -101,18 +94,13 @@ subprojects {
}
testCompile "org.elasticsearch.test:framework:${versions.elasticsearch}"
// Needed by embedded server
testCompile project(path: ':modules:lang-painless', configuration: 'runtime')
// JDBC testing dependencies
testRuntime(project(':x-pack-elasticsearch:sql:jdbc')) {
if (false == isEclipse && false == isIdea) {
/* Skip the transitive dependencies of the server when outside
* of an IDE because outside of an IDE we use the jdbc jar
* which includes all the transitive dependencies *already*.
* If we didn't skip these dependencies the jar hell checks
* would fail. And we need the transitive dependencies to
* run in embedded mode but only do that inside of an IDE. */
transitive = false
}
}
testRuntime(project(':x-pack-elasticsearch:sql:jdbc'))
// TODO: Restore shading when https://github.com/elastic/elasticsearch/pull/27955 gets in
testRuntime("net.sourceforge.csvjdbc:csvjdbc:1.0.34") {
transitive = false
}

View File

@ -3,23 +3,23 @@ read_all:
- "cluster:monitor/main" # Used by JDBC's MetaData
indices:
- names: test
privileges: [read, "indices:admin/get"]
privileges: [read, "indices:admin/get", "indices:admin/sql/tables", "indices:admin/sql/columns"]
- names: bort
privileges: [read, "indices:admin/get"]
privileges: [read, "indices:admin/get", "indices:admin/sql/tables", "indices:admin/sql/columns"]
read_something_else:
cluster:
- "cluster:monitor/main" # Used by JDBC's MetaData
indices:
- names: something_that_isnt_test
privileges: [read, "indices:admin/get"]
privileges: [read, "indices:admin/get", "indices:admin/sql/tables", "indices:admin/sql/columns"]
read_test_a:
cluster:
- "cluster:monitor/main" # Used by JDBC's MetaData
indices:
- names: test
privileges: [read, "indices:admin/get"]
privileges: [read, "indices:admin/get", "indices:admin/sql/tables", "indices:admin/sql/columns"]
field_security:
grant: [a]
@ -28,7 +28,7 @@ read_test_a_and_b:
- "cluster:monitor/main" # Used by JDBC's MetaData
indices:
- names: test
privileges: [read, "indices:admin/get"]
privileges: [read, "indices:admin/get", "indices:admin/sql/tables", "indices:admin/sql/columns"]
field_security:
grant: ["*"]
except: [c]
@ -38,7 +38,7 @@ read_test_without_c_3:
- "cluster:monitor/main" # Used by JDBC's MetaData
indices:
- names: test
privileges: [read, "indices:admin/get"]
privileges: [read, "indices:admin/get", "indices:admin/sql/tables", "indices:admin/sql/columns"]
query: |
{
"bool": {
@ -57,4 +57,4 @@ read_bort:
- "cluster:monitor/main" # Used by JDBC's MetaData
indices:
- names: bort
privileges: [read, "indices:admin/get"]
privileges: [read, "indices:admin/get", "indices:admin/sql/tables", "indices:admin/sql/columns"]

View File

@ -5,29 +5,44 @@
*/
package org.elasticsearch.xpack.qa.sql.embed;
import org.elasticsearch.client.Client;
import org.apache.http.HttpHost;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.elasticsearch.painless.PainlessPlugin;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.NodeConfigurationSource;
import org.elasticsearch.transport.Netty4Plugin;
import org.elasticsearch.xpack.qa.sql.jdbc.DataLoader;
import org.junit.rules.ExternalResource;
import java.net.InetAddress;
import java.security.AccessControlException;
import java.io.IOException;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.Properties;
import java.util.function.Function;
import static org.apache.lucene.util.LuceneTestCase.createTempDir;
import static org.apache.lucene.util.LuceneTestCase.random;
import static org.elasticsearch.test.ESTestCase.randomLong;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
/**
* Embedded JDBC server that uses the transport client to power
* the jdbc endpoints in the same JVM as the tests.
* Embedded JDBC server that uses the internal test cluster in the same JVM as the tests.
*/
public class EmbeddedJdbcServer extends ExternalResource {
private Client client;
private JdbcHttpServer server;
private InternalTestCluster internalTestCluster;
private String jdbcUrl;
private final Properties properties;
@ -45,37 +60,59 @@ public class EmbeddedJdbcServer extends ExternalResource {
@Override
@SuppressWarnings("resource")
protected void before() throws Throwable {
try {
Settings settings = Settings.builder()
.put("client.transport.ignore_cluster_name", true)
.build();
client = new PreBuiltTransportClient(settings)
.addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), 9300));
} catch (ExceptionInInitializerError e) {
if (e.getCause() instanceof AccessControlException) {
throw new RuntimeException(getClass().getSimpleName() + " is not available with the security manager", e);
} else {
throw e;
int numNodes = 1;
internalTestCluster = new InternalTestCluster(randomLong(), createTempDir(), false, true, numNodes, numNodes,
"sql_embed", new SqlNodeConfigurationSource(), 0, false, "sql_embed",
Arrays.asList(Netty4Plugin.class, SqlEmbedPlugin.class, PainlessPlugin.class),
Function.identity());
internalTestCluster.beforeTest(random(), 0.5);
Tuple<String, Integer> address = getHttpAddress();
jdbcUrl = "jdbc:es://" + address.v1() + ":" + address.v2();
System.setProperty("tests.rest.cluster", address.v1() + ":" + address.v2());
}
private Tuple<String, Integer> getHttpAddress() {
NodesInfoResponse nodesInfoResponse = internalTestCluster.client().admin().cluster().prepareNodesInfo().get();
assertFalse(nodesInfoResponse.hasFailures());
for (NodeInfo node : nodesInfoResponse.getNodes()) {
if (node.getHttp() != null) {
TransportAddress publishAddress = node.getHttp().address().publishAddress();
return new Tuple<>(publishAddress.getAddress(), publishAddress.getPort());
}
}
server = new JdbcHttpServer(client);
server.start(0);
jdbcUrl = server.url();
throw new IllegalStateException("No http servers found");
}
@Override
protected void after() {
client.close();
client = null;
server.stop();
server = null;
try {
internalTestCluster.afterTest();
} catch (IOException e) {
fail("Failed to shutdown server " + e.getMessage());
} finally {
internalTestCluster.close();
}
}
public Connection connection(Properties props) throws SQLException {
assertNotNull("ES JDBC Server is null - make sure ES is properly run as a @ClassRule", server);
assertNotNull("ES JDBC Server is null - make sure ES is properly run as a @ClassRule", jdbcUrl);
Properties p = new Properties(properties);
p.putAll(props);
return DriverManager.getConnection(jdbcUrl, p);
}
private static class SqlNodeConfigurationSource extends NodeConfigurationSource {
@Override
public Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(NetworkModule.HTTP_ENABLED.getKey(), true) //This test requires HTTP
.build();
}
@Override
public Path nodeConfigPath(int nodeOrdinal) {
return null;
}
}
}

View File

@ -1,52 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.embed;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.FilterClient;
import org.elasticsearch.xpack.sql.execution.PlanExecutor;
import org.elasticsearch.xpack.sql.plugin.SqlQueryAction;
import org.elasticsearch.xpack.sql.plugin.SqlQueryRequest;
import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse;
import org.elasticsearch.xpack.sql.plugin.TransportSqlQueryAction;
import java.util.Objects;
/**
* Implements embedded sql mode by intercepting requests to SQL APIs and executing them locally.
*/
public class EmbeddedModeFilterClient extends FilterClient {
private PlanExecutor planExecutor;
public EmbeddedModeFilterClient(Client in) {
super(in);
}
public void setPlanExecutor(PlanExecutor executor) {
this.planExecutor = executor;
}
@Override
@SuppressWarnings("unchecked")
protected < Request extends ActionRequest,
Response extends ActionResponse,
RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
void doExecute(Action<Request, Response, RequestBuilder> action,
Request request, ActionListener<Response> listener) {
Objects.requireNonNull(planExecutor, "plan executor not set on EmbeddedClient");
if (action == SqlQueryAction.INSTANCE) {
TransportSqlQueryAction.operation(planExecutor, (SqlQueryRequest) request, (ActionListener<SqlQueryResponse>) listener);
} else {
super.doExecute(action, request, listener);
}
}
}

View File

@ -1,23 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.embed;
import org.elasticsearch.client.Client;
/**
* Internal server used for testing without starting a new Elasticsearch instance.
*/
public class JdbcHttpServer extends ProtoHttpServer {
public JdbcHttpServer(Client client) {
super(client, new JdbcProtoHandler(client), "/_xpack/sql/jdbc");
}
@Override
public String url() {
return "jdbc:es://" + super.url();
}
}

View File

@ -1,37 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.embed;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.xpack.sql.analysis.index.IndexResolver;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto;
import org.elasticsearch.xpack.sql.plugin.RestSqlJdbcAction;
import org.elasticsearch.xpack.sql.plugin.SqlLicenseChecker;
import java.io.DataInput;
import java.io.IOException;
import static org.mockito.Mockito.mock;
class JdbcProtoHandler extends ProtoHandler {
private final RestSqlJdbcAction action;
JdbcProtoHandler(Client client) {
super(client);
action = new RestSqlJdbcAction(Settings.EMPTY, mock(RestController.class),
new SqlLicenseChecker((mode) -> {
}),
new IndexResolver(client));
}
@Override
protected void handle(RestChannel channel, DataInput in) throws IOException {
action.operation(Proto.INSTANCE.readRequest(in), client).accept(channel);
}
}

View File

@ -1,106 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.embed;
import io.netty.handler.codec.http.HttpHeaderNames;
import com.sun.net.httpserver.Headers;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.rest.FakeRestChannel;
import org.elasticsearch.test.rest.FakeRestRequest;
import org.elasticsearch.xpack.sql.analysis.index.IndexResolver;
import org.elasticsearch.xpack.sql.execution.PlanExecutor;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;
import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
public abstract class ProtoHandler implements HttpHandler, AutoCloseable {
private static PlanExecutor planExecutor(EmbeddedModeFilterClient client) {
return new PlanExecutor(client, new IndexResolver(client));
}
protected static final Logger log = ESLoggerFactory.getLogger(ProtoHandler.class.getName());
private final TimeValue TV = TimeValue.timeValueSeconds(5);
protected final EmbeddedModeFilterClient client;
protected final NodeInfo info;
protected final String clusterName;
protected ProtoHandler(Client client) {
NodesInfoResponse niResponse = client.admin().cluster().prepareNodesInfo("_local").clear().get(TV);
this.client = client instanceof EmbeddedModeFilterClient ? (EmbeddedModeFilterClient) client : new EmbeddedModeFilterClient(client);
this.client.setPlanExecutor(planExecutor(this.client));
info = niResponse.getNodes().get(0);
clusterName = niResponse.getClusterName().value();
}
@Override
public void handle(HttpExchange http) throws IOException {
log.debug("Received query call...");
if ("HEAD".equals(http.getRequestMethod())) {
http.sendResponseHeaders(RestStatus.OK.getStatus(), 0);
http.close();
return;
}
FakeRestChannel channel = new FakeRestChannel(
new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(singletonMap("error_trace", "")).build(), true, 1);
try (DataInputStream in = new DataInputStream(http.getRequestBody())) {
handle(channel, in);
while (false == channel.await()) {
}
sendHttpResponse(http, channel.capturedResponse());
} catch (Exception e) {
sendHttpResponse(http, new BytesRestResponse(channel, e));
}
}
protected abstract void handle(RestChannel channel, DataInput in) throws IOException;
protected void sendHttpResponse(HttpExchange http, RestResponse response) throws IOException {
try {
// first do the conversion in case an exception is triggered
if (http.getResponseHeaders().isEmpty()) {
http.sendResponseHeaders(response.status().getStatus(), response.content().length());
Headers headers = http.getResponseHeaders();
headers.putIfAbsent(HttpHeaderNames.CONTENT_TYPE.toString(), singletonList(response.contentType()));
if (response.getHeaders() != null) {
headers.putAll(response.getHeaders());
}
}
response.content().writeTo(http.getResponseBody());
} catch (IOException ex) {
log.error("Caught error while trying to catch error", ex);
} finally {
http.close();
}
}
@Override
public void close() {
// no-op
}
}

View File

@ -1,63 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.embed;
import com.sun.net.httpserver.HttpServer;
import org.elasticsearch.client.Client;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
public abstract class ProtoHttpServer {
private final ProtoHandler handler;
private final String protoSuffix;
private final Client client;
private HttpServer server;
private ExecutorService executor;
public ProtoHttpServer(Client client, ProtoHandler handler, String protoSuffix) {
this.client = client;
this.handler = handler;
this.protoSuffix = protoSuffix;
}
public void start(int port) throws IOException {
// similar to Executors.newCached but with a smaller bound and much smaller keep-alive
executor = new ThreadPoolExecutor(0, 10, 250, TimeUnit.MILLISECONDS, new SynchronousQueue<Runnable>());
server = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), port), 0);
server.createContext("/", new RootHandler());
server.createContext(protoSuffix, handler);
server.setExecutor(executor);
server.start();
}
public void stop() {
server.stop(1);
server = null;
executor.shutdownNow();
executor = null;
}
public InetSocketAddress address() {
return server != null ? server.getAddress() : null;
}
public String url() {
return server != null ? "localhost:" + address().getPort() : "<not started>";
}
public Client client() {
return client;
}
}

View File

@ -1,47 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.embed;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
class RootHandler implements HttpHandler {
private static final Logger log = ESLoggerFactory.getLogger(RootHandler.class.getName());
@Override
public void handle(HttpExchange http) throws IOException {
log.debug("Received query call...");
if ("HEAD".equals(http.getRequestMethod())) {
http.sendResponseHeaders(RestStatus.OK.getStatus(), 0);
http.close();
return;
}
fail(http, new UnsupportedOperationException("only HEAD allowed"));
}
protected void fail(HttpExchange http, Exception ex) {
log.error("Caught error while transmitting response", ex);
try {
// the error conversion has failed, halt
if (http.getResponseHeaders().isEmpty()) {
http.sendResponseHeaders(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), -1);
}
} catch (IOException ioEx) {
log.error("Caught error while trying to catch error", ex);
} finally {
http.close();
}
}
}

View File

@ -0,0 +1,99 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.qa.sql.embed;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.sql.analysis.index.IndexResolver;
import org.elasticsearch.xpack.sql.execution.PlanExecutor;
import org.elasticsearch.xpack.sql.plugin.RestSqlClearCursorAction;
import org.elasticsearch.xpack.sql.plugin.RestSqlListColumnsAction;
import org.elasticsearch.xpack.sql.plugin.RestSqlListTablesAction;
import org.elasticsearch.xpack.sql.plugin.RestSqlQueryAction;
import org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction;
import org.elasticsearch.xpack.sql.plugin.SqlLicenseChecker;
import org.elasticsearch.xpack.sql.plugin.SqlListColumnsAction;
import org.elasticsearch.xpack.sql.plugin.SqlListTablesAction;
import org.elasticsearch.xpack.sql.plugin.SqlQueryAction;
import org.elasticsearch.xpack.sql.plugin.SqlTranslateAction;
import org.elasticsearch.xpack.sql.plugin.TransportSqlClearCursorAction;
import org.elasticsearch.xpack.sql.plugin.TransportSqlListColumnsAction;
import org.elasticsearch.xpack.sql.plugin.TransportSqlListTablesAction;
import org.elasticsearch.xpack.sql.plugin.TransportSqlQueryAction;
import org.elasticsearch.xpack.sql.session.Cursor;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.function.Supplier;
/**
 * Plugin that adds SQL functionality to the internal test cluster.
 * <p>
 * It is used in the embedded test mode by {@link EmbeddedJdbcServer}.
 */
public class SqlEmbedPlugin extends Plugin implements ActionPlugin {

    /** Registers cursor (de)serialization entries so cursors can cross the transport layer. */
    public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
        return Cursor.getNamedWriteables();
    }

    // License checking is a no-op in the embedded test plugin.
    private final SqlLicenseChecker sqlLicenseChecker = new SqlLicenseChecker(mode -> { });

    public SqlEmbedPlugin() {
    }

    @Override
    public Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool,
            ResourceWatcherService resourceWatcherService, ScriptService scriptService,
            NamedXContentRegistry xContentRegistry, Environment environment,
            NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) {
        // Wire the SQL plan executor against the test cluster's client.
        IndexResolver indexResolver = new IndexResolver(client);
        return Arrays.asList(sqlLicenseChecker, indexResolver, new PlanExecutor(client, indexResolver));
    }

    @Override
    public List<RestHandler> getRestHandlers(Settings settings, RestController restController,
            ClusterSettings clusterSettings, IndexScopedSettings indexScopedSettings,
            SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver,
            Supplier<DiscoveryNodes> nodesInCluster) {
        // REST endpoints mirroring the ones registered by the production SQL plugin.
        return Arrays.asList(new RestSqlQueryAction(settings, restController),
                new SqlTranslateAction.RestAction(settings, restController),
                new RestSqlClearCursorAction(settings, restController),
                new RestSqlListTablesAction(settings, restController),
                new RestSqlListColumnsAction(settings, restController));
    }

    @Override
    public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
        // Transport actions backing the REST endpoints registered above.
        return Arrays.asList(new ActionHandler<>(SqlQueryAction.INSTANCE, TransportSqlQueryAction.class),
                new ActionHandler<>(SqlTranslateAction.INSTANCE, SqlTranslateAction.TransportAction.class),
                new ActionHandler<>(SqlClearCursorAction.INSTANCE, TransportSqlClearCursorAction.class),
                new ActionHandler<>(SqlListTablesAction.INSTANCE, TransportSqlListTablesAction.class),
                new ActionHandler<>(SqlListColumnsAction.INSTANCE, TransportSqlListColumnsAction.class));
    }
}

View File

@ -40,14 +40,8 @@ import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearch
public abstract class JdbcIntegrationTestCase extends ESRestTestCase {
/**
* Should the HTTP server that serves SQL be embedded in the test
* process (true) or should the JDBC driver connect to Elasticsearch
* running at {@code tests.rest.cluster}. Note that to use embedded
* HTTP you have to have Elasticsearch's transport protocol open on
* port 9300 but the Elasticsearch running there does not need to have
* the SQL plugin installed. Note also that embedded HTTP is faster
* but is not canonical because it runs against a different HTTP server
* then JDBC will use in production. Gradle always uses non-embedded.
* Starts an internal cluster instead of using external REST cluster. Useful for IDE debugging.
* Use: -Dtests.embed.sql=true -Dtests.security.manager=false
*/
protected static final boolean EMBED_SQL = Booleans.parseBoolean(System.getProperty("tests.embed.sql", "false"));

View File

@ -8,7 +8,6 @@ dependencies {
compile "org.jline:jline:3.3.1"
compile project(':x-pack-elasticsearch:sql:shared-client')
compile project(':x-pack-elasticsearch:sql:rest-proto')
compile project(':x-pack-elasticsearch:sql:shared-proto')
compile project(':server:cli')
runtime "org.fusesource.jansi:jansi:1.16"
@ -19,13 +18,11 @@ dependencyLicenses {
mapping from: /rest-proto.*/, to: 'elasticsearch'
mapping from: /elasticsearch-core.*/, to: 'elasticsearch'
mapping from: /shared-client.*/, to: 'elasticsearch'
mapping from: /shared-proto.*/, to: 'elasticsearch'
mapping from: /elasticsearch-cli.*/, to: 'elasticsearch'
mapping from: /jackson-.*/, to: 'jackson'
mapping from: /lucene-.*/, to: 'lucene'
ignoreSha 'rest-proto'
ignoreSha 'shared-client'
ignoreSha 'shared-proto'
ignoreSha 'elasticsearch-cli'
ignoreSha 'elasticsearch-core'
ignoreSha 'elasticsearch'

View File

@ -9,7 +9,7 @@ import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.xpack.sql.client.HttpClient;
import org.elasticsearch.xpack.sql.client.shared.ClientException;
import org.elasticsearch.xpack.sql.client.shared.Version;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryInitRequest;
import org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest;
import java.sql.SQLException;
@ -18,7 +18,7 @@ import java.sql.SQLException;
*/
public class CliSession {
private final HttpClient httpClient;
private int fetchSize = AbstractQueryInitRequest.DEFAULT_FETCH_SIZE;
private int fetchSize = AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE;
private String fetchSeparator = "";
private boolean debug;

View File

@ -1,17 +0,0 @@
// Build file for the binary JDBC protocol objects shared by the driver and the server.
description = 'Request and response objects shared by the jdbc driver and ' +
        'its backend in :sql:server'

dependencies {
    compile project(':x-pack-elasticsearch:sql:shared-proto')
    testCompile project(':x-pack-elasticsearch:sql:test-utils')
}

forbiddenApisMain {
    // does not depend on core, so only jdk and http signatures should be checked
    signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')]
}

dependencyLicenses {
    // shared-proto is an in-repo Elasticsearch project: reuse its license and skip the sha check
    mapping from: /shared-proto.*/, to: 'elasticsearch'
    ignoreSha 'shared-proto'
}

View File

@ -1,5 +0,0 @@
Elasticsearch
Copyright 2009-2017 Elasticsearch
This product includes software developed by The Apache Software
Foundation (http://www.apache.org/).

View File

@ -1,37 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractInfoRequest;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput;
import java.io.IOException;
/**
 * Request general information about the server.
 */
public class InfoRequest extends AbstractInfoRequest {
    /**
     * Build the info request containing information about the current JVM.
     */
    public InfoRequest() {
        super();
    }

    /** Build the request from explicit JVM and OS details; mainly useful in tests. */
    public InfoRequest(String jvmVersion, String jvmVendor, String jvmClassPath, String osName, String osVersion) {
        super(jvmVersion, jvmVendor, jvmClassPath, osName, osVersion);
    }

    /** Deserialization constructor used by the protocol layer. */
    InfoRequest(SqlDataInput in) throws IOException {
        super(in);
    }

    @Override
    public RequestType requestType() {
        return RequestType.INFO;
    }
}

View File

@ -1,38 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractInfoResponse;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import java.io.DataInput;
import java.io.IOException;
/**
 * General information about the server.
 */
public class InfoResponse extends AbstractInfoResponse {
    public InfoResponse(String nodeName, String clusterName, byte versionMajor, byte versionMinor, String version,
            String versionHash, String versionDate) {
        super(nodeName, clusterName, versionMajor, versionMinor, version, versionHash, versionDate);
    }

    /** Deserialization constructor used by the protocol layer. */
    InfoResponse(Request request, DataInput in) throws IOException {
        super(request, in);
    }

    @Override
    public RequestType requestType() {
        return RequestType.INFO;
    }

    @Override
    public ResponseType responseType() {
        return ResponseType.INFO;
    }
}

View File

@ -1,77 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.JDBCType;
import java.util.Objects;
public class MetaColumnInfo {
public final String table, name;
public final JDBCType type;
public final int size, position;
public MetaColumnInfo(String table, String name, JDBCType type, int size, int position) {
if (table == null) {
throw new IllegalArgumentException("[table] must not be null");
}
if (name == null) {
throw new IllegalArgumentException("[name] must not be null");
}
if (type == null) {
throw new IllegalArgumentException("[type] must not be null");
}
this.table = table;
this.name = name;
this.type = type;
this.size = size;
this.position = position;
}
MetaColumnInfo(DataInput in) throws IOException {
table = in.readUTF();
name = in.readUTF();
type = JDBCType.valueOf(in.readInt());
size = in.readInt();
position = in.readInt();
}
void writeTo(DataOutput out) throws IOException {
out.writeUTF(table);
out.writeUTF(name);
out.writeInt(type.getVendorTypeNumber());
out.writeInt(size);
out.writeInt(position);
}
@Override
public String toString() {
return table + "." + name
+ "<type=[" + type
+ "] size=[" + size
+ "] position=[" + position + "]>";
}
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass()) {
return false;
}
MetaColumnInfo other = (MetaColumnInfo) obj;
return table.equals(other.table)
&& name.equals(other.name)
&& type.equals(other.type)
&& size == other.size
&& position == other.position;
}
@Override
public int hashCode() {
return Objects.hash(table, name, type, size, position);
}
}

View File

@ -1,68 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput;
import java.io.IOException;
import java.util.Objects;
/**
 * Request for column metadata matching a table pattern and a column pattern.
 */
public class MetaColumnRequest extends Request {
    private final String tablePattern, columnPattern;

    public MetaColumnRequest(String tablePattern, String columnPattern) {
        // null patterns are normalized to "" which means "match everything"
        this.tablePattern = tablePattern != null ? tablePattern : "";
        this.columnPattern = columnPattern != null ? columnPattern : "";
    }

    /** Read from the wire format; patterns are read in write order. */
    MetaColumnRequest(SqlDataInput in) throws IOException {
        this(in.readUTF(), in.readUTF());
    }

    @Override
    protected void writeTo(SqlDataOutput out) throws IOException {
        out.writeUTF(tablePattern);
        out.writeUTF(columnPattern);
    }

    /** Pattern that candidate table names must match; never null. */
    public String tablePattern() {
        return tablePattern;
    }

    /** Pattern that candidate column names must match; never null. */
    public String columnPattern() {
        return columnPattern;
    }

    @Override
    protected String toStringBody() {
        return "table=[" + tablePattern + "] column=[" + columnPattern + "]";
    }

    @Override
    public RequestType requestType() {
        return RequestType.META_COLUMN;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        MetaColumnRequest other = (MetaColumnRequest) obj;
        return Objects.equals(tablePattern, other.tablePattern)
                && Objects.equals(columnPattern, other.columnPattern);
    }

    @Override
    public int hashCode() {
        return Objects.hash(tablePattern, columnPattern);
    }
}

View File

@ -1,78 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput;
import java.io.DataInput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static java.util.Collections.unmodifiableList;
import static java.util.stream.Collectors.joining;
/**
 * Response listing the columns that matched a {@link MetaColumnRequest}.
 */
public class MetaColumnResponse extends Response {
    public final List<MetaColumnInfo> columns;

    public MetaColumnResponse(List<MetaColumnInfo> columns) {
        if (columns == null) {
            throw new IllegalArgumentException("[columns] must not be null");
        }
        this.columns = columns;
    }

    /** Read from the wire format: a count followed by that many column entries. */
    public MetaColumnResponse(Request request, DataInput in) throws IOException {
        int length = in.readInt();
        List<MetaColumnInfo> read = new ArrayList<>(length);
        for (int i = 0; i < length; i++) {
            read.add(new MetaColumnInfo(in));
        }
        columns = unmodifiableList(read);
    }

    @Override
    protected void writeTo(SqlDataOutput out) throws IOException {
        out.writeInt(columns.size());
        for (int i = 0; i < columns.size(); i++) {
            columns.get(i).writeTo(out);
        }
    }

    @Override
    protected String toStringBody() {
        StringBuilder body = new StringBuilder();
        for (MetaColumnInfo column : columns) {
            if (body.length() > 0) {
                body.append(", ");
            }
            body.append(column);
        }
        return body.toString();
    }

    @Override
    public RequestType requestType() {
        return RequestType.META_COLUMN;
    }

    @Override
    public ResponseType responseType() {
        return ResponseType.META_COLUMN;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return columns.equals(((MetaColumnResponse) obj).columns);
    }

    @Override
    public int hashCode() {
        return columns.hashCode();
    }
}

View File

@ -1,58 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput;
import java.io.IOException;
/**
 * Request for the list of tables matching a pattern.
 */
public class MetaTableRequest extends Request {
    private final String pattern;

    public MetaTableRequest(String pattern) {
        // a null pattern is normalized to "" which means "match every table"
        this.pattern = pattern != null ? pattern : "";
    }

    /** Read from the wire format. */
    MetaTableRequest(SqlDataInput in) throws IOException {
        this(in.readUTF());
    }

    @Override
    public void writeTo(SqlDataOutput out) throws IOException {
        out.writeUTF(pattern);
    }

    /** Pattern that candidate table names must match; never null. */
    public String pattern() {
        return pattern;
    }

    @Override
    protected String toStringBody() {
        return pattern;
    }

    @Override
    public RequestType requestType() {
        return RequestType.META_TABLE;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return pattern.equals(((MetaTableRequest) obj).pattern);
    }

    @Override
    public int hashCode() {
        return pattern.hashCode();
    }
}

View File

@ -1,77 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput;
import java.io.DataInput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static java.util.Collections.unmodifiableList;
/**
 * Response listing the tables that matched a {@link MetaTableRequest}.
 */
public class MetaTableResponse extends Response {
    public final List<String> tables;

    public MetaTableResponse(List<String> tables) {
        if (tables == null) {
            throw new IllegalArgumentException("[tables] must not be null");
        }
        this.tables = tables;
    }

    /** Read from the wire format: a count followed by that many table names. */
    MetaTableResponse(Request request, DataInput in) throws IOException {
        int length = in.readInt();
        List<String> read = new ArrayList<>(length);
        for (int i = 0; i < length; i++) {
            read.add(in.readUTF());
        }
        tables = unmodifiableList(read);
    }

    @Override
    public void writeTo(SqlDataOutput out) throws IOException {
        out.writeInt(tables.size());
        for (int i = 0; i < tables.size(); i++) {
            out.writeUTF(tables.get(i));
        }
    }

    @Override
    protected String toStringBody() {
        return String.join(", ", tables);
    }

    @Override
    public RequestType requestType() {
        return RequestType.META_TABLE;
    }

    @Override
    public ResponseType responseType() {
        return ResponseType.META_TABLE;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return tables.equals(((MetaTableResponse) obj).tables);
    }

    @Override
    public int hashCode() {
        return tables.hashCode();
    }
}

View File

@ -1,178 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput;
import java.io.IOException;
import java.lang.reflect.Array;
import java.sql.JDBCType;
import java.util.List;
import java.util.Objects;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ProtoUtils.classOf;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ProtoUtils.readValue;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ProtoUtils.writeValue;
/**
 * Stores a page of data in a columnar format.
 */
public class Page implements Payload {
    private final List<ColumnInfo> columnInfo;
    /**
     * The actual data, one array per column.
     */
    private final Object[][] data;
    /**
     * The number of rows in this page. The {@link #data} arrays may be larger
     * but data after the end of the arrays is garbage.
     */
    private int rows;
    /** Number of rows the {@link #data} arrays currently have room for. */
    private int maxRows;

    /**
     * Build empty, call {@link #readFrom(SqlDataInput)} after to fill it.
     */
    Page(List<ColumnInfo> columnInfo) {
        this.columnInfo = columnInfo;
        data = new Object[columnInfo.size()][];
    }

    /**
     * Build with a particular set of rows. Use this for testing.
     *
     * @throws IllegalArgumentException if any row's width doesn't match {@code columnInfo}
     */
    Page(List<ColumnInfo> columnInfo, Object[][] rows) {
        this(columnInfo);
        makeRoomFor(rows.length);
        this.rows = rows.length;
        for (int row = 0; row < rows.length; row++) {
            if (columnInfo.size() != rows[row].length) {
                // BUG FIX: report the offending row's actual column count; this
                // used to print rows.length (the number of rows) instead.
                throw new IllegalArgumentException("Column count mismatch. Got [" + columnInfo.size()
                        + "] ColumnInfos but [" + rows[row].length + "] columns on the [" + row + "] row.");
            }
        }
        // transpose the row-major input into the column-major storage
        for (int column = 0; column < columnInfo.size(); column++) {
            for (int row = 0; row < rows.length; row++) {
                data[column][row] = rows[row][column];
            }
        }
    }

    /** Number of valid rows currently held by this page. */
    public int rows() {
        return rows;
    }

    public List<ColumnInfo> columnInfo() {
        return columnInfo;
    }

    /** The backing array for one column; entries past {@link #rows()} are garbage. */
    Object[] column(int index) {
        if (index < 0 || index >= data.length) {
            // NB: exception is caught higher up in the JDBC driver
            throw new IllegalArgumentException("Invalid column [" + index + "] (max is [" + (data.length - 1) + "])");
        }
        return data[index];
    }

    /** A single cell, addressed by row then column. */
    public Object entry(int row, int column) {
        if (row < 0 || row >= rows) {
            // NB: exception is caught higher up in the JDBC driver
            throw new IllegalArgumentException("Invalid row [" + row + "] (max is [" + (rows -1) + "])");
        }
        return column(column)[row];
    }

    @Override
    public void readFrom(SqlDataInput in) throws IOException {
        int rows = in.readInt();
        // this.rows may be less than the number of rows we have space for
        if (rows > maxRows) {
            makeRoomFor(rows);
        }
        this.rows = rows;
        for (int row = 0; row < rows; row++) {
            for (int column = 0; column < columnInfo.size(); column++) {
                data[column][row] = readValue(in, columnInfo.get(column).type);
            }
        }
    }

    @Override
    public void writeTo(SqlDataOutput out) throws IOException {
        int rows = rows();
        out.writeInt(rows);
        for (int row = 0; row < rows; row++) {
            for (int column = 0; column < columnInfo.size(); column++) {
                JDBCType columnType = columnInfo.get(column).type;
                writeValue(out, entry(row, column), columnType);
            }
        }
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder();
        for (int row = 0; row < rows(); row++) {
            for (int column = 0; column < columnInfo.size(); column++) {
                if (column > 0) {
                    b.append(", ");
                }
                b.append(entry(row, column));
            }
            b.append('\n');
        }
        return b.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj instanceof Page == false) {
            return false;
        }
        Page other = (Page) obj;
        if (rows != other.rows) {
            return false;
        }
        if (false == columnInfo.equals(other.columnInfo)) {
            return false;
        }
        // compare cell by cell so garbage past rows() is ignored
        for (int row = 0; row < rows(); row++) {
            for (int column = 0; column < columnInfo.size(); column++) {
                if (false == Objects.equals(entry(row, column), other.entry(row, column))) {
                    return false;
                }
            }
        }
        return true;
    }

    @Override
    public int hashCode() {
        int result = Objects.hash(rows(), columnInfo.size());
        for (int row = 0; row < rows(); row++) {
            for (int column = 0; column < columnInfo.size(); column++) {
                Object entry = entry(row, column);
                result = result * 31 + (entry == null ? 0 : entry.hashCode());
            }
        }
        return result;
    }

    /** (Re)allocate the column arrays with room for {@code rows} rows. */
    private void makeRoomFor(int rows) {
        maxRows = rows;
        for (int i = 0; i < columnInfo.size(); i++) {
            Class<?> type = classOf(columnInfo.get(i).type);
            data[i] = (Object[]) Array.newInstance(type, rows);
        }
    }
}

View File

@ -1,18 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput;
import java.io.IOException;
/**
 * A chunk of data that can be moved across the SQL wire protocol.
 */
public interface Payload {
    /** Fill this payload by reading from the wire. */
    void readFrom(SqlDataInput in) throws IOException;
    /** Write this payload to the wire. */
    void writeTo(SqlDataOutput out) throws IOException;
}

View File

@ -1,102 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * Binary protocol for the JDBC. All backwards compatibility is done using the
 * version number sent in the header.
 */
public final class Proto extends AbstractProto {
    public static final Proto INSTANCE = new Proto();

    private Proto() {}

    @Override
    protected RequestType readRequestType(DataInput in) throws IOException {
        return RequestType.readFrom(in);
    }

    @Override
    protected ResponseType readResponseType(DataInput in) throws IOException {
        return ResponseType.readFrom(in);
    }

    /**
     * Request types, serialized by ordinal. Only ever append new values so old
     * clients keep decoding correctly.
     */
    public enum RequestType implements AbstractProto.RequestType {
        INFO(InfoRequest::new),
        META_TABLE(MetaTableRequest::new),
        META_COLUMN(MetaColumnRequest::new),
        QUERY_INIT(QueryInitRequest::new),
        QUERY_PAGE(QueryPageRequest::new),
        QUERY_CLOSE(QueryCloseRequest::new)
        ;

        private final RequestReader reader;

        RequestType(RequestReader reader) {
            this.reader = reader;
        }

        static RequestType readFrom(DataInput in) throws IOException {
            byte b = in.readByte();
            try {
                return values()[b];
            } catch (ArrayIndexOutOfBoundsException e) {
                // BUG FIX: this reads a *request* type; the message used to say "response"
                throw new IllegalArgumentException("Unknown request type [" + b + "]", e);
            }
        }

        @Override
        public void writeTo(DataOutput out) throws IOException {
            out.writeByte(ordinal());
        }

        @Override
        public RequestReader reader() {
            return reader;
        }
    }

    /**
     * Response types, serialized by ordinal. Only ever append new values so old
     * clients keep decoding correctly.
     */
    public enum ResponseType implements AbstractProto.ResponseType {
        INFO(InfoResponse::new),
        META_TABLE(MetaTableResponse::new),
        META_COLUMN(MetaColumnResponse::new),
        QUERY_INIT(QueryInitResponse::new),
        QUERY_PAGE(QueryPageResponse::new),
        QUERY_CLOSE(QueryCloseResponse::new)
        ;

        private final ResponseReader reader;

        ResponseType(ResponseReader reader) {
            this.reader = reader;
        }

        static ResponseType readFrom(DataInput in) throws IOException {
            byte b = in.readByte();
            try {
                return values()[b];
            } catch (ArrayIndexOutOfBoundsException e) {
                throw new IllegalArgumentException("Unknown response type [" + b + "]", e);
            }
        }

        @Override
        public void writeTo(DataOutput out) throws IOException {
            out.writeByte(ordinal());
        }

        @Override
        public ResponseReader reader() {
            return reader;
        }
    }
}

View File

@ -1,184 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.JDBCType;
/**
 * Helpers for reading and writing single JDBC column values in the binary
 * wire format. Type mappings follow the JDBC spec, appendix B.
 */
public class ProtoUtils {

    /**
     * Read one possibly-null value of the given JDBC type. Every value is
     * preceded by a single "presence" byte: 0 means the value is null.
     *
     * @throws IOException if the type has no binary representation here
     */
    public static Object readValue(DataInput in, JDBCType type) throws IOException {
        Object result;
        byte hasNext = in.readByte();
        if (hasNext == 0) { // TODO feels like a bitmask at the start of the row would be better.
            return null;
        }
        // TODO we ought to make sure we use all of these
        switch (type) {
            case NULL:
                // used to move the stream forward
                // TODO why serialize NULL types at all?
                in.readBoolean();
                return null;
            case BIT:
            case BOOLEAN:
                result = Boolean.valueOf(in.readBoolean());
                break;
            case TINYINT:
                result = Byte.valueOf(in.readByte());
                break;
            case SMALLINT:
                result = Short.valueOf(in.readShort());
                break;
            case INTEGER:
                result = Integer.valueOf(in.readInt());
                break;
            case BIGINT:
                result = Long.valueOf(in.readLong());
                break;
            // FLOAT is serialized as a double, matching writeValue below
            case FLOAT:
            case DOUBLE:
                result = Double.valueOf(in.readDouble());
                break;
            case REAL:
                result = Float.valueOf(in.readFloat());
                break;
            case BINARY:
            case VARBINARY:
            case LONGVARBINARY:
                int size = in.readInt();
                byte[] ar = new byte[size];
                in.readFully(ar, 0, size);
                result = ar;
                break;
            case CHAR:
            case VARCHAR:
            case LONGVARCHAR:
                result = in.readUTF();
                break;
            // NB: date/time is kept in its raw form since the JdbcDriver has to do calendar/timezone
            // conversion anyway and thus the long value is relevant
            case TIMESTAMP:
                result = in.readLong();
                break;
            default:
                throw new IOException("Don't know how to read type [" + type + "]");
        }
        return result;
    }

    /**
     * Write one possibly-null value of the given JDBC type, preceded by the
     * presence byte that {@link #readValue} expects.
     *
     * @throws IOException if the type has no binary representation here
     */
    public static void writeValue(DataOutput out, Object o, JDBCType type) throws IOException {
        if (o == null) {
            out.writeByte(0);
            return;
        }
        out.writeByte(1);
        switch (type) {
            // TODO: we ought to make sure we use all of these
            case NULL:
                // used to move the stream forward
                out.writeBoolean(false);
                return;
            case BIT:
            case BOOLEAN:
                out.writeBoolean((Boolean) o);
                return;
            case TINYINT:
                out.writeByte(((Number) o).byteValue());
                return;
            case SMALLINT:
                out.writeShort(((Number) o).shortValue());
                return;
            case INTEGER:
                out.writeInt(((Number) o).intValue());
                return;
            case BIGINT:
                out.writeLong(((Number) o).longValue());
                return;
            case FLOAT:
            case DOUBLE:
                out.writeDouble(((Number) o).doubleValue());
                return;
            case REAL:
                out.writeFloat(((Number) o).floatValue());
                return;
            case BINARY:
            case VARBINARY:
            case LONGVARBINARY:
                // Simplified: o is known non-null here, and an empty array writes
                // the same bytes (length 0, no payload) as the old special case did.
                byte[] a = (byte[]) o;
                out.writeInt(a.length);
                out.write(a);
                return;
            case CHAR:
            case VARCHAR:
            case LONGVARCHAR:
                out.writeUTF(o.toString());
                return;
            case TIMESTAMP:
                out.writeLong(((Number) o).longValue());
                return;
            default:
                throw new IOException("Don't know how to write type [" + type + "]");
        }
    }

    /**
     * The type of the array used to store columns of this type.
     */
    // NB: JDBC requires the use of Objects not primitive
    // (in fact primitives are never used through-out the API)
    public static Class<?> classOf(JDBCType jdbcType) {
        switch (jdbcType) {
            case NUMERIC:
            case DECIMAL:
                return BigDecimal.class;
            case BOOLEAN:
            case BIT:
                return Boolean.class;
            case TINYINT:
                return Byte.class;
            case SMALLINT:
                return Short.class;
            case INTEGER:
                return Integer.class;
            case BIGINT:
                return Long.class;
            case REAL:
                return Float.class;
            case FLOAT:
            case DOUBLE:
                return Double.class;
            case BINARY:
            case VARBINARY:
            case LONGVARBINARY:
                return byte[].class;
            case CHAR:
            case VARCHAR:
            case LONGVARCHAR:
                return String.class;
            // date/time travel as raw epoch longs; see readValue
            case DATE:
            case TIME:
            case TIMESTAMP:
                return Long.class;
            case BLOB:
                return Blob.class;
            case CLOB:
                return Clob.class;
            default:
                throw new IllegalArgumentException("Unsupported JDBC type [" + jdbcType + "]");
        }
    }
}

View File

@ -1,32 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryCloseRequest;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryPageRequest;
import org.elasticsearch.xpack.sql.protocol.shared.Nullable;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput;
import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo;
import java.io.IOException;
/**
 * Request to close a server-side cursor that is no longer needed.
 */
public class QueryCloseRequest extends AbstractQueryCloseRequest {
    public QueryCloseRequest(String cursor) {
        super(cursor);
    }

    /** Deserialization constructor used by the protocol layer. */
    QueryCloseRequest(SqlDataInput in) throws IOException {
        super(in);
    }

    @Override
    public RequestType requestType() {
        return RequestType.QUERY_CLOSE;
    }
}

View File

@ -1,35 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractInfoResponse;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryCloseResponse;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import java.io.DataInput;
import java.io.IOException;
/**
 * Response to a {@link QueryCloseRequest}, reporting whether the cursor was
 * actually released on the server.
 */
public class QueryCloseResponse extends AbstractQueryCloseResponse {
    public QueryCloseResponse(boolean succeeded) {
        super(succeeded);
    }

    /** Deserialization constructor used by the protocol layer. */
    QueryCloseResponse(Request request, DataInput in) throws IOException {
        super(request, in);
    }

    @Override
    public RequestType requestType() {
        return RequestType.QUERY_CLOSE;
    }

    @Override
    public ResponseType responseType() {
        return ResponseType.QUERY_CLOSE;
    }
}

View File

@ -1,29 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryInitRequest;
import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput;
import java.io.IOException;
import java.util.TimeZone;
/**
 * Request to start executing a SQL query, together with fetch-size, time zone
 * and timeout settings.
 */
public class QueryInitRequest extends AbstractQueryInitRequest {
    public QueryInitRequest(String query, int fetchSize, TimeZone timeZone, TimeoutInfo timeout) {
        super(query, fetchSize, timeZone, timeout);
    }

    /** Deserialization constructor used by the protocol layer. */
    QueryInitRequest(SqlDataInput in) throws IOException {
        super(in);
    }

    @Override
    public RequestType requestType() {
        return RequestType.QUERY_INIT;
    }
}

View File

@ -1,88 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryResponse;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import static java.util.Collections.unmodifiableList;
/**
 * Response to the initial query request: carries the column metadata of the
 * result set followed by the first page of extracted values.
 */
public class QueryInitResponse extends AbstractQueryResponse {
    public final List<ColumnInfo> columns;
    public final Payload data;
    public QueryInitResponse(long tookNanos, String cursor, List<ColumnInfo> columns, Payload data) {
        super(tookNanos, cursor);
        this.columns = columns;
        this.data = data;
    }
    // Deserializing constructor: reads the column list, then the data page.
    QueryInitResponse(Request request, SqlDataInput in) throws IOException {
        super(request, in);
        int columnCount = in.readInt();
        List<ColumnInfo> read = new ArrayList<>(columnCount);
        while (read.size() < columnCount) {
            read.add(new ColumnInfo(in));
        }
        this.columns = unmodifiableList(read);
        // TODO - Page is a client class, it shouldn't leak here
        Page page = new Page(read);
        page.readFrom(in);
        this.data = page;
    }
    @Override
    public void writeTo(SqlDataOutput out) throws IOException {
        super.writeTo(out);
        // Column metadata first, then the payload, mirroring the read side.
        out.writeInt(columns.size());
        for (ColumnInfo column : columns) {
            column.writeTo(out);
        }
        data.writeTo(out);
    }
    @Override
    protected String toStringBody() {
        StringBuilder description = new StringBuilder(super.toStringBody());
        description.append(" columns=").append(columns);
        description.append(" data=[\n").append(data).append("]");
        return description.toString();
    }
    @Override
    public RequestType requestType() {
        return RequestType.QUERY_INIT;
    }
    @Override
    public ResponseType responseType() {
        return ResponseType.QUERY_INIT;
    }
    @Override
    public boolean equals(Object obj) {
        if (super.equals(obj) == false) {
            return false;
        }
        QueryInitResponse other = (QueryInitResponse) obj;
        return Objects.equals(columns, other.columns)
            && Objects.equals(data, other.data);
    }
    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), columns, data);
    }
}

View File

@ -1,39 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryPageRequest;
import org.elasticsearch.xpack.sql.protocol.shared.Nullable;
import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput;
import java.io.IOException;
/**
 * Binary-protocol request for the next page of results identified by a cursor.
 * The {@code data} field is a client-side scratch page reused between requests;
 * it is never serialized (hence {@code transient}) and is null on the server.
 */
public class QueryPageRequest extends AbstractQueryPageRequest {
    private final transient Payload data;
    public QueryPageRequest(String cursor, TimeoutInfo timeout, @Nullable Payload data) {
        super(cursor, timeout);
        this.data = data;
    }
    // Deserializing constructor used on the server side.
    QueryPageRequest(SqlDataInput in) throws IOException {
        super(in);
        this.data = null; // data isn't used on the server side
    }
    public Payload data() {
        return data;
    }
    @Override
    public RequestType requestType() {
        return RequestType.QUERY_PAGE;
    }
    // not overriding hashCode and equals because we intentionally ignore the data field
}

View File

@ -1,67 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryResponse;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput;
import java.io.IOException;
import java.util.Objects;
/**
 * Response to a page request: carries the next page of results. On the read
 * path the payload is decoded into the scratch page supplied by the matching
 * {@link QueryPageRequest} so it can be reused between pages.
 */
public class QueryPageResponse extends AbstractQueryResponse {
    private final Payload data;
    public QueryPageResponse(long tookNanos, String cursor, Payload data) {
        super(tookNanos, cursor);
        this.data = data;
    }
    // Deserializing constructor: reads the page into the request's reusable payload.
    QueryPageResponse(Request request, SqlDataInput in) throws IOException {
        super(request, in);
        QueryPageRequest queryPageRequest = (QueryPageRequest) request;
        data = queryPageRequest.data();
        queryPageRequest.data().readFrom(in);
    }
    @Override
    public void writeTo(SqlDataOutput out) throws IOException {
        super.writeTo(out);
        data.writeTo(out);
    }
    @Override
    protected String toStringBody() {
        return super.toStringBody() + " data=[\n" + data + "]";
    }
    @Override
    public RequestType requestType() {
        return RequestType.QUERY_PAGE;
    }
    @Override
    public ResponseType responseType() {
        return ResponseType.QUERY_PAGE;
    }
    @Override
    public boolean equals(Object obj) {
        if (false == super.equals(obj)) {
            return false;
        }
        QueryPageResponse other = (QueryPageResponse) obj;
        // Objects.equals instead of data.equals: data may legitimately be null.
        return Objects.equals(data, other.data);
    }
    @Override
    public int hashCode() {
        // Include the superclass state (tookNanos, cursor) so hashCode stays
        // consistent with equals(), matching QueryInitResponse. The original
        // hashed only 'data', which weakened hashing for equal super-fields.
        return Objects.hash(super.hashCode(), data);
    }
}

View File

@ -1,28 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
/**
 * Serialization round-trip and {@code toString} tests for {@code InfoRequest}.
 */
public class InfoRequestTests extends ESTestCase {
    // Builds a request with random jvm/os identification strings.
    static InfoRequest randomInfoRequest() {
        return new InfoRequest(randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5),
                randomAlphaOfLength(5), randomAlphaOfLength(5));
    }
    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomInfoRequest());
    }
    public void testToString() {
        assertEquals("InfoRequest<jvm=[version=[1.8.0_131] vendor=[testvendor] classPath=[testcp]] os=[name=[Mac OS X] version=[10.12.5]]>",
                new InfoRequest("1.8.0_131", "testvendor", "testcp", "Mac OS X", "10.12.5").toString());
    }
}

View File

@ -1,28 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
/**
 * Serialization round-trip and {@code toString} tests for {@code InfoResponse}.
 */
public class InfoResponseTests extends ESTestCase {
    // Builds a response with random node/cluster/version identification.
    static InfoResponse randomInfoResponse() {
        return new InfoResponse(randomAlphaOfLength(5), randomAlphaOfLength(5), randomByte(), randomByte(),
                randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5));
    }
    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(InfoRequestTests::randomInfoRequest, randomInfoResponse());
    }
    public void testToString() {
        assertEquals("InfoResponse<node=[adsf] cluster=[test_cluster] version=[6.0.0]/[major=[6] minor=[0] hash=[feed] date=[date]]>",
                new InfoResponse("adsf", "test_cluster", (byte) 6, (byte) 0, "6.0.0", "feed", "date").toString());
    }
}

View File

@ -1,36 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo;
import org.elasticsearch.xpack.sql.test.RoundTripTestUtils;
import java.io.IOException;
import java.util.function.Supplier;
import static org.elasticsearch.test.ESTestCase.randomNonNegativeLong;
/**
 * Helpers for asserting that JDBC protocol requests and responses survive a
 * write/read round trip at the current protocol version.
 */
public final class JdbcRoundTripTestUtils {
    private JdbcRoundTripTestUtils() {
        // Just static utilities
    }
    // Round-trips a request through the Proto singleton's request codec.
    static void assertRoundTripCurrentVersion(Request request) throws IOException {
        RoundTripTestUtils.assertRoundTrip(request, Proto.INSTANCE::writeRequest, Proto.INSTANCE::readRequest);
    }
    // Round-trips a response; the matching request is needed because response
    // decoding is dispatched on the request type.
    static void assertRoundTripCurrentVersion(Supplier<Request> request, Response response) throws IOException {
        RoundTripTestUtils.assertRoundTrip(response,
                (r, out) -> Proto.INSTANCE.writeResponse(r, Proto.CURRENT_VERSION, out),
                in -> Proto.INSTANCE.readResponse(request.get(), in));
    }
    // Random but non-negative timeouts, as required by TimeoutInfo.
    static TimeoutInfo randomTimeoutInfo() {
        return new TimeoutInfo(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong());
    }
}

View File

@ -1,29 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.sql.JDBCType;
import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.assertRoundTrip;
/**
 * Serialization round-trip and {@code toString} tests for {@code MetaColumnInfo}.
 */
public class MetaColumnInfoTests extends ESTestCase {
    // Size and position are forced >= 1; 0 appears to be reserved/invalid.
    static MetaColumnInfo randomMetaColumnInfo() {
        return new MetaColumnInfo(randomAlphaOfLength(5), randomAlphaOfLength(5), randomFrom(JDBCType.values()),
                between(1, Integer.MAX_VALUE), between(1, Integer.MAX_VALUE));
    }
    public void testRoundTrip() throws IOException {
        assertRoundTrip(randomMetaColumnInfo(), MetaColumnInfo::writeTo, MetaColumnInfo::new);
    }
    public void testToString() {
        assertEquals("test.doc.col<type=[VARCHAR] size=[100] position=[1]>",
                new MetaColumnInfo("test.doc", "col", JDBCType.VARCHAR, 100, 1).toString());
    }
}

View File

@ -1,26 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
/**
 * Serialization round-trip and {@code toString} tests for {@code MetaColumnRequest}.
 */
public class MetaColumnRequestTests extends ESTestCase {
    public static MetaColumnRequest randomMetaColumnRequest() {
        return new MetaColumnRequest(randomAlphaOfLength(10), randomAlphaOfLength(10));
    }
    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomMetaColumnRequest());
    }
    public void testToString() {
        // '%' is the SQL LIKE wildcard used by table/column patterns.
        assertEquals("MetaColumnRequest<table=[test.do%] column=[d%]>", new MetaColumnRequest("test.do%", "d%").toString());
    }
}

View File

@ -1,44 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.sql.JDBCType;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static java.util.Collections.emptyList;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnInfoTests.randomMetaColumnInfo;
/**
 * Serialization round-trip and {@code toString} tests for {@code MetaColumnResponse}.
 */
public class MetaColumnResponseTests extends ESTestCase {
    // Builds a response with 0-10 random column descriptions.
    static MetaColumnResponse randomMetaColumnResponse() {
        int count = between(0, 10);
        List<MetaColumnInfo> infos = new ArrayList<>(count);
        while (infos.size() < count) {
            infos.add(randomMetaColumnInfo());
        }
        return new MetaColumnResponse(infos);
    }
    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(MetaColumnRequestTests::randomMetaColumnRequest, randomMetaColumnResponse());
    }
    public void testToString() {
        assertEquals("MetaColumnResponse<>", new MetaColumnResponse(emptyList()).toString());
        assertEquals("MetaColumnResponse<a.doc.col1<type=[VARCHAR] size=[100] position=[1]>, "
                + "a.doc.col2<type=[INTEGER] size=[16] position=[2]>, "
                + "b.doc.col1<type=[VARCHAR] size=[100] position=[1]>>", new MetaColumnResponse(Arrays.asList(
                new MetaColumnInfo("a.doc", "col1", JDBCType.VARCHAR, 100, 1),
                new MetaColumnInfo("a.doc", "col2", JDBCType.INTEGER, 16, 2),
                new MetaColumnInfo("b.doc", "col1", JDBCType.VARCHAR, 100, 1))).toString());
    }
}

View File

@ -1,26 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
/**
 * Serialization round-trip and {@code toString} tests for {@code MetaTableRequest}.
 */
public class MetaTableRequestTests extends ESTestCase {
    public static MetaTableRequest randomMetaTableRequest() {
        return new MetaTableRequest(randomAlphaOfLength(10));
    }
    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomMetaTableRequest());
    }
    public void testToString() {
        // '%' is the SQL LIKE wildcard used by table patterns.
        assertEquals("MetaTableRequest<test.do%>", new MetaTableRequest("test.do%").toString());
    }
}

View File

@ -1,36 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static java.util.Collections.emptyList;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
/**
 * Serialization round-trip and {@code toString} tests for {@code MetaTableResponse}.
 */
public class MetaTableResponseTests extends ESTestCase {
    // Builds a response with 0-10 random table names.
    static MetaTableResponse randomMetaTableResponse() {
        int count = between(0, 10);
        List<String> names = new ArrayList<>(count);
        while (names.size() < count) {
            names.add(randomAlphaOfLength(5));
        }
        return new MetaTableResponse(names);
    }
    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(MetaTableRequestTests::randomMetaTableRequest, randomMetaTableResponse());
    }
    public void testToString() {
        assertEquals("MetaTableResponse<>", new MetaTableResponse(emptyList()).toString());
        assertEquals("MetaTableResponse<a.doc, b.doc, c.doc>", new MetaTableResponse(Arrays.asList("a.doc", "b.doc", "c.doc")).toString());
    }
}

View File

@ -1,108 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Supplier;
import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.doubleInfo;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.intInfo;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.randomValueFor;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.varcharInfo;
import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.assertRoundTrip;
import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.roundTrip;
/**
 * Tests for {@code Page}: random generation, serialization round trips
 * (including the buffer-reuse path), and {@code toString} rendering.
 */
public class PageTests extends ESTestCase {
    // Builds a page with 0-10 columns of random varchar/int/double type.
    static Page randomPage() {
        int columns = between(0, 10);
        List<ColumnInfo> columnInfo = new ArrayList<>();
        for (int c = 0; c < columns; c++) {
            @SuppressWarnings("unchecked")
            Supplier<ColumnInfo> info = randomFrom(
                () -> varcharInfo(randomAlphaOfLength(5)),
                () -> intInfo(randomAlphaOfLength(5)),
                () -> doubleInfo(randomAlphaOfLength(5)));
            columnInfo.add(info.get());
        }
        return randomPageContents(columnInfo);
    }
    // Fills 0-10 rows with random values matching the given column types.
    static Page randomPageContents(List<ColumnInfo> columnInfo) {
        Object[][] rows = new Object[between(0, 10)][];
        for (int r = 0; r < rows.length; r++) {
            rows[r] = new Object[columnInfo.size()];
            for (int c = 0; c < columnInfo.size(); c++) {
                rows[r][c] = randomValueFor(columnInfo.get(c));
            }
        }
        return new Page(columnInfo, rows);
    }
    public void testRoundTripNoReuse() throws IOException {
        Page example = randomPage();
        assertRoundTrip(example, writeTo(AbstractProto.CURRENT_VERSION), in -> {
            // Deserialize into a fresh page each time.
            Page page = new Page(example.columnInfo());
            page.readFrom(new SqlDataInput(in, AbstractProto.CURRENT_VERSION));
            return page;
        });
    }
    public void testRoundTripReuse() throws IOException {
        Page example = randomPage();
        Page target = new Page(example.columnInfo());
        // readFrom mutates the shared 'target' page in place and returns null,
        // so equality is checked against 'target' after each round trip.
        CheckedFunction<DataInput, Page, IOException> readFrom = in -> {
            target.readFrom(new SqlDataInput(in, AbstractProto.CURRENT_VERSION));
            return null;
        };
        roundTrip(example, writeTo(AbstractProto.CURRENT_VERSION), readFrom);
        assertEquals(example, target);
        // Second round trip reuses the same target to prove in-place reuse works.
        example = randomPageContents(example.columnInfo());
        roundTrip(example, writeTo(AbstractProto.CURRENT_VERSION), readFrom);
        assertEquals(example, target);
    }
    public void testToString() {
        assertEquals("\n\n",
            new Page(emptyList(), new Object[][] {
                new Object[] {},
                new Object[] {},
            }).toString());
        assertEquals("test\n",
            new Page(singletonList(varcharInfo("a")), new Object[][] {
                new Object[] {"test"}
            }).toString());
        assertEquals("test, 1\n",
            new Page(Arrays.asList(varcharInfo("a"), intInfo("b")), new Object[][] {
                new Object[] {"test", 1}
            }).toString());
        assertEquals("test, 1\nbar, 7\n",
            new Page(Arrays.asList(varcharInfo("a"), intInfo("b")), new Object[][] {
                new Object[] {"test", 1},
                new Object[] {"bar", 7}
            }).toString());
    }
    // NOTE(review): the lambda parameter 'in' is actually the DataOutput sink;
    // consider renaming to 'out' for clarity.
    private static CheckedBiConsumer<Page, DataOutput, IOException> writeTo(int version) {
        return (page, in) ->
            page.writeTo(new SqlDataOutput(in, version));
    }
}

View File

@ -1,29 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo;
import java.io.IOException;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.randomTimeoutInfo;
/**
 * Serialization round-trip and {@code toString} tests for {@code QueryCloseRequest}.
 */
public class QueryCloseRequestTests extends ESTestCase {
    static QueryCloseRequest randomQueryCloseRequest() {
        return new QueryCloseRequest(randomAlphaOfLength(10));
    }
    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomQueryCloseRequest());
    }
    public void testToString() {
        assertEquals("QueryCloseRequest<123>", new QueryCloseRequest("123").toString());
    }
}

View File

@ -1,26 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
/**
 * Serialization round-trip and {@code toString} tests for {@code QueryCloseResponse}.
 */
public class QueryCloseResponseTests extends ESTestCase {
    static QueryCloseResponse randomQueryCloseResponse() {
        return new QueryCloseResponse(randomBoolean());
    }
    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(QueryCloseRequestTests::randomQueryCloseRequest, randomQueryCloseResponse());
    }
    public void testToString() {
        assertEquals("QueryCloseResponse<true>", new QueryCloseResponse(true).toString());
    }
}

View File

@ -1,33 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo;
import java.io.IOException;
import java.util.TimeZone;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.randomTimeoutInfo;
/**
 * Serialization round-trip and {@code toString} tests for {@code QueryInitRequest}.
 */
public class QueryInitRequestTests extends ESTestCase {
    static QueryInitRequest randomQueryInitRequest() {
        return new QueryInitRequest(randomAlphaOfLength(5), between(0, Integer.MAX_VALUE), randomTimeZone(random()), randomTimeoutInfo());
    }
    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomQueryInitRequest());
    }
    public void testToString() {
        // The time zone is omitted from toString when it is UTC, shown otherwise.
        assertEquals("QueryInitRequest<query=[SELECT * FROM test.doc]>",
                new QueryInitRequest("SELECT * FROM test.doc", 10, TimeZone.getTimeZone("UTC"), new TimeoutInfo(1, 1, 1)).toString());
        assertEquals("QueryInitRequest<query=[SELECT * FROM test.doc] timeZone=[GMT-05:00]>",
                new QueryInitRequest("SELECT * FROM test.doc", 10, TimeZone.getTimeZone("GMT-5"), new TimeoutInfo(1, 1, 1)).toString());
    }
}

View File

@ -1,37 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import static java.util.Collections.singletonList;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.varcharInfo;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.PageTests.randomPage;
/**
 * Serialization round-trip and {@code toString} tests for {@code QueryInitResponse}.
 */
public class QueryInitResponseTests extends ESTestCase {
    static QueryInitResponse randomQueryInitResponse() {
        String cursor = randomAlphaOfLength(10);
        Page page = randomPage();
        return new QueryInitResponse(randomNonNegativeLong(), cursor, page.columnInfo(), page);
    }
    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(QueryInitRequestTests::randomQueryInitRequest, randomQueryInitResponse());
    }
    public void testToString() {
        Page page = new Page(singletonList(varcharInfo("a")), new Object[][] {
            new Object[] {"test"},
            new Object[] {"string"},
        });
        assertEquals("QueryInitResponse<tookNanos=[123] cursor=[0120] columns=[a<type=[VARCHAR]>] data=["
                + "\ntest\nstring\n]>",
                new QueryInitResponse(123, "0120", page.columnInfo(), page).toString());
    }
}

View File

@ -1,29 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo;
import java.io.IOException;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.randomTimeoutInfo;
/**
 * Serialization round-trip and {@code toString} tests for {@code QueryPageRequest}.
 */
public class QueryPageRequestTests extends ESTestCase {
    // 'page' may be null: the reusable data page is client-only and never serialized.
    static QueryPageRequest randomQueryPageRequest(Page page) {
        String cursor = randomAlphaOfLength(10);
        return new QueryPageRequest(cursor, randomTimeoutInfo(), page);
    }
    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomQueryPageRequest(null));
    }
    public void testToString() {
        assertEquals("QueryPageRequest<0320>", new QueryPageRequest("0320", new TimeoutInfo(1, 1, 1), null).toString());
    }
}

View File

@ -1,36 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import static java.util.Collections.singletonList;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.varcharInfo;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.PageTests.randomPage;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageRequestTests.randomQueryPageRequest;
/**
 * Serialization round-trip and {@code toString} tests for {@code QueryPageResponse}.
 */
public class QueryPageResponseTests extends ESTestCase {
    static QueryPageResponse randomQueryPageResponse(Page page) {
        String cursor = randomAlphaOfLength(10);
        return new QueryPageResponse(randomNonNegativeLong(), cursor, page);
    }
    public void testRoundTrip() throws IOException {
        // The request supplies an empty page with matching columns so the
        // response can be decoded into it (buffer-reuse path).
        Page page = randomPage();
        assertRoundTripCurrentVersion(() -> randomQueryPageRequest(new Page(page.columnInfo())), randomQueryPageResponse(page));
    }
    public void testToString() {
        Page results = new Page(singletonList(varcharInfo("a")), new Object[][] {
            new Object[] {"test"}
        });
        assertEquals("QueryPageResponse<tookNanos=[123] cursor=[0810] data=[\ntest\n]>",
                new QueryPageResponse(123, "0810", results).toString());
    }
}

View File

@ -1,9 +1,3 @@
plugins {
id 'com.github.johnrengelman.shadow' version '2.0.2'
}
import org.elasticsearch.gradle.test.RunTask
description = 'JDBC driver for Elasticsearch'
forbiddenApisMain {
@ -12,12 +6,8 @@ forbiddenApisMain {
}
dependencies {
compile (project(':x-pack-elasticsearch:sql:shared-client')) {
// TODO: For now remove extra dependencies, we will add them back when we migrate JDBC to REST
transitive = false
}
compile project(':x-pack-elasticsearch:sql:jdbc-proto')
compile project(':x-pack-elasticsearch:sql:shared-proto')
compile project(':x-pack-elasticsearch:sql:shared-client')
compile project(':x-pack-elasticsearch:sql:rest-proto')
runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
/* We want to limit these dependencies so we don't have a huge jar.
* Since we shadow these dependencies we don't have to be super careful
@ -26,44 +16,15 @@ dependencies {
}
dependencyLicenses {
mapping from: /jdbc-proto.*/, to: 'elasticsearch'
mapping from: /shared-client.*/, to: 'elasticsearch'
mapping from: /shared-proto.*/, to: 'elasticsearch'
mapping from: /jackson-.*/, to: 'jackson'
mapping from: /rest-proto.*/, to: 'elasticsearch'
mapping from: /lucene-.*/, to: 'lucene'
mapping from: /elasticsearch-core.*/, to: 'elasticsearch'
ignoreSha 'rest-proto'
ignoreSha 'jdbc-proto'
ignoreSha 'shared-client'
ignoreSha 'shared-proto'
ignoreSha 'elasticsearch'
ignoreSha 'elasticsearch-core'
}
/* Disable the jar task configured by the java plugin. We're not going to
* distribute an unshaded jar so there is no need making one. */
jar {
enabled = false
}
configurations.archives.artifacts.removeAll { it.archiveTask.is jar }
/* Move the shaded jar to the empty classifier because it is the only one
* we're shipping. */
shadowJar {
classifier = null
// We only need to relocate jackson
relocate 'com.fasterxml.jackson', 'org.elasticsearch.xpack.sql.jdbc.shadow.jacksonp'
manifest {
inheritFrom jar.manifest
}
}
assemble.dependsOn shadowJar
artifacts {
archives shadowJar
}
// And for better realism let's use the shaded jar for testing
test {
classpath -= compileJava.outputs.files
classpath -= configurations.compile
classpath -= configurations.runtime
classpath += shadowJar.outputs.files
dependsOn shadowJar
}
// TODO: Restore shading when https://github.com/elastic/elasticsearch/pull/27955 gets in

View File

@ -0,0 +1 @@
5f01da7306363fad2028b916f3eab926262de928

View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,5 @@
=============================================================================
= NOTICE file corresponding to section 4d of the Apache License Version 2.0 =
=============================================================================
This product includes software developed by
Joda.org (http://www.joda.org/).

View File

@ -0,0 +1 @@
7a2999229464e7a324aa503c0a52ec0f05efe7bd

View File

@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Copyright 1999-2005 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -0,0 +1,5 @@
Apache log4j
Copyright 2007 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@ -0,0 +1 @@
c041978c686866ee8534f538c6220238db3bb6be

View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 1999-2005 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,5 @@
Apache log4j
Copyright 2007 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@ -0,0 +1,475 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was
derived from unicode conversion examples available at
http://www.unicode.org/Public/PROGRAMS/CVTUTF. Here is the copyright
from those sources:
/*
* Copyright 2001-2004 Unicode, Inc.
*
* Disclaimer
*
* This source code is provided as is by Unicode, Inc. No claims are
* made as to fitness for any particular purpose. No warranties of any
* kind are expressed or implied. The recipient agrees to determine
* applicability of information provided. If this file has been
* purchased on magnetic or optical media from Unicode, Inc., the
* sole remedy for any claim will be exchange of defective media
* within 90 days of receipt.
*
* Limitations on Rights to Redistribute This Code
*
* Unicode, Inc. hereby grants the right to freely use the information
* supplied in this file in the creation of products supporting the
* Unicode Standard, and to make copies of this file in any form
* for internal or external distribution as long as this notice
* remains attached.
*/
Some code in core/src/java/org/apache/lucene/util/ArrayUtil.java was
derived from Python 2.4.2 sources available at
http://www.python.org. Full license is here:
http://www.python.org/download/releases/2.4.2/license/
Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was
derived from Python 3.1.2 sources available at
http://www.python.org. Full license is here:
http://www.python.org/download/releases/3.1.2/license/
Some code in core/src/java/org/apache/lucene/util/automaton was
derived from Brics automaton sources available at
www.brics.dk/automaton/. Here is the copyright from those sources:
/*
* Copyright (c) 2001-2009 Anders Moeller
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
The levenshtein automata tables in core/src/java/org/apache/lucene/util/automaton
were automatically generated with the moman/finenight FSA package.
Here is the copyright for those sources:
# Copyright (c) 2010, Jean-Philippe Barrette-LaPierre, <jpb@rrette.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was
derived from ICU (http://www.icu-project.org)
The full license is available here:
http://source.icu-project.org/repos/icu/icu/trunk/license.html
/*
* Copyright (C) 1999-2010, International Business Machines
* Corporation and others. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, and/or sell copies of the
* Software, and to permit persons to whom the Software is furnished to do so,
* provided that the above copyright notice(s) and this permission notice appear
* in all copies of the Software and that both the above copyright notice(s) and
* this permission notice appear in supporting documentation.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
* LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR
* ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
* IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Except as contained in this notice, the name of a copyright holder shall not
* be used in advertising or otherwise to promote the sale, use or other
* dealings in this Software without prior written authorization of the
* copyright holder.
*/
The following license applies to the Snowball stemmers:
Copyright (c) 2001, Dr Martin Porter
Copyright (c) 2002, Richard Boulton
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The following license applies to the KStemmer:
Copyright © 2003,
Center for Intelligent Information Retrieval,
University of Massachusetts, Amherst.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. The names "Center for Intelligent Information Retrieval" and
"University of Massachusetts" must not be used to endorse or promote products
derived from this software without prior written permission. To obtain
permission, contact info@ciir.cs.umass.edu.
THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
The following license applies to the Morfologik project:
Copyright (c) 2006 Dawid Weiss
Copyright (c) 2007-2011 Dawid Weiss, Marcin Miłkowski
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Morfologik nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---
The dictionary comes from Morfologik project. Morfologik uses data from
Polish ispell/myspell dictionary hosted at http://www.sjp.pl/slownik/en/ and
is licenced on the terms of (inter alia) LGPL and Creative Commons
ShareAlike. The part-of-speech tags were added in Morfologik project and
are not found in the data from sjp.pl. The tagset is similar to IPI PAN
tagset.
---
The following license applies to the Morfeusz project,
used by org.apache.lucene.analysis.morfologik.
BSD-licensed dictionary of Polish (SGJP)
http://sgjp.pl/morfeusz/
Copyright © 2011 Zygmunt Saloni, Włodzimierz Gruszczyński,
Marcin Woliński, Robert Wołosz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -0,0 +1,192 @@
Apache Lucene
Copyright 2014 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
Includes software from other Apache Software Foundation projects,
including, but not limited to:
- Apache Ant
- Apache Jakarta Regexp
- Apache Commons
- Apache Xerces
ICU4J, (under analysis/icu) is licensed under an MIT styles license
and Copyright (c) 1995-2008 International Business Machines Corporation and others
Some data files (under analysis/icu/src/data) are derived from Unicode data such
as the Unicode Character Database. See http://unicode.org/copyright.html for more
details.
Brics Automaton (under core/src/java/org/apache/lucene/util/automaton) is
BSD-licensed, created by Anders Møller. See http://www.brics.dk/automaton/
The levenshtein automata tables (under core/src/java/org/apache/lucene/util/automaton) were
automatically generated with the moman/finenight FSA library, created by
Jean-Philippe Barrette-LaPierre. This library is available under an MIT license,
see http://sites.google.com/site/rrettesite/moman and
http://bitbucket.org/jpbarrette/moman/overview/
The class org.apache.lucene.util.WeakIdentityMap was derived from
the Apache CXF project and is Apache License 2.0.
The Google Code Prettify is Apache License 2.0.
See http://code.google.com/p/google-code-prettify/
JUnit (junit-4.10) is licensed under the Common Public License v. 1.0
See http://junit.sourceforge.net/cpl-v10.html
This product includes code (JaspellTernarySearchTrie) from Java Spelling Checkin
g Package (jaspell): http://jaspell.sourceforge.net/
License: The BSD License (http://www.opensource.org/licenses/bsd-license.php)
The snowball stemmers in
analysis/common/src/java/net/sf/snowball
were developed by Martin Porter and Richard Boulton.
The snowball stopword lists in
analysis/common/src/resources/org/apache/lucene/analysis/snowball
were developed by Martin Porter and Richard Boulton.
The full snowball package is available from
http://snowball.tartarus.org/
The KStem stemmer in
analysis/common/src/org/apache/lucene/analysis/en
was developed by Bob Krovetz and Sergio Guzman-Lara (CIIR-UMass Amherst)
under the BSD-license.
The Arabic,Persian,Romanian,Bulgarian, Hindi and Bengali analyzers (common) come with a default
stopword list that is BSD-licensed created by Jacques Savoy. These files reside in:
analysis/common/src/resources/org/apache/lucene/analysis/ar/stopwords.txt,
analysis/common/src/resources/org/apache/lucene/analysis/fa/stopwords.txt,
analysis/common/src/resources/org/apache/lucene/analysis/ro/stopwords.txt,
analysis/common/src/resources/org/apache/lucene/analysis/bg/stopwords.txt,
analysis/common/src/resources/org/apache/lucene/analysis/hi/stopwords.txt,
analysis/common/src/resources/org/apache/lucene/analysis/bn/stopwords.txt
See http://members.unine.ch/jacques.savoy/clef/index.html.
The German,Spanish,Finnish,French,Hungarian,Italian,Portuguese,Russian and Swedish light stemmers
(common) are based on BSD-licensed reference implementations created by Jacques Savoy and
Ljiljana Dolamic. These files reside in:
analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java
The Stempel analyzer (stempel) includes BSD-licensed software developed
by the Egothor project http://egothor.sf.net/, created by Leo Galambos, Martin Kvapil,
and Edmond Nolan.
The Polish analyzer (stempel) comes with a default
stopword list that is BSD-licensed created by the Carrot2 project. The file resides
in stempel/src/resources/org/apache/lucene/analysis/pl/stopwords.txt.
See http://project.carrot2.org/license.html.
The SmartChineseAnalyzer source code (smartcn) was
provided by Xiaoping Gao and copyright 2009 by www.imdict.net.
WordBreakTestUnicode_*.java (under modules/analysis/common/src/test/)
is derived from Unicode data such as the Unicode Character Database.
See http://unicode.org/copyright.html for more details.
The Morfologik analyzer (morfologik) includes BSD-licensed software
developed by Dawid Weiss and Marcin Miłkowski (http://morfologik.blogspot.com/).
Morfologik uses data from Polish ispell/myspell dictionary
(http://www.sjp.pl/slownik/en/) licenced on the terms of (inter alia)
LGPL and Creative Commons ShareAlike.
Morfologic includes data from BSD-licensed dictionary of Polish (SGJP)
(http://sgjp.pl/morfeusz/)
Servlet-api.jar and javax.servlet-*.jar are under the CDDL license, the original
source code for this can be found at http://www.eclipse.org/jetty/downloads.php
===========================================================================
Kuromoji Japanese Morphological Analyzer - Apache Lucene Integration
===========================================================================
This software includes a binary and/or source version of data from
mecab-ipadic-2.7.0-20070801
which can be obtained from
http://atilika.com/releases/mecab-ipadic/mecab-ipadic-2.7.0-20070801.tar.gz
or
http://jaist.dl.sourceforge.net/project/mecab/mecab-ipadic/2.7.0-20070801/mecab-ipadic-2.7.0-20070801.tar.gz
===========================================================================
mecab-ipadic-2.7.0-20070801 Notice
===========================================================================
Nara Institute of Science and Technology (NAIST),
the copyright holders, disclaims all warranties with regard to this
software, including all implied warranties of merchantability and
fitness, in no event shall NAIST be liable for
any special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether in an
action of contract, negligence or other tortuous action, arising out
of or in connection with the use or performance of this software.
A large portion of the dictionary entries
originate from ICOT Free Software. The following conditions for ICOT
Free Software applies to the current dictionary as well.
Each User may also freely distribute the Program, whether in its
original form or modified, to any third party or parties, PROVIDED
that the provisions of Section 3 ("NO WARRANTY") will ALWAYS appear
on, or be attached to, the Program, which is distributed substantially
in the same form as set out herein and that such intended
distribution, if actually made, will neither violate or otherwise
contravene any of the laws and regulations of the countries having
jurisdiction over the User or the intended distribution itself.
NO WARRANTY
The program was produced on an experimental basis in the course of the
research and development conducted during the project and is provided
to users as so produced on an experimental basis. Accordingly, the
program is provided without any warranty whatsoever, whether express,
implied, statutory or otherwise. The term "warranty" used herein
includes, but is not limited to, any warranty of the quality,
performance, merchantability and fitness for a particular purpose of
the program and the nonexistence of any infringement or violation of
any right of any third party.
Each user of the program will agree and understand, and be deemed to
have agreed and understood, that there is no warranty whatsoever for
the program and, accordingly, the entire risk arising from or
otherwise connected with the program is assumed by the user.
Therefore, neither ICOT, the copyright holder, or any other
organization that participated in or was otherwise related to the
development of the program and their respective officials, directors,
officers and other employees shall be held liable for any and all
damages, including, without limitation, general, special, incidental
and consequential damages, arising out of or otherwise in connection
with the use or inability to use the program or any product, material
or result produced or otherwise obtained by using the program,
regardless of whether they have been advised of, or otherwise had
knowledge of, the possibility of such damages at any time during the
project or thereafter. Each user will be deemed to have agreed to the
foregoing by his or her commencement of use of the program. The term
"use" as used herein includes, but is not limited to, the use,
modification, copying and distribution of the program and the
production of secondary products from the program.
In the case where the program, whether in its original form or
modified, was distributed or delivered to or received by a user from
any person, organization or entity other than ICOT, unless it makes or
grants independently of ICOT any specific warranty to the user in
writing, such person, organization or entity, will also be exempted
from and not be held liable to the user for any such damages as noted
above as far as the program is concerned.

View File

@ -0,0 +1 @@
91897dbbbbada95ccddbd90505f0a0ba6bf7c199

View File

@ -20,6 +20,7 @@ import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.xpack.sql.client.shared.UriUtils.parseURI;
import static org.elasticsearch.xpack.sql.client.shared.UriUtils.removeQuery;
@ -73,7 +74,7 @@ public class JdbcConfiguration extends ConnectionConfiguration {
// mutable ones
private TimeZone timeZone;
public static JdbcConfiguration create(String u, Properties props) throws JdbcSQLException {
public static JdbcConfiguration create(String u, Properties props, int loginTimeoutSeconds) throws JdbcSQLException {
URI uri = parseUrl(u);
Properties urlProps = parseProperties(uri, u);
uri = removeQuery(uri, u, DEFAULT_URI);
@ -83,6 +84,10 @@ public class JdbcConfiguration extends ConnectionConfiguration {
urlProps.putAll(props);
}
if (loginTimeoutSeconds > 0) {
urlProps.setProperty(CONNECT_TIMEOUT, Long.toString(TimeUnit.SECONDS.toMillis(loginTimeoutSeconds)));
}
try {
return new JdbcConfiguration(uri, u, urlProps);
} catch (JdbcSQLException e) {

View File

@ -9,8 +9,8 @@ import org.elasticsearch.xpack.sql.client.shared.ObjectUtils;
import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException;
import org.elasticsearch.xpack.sql.jdbc.net.client.Cursor;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnInfo;
import org.elasticsearch.xpack.sql.client.shared.Version;
import org.elasticsearch.xpack.sql.plugin.MetaColumnInfo;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
@ -817,21 +817,21 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
row[ 0] = cat;
row[ 1] = "";
row[ 2] = col.table;
row[ 3] = col.name;
row[ 4] = col.type.getVendorTypeNumber();
row[ 5] = col.type.getName();
row[ 6] = col.size;
row[ 2] = col.table();
row[ 3] = col.name();
row[ 4] = col.jdbcType().getVendorTypeNumber();
row[ 5] = col.jdbcType().getName();
row[ 6] = col.size();
row[ 7] = null;
row[ 8] = null;
row[ 9] = numericPrecisionRadix(col.type.getVendorTypeNumber());
row[ 9] = numericPrecisionRadix(col.jdbcType().getVendorTypeNumber());
row[10] = columnNullable;
row[11] = null;
row[12] = null;
row[13] = null;
row[14] = null;
row[15] = null;
row[16] = col.position;
row[16] = col.position();
row[17] = "YES";
row[18] = null;
row[19] = null;

View File

@ -19,6 +19,8 @@ import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
import static org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration.CONNECT_TIMEOUT;
public class JdbcDriver implements java.sql.Driver {
private static final JdbcDriver INSTANCE = new JdbcDriver();
@ -79,13 +81,7 @@ public class JdbcDriver implements java.sql.Driver {
}
private static JdbcConfiguration initCfg(String url, Properties props) throws JdbcSQLException {
JdbcConfiguration ci = JdbcConfiguration.create(url, props);
// if there's a timeout set on the DriverManager, make sure to use it
if (DriverManager.getLoginTimeout() > 0) {
ci.connectTimeout(TimeUnit.SECONDS.toMillis(DriverManager.getLoginTimeout()));
}
return ci;
return JdbcConfiguration.create(url, props, DriverManager.getLoginTimeout());
}
@Override
@ -98,7 +94,7 @@ public class JdbcDriver implements java.sql.Driver {
if (!acceptsURL(url)) {
return new DriverPropertyInfo[0];
}
return JdbcConfiguration.create(url, info).driverPropertyInfo();
return JdbcConfiguration.create(url, info, DriverManager.getLoginTimeout()).driverPropertyInfo();
}
@Override

View File

@ -7,7 +7,7 @@ package org.elasticsearch.xpack.sql.jdbc.jdbc;
import org.elasticsearch.xpack.sql.jdbc.net.client.Cursor;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo;
import org.elasticsearch.xpack.sql.protocol.shared.Nullable;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Nullable;
import java.io.InputStream;
import java.io.Reader;

View File

@ -136,37 +136,72 @@ abstract class TypeConverter {
}
// keep in check with JdbcUtils#columnType
private static Object asNative(Object v, JDBCType columnType) {
Object result = null;
static Object asNative(Object v, JDBCType columnType) {
switch (columnType) {
case BIT:
case BOOLEAN:
case TINYINT:
case SMALLINT:
case INTEGER:
case BIGINT:
case REAL:
case FLOAT:
case DOUBLE:
case BINARY:
case VARBINARY:
case LONGVARBINARY:
case CHAR:
case VARCHAR:
case LONGVARCHAR:
return v;
case TINYINT:
return ((Number) v).byteValue();
case SMALLINT:
return ((Number) v).shortValue();
case INTEGER:
return ((Number) v).intValue();
case BIGINT:
return ((Number) v).longValue();
case FLOAT:
case DOUBLE:
return doubleValue(v);
case REAL:
return floatValue(v);
case TIMESTAMP:
result = v;
break;
return ((Number) v).longValue();
// since the date is already in UTC_CALENDAR just do calendar math
case DATE:
result = new Date(utcMillisRemoveTime(((Long) v).longValue()));
break;
return new Date(utcMillisRemoveTime(((Number) v).longValue()));
case TIME:
result = new Time(utcMillisRemoveDate(((Long) v).longValue()));
break;
return new Time(utcMillisRemoveDate(((Number) v).longValue()));
default:
return null;
}
return result;
}
private static Double doubleValue(Object v) {
if (v instanceof String) {
switch ((String) v) {
case "NaN":
return Double.NaN;
case "Infinity":
return Double.POSITIVE_INFINITY;
case "-Infinity":
return Double.NEGATIVE_INFINITY;
default:
return Double.parseDouble((String) v);
}
}
return ((Number) v).doubleValue();
}
private static Float floatValue(Object v) {
if (v instanceof String) {
switch ((String) v) {
case "NaN":
return Float.NaN;
case "Infinity":
return Float.POSITIVE_INFINITY;
case "-Infinity":
return Float.NEGATIVE_INFINITY;
default:
return Float.parseFloat((String) v);
}
}
return ((Number) v).floatValue();
}
private static String asString(Object nativeValue) {

View File

@ -22,6 +22,8 @@ import java.util.logging.Logger;
import javax.sql.DataSource;
import static org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration.CONNECT_TIMEOUT;
public class JdbcDataSource implements DataSource, Wrapper {
static {
@ -98,10 +100,7 @@ public class JdbcDataSource implements DataSource, Wrapper {
}
private Connection doGetConnection(Properties p) throws SQLException {
JdbcConfiguration cfg = JdbcConfiguration.create(url, p);
if (loginTimeout > 0) {
cfg.connectTimeout(TimeUnit.SECONDS.toMillis(loginTimeout));
}
JdbcConfiguration cfg = JdbcConfiguration.create(url, p, loginTimeout);
JdbcConnection con = new JdbcConnection(cfg);
// enable logging if needed
return cfg.debug() ? Debug.proxy(cfg, con, writer) : con;

View File

@ -5,8 +5,8 @@
*/
package org.elasticsearch.xpack.sql.jdbc.net.client;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Page;
import java.sql.SQLException;
import java.util.List;
@ -16,31 +16,35 @@ class DefaultCursor implements Cursor {
private final JdbcHttpClient client;
private final RequestMeta meta;
private final Page page;
private final List<ColumnInfo> columnInfos;
private List<List<Object>> rows;
private int row = -1;
private String cursor;
DefaultCursor(JdbcHttpClient client, String cursor, Page page, RequestMeta meta) {
DefaultCursor(JdbcHttpClient client, String cursor, List<ColumnInfo> columnInfos, List<List<Object>> rows, RequestMeta meta) {
this.client = client;
this.meta = meta;
this.cursor = cursor;
this.page = page;
this.columnInfos = columnInfos;
this.rows = rows;
}
@Override
public List<ColumnInfo> columns() {
return page.columnInfo();
return columnInfos;
}
@Override
public boolean next() throws SQLException {
if (row < page.rows() - 1) {
if (row < rows.size() - 1) {
row++;
return true;
}
else {
if (cursor.isEmpty() == false) {
cursor = client.nextPage(cursor, page, meta);
Tuple<String, List<List<Object>>> nextPage = client.nextPage(cursor, meta);
cursor = nextPage.v1();
rows = nextPage.v2();
row = -1;
return next();
}
@ -50,12 +54,12 @@ class DefaultCursor implements Cursor {
@Override
public Object column(int column) {
return page.entry(row, column);
return rows.get(row).get(column);
}
@Override
public int batchSize() {
return page.rows();
return rows.size();
}
@Override

View File

@ -1,63 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.client;
import org.elasticsearch.xpack.sql.client.shared.ClientException;
import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection;
import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection.ResponseOrException;
import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException;
import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.sql.SQLException;
// http client
// handles nodes discovery, fail-over, errors, etc...
class HttpClient {

    // Connection configuration (base URI, credentials, timeouts). Also acts as the
    // holder for the mutable network timeout, since the configuration is not shared
    // across connections (see ping() callers).
    private final JdbcConfiguration cfg;

    HttpClient(JdbcConfiguration connectionInfo) throws SQLException {
        this.cfg = connectionInfo;
    }

    /** Sets the timeout, in milliseconds, applied to subsequent network operations. */
    void setNetworkTimeout(long millis) {
        cfg.networkTimeout(millis);
    }

    /** Returns the currently configured network timeout in milliseconds. */
    long getNetworkTimeout() {
        return cfg.networkTimeout();
    }

    /**
     * Pings the server with an HTTP HEAD on the root endpoint.
     * Runs inside {@code doPrivileged} so the network call works when the driver
     * is loaded under a restrictive security manager.
     *
     * @throws JdbcSQLException if the server cannot be reached
     */
    boolean head() throws JdbcSQLException {
        try {
            return AccessController.doPrivileged((PrivilegedAction<Boolean>) () ->
                    JreHttpUrlConnection.http("", "error_trace", cfg, JreHttpUrlConnection::head));
        } catch (ClientException ex) {
            throw new JdbcSQLException(ex, "Cannot ping server");
        }
    }

    /**
     * POSTs a binary-protocol request to the {@code _xpack/sql/jdbc} endpoint and
     * decodes the matching response. Request encoding and response decoding are
     * both delegated to {@code Proto.INSTANCE}; the response type is determined
     * by the request that was sent.
     *
     * @throws SQLException wrapping any transport failure as a JdbcSQLException
     */
    Response post(Request request) throws SQLException {
        try {
            return AccessController.doPrivileged((PrivilegedAction<ResponseOrException<Response>>) () ->
                    JreHttpUrlConnection.http("_xpack/sql/jdbc", "error_trace", cfg, con ->
                        con.post(
                            out -> Proto.INSTANCE.writeRequest(request, out),
                            in -> Proto.INSTANCE.readResponse(request, in)
                        )
                    )
            ).getResponseOrThrowException();
        } catch (ClientException ex) {
            throw new JdbcSQLException(ex, "Transport failure");
        }
    }
}

View File

@ -5,77 +5,65 @@
*/
package org.elasticsearch.xpack.sql.jdbc.net.client;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.sql.client.HttpClient;
import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnInfo;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Page;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryCloseRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryCloseResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageResponse;
import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo;
import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest;
import org.elasticsearch.xpack.sql.plugin.MetaColumnInfo;
import org.elasticsearch.xpack.sql.plugin.SqlListColumnsRequest;
import org.elasticsearch.xpack.sql.plugin.SqlListTablesRequest;
import org.elasticsearch.xpack.sql.plugin.SqlQueryRequest;
import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse;
import org.joda.time.DateTimeZone;
import java.io.DataInput;
import java.io.IOException;
import java.sql.SQLException;
import java.time.Instant;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static org.elasticsearch.xpack.sql.client.shared.StringUtils.EMPTY;
public class JdbcHttpClient {
@FunctionalInterface
interface DataInputFunction<R> {
R apply(DataInput in) throws IOException, SQLException;
}
private final HttpClient http;
private final HttpClient httpClient;
private final JdbcConfiguration conCfg;
private InfoResponse serverInfo;
public JdbcHttpClient(JdbcConfiguration conCfg) throws SQLException {
http = new HttpClient(conCfg);
httpClient = new HttpClient(conCfg);
this.conCfg = conCfg;
}
public boolean ping(long timeoutInMs) throws SQLException {
long oldTimeout = http.getNetworkTimeout();
try {
// this works since the connection is single-threaded and its configuration not shared
// with others connections
http.setNetworkTimeout(timeoutInMs);
return http.head();
} finally {
http.setNetworkTimeout(oldTimeout);
}
return httpClient.ping(timeoutInMs);
}
public Cursor query(String sql, RequestMeta meta) throws SQLException {
int fetch = meta.fetchSize() > 0 ? meta.fetchSize() : conCfg.pageSize();
QueryInitRequest request = new QueryInitRequest(sql, fetch, conCfg.timeZone(), timeout(meta));
QueryInitResponse response = (QueryInitResponse) http.post(request);
return new DefaultCursor(this, response.cursor(), (Page) response.data, meta);
SqlQueryRequest sqlRequest = new SqlQueryRequest(AbstractSqlRequest.Mode.JDBC, sql, null, DateTimeZone.UTC, fetch,
TimeValue.timeValueMillis(meta.timeoutInMs()), TimeValue.timeValueMillis(meta.timeoutInMs()), "");
SqlQueryResponse response = httpClient.query(sqlRequest);
return new DefaultCursor(this, response.cursor(), toJdbcColumnInfo(response.columns()), response.rows(), meta);
}
/**
* Read the next page of results, updating the {@link Page} and returning
* Read the next page of results and returning
* the scroll id to use to fetch the next page.
*/
public String nextPage(String cursor, Page page, RequestMeta meta) throws SQLException {
QueryPageRequest request = new QueryPageRequest(cursor, timeout(meta), page);
return ((QueryPageResponse) http.post(request)).cursor();
public Tuple<String, List<List<Object>>> nextPage(String cursor, RequestMeta meta) throws SQLException {
TimeValue timeValue = TimeValue.timeValueMillis(meta.timeoutInMs());
SqlQueryRequest sqlRequest = new SqlQueryRequest().cursor(cursor);
sqlRequest.mode(AbstractSqlRequest.Mode.JDBC);
sqlRequest.requestTimeout(timeValue);
sqlRequest.pageTimeout(timeValue);
SqlQueryResponse response = httpClient.query(sqlRequest);
return new Tuple<>(response.cursor(), response.rows());
}
public boolean queryClose(String cursor) throws SQLException {
QueryCloseRequest request = new QueryCloseRequest(cursor);
return ((QueryCloseResponse) http.post(request)).succeeded();
return httpClient.queryClose(cursor);
}
public InfoResponse serverInfo() throws SQLException {
@ -86,37 +74,25 @@ public class JdbcHttpClient {
}
private InfoResponse fetchServerInfo() throws SQLException {
InfoRequest request = new InfoRequest();
return (InfoResponse) http.post(request);
MainResponse mainResponse = httpClient.serverInfo();
return new InfoResponse(mainResponse.getClusterName().value(), mainResponse.getVersion().major, mainResponse.getVersion().minor);
}
public List<String> metaInfoTables(String pattern) throws SQLException {
MetaTableRequest request = new MetaTableRequest(pattern);
return ((MetaTableResponse) http.post(request)).tables;
return httpClient.listTables(new SqlListTablesRequest(AbstractSqlRequest.Mode.JDBC, pattern)).getTables();
}
public List<MetaColumnInfo> metaInfoColumns(String tablePattern, String columnPattern) throws SQLException {
MetaColumnRequest request = new MetaColumnRequest(tablePattern, columnPattern);
return ((MetaColumnResponse) http.post(request)).columns;
return httpClient.listColumns(new SqlListColumnsRequest(AbstractSqlRequest.Mode.JDBC, tablePattern, columnPattern)).getColumns();
}
public void setNetworkTimeout(long millis) {
http.setNetworkTimeout(millis);
/**
* Converts REST column metadata into JDBC column metadata
*/
private List<ColumnInfo> toJdbcColumnInfo(List<org.elasticsearch.xpack.sql.plugin.ColumnInfo> columns) {
return columns.stream().map(columnInfo ->
new ColumnInfo(columnInfo.name(), columnInfo.jdbcType(), EMPTY, EMPTY, EMPTY, EMPTY, columnInfo.displaySize())
).collect(Collectors.toList());
}
public long getNetworkTimeout() {
return http.getNetworkTimeout();
}
private TimeoutInfo timeout(RequestMeta meta) {
// client time
long clientTime = Instant.now().toEpochMilli();
// timeout (in ms)
long timeout = meta.timeoutInMs();
if (timeout == 0) {
timeout = conCfg.queryTimeout();
}
return new TimeoutInfo(clientTime, timeout, conCfg.pageTimeout());
}
}

View File

@ -5,16 +5,17 @@
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.JDBCType;
import java.util.Objects;
public class ColumnInfo {
public String catalog, schema, table, label, name;
public int displaySize;
public JDBCType type;
public final String catalog;
public final String schema;
public final String table;
public final String label;
public final String name;
public final int displaySize;
public final JDBCType type;
public ColumnInfo(String name, JDBCType type, String table, String catalog, String schema, String label, int displaySize) {
if (name == null) {
@ -44,26 +45,6 @@ public class ColumnInfo {
this.displaySize = displaySize;
}
ColumnInfo(DataInput in) throws IOException {
name = in.readUTF();
type = JDBCType.valueOf(in.readInt());
table = in.readUTF();
catalog = in.readUTF();
schema = in.readUTF();
label = in.readUTF();
displaySize = in.readInt();
}
void writeTo(DataOutput out) throws IOException {
out.writeUTF(name);
out.writeInt(type.getVendorTypeNumber());
out.writeUTF(table);
out.writeUTF(catalog);
out.writeUTF(schema);
out.writeUTF(label);
out.writeInt(displaySize);
}
public int displaySize() {
// 0 - means unknown
return displaySize;

View File

@ -0,0 +1,21 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
/**
* General information about the server.
*/
public class InfoResponse {

    /** Name of the cluster that answered the request. */
    public final String cluster;
    /** Major component of the server version. */
    public final int majorVersion;
    /** Minor component of the server version. */
    public final int minorVersion;

    /**
     * Creates an immutable snapshot of the server information.
     *
     * @param clusterName  cluster name reported by the server
     * @param versionMajor major version byte, widened to an int
     * @param versionMinor minor version byte, widened to an int
     */
    public InfoResponse(String clusterName, byte versionMajor, byte versionMinor) {
        this.minorVersion = versionMinor;
        this.majorVersion = versionMajor;
        this.cluster = clusterName;
    }
}

View File

@ -3,7 +3,7 @@
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.protocol.shared;
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;

View File

@ -1,20 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.util;
import java.io.DataOutputStream;
import java.io.OutputStream;
/**
 * A {@link DataOutputStream} that additionally exposes the underlying stream
 * it wraps (plain {@code DataOutputStream} keeps it protected).
 */
public class AccessibleDataOutputStream extends DataOutputStream {

    public AccessibleDataOutputStream(OutputStream out) {
        super(out);
    }

    /**
     * Returns the stream this instance writes through.
     */
    public OutputStream wrappedStream() {
        return this.out;
    }
}

View File

@ -9,13 +9,17 @@ import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration;
import java.sql.SQLException;
import java.util.Properties;
import static org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration.CONNECT_TIMEOUT;
import static org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration.PAGE_TIMEOUT;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
public class JdbcConfigurationTests extends ESTestCase {
private JdbcConfiguration ci(String url) throws SQLException {
return JdbcConfiguration.create(url, null);
return JdbcConfiguration.create(url, null, 0);
}
public void testJustThePrefix() throws Exception {
@ -79,4 +83,28 @@ public class JdbcConfigurationTests extends ESTestCase {
assertThat(ci.baseUri().toString(), is("http://test:9200/"));
}
public void testTimoutOverride() throws Exception {
Properties properties = new Properties();
properties.setProperty(CONNECT_TIMEOUT, "3"); // Should be overridden
properties.setProperty(PAGE_TIMEOUT, "4");
String url = "jdbc:es://test?connect.timeout=1&page.timeout=2";
// No properties
JdbcConfiguration ci = JdbcConfiguration.create(url, null, 0);
assertThat(ci.connectTimeout(), equalTo(1L));
assertThat(ci.pageTimeout(), equalTo(2L));
// Properties override
ci = JdbcConfiguration.create(url, properties, 0);
assertThat(ci.connectTimeout(), equalTo(3L));
assertThat(ci.pageTimeout(), equalTo(4L));
// Driver default override for connection timeout
ci = JdbcConfiguration.create(url, properties, 5);
assertThat(ci.connectTimeout(), equalTo(5000L));
assertThat(ci.pageTimeout(), equalTo(4L));
}
}

View File

@ -0,0 +1,61 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.jdbc;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest;
import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse;
import org.joda.time.DateTime;
import java.io.IOException;
import java.sql.JDBCType;
import static org.hamcrest.Matchers.instanceOf;
public class TypeConverterTests extends ESTestCase {

    // REAL maps to Java float: both float and double JSON inputs must come back as Float,
    // and the non-finite values must survive the XContent round trip.
    public void testFloatAsNative() throws IOException {
        assertThat(convertAsNative(42.0f, JDBCType.REAL), instanceOf(Float.class));
        assertThat(convertAsNative(42.0, JDBCType.REAL), instanceOf(Float.class));
        assertEquals(42.0f, (float) convertAsNative(42.0, JDBCType.REAL), 0.001f);
        assertEquals(Float.NaN, convertAsNative(Float.NaN, JDBCType.REAL));
        assertEquals(Float.NEGATIVE_INFINITY, convertAsNative(Float.NEGATIVE_INFINITY, JDBCType.REAL));
        assertEquals(Float.POSITIVE_INFINITY, convertAsNative(Float.POSITIVE_INFINITY, JDBCType.REAL));
    }

    // FLOAT and DOUBLE both map to Java double; same round-trip checks as above.
    public void testDoubleAsNative() throws IOException {
        JDBCType type = randomFrom(JDBCType.FLOAT, JDBCType.DOUBLE);
        assertThat(convertAsNative(42.0, type), instanceOf(Double.class));
        assertEquals(42.0f, (double) convertAsNative(42.0, type), 0.001f);
        assertEquals(Double.NaN, convertAsNative(Double.NaN, type));
        assertEquals(Double.NEGATIVE_INFINITY, convertAsNative(Double.NEGATIVE_INFINITY, type));
        assertEquals(Double.POSITIVE_INFINITY, convertAsNative(Double.POSITIVE_INFINITY, type));
    }

    // In JDBC mode dates are sent as epoch millis (JDBC cannot parse string dates),
    // so a DateTime must come back as a Long with the same millisecond value.
    public void testTimestampAsNative() throws IOException {
        DateTime now = DateTime.now();
        assertThat(convertAsNative(now, JDBCType.TIMESTAMP), instanceOf(Long.class));
        assertEquals(now.getMillis(), convertAsNative(now, JDBCType.TIMESTAMP));
    }

    /**
     * Round-trips a value through XContent the way the server would serialize it
     * for a JDBC-mode client (via {@code SqlQueryResponse.value}), then converts
     * the parsed-back value with {@code TypeConverter.asNative}.
     */
    private Object convertAsNative(Object value, JDBCType type) throws IOException {
        // Simulate sending over XContent
        XContentBuilder builder = JsonXContent.contentBuilder();
        builder.startObject();
        builder.field("value");
        // Serialize exactly as the server does for Mode.JDBC clients.
        SqlQueryResponse.value(builder, AbstractSqlRequest.Mode.JDBC, value);
        builder.endObject();
        builder.close();
        // Parse the JSON back into a map and pull out the serialized value.
        Object copy = XContentHelper.convertToMap(builder.bytes(), false, builder.contentType()).v2().get("value");
        return TypeConverter.asNative(copy, type);
    }
}

View File

@ -7,11 +7,8 @@ package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.sql.JDBCType;
import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.assertRoundTrip;
public class ColumnInfoTests extends ESTestCase {
static ColumnInfo varcharInfo(String name) {
return new ColumnInfo(name, JDBCType.VARCHAR, "", "", "", "", 0);
@ -25,25 +22,6 @@ public class ColumnInfoTests extends ESTestCase {
return new ColumnInfo(name, JDBCType.DOUBLE, "", "", "", "", 25);
}
static Object randomValueFor(ColumnInfo info) {
switch (info.type) {
case VARCHAR: return randomAlphaOfLength(5);
case INTEGER: return randomInt();
case DOUBLE: return randomDouble();
default:
throw new IllegalArgumentException("Unsupported type [" + info.type + "]");
}
}
static ColumnInfo randomColumnInfo() {
return new ColumnInfo(randomAlphaOfLength(5), randomFrom(JDBCType.values()), randomAlphaOfLength(5), randomAlphaOfLength(5),
randomAlphaOfLength(5), randomAlphaOfLength(5), randomInt(25));
}
public void testRoundTrip() throws IOException {
assertRoundTrip(randomColumnInfo(), ColumnInfo::writeTo, ColumnInfo::new);
}
public void testToString() {
assertEquals("test.doc.a<type=[VARCHAR] catalog=[as] schema=[ads] label=[lab]>",
new ColumnInfo("a", JDBCType.VARCHAR, "test.doc", "as", "ads", "lab", 0).toString());

View File

@ -2,7 +2,6 @@ description = 'Request and response objects shared by the cli and ' +
'its backend in :sql:server'
dependencies {
compile project(':x-pack-elasticsearch:sql:shared-proto')
compile (project(':server')) {
transitive = false
}
@ -19,11 +18,9 @@ dependencies {
}
dependencyLicenses {
mapping from: /shared-proto.*/, to: 'elasticsearch'
mapping from: /elasticsearch-core.*/, to: 'elasticsearch'
mapping from: /jackson-.*/, to: 'jackson'
mapping from: /lucene-.*/, to: 'lucene'
ignoreSha 'shared-proto'
ignoreSha 'elasticsearch'
ignoreSha 'elasticsearch-core'
}

View File

@ -16,8 +16,6 @@ import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractQueryInitRequest;
import org.elasticsearch.xpack.sql.protocol.shared.TimeoutInfo;
import org.joda.time.DateTimeZone;
import java.io.IOException;
@ -28,11 +26,14 @@ import java.util.function.Supplier;
* Base class for requests that contain sql queries (Query and Translate)
*/
public abstract class AbstractSqlQueryRequest extends AbstractSqlRequest implements CompositeIndicesRequest, ToXContentFragment {
public static final DateTimeZone DEFAULT_TIME_ZONE = DateTimeZone.UTC;
public static final int DEFAULT_FETCH_SIZE = AbstractQueryInitRequest.DEFAULT_FETCH_SIZE;
public static final TimeValue DEFAULT_REQUEST_TIMEOUT = TimeValue.timeValueMillis(TimeoutInfo.DEFAULT_REQUEST_TIMEOUT);
public static final TimeValue DEFAULT_PAGE_TIMEOUT = TimeValue.timeValueMillis(TimeoutInfo.DEFAULT_PAGE_TIMEOUT);
/**
* Global choice for the default fetch size.
*/
public static final int DEFAULT_FETCH_SIZE = 1000;
public static final TimeValue DEFAULT_REQUEST_TIMEOUT = TimeValue.timeValueSeconds(90);
public static final TimeValue DEFAULT_PAGE_TIMEOUT = TimeValue.timeValueSeconds(45);
private String query = "";
private DateTimeZone timeZone = DEFAULT_TIME_ZONE;

View File

@ -34,6 +34,12 @@ public abstract class AbstractSqlRequest extends ActionRequest implements ToXCon
}
return Mode.valueOf(mode.toUpperCase(Locale.ROOT));
}
@Override
public String toString() {
return this.name().toLowerCase(Locale.ROOT);
}
}
private Mode mode = Mode.PLAIN;

View File

@ -24,7 +24,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
/**
* Information about a column.
* Information about a column returned with first query response
*/
public final class ColumnInfo implements Writeable, ToXContentObject {

View File

@ -0,0 +1,191 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.plugin;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.sql.JDBCType;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
/**
* Information about a column returned by the listColumns response
*/
public class MetaColumnInfo implements Writeable, ToXContentObject {

    // Parses the JSON produced by toXContent below. Lenient (second argument true)
    // so unknown fields sent by newer servers are ignored. jdbc_type and size are
    // optional: a missing jdbc_type yields null and a missing size yields 0,
    // matching the convenience constructor.
    private static final ConstructingObjectParser<MetaColumnInfo, Void> PARSER =
            new ConstructingObjectParser<>("column_info", true, objects ->
                    new MetaColumnInfo(
                            (String) objects[0],
                            (String) objects[1],
                            (String) objects[2],
                            objects[3] == null ? null : JDBCType.valueOf((int) objects[3]),
                            objects[4] == null ? 0 : (int) objects[4],
                            (int) objects[5]));

    private static final ParseField TABLE = new ParseField("table");
    private static final ParseField NAME = new ParseField("name");
    private static final ParseField ES_TYPE = new ParseField("type");
    private static final ParseField JDBC_TYPE = new ParseField("jdbc_type");
    private static final ParseField SIZE = new ParseField("size");
    private static final ParseField POSITION = new ParseField("position");

    static {
        PARSER.declareString(constructorArg(), TABLE);
        PARSER.declareString(constructorArg(), NAME);
        PARSER.declareString(constructorArg(), ES_TYPE);
        // jdbc_type and size are only present for JDBC-mode responses.
        PARSER.declareInt(optionalConstructorArg(), JDBC_TYPE);
        PARSER.declareInt(optionalConstructorArg(), SIZE);
        PARSER.declareInt(constructorArg(), POSITION);
    }

    private final String table;
    private final String name;
    private final String esType;
    // null when the response was not produced for a JDBC client.
    @Nullable
    private final JDBCType jdbcType;
    private final int size;
    private final int position;

    /**
     * Full constructor.
     *
     * @param table    name of the table the column belongs to
     * @param name     column name
     * @param esType   Elasticsearch type of the column
     * @param jdbcType JDBC type, or null when not applicable
     * @param size     column precision; 0 when no JDBC type/size is available
     * @param position 1-based position of the column in the original table
     */
    public MetaColumnInfo(String table, String name, String esType, JDBCType jdbcType, int size, int position) {
        this.table = table;
        this.name = name;
        this.esType = esType;
        this.jdbcType = jdbcType;
        this.size = size;
        this.position = position;
    }

    /** Convenience constructor for non-JDBC responses: no JDBC type, size 0. */
    public MetaColumnInfo(String table, String name, String esType, int position) {
        this(table, name, esType, null, 0, position);
    }

    /**
     * Reads from the transport wire format. A single boolean flag guards BOTH
     * jdbcType and size: when absent they default to null and 0, mirroring the
     * convenience constructor. Must stay in sync with {@link #writeTo}.
     */
    MetaColumnInfo(StreamInput in) throws IOException {
        table = in.readString();
        name = in.readString();
        esType = in.readString();
        if (in.readBoolean()) {
            jdbcType = JDBCType.valueOf(in.readVInt());
            size = in.readVInt();
        } else {
            jdbcType = null;
            size = 0;
        }
        position = in.readVInt();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(table);
        out.writeString(name);
        out.writeString(esType);
        // One presence flag covers both jdbcType and size -- see the StreamInput constructor.
        if (jdbcType != null) {
            out.writeBoolean(true);
            out.writeVInt(jdbcType.getVendorTypeNumber());
            out.writeVInt(size);
        } else {
            out.writeBoolean(false);
        }
        out.writeVInt(position);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field("table", table);
        builder.field("name", name);
        builder.field("type", esType);
        // jdbc_type is serialized as its numeric vendor code; omitted (with size)
        // when the column has no JDBC type.
        if (jdbcType != null) {
            builder.field("jdbc_type", jdbcType.getVendorTypeNumber());
            builder.field("size", size);
        }
        builder.field("position", position);
        return builder.endObject();
    }

    public static MetaColumnInfo fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }

    /**
     * Name of the table.
     */
    public String table() {
        return table;
    }

    /**
     * Name of the column.
     */
    public String name() {
        return name;
    }

    /**
     * The type of the column in Elasticsearch.
     */
    public String esType() {
        return esType;
    }

    /**
     * The type of the column as it would be returned by a JDBC driver;
     * null for non-JDBC responses.
     */
    public JDBCType jdbcType() {
        return jdbcType;
    }

    /**
     * Precision; 0 when no JDBC type/size was provided.
     */
    public int size() {
        return size;
    }

    /**
     * Column position within the table. May not match the index in the returned
     * columns array if a column pattern filtered some columns out.
     */
    public int position() {
        return position;
    }

    @Override
    public String toString() {
        return Strings.toString(this);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        MetaColumnInfo that = (MetaColumnInfo) o;
        return size == that.size &&
                position == that.position &&
                Objects.equals(table, that.table) &&
                Objects.equals(name, that.name) &&
                Objects.equals(esType, that.esType) &&
                jdbcType == that.jdbcType;
    }

    @Override
    public int hashCode() {
        return Objects.hash(table, name, esType, jdbcType, size, position);
    }
}

View File

@ -8,6 +8,7 @@ package org.elasticsearch.xpack.sql.plugin;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
@ -20,6 +21,7 @@ import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
/**
* Request to get a list of SQL-supported columns of an index
@ -37,11 +39,13 @@ public class SqlListColumnsRequest extends AbstractSqlRequest implements ToXCont
));
static {
PARSER.declareString(constructorArg(), new ParseField("table_pattern"));
PARSER.declareString(constructorArg(), new ParseField("column_pattern"));
PARSER.declareString(optionalConstructorArg(), new ParseField("table_pattern"));
PARSER.declareString(optionalConstructorArg(), new ParseField("column_pattern"));
}
@Nullable
private String tablePattern;
@Nullable
private String columnPattern;
@ -57,20 +61,8 @@ public class SqlListColumnsRequest extends AbstractSqlRequest implements ToXCont
public SqlListColumnsRequest(StreamInput in) throws IOException {
super(in);
this.tablePattern = in.readString();
this.columnPattern = in.readString();
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
if (tablePattern == null) {
validationException = addValidationError("[index_pattern] is required", validationException);
}
if (columnPattern == null) {
validationException = addValidationError("[column_pattern] is required", validationException);
}
return validationException;
this.tablePattern = in.readOptionalString();
this.columnPattern = in.readOptionalString();
}
/**
@ -98,8 +90,8 @@ public class SqlListColumnsRequest extends AbstractSqlRequest implements ToXCont
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(tablePattern);
out.writeString(columnPattern);
out.writeOptionalString(tablePattern);
out.writeOptionalString(columnPattern);
}
@Override
@ -111,8 +103,12 @@ public class SqlListColumnsRequest extends AbstractSqlRequest implements ToXCont
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
{
builder.field("table_pattern", tablePattern);
builder.field("column_pattern", columnPattern);
if (tablePattern != null) {
builder.field("table_pattern", tablePattern);
}
if (columnPattern != null) {
builder.field("column_pattern", columnPattern);
}
}
return builder.endObject();
}

View File

@ -27,20 +27,20 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona
public class SqlListColumnsResponse extends ActionResponse implements ToXContentObject {
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<SqlListColumnsResponse, Void> PARSER = new ConstructingObjectParser<>("sql", true,
objects -> new SqlListColumnsResponse((List<ColumnInfo>) objects[0]));
objects -> new SqlListColumnsResponse((List<MetaColumnInfo>) objects[0]));
public static final ParseField COLUMNS = new ParseField("columns");
static {
PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ColumnInfo.fromXContent(p), COLUMNS);
PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> MetaColumnInfo.fromXContent(p), COLUMNS);
}
private List<ColumnInfo> columns;
private List<MetaColumnInfo> columns;
public SqlListColumnsResponse() {
}
public SqlListColumnsResponse(List<ColumnInfo> columns) {
public SqlListColumnsResponse(List<MetaColumnInfo> columns) {
this.columns = columns;
}
@ -48,14 +48,14 @@ public class SqlListColumnsResponse extends ActionResponse implements ToXContent
* The key that must be sent back to SQL to access the next page of
* results. If equal to "" then there is no next page.
*/
public List<ColumnInfo> getColumns() {
public List<MetaColumnInfo> getColumns() {
return columns;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
columns = in.readList(ColumnInfo::new);
columns = in.readList(MetaColumnInfo::new);
}
@Override
@ -70,7 +70,7 @@ public class SqlListColumnsResponse extends ActionResponse implements ToXContent
{
builder.startArray("columns");
{
for (ColumnInfo column : columns) {
for (MetaColumnInfo column : columns) {
column.toXContent(builder, params);
}
}

View File

@ -20,6 +20,7 @@ import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
/**
* Request to get a list of SQL-supported indices
@ -36,7 +37,7 @@ public class SqlListTablesRequest extends AbstractSqlRequest implements ToXConte
(String) objects[0]));
static {
PARSER.declareString(constructorArg(), new ParseField("table_pattern"));
PARSER.declareString(optionalConstructorArg(), new ParseField("table_pattern"));
}
private String pattern;
@ -52,16 +53,7 @@ public class SqlListTablesRequest extends AbstractSqlRequest implements ToXConte
public SqlListTablesRequest(StreamInput in) throws IOException {
super(in);
this.pattern = in.readString();
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
if (pattern == null) {
validationException = addValidationError("[pattern] is required", validationException);
}
return validationException;
this.pattern = in.readOptionalString();
}
/**
@ -78,7 +70,7 @@ public class SqlListTablesRequest extends AbstractSqlRequest implements ToXConte
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(pattern);
out.writeOptionalString(pattern);
}
@Override
@ -90,7 +82,9 @@ public class SqlListTablesRequest extends AbstractSqlRequest implements ToXConte
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
{
builder.field("table_pattern", pattern);
if (pattern != null) {
builder.field("table_pattern", pattern);
}
}
return builder.endObject();
}

View File

@ -28,7 +28,6 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
* Request to perform an sql query
*/
public class SqlQueryRequest extends AbstractSqlQueryRequest implements ToXContentObject {
private static final ObjectParser<SqlQueryRequest, Void> PARSER = objectParser(SqlQueryRequest::new);
public static final ParseField CURSOR = new ParseField("cursor");

View File

@ -16,6 +16,7 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.joda.time.ReadableDateTime;
import java.io.IOException;
import java.util.ArrayList;
@ -154,6 +155,7 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
AbstractSqlRequest.Mode mode = AbstractSqlRequest.Mode.fromString(params.param("mode"));
builder.startObject();
{
if (columns != null) {
@ -169,7 +171,7 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject
for (List<Object> row : rows()) {
builder.startArray();
for (Object value : row) {
builder.value(value);
value(builder, mode, value);
}
builder.endArray();
}
@ -182,6 +184,19 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject
return builder.endObject();
}
/**
 * Serializes the provided value in SQL-compatible way based on the client mode.
 *
 * @param builder the builder to write the value into
 * @param mode    client mode the response is being rendered for
 * @param value   the cell value to serialize
 * @return the same {@code builder}, for chaining
 */
public static XContentBuilder value(XContentBuilder builder, AbstractSqlRequest.Mode mode, Object value) throws IOException {
    // JDBC cannot parse dates in string format, so render them as epoch millis instead
    if (value instanceof ReadableDateTime && mode == AbstractSqlRequest.Mode.JDBC) {
        return builder.value(((ReadableDateTime) value).getMillis());
    }
    return builder.value(value);
}
public static SqlQueryResponse fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}

View File

@ -25,11 +25,16 @@ public class SqlListColumnsResponseTests extends AbstractStreamableXContentTestC
@Override
protected SqlListColumnsResponse createTestInstance() {
int columnCount = between(1, 10);
List<ColumnInfo> columns = new ArrayList<>(columnCount);
boolean jdbcMode = randomBoolean();
List<MetaColumnInfo> columns = new ArrayList<>(columnCount);
for (int i = 0; i < columnCount; i++) {
columns.add(new ColumnInfo(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10),
randomFrom(JDBCType.values()), randomInt(25)));
if (jdbcMode) {
columns.add(new MetaColumnInfo(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10),
randomFrom(JDBCType.values()), randomInt(25), randomInt(20)));
} else {
columns.add(new MetaColumnInfo(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10), randomInt(20)));
}
}
return new SqlListColumnsResponse(columns);
}
@ -52,12 +57,18 @@ public class SqlListColumnsResponseTests extends AbstractStreamableXContentTestC
assertThat(columns, hasSize(testInstance.getColumns().size()));
for (int i = 0; i < columns.size(); i++) {
Map<?, ?> columnMap = (Map<?, ?>) columns.get(i);
ColumnInfo columnInfo = testInstance.getColumns().get(i);
MetaColumnInfo columnInfo = testInstance.getColumns().get(i);
assertEquals(columnInfo.table(), columnMap.get("table"));
assertEquals(columnInfo.name(), columnMap.get("name"));
assertEquals(columnInfo.esType(), columnMap.get("type"));
assertEquals(columnInfo.displaySize(), columnMap.get("display_size"));
assertEquals(columnInfo.jdbcType().getVendorTypeNumber(), columnMap.get("jdbc_type"));
if (columnInfo.jdbcType() == null) {
assertNull(columnMap.get("jdbc_type"));
assertNull(columnMap.get("size"));
} else {
assertEquals(columnInfo.jdbcType().getVendorTypeNumber(), columnMap.get("jdbc_type"));
assertEquals(columnInfo.size(), columnMap.get("size"));
}
assertEquals(columnInfo.position(), columnMap.get("position"));
}
} else {
assertNull(rootMap.get("columns"));

View File

@ -44,8 +44,7 @@ public class SqlListTablesRequestTests extends AbstractSerializingTestCase<SqlLi
request -> request.mode(randomValueOtherThan(request.mode(), () -> randomFrom(AbstractSqlRequest.Mode.values()))),
request -> request.setPattern(randomValueOtherThan(request.getPattern(), () -> randomAlphaOfLength(10)))
);
SqlListTablesRequest newRequest =
new SqlListTablesRequest(instance.mode(), instance.getPattern());
SqlListTablesRequest newRequest = new SqlListTablesRequest(instance.mode(), instance.getPattern());
mutator.accept(newRequest);
return newRequest;
}

View File

@ -1,9 +1,7 @@
description = 'The server components of SQL for Elasticsearch'
dependencies {
compile project(':x-pack-elasticsearch:sql:jdbc-proto')
compile project(':x-pack-elasticsearch:sql:rest-proto')
compile project(':x-pack-elasticsearch:sql:shared-proto')
compile 'org.antlr:antlr4-runtime:4.5.3'
provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
@ -11,12 +9,8 @@ dependencies {
}
dependencyLicenses {
mapping from: /jdbc-proto.*/, to: 'elasticsearch'
mapping from: /rest-proto.*/, to: 'elasticsearch'
mapping from: /shared-proto.*/, to: 'elasticsearch'
ignoreSha 'jdbc-proto'
ignoreSha 'rest-proto'
ignoreSha 'shared-proto'
}
// TODO probably not a good thing to rely on. See https://github.com/elastic/x-pack-elasticsearch/issues/2871

View File

@ -1,74 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.plugin;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestRequest.Method;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.RestResponseListener;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.elasticsearch.xpack.sql.session.Cursor;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.function.Function;
import static org.elasticsearch.rest.BytesRestResponse.TEXT_CONTENT_TYPE;
import static org.elasticsearch.rest.RestStatus.OK;
import static org.elasticsearch.xpack.sql.util.StringUtils.EMPTY;
/**
 * Base class for REST handlers that speak the legacy binary SQL protocol:
 * the request body is decoded via an {@link AbstractProto} and the response
 * is serialized back in the same binary form.
 */
public abstract class AbstractSqlProtocolRestAction extends BaseRestHandler {
// registry used to (de)serialize the cursors embedded in protocol messages
public static final NamedWriteableRegistry CURSOR_REGISTRY = new NamedWriteableRegistry(Cursor.getNamedWriteables());
// protocol implementation used to decode requests and encode responses
private final AbstractProto proto;
protected AbstractSqlProtocolRestAction(Settings settings, AbstractProto proto) {
super(settings);
this.proto = proto;
}
/**
 * Subclass hook invoked with the already-decoded protocol {@link Request}.
 */
protected abstract RestChannelConsumer innerPrepareRequest(Request request, Client client) throws IOException;
/**
 * Adapts a listener so the action response is converted into a protocol
 * {@link Response} by {@code responseBuilder} and written to the channel in
 * the binary wire format.
 */
protected <T> ActionListener<T> toActionListener(RestChannel channel, Function<T, Response> responseBuilder) {
return new RestResponseListener<T>(channel) {
@Override
public RestResponse buildResponse(T response) throws Exception {
try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) {
try (DataOutputStream dataOutputStream = new DataOutputStream(bytesStreamOutput)) {
// TODO use the version from the client
// Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3080
proto.writeResponse(responseBuilder.apply(response), Proto.CURRENT_VERSION, dataOutputStream);
}
// inner stream is closed (and therefore flushed) before the buffered bytes are read
return new BytesRestResponse(OK, TEXT_CONTENT_TYPE, bytesStreamOutput.bytes());
}
}
};
}
@Override
protected final RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
// HEAD requests get an empty 200 so clients can probe whether the endpoint exists
if (restRequest.method() == Method.HEAD) {
return channel -> channel.sendResponse(new BytesRestResponse(OK, EMPTY));
}
// decode the binary request body before handing it off to the subclass
Request request;
try (DataInputStream in = new DataInputStream(restRequest.content().streamInput())) {
request = proto.readRequest(in);
}
return innerPrepareRequest(request, client);
}
}

View File

@ -1,96 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.plugin;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.xpack.sql.session.Configuration;
import org.elasticsearch.xpack.sql.session.Cursor;
import org.elasticsearch.xpack.sql.session.RowSet;
import java.io.IOException;
import java.sql.JDBCType;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
/**
 * A {@link Cursor} decorator that carries the JDBC column types needed to
 * serialize rows for JDBC clients alongside the wrapped cursor.
 */
public class JdbcCursor implements Cursor {
    public static final String NAME = "j";

    // cursor all paging/clearing work is delegated to
    private final Cursor wrapped;
    // one JDBCType per result column, in column order
    private final List<JDBCType> columnTypes;

    /**
     * If {@code newCursor} is empty, returns the empty cursor. Otherwise wraps
     * it in a new {@link JdbcCursor} carrying the given column types.
     */
    public static Cursor wrap(Cursor newCursor, List<JDBCType> types) {
        return newCursor == EMPTY ? EMPTY : new JdbcCursor(newCursor, types);
    }

    private JdbcCursor(Cursor wrapped, List<JDBCType> columnTypes) {
        this.wrapped = wrapped;
        this.columnTypes = columnTypes;
    }

    public JdbcCursor(StreamInput in) throws IOException {
        wrapped = in.readNamedWriteable(Cursor.class);
        int count = in.readVInt();
        columnTypes = new ArrayList<>(count);
        // types are stored as their JDBC vendor type numbers
        while (columnTypes.size() < count) {
            columnTypes.add(JDBCType.valueOf(in.readVInt()));
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeNamedWriteable(wrapped);
        out.writeVInt(columnTypes.size());
        for (int i = 0; i < columnTypes.size(); i++) {
            out.writeVInt(columnTypes.get(i).getVendorTypeNumber());
        }
    }

    public List<JDBCType> getTypes() {
        return columnTypes;
    }

    @Override
    public void nextPage(Configuration cfg, Client client, ActionListener<RowSet> listener) {
        wrapped.nextPage(cfg, client, listener);
    }

    @Override
    public void clear(Configuration cfg, Client client, ActionListener<Boolean> listener) {
        wrapped.clear(cfg, client, listener);
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        JdbcCursor other = (JdbcCursor) o;
        return Objects.equals(wrapped, other.wrapped)
                && Objects.equals(columnTypes, other.columnTypes);
    }

    @Override
    public int hashCode() {
        return Objects.hash(wrapped, columnTypes);
    }
}

View File

@ -1,183 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.plugin;
import org.elasticsearch.Version;
import org.elasticsearch.action.main.MainAction;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.xpack.sql.analysis.index.EsIndex;
import org.elasticsearch.xpack.sql.analysis.index.IndexResolver;
import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexInfo;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnInfo;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryCloseRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryCloseResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageResponse;
import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest.Mode;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.session.Cursor;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.util.StringUtils;
import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.sql.JDBCType;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import java.util.regex.Pattern;
import static java.util.stream.Collectors.toList;
import static org.elasticsearch.common.Strings.hasText;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.xpack.sql.util.StringUtils.EMPTY;
/**
 * REST handler for the binary JDBC endpoint ({@code POST /_xpack/sql/jdbc}).
 * Dispatches on the protocol {@link RequestType} and adapts the regular SQL
 * actions' responses back into binary protocol responses.
 */
public class RestSqlJdbcAction extends AbstractSqlProtocolRestAction {
private final SqlLicenseChecker sqlLicenseChecker;
private final IndexResolver indexResolver;
public RestSqlJdbcAction(Settings settings, RestController controller, SqlLicenseChecker sqlLicenseChecker,
IndexResolver indexResolver) {
super(settings, Proto.INSTANCE);
controller.registerHandler(POST, "/_xpack/sql/jdbc", this);
this.sqlLicenseChecker = sqlLicenseChecker;
this.indexResolver = indexResolver;
}
@Override
public String getName() {
return "xpack_sql_jdbc_action";
}
@Override
protected RestChannelConsumer innerPrepareRequest(Request request, Client client)
throws IOException {
Consumer<RestChannel> consumer = operation(request, client);
return consumer::accept;
}
/**
 * Actual implementation of the operation: checks the license, then routes the
 * request to the handler for its {@link RequestType}.
 */
public Consumer<RestChannel> operation(Request request, Client client) {
sqlLicenseChecker.checkIfSqlAllowed(Mode.JDBC);
RequestType requestType = (RequestType) request.requestType();
switch (requestType) {
case INFO:
// cluster/node/version info straight from the main action
return channel -> client.execute(MainAction.INSTANCE, new MainRequest(), toActionListener(channel, response ->
new InfoResponse(response.getNodeName(), response.getClusterName().value(),
response.getVersion().major, response.getVersion().minor, response.getVersion().toString(),
response.getBuild().shortHash(), response.getBuild().date())));
case META_TABLE:
return metaTable((MetaTableRequest) request);
case META_COLUMN:
return metaColumn((MetaColumnRequest) request);
case QUERY_INIT:
return queryInit(client, (QueryInitRequest) request);
case QUERY_PAGE:
return queryPage(client, (QueryPageRequest) request);
case QUERY_CLOSE:
return queryClose(client, (QueryCloseRequest) request);
default:
throw new IllegalArgumentException("Unsupported action [" + requestType + "]");
}
}
/**
 * Resolves index names matching the request's SQL LIKE pattern; the pattern is
 * translated both to an index wildcard (for resolution) and a Java regex (for filtering).
 */
private Consumer<RestChannel> metaTable(MetaTableRequest request) {
String indexPattern = hasText(request.pattern()) ? StringUtils.likeToIndexWildcard(request.pattern(), (char) 0) : "*";
String regexPattern = hasText(request.pattern()) ? StringUtils.likeToJavaPattern(request.pattern(), (char) 0) : null;
return channel -> indexResolver.resolveNames(indexPattern, regexPattern, toActionListener(channel, list ->
new MetaTableResponse(list.stream()
.map(IndexInfo::name)
.collect(toList()))));
}
/**
 * Lists columns of the indices matching the table pattern, optionally filtered
 * by the column pattern (also SQL LIKE syntax, compiled to a Java regex).
 */
private Consumer<RestChannel> metaColumn(MetaColumnRequest request) {
String indexPattern = hasText(request.tablePattern()) ? StringUtils.likeToIndexWildcard(request.tablePattern(), (char) 0) : "*";
String regexPattern = hasText(request.tablePattern()) ? StringUtils.likeToJavaPattern(request.tablePattern(), (char) 0) : null;
Pattern columnMatcher = hasText(request.columnPattern()) ? Pattern.compile(
StringUtils.likeToJavaPattern(request.columnPattern(), (char) 0)) : null;
return channel -> indexResolver.resolveAsSeparateMappings(indexPattern, regexPattern, toActionListener(channel, esIndices -> {
List<MetaColumnInfo> columns = new ArrayList<>();
for (EsIndex esIndex : esIndices) {
// position is advanced for every mapped field, matched or not, so it reflects
// the column's place in the full mapping
int pos = 0;
for (Map.Entry<String, DataType> entry : esIndex.mapping().entrySet()) {
pos++;
String name = entry.getKey();
if (columnMatcher == null || columnMatcher.matcher(name).matches()) {
DataType type = entry.getValue();
// the column size it's actually its precision (based on the Javadocs)
columns.add(new MetaColumnInfo(esIndex.name(), name, type.sqlType(), type.precision(), pos));
}
}
}
return new MetaColumnResponse(columns);
}));
}
/**
 * Runs the initial query, wrapping the resulting cursor in a {@link JdbcCursor}
 * that carries the column JDBC types needed to serialize subsequent pages.
 */
private Consumer<RestChannel> queryInit(Client client, QueryInitRequest request) {
SqlQueryRequest sqlRequest = new SqlQueryRequest(Mode.JDBC, request.query, null, DateTimeZone.forTimeZone(request.timeZone),
request.fetchSize,
TimeValue.timeValueMillis(request.timeout.requestTimeout),
TimeValue.timeValueMillis(request.timeout.pageTimeout),
"");
long start = System.nanoTime();
return channel -> client.execute(SqlQueryAction.INSTANCE, sqlRequest, toActionListener(channel, response -> {
List<JDBCType> types = new ArrayList<>(response.columns().size());
List<ColumnInfo> columns = new ArrayList<>(response.columns().size());
for (org.elasticsearch.xpack.sql.plugin.ColumnInfo info : response.columns()) {
types.add(info.jdbcType());
columns.add(new ColumnInfo(info.name(), info.jdbcType(), EMPTY, EMPTY, EMPTY, EMPTY, info.displaySize()));
}
Cursor cursor = Cursor.decodeFromString(response.cursor());
return new QueryInitResponse(System.nanoTime() - start,
Cursor.encodeToString(Version.CURRENT, JdbcCursor.wrap(cursor, types)), columns,
new SqlResponsePayload(types, response.rows()));
}));
}
/**
 * Fetches the next page for an existing {@link JdbcCursor}, reusing the column
 * types it carries to serialize the rows.
 */
private Consumer<RestChannel> queryPage(Client client, QueryPageRequest request) {
Cursor cursor = Cursor.decodeFromString(request.cursor);
if (cursor instanceof JdbcCursor == false) {
throw new IllegalArgumentException("Unexpected cursor type: [" + cursor + "]");
}
List<JDBCType> types = ((JdbcCursor)cursor).getTypes();
// NB: the timezone and page size are locked already by the query so pass in defaults (as they are not read anyway)
SqlQueryRequest sqlRequest = new SqlQueryRequest(Mode.JDBC, EMPTY, null, SqlQueryRequest.DEFAULT_TIME_ZONE, 0,
TimeValue.timeValueMillis(request.timeout.requestTimeout),
TimeValue.timeValueMillis(request.timeout.pageTimeout),
request.cursor);
long start = System.nanoTime();
return channel -> client.execute(SqlQueryAction.INSTANCE, sqlRequest, toActionListener(channel,
response -> new QueryPageResponse(System.nanoTime() - start,
Cursor.encodeToString(Version.CURRENT, JdbcCursor.wrap(Cursor.decodeFromString(response.cursor()), types)),
new SqlResponsePayload(types, response.rows()))));
}
/**
 * Closes the server-side cursor via the clear-cursor action.
 */
private Consumer<RestChannel> queryClose(Client client, QueryCloseRequest request) {
SqlClearCursorRequest sqlRequest = new SqlClearCursorRequest(Mode.JDBC, request.cursor);
return channel -> client.execute(SqlClearCursorAction.INSTANCE, sqlRequest, toActionListener(channel,
response -> new QueryCloseResponse(response.isSucceeded())));
}
}

View File

@ -66,7 +66,6 @@ public class SqlPlugin implements ActionPlugin {
return Arrays.asList(new RestSqlQueryAction(settings, restController),
new SqlTranslateAction.RestAction(settings, restController),
new RestSqlJdbcAction(settings, restController, sqlLicenseChecker, indexResolver),
new RestSqlClearCursorAction(settings, restController),
new RestSqlListTablesAction(settings, restController),
new RestSqlListColumnsAction(settings, restController));

View File

@ -1,73 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.plugin;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Payload;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ProtoUtils;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataInput;
import org.elasticsearch.xpack.sql.protocol.shared.SqlDataOutput;
import org.joda.time.ReadableDateTime;
import java.io.IOException;
import java.sql.JDBCType;
import java.util.List;
import java.util.Objects;
/**
 * Implementation {@link Payload} that adapts it to data from
 * {@link SqlQueryResponse}. Write-only: it exists solely to serialize rows
 * onto the binary JDBC wire format.
 */
class SqlResponsePayload implements Payload {
    // JDBC type of each column, in column order
    private final List<JDBCType> typeLookup;
    private final List<List<Object>> rows;

    SqlResponsePayload(List<JDBCType> typeLookup, List<List<Object>> rows) {
        this.typeLookup = typeLookup;
        this.rows = rows;
    }

    @Override
    public void readFrom(SqlDataInput in) throws IOException {
        throw new UnsupportedOperationException("This class can only be serialized");
    }

    @Override
    public void writeTo(SqlDataOutput out) throws IOException {
        out.writeInt(rows.size());
        // plain indexed loop per row: avoids a Consumer + try/catch for each value
        for (List<Object> row : rows) {
            int width = row.size();
            for (int column = 0; column < width; column++) {
                Object cell = row.get(column);
                if (cell instanceof ReadableDateTime) {
                    // dates go over the wire as epoch millis
                    cell = Long.valueOf(((ReadableDateTime) cell).getMillis());
                }
                ProtoUtils.writeValue(out, cell, typeLookup.get(column));
            }
        }
    }

    @Override
    public int hashCode() {
        return Objects.hash(typeLookup, rows);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        SqlResponsePayload other = (SqlResponsePayload) obj;
        return Objects.equals(typeLookup, other.typeLookup)
                && Objects.equals(rows, other.rows);
    }
}

View File

@ -45,28 +45,28 @@ public class TransportSqlListColumnsAction extends HandledTransportAction<SqlLis
protected void doExecute(SqlListColumnsRequest request, ActionListener<SqlListColumnsResponse> listener) {
sqlLicenseChecker.checkIfSqlAllowed(request.mode());
// TODO: This is wrong
// See https://github.com/elastic/x-pack-elasticsearch/pull/3438/commits/61b7c26fe08db2721f0431579f215fe493744af3
// and https://github.com/elastic/x-pack-elasticsearch/issues/3460
String indexPattern = hasText(request.getTablePattern()) ?
StringUtils.likeToIndexWildcard(request.getTablePattern(), (char) 0) : "*";
String regexPattern = hasText(request.getTablePattern()) ?
StringUtils.likeToJavaPattern(request.getTablePattern(), (char) 0) : null;
String indexPattern = hasText(request.getTablePattern()) ? request.getTablePattern() : "*";
String regexPattern = null;
Pattern columnMatcher = hasText(request.getColumnPattern()) ? Pattern.compile(
StringUtils.likeToJavaPattern(request.getColumnPattern(), (char) 0)) : null;
indexResolver.resolveAsSeparateMappings(indexPattern, regexPattern, ActionListener.wrap(esIndices -> {
List<ColumnInfo> columns = new ArrayList<>();
List<MetaColumnInfo> columns = new ArrayList<>();
for (EsIndex esIndex : esIndices) {
int pos = 0;
for (Map.Entry<String, DataType> entry : esIndex.mapping().entrySet()) {
String name = entry.getKey();
pos++; // JDBC is 1-based so we start with 1 here
if (columnMatcher == null || columnMatcher.matcher(name).matches()) {
DataType type = entry.getValue();
// the column size it's actually its precision (based on the Javadocs)
if (request.mode() == JDBC) {
columns.add(new ColumnInfo(esIndex.name(), name, type.esName(), type.sqlType(), type.displaySize()));
// the column size it's actually its precision (based on the Javadocs)
columns.add(new MetaColumnInfo(esIndex.name(), name, type.esName(), type.sqlType(), type.precision(), pos));
} else {
columns.add(new ColumnInfo(esIndex.name(), name, type.esName()));
columns.add(new MetaColumnInfo(esIndex.name(), name, type.esName(), pos));
}
}
}

Some files were not shown because too many files have changed in this diff Show More