Huge protocol refactor
Too big. Sorry. Some good things though:

1. Share some code between CLI and JDBC. Probably a good thing at this point, and better as we go on, I think.
2. Add round trip tests for all of proto.
3. Remove the `data` member from `QueryInitResponse` and `QueryPageResponse` so response serialization is consistent with everything else.

Original commit: elastic/x-pack-elasticsearch@c6940a32ed
This commit is contained in:
parent 3d70d7b64e
commit cf29dea577
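A note on item 2 of the message: the round trip tests in the diff below all funnel through `RoundTripTestUtils.assertRoundTrip`, which is not part of this excerpt. As a rough, hypothetical sketch of the idea only (names, signatures, and the use of JUnit's `assertEquals` are assumed here, not taken from the repository), a round-trip assertion serializes the object, reads it back from the produced bytes, and compares the two, which relies on the protocol objects implementing `equals()`:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

import static org.junit.Assert.assertEquals;

public class RoundTripSketch {
    interface Writer<T> { void write(T value, DataOutput out) throws IOException; }
    interface Reader<T> { T read(DataInput in) throws IOException; }

    /** Write the value, read it back from the resulting bytes, and check equality. */
    static <T> void assertRoundTrip(T value, Writer<T> writer, Reader<T> reader) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writer.write(value, new DataOutputStream(bytes));
        T copy = reader.read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        assertEquals(value, copy);
    }
}
```

The `CliRoundTripTestUtils` changes further down layer the protocol framing on top of this shape by passing `Proto.INSTANCE::writeRequest` and `Proto.INSTANCE::readRequest` as the writer and reader.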
@@ -10,7 +10,6 @@
  <!-- NOCOMMIT Temporary-->
  <suppress files="sql[/\\]jdbc[/\\].*.java" checks="LineLength" />
  <suppress files="sql[/\\]jdbc-proto[/\\].*.java" checks="LineLength" />
  <suppress files="sql[/\\]server[/\\].*.java" checks="LineLength" />
  <suppress files="sql[/\\]server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]xpack[/\\]sql[/\\]expression[/\\].*.java" checks="EqualsHashCode" />
@@ -59,9 +59,9 @@ import org.elasticsearch.xpack.security.user.ElasticUser;
import org.elasticsearch.xpack.security.user.SystemUser;
import org.elasticsearch.xpack.security.user.User;
import org.elasticsearch.xpack.security.user.XPackUser;
import org.elasticsearch.xpack.sql.plugin.jdbc.action.JdbcAction;
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction;
import org.elasticsearch.xpack.sql.server.cli.CliAction;
import org.elasticsearch.xpack.sql.server.jdbc.JdbcAction;

import java.util.Arrays;
import java.util.Collections;
@@ -1,10 +1,9 @@
description = 'SQL for Elasticsearch'

import org.gradle.plugins.ide.eclipse.model.*;

subprojects {
    apply plugin: 'elasticsearch.build'

    // NOCOMMIT this is abnormal enough it is worth removing
    sourceSets.test.resources.srcDirs = ["src/test/resources", "src/test/java"]

    dependencies {
@@ -1,9 +1,8 @@
apply plugin: 'elasticsearch.build'

description = 'Request and response objects shared by the cli and ' +
        'its backend in :sql:server'

dependencies {
    compile project(':x-pack-elasticsearch:sql:shared-proto')
    testCompile project(':x-pack-elasticsearch:sql:test-utils')
}

@@ -11,3 +10,8 @@ forbiddenApisMain {
    // does not depend on core, so only jdk and http signatures should be checked
    signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')]
}

dependencyLicenses {
    mapping from: /shared-proto.*/, to: 'elasticsearch'
    ignoreSha 'shared-proto'
}
@@ -0,0 +1,202 @@
[new file: the standard Apache License, Version 2.0 text, added in full]
@@ -0,0 +1,5 @@
Elasticsearch
Copyright 2009-2017 Elasticsearch

This product includes software developed by The Apache Software
Foundation (http://www.apache.org/).
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.sql.cli.net.protocol;

import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.Request;

import java.io.DataInput;
import java.io.DataOutput;
@@ -7,6 +7,8 @@ package org.elasticsearch.xpack.sql.cli.net.protocol;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.ResponseType;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;

import java.io.DataInput;
import java.io.DataOutput;

@@ -26,7 +28,7 @@ public class CommandResponse extends Response {
        this.data = data;
    }

    CommandResponse(DataInput in) throws IOException {
    CommandResponse(Request request, DataInput in) throws IOException {
        serverTimeQueryReceived = in.readLong();
        serverTimeResponseSent = in.readLong();
        requestId = in.readUTF();

@@ -34,7 +36,7 @@ public class CommandResponse extends Response {
    }

    @Override
    void write(int clientVersion, DataOutput out) throws IOException {
    protected void write(int clientVersion, DataOutput out) throws IOException {
        out.writeLong(serverTimeQueryReceived);
        out.writeLong(serverTimeResponseSent);
        out.writeUTF(requestId);

@@ -50,12 +52,12 @@ public class CommandResponse extends Response {
    }

    @Override
    RequestType requestType() {
    public RequestType requestType() {
        return RequestType.COMMAND;
    }

    @Override
    ResponseType responseType() {
    public ResponseType responseType() {
        return ResponseType.COMMAND;
    }
@@ -7,74 +7,26 @@ package org.elasticsearch.xpack.sql.cli.net.protocol;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.ResponseType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractErrorResponse;
import org.elasticsearch.xpack.sql.protocol.shared.Request;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Objects;

/**
 * Response sent when there is a server side error.
 */
public class ErrorResponse extends Response {
    private final RequestType requestType;
    public final String message, cause, stack;

public class ErrorResponse extends AbstractErrorResponse<RequestType> {
    public ErrorResponse(RequestType requestType, String message, String cause, String stack) {
        this.requestType = requestType;
        this.message = message;
        this.cause = cause;
        this.stack = stack;
        super(requestType, message, cause, stack);
    }

    ErrorResponse(DataInput in) throws IOException {
        requestType = RequestType.read(in);
        message = in.readUTF();
        cause = in.readUTF();
        stack = in.readUTF();
    ErrorResponse(Request request, DataInput in) throws IOException {
        super((RequestType) request.requestType(), in);
    }

    @Override
    void write(int clientVersion, DataOutput out) throws IOException {
        requestType.write(out);
        out.writeUTF(message);
        out.writeUTF(cause);
        out.writeUTF(stack);
    }

    @Override
    protected String toStringBody() {
        return "request=[" + requestType
                + "] message=[" + message
                + "] cause=[" + cause
                + "] stack=[" + stack + "]";
    }

    @Override
    RequestType requestType() {
        return requestType;
    }

    @Override
    ResponseType responseType() {
    public ResponseType responseType() {
        return ResponseType.ERROR;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        ErrorResponse other = (ErrorResponse) obj;
        return Objects.equals(requestType, other.requestType)
                && Objects.equals(message, other.message)
                && Objects.equals(cause, other.cause)
                && Objects.equals(stack, other.stack);
    }

    @Override
    public int hashCode() {
        return Objects.hash(requestType, message, cause, stack);
    }

}
}
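The point of the hunk above (and of its JDBC counterpart) is that the fields, the wire reads and writes, `toStringBody`, `equals`, and `hashCode` appear to move into a shared base class, `AbstractErrorResponse`, in the new shared-proto module. That base class is not part of this excerpt; the following is a purely hypothetical sketch, inferred only from how the CLI subclass calls `super(...)`, of roughly the shape such shared code would take:

```java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Objects;

// Hypothetical sketch only: the real AbstractErrorResponse in :sql:shared-proto
// is not shown in this diff, and its actual API may differ.
public abstract class AbstractErrorResponseSketch<R> {
    private final R requestType;
    public final String message, cause, stack;

    protected AbstractErrorResponseSketch(R requestType, String message, String cause, String stack) {
        this.requestType = requestType;
        this.message = message;
        this.cause = cause;
        this.stack = stack;
    }

    // The request type is no longer read off the wire; it comes from the
    // request handed to readResponse(request, in) in the new Proto code.
    protected AbstractErrorResponseSketch(R requestType, DataInput in) throws IOException {
        this(requestType, in.readUTF(), in.readUTF(), in.readUTF());
    }

    protected void write(int clientVersion, DataOutput out) throws IOException {
        out.writeUTF(message);
        out.writeUTF(cause);
        out.writeUTF(stack);
    }

    public final R requestType() {
        return requestType;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        AbstractErrorResponseSketch<?> other = (AbstractErrorResponseSketch<?>) obj;
        return Objects.equals(requestType, other.requestType)
                && Objects.equals(message, other.message)
                && Objects.equals(cause, other.cause)
                && Objects.equals(stack, other.stack);
    }

    @Override
    public int hashCode() {
        return Objects.hash(requestType, message, cause, stack);
    }
}
```

With something like that in place, the CLI's `ErrorResponse` shrinks to the constructor plumbing shown above, and the JDBC module can reuse the same base instead of repeating it.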
@@ -7,68 +7,27 @@ package org.elasticsearch.xpack.sql.cli.net.protocol;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.ResponseType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractExceptionResponse;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto.SqlExceptionType;
import org.elasticsearch.xpack.sql.protocol.shared.Request;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Objects;

/**
 * Response sent when there is a client side error.
 */
public class ExceptionResponse extends Response {
    private final RequestType requestType;
    public final String message, cause;

    public ExceptionResponse(RequestType requestType, String message, String cause) {
        this.requestType = requestType;
        this.message = message;
        this.cause = cause;
public class ExceptionResponse extends AbstractExceptionResponse<RequestType> {
    public ExceptionResponse(RequestType requestType, String message, String cause, SqlExceptionType exceptionType) {
        super(requestType, message, cause, exceptionType);
    }

    ExceptionResponse(DataInput in) throws IOException {
        requestType = RequestType.read(in);
        message = in.readUTF();
        cause = in.readUTF();
    ExceptionResponse(Request request, DataInput in) throws IOException {
        super((RequestType) request.requestType(), in);
    }

    @Override
    void write(int clientVersion, DataOutput out) throws IOException {
        requestType.write(out);
        out.writeUTF(message);
        out.writeUTF(cause);
    }

    @Override
    protected String toStringBody() {
        return "request=[" + requestType
                + "] message=[" + message
                + "] cause=[" + cause + "]";
    }

    @Override
    RequestType requestType() {
        return requestType;
    }

    @Override
    ResponseType responseType() {
    public ResponseType responseType() {
        return ResponseType.EXCEPTION;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        ExceptionResponse other = (ExceptionResponse) obj;
        return Objects.equals(requestType, other.requestType)
                && Objects.equals(message, other.message)
                && Objects.equals(cause, other.cause);
    }

    @Override
    public int hashCode() {
        return Objects.hash(requestType, message, cause);
    }
}
@@ -6,80 +6,32 @@
package org.elasticsearch.xpack.sql.cli.net.protocol;

import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractInfoRequest;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Objects;

public class InfoRequest extends Request {
    public final String jvmVersion, jvmVendor, jvmClassPath, osName, osVersion;

/**
 * Request general information about the server.
 */
public class InfoRequest extends AbstractInfoRequest {
    /**
     * Build the info request containing information about the current JVM.
     */
    public InfoRequest() {
        jvmVersion = System.getProperty("java.version", "");
        jvmVendor = System.getProperty("java.vendor", "");
        jvmClassPath = System.getProperty("java.class.path", "");
        osName = System.getProperty("os.name", "");
        osVersion = System.getProperty("os.version", "");
        super();
    }

    InfoRequest(String jvmVersion, String jvmVendor, String jvmClassPath, String osName, String osVersion) {
        this.jvmVersion = jvmVersion;
        this.jvmVendor = jvmVendor;
        this.jvmClassPath = jvmClassPath;
        this.osName = osName;
        this.osVersion = osVersion;
        super(jvmVersion, jvmVendor, jvmClassPath, osName, osVersion);
    }

    InfoRequest(int clientVersion, DataInput in) throws IOException {
        jvmVersion = in.readUTF();
        jvmVendor = in.readUTF();
        jvmClassPath = in.readUTF();
        osName = in.readUTF();
        osVersion = in.readUTF();
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(jvmVersion);
        out.writeUTF(jvmVendor);
        out.writeUTF(jvmClassPath);
        out.writeUTF(osName);
        out.writeUTF(osVersion);
    }

    @Override
    protected String toStringBody() {
        return "jvm=[version=[" + jvmVersion
                + "] vendor=[" + jvmVendor
                + "] classPath=[" + jvmClassPath
                + "]] os=[name=[" + osName
                + "] version=[" + osVersion + "]]";
        super(clientVersion, in);
    }

    @Override
    public RequestType requestType() {
        return RequestType.INFO;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        InfoRequest other = (InfoRequest) obj;
        return Objects.equals(jvmVersion, other.jvmVersion)
                && Objects.equals(jvmVendor, other.jvmVendor)
                && Objects.equals(jvmClassPath, other.jvmClassPath)
                && Objects.equals(osName, other.osName)
                && Objects.equals(osVersion, other.osVersion);
    }

    @Override
    public int hashCode() {
        return Objects.hash(jvmVersion, jvmVendor, jvmClassPath, osName, osVersion);
    }
}
@@ -7,88 +7,32 @@ package org.elasticsearch.xpack.sql.cli.net.protocol;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.ResponseType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractInfoResponse;
import org.elasticsearch.xpack.sql.protocol.shared.Request;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Objects;

public class InfoResponse extends Response {

    public final String node, cluster, versionString, versionHash, versionDate;
    public final int majorVersion, minorVersion;

/**
 * General information about the server.
 */
public class InfoResponse extends AbstractInfoResponse {
    public InfoResponse(String nodeName, String clusterName, byte versionMajor, byte versionMinor, String version,
            String versionHash, String versionDate) {
        this.node = nodeName;
        this.cluster = clusterName;
        this.versionString = version;
        this.versionHash = versionHash;
        this.versionDate = versionDate;

        this.majorVersion = versionMajor;
        this.minorVersion = versionMinor;
        super(nodeName, clusterName, versionMajor, versionMinor, version, versionHash, versionDate);
    }

    InfoResponse(DataInput in) throws IOException {
        node = in.readUTF();
        cluster = in.readUTF();
        majorVersion = in.readByte();
        minorVersion = in.readByte();
        versionString = in.readUTF();
        versionHash = in.readUTF();
        versionDate = in.readUTF();
    InfoResponse(Request request, DataInput in) throws IOException {
        super(request, in);
    }

    @Override
    void write(int clientVersion, DataOutput out) throws IOException {
        out.writeUTF(node);
        out.writeUTF(cluster);
        out.writeByte(majorVersion);
        out.writeByte(minorVersion);
        out.writeUTF(versionString);
        out.writeUTF(versionHash);
        out.writeUTF(versionDate);
    }

    @Override
    protected String toStringBody() {
        return "node=[" + node
                + "] cluster=[" + cluster
                + "] version=[" + versionString
                + "]/[major=[" + majorVersion
                + "] minor=[" + minorVersion
                + "] hash=[" + versionHash
                + "] date=[" + versionDate + "]]";
    }

    @Override
    RequestType requestType() {
    public RequestType requestType() {
        return RequestType.INFO;
    }

    @Override
    ResponseType responseType() {
    public ResponseType responseType() {
        return ResponseType.INFO;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        InfoResponse other = (InfoResponse) obj;
        return Objects.equals(node, other.node)
                && Objects.equals(cluster, other.cluster)
                && Objects.equals(majorVersion, other.majorVersion)
                && Objects.equals(minorVersion, other.minorVersion)
                && Objects.equals(versionString, other.versionString)
                && Objects.equals(versionHash, other.versionHash)
                && Objects.equals(versionDate, other.versionDate);
    }

    @Override
    public int hashCode() {
        return Objects.hash(node, cluster, majorVersion, minorVersion, versionString, versionHash, versionDate);
    }
}
@@ -5,6 +5,8 @@
 */
package org.elasticsearch.xpack.sql.cli.net.protocol;

import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

@@ -13,73 +15,22 @@ import java.io.IOException;
 * Binary protocol for the CLI. All backwards compatibility is done using the
 * version number sent in the header.
 */
public abstract class Proto {
    private static final int MAGIC_NUMBER = 0x0C0DEC110;
    public static final int CURRENT_VERSION = 000_000_001;
public final class Proto extends AbstractProto {
    public static final Proto INSTANCE = new Proto();

    private Proto() {
        // Static utilities
    private Proto() {}

    @Override
    protected RequestType readRequestType(DataInput in) throws IOException {
        return RequestType.read(in);
    }

    public static void writeRequest(Request request, DataOutput out) throws IOException {
        writeHeader(CURRENT_VERSION, out);
        request.requestType().write(out);
        request.write(out);
    @Override
    protected ResponseType readResponseType(DataInput in) throws IOException {
        return ResponseType.read(in);
    }

    public static Request readRequest(DataInput in) throws IOException {
        int clientVersion = readHeader(in);
        if (clientVersion > CURRENT_VERSION) {
            throw new IOException("Unknown client version [" + clientVersion + "]. Always upgrade sql last.");
            // NOCOMMIT I believe we usually advise upgrading the clients *first* so this might be backwards.....
        }
        return RequestType.read(in).reader.read(clientVersion, in);
    }

    public static void writeResponse(Response response, int clientVersion, DataOutput out) throws IOException {
        writeHeader(clientVersion, out);
        response.responseType().write(out);
        response.write(clientVersion, out);
    }

    public static Response readResponse(RequestType expectedRequestType, DataInput in) throws IOException {
        int version = readHeader(in);
        if (version != CURRENT_VERSION) {
            throw new IOException("Response version [" + version + "] does not match client version ["
                    + CURRENT_VERSION + "]. Server is busted.");
        }
        Response response = ResponseType.read(in).reader.read(in);
        if (response.requestType() != expectedRequestType) {
            throw new IOException("Expected request type to be [" + expectedRequestType
                    + "] but was [" + response.requestType() + "]. Server is busted.");
        }
        return response;
    }

    private static void writeHeader(int clientVersion, DataOutput out) throws IOException {
        out.writeInt(MAGIC_NUMBER);
        out.writeInt(clientVersion);
    }

    /**
     * Read the protocol header.
     * @return the version
     * @throws IOException if there is an underlying {@linkplain IOException} or if the protocol is malformed
     */
    private static int readHeader(DataInput in) throws IOException {
        int magic = in.readInt();
        if (magic != MAGIC_NUMBER) {
            throw new IOException("Unknown protocol magic number [" + Integer.toHexString(magic) + "]");
        }
        int version = in.readInt();
        return version;
    }

    @FunctionalInterface
    interface RequestReader {
        Request read(int clientVersion, DataInput in) throws IOException;
    }
    public enum RequestType {
    public enum RequestType implements AbstractProto.RequestType {
        INFO(InfoRequest::new),
        COMMAND(CommandRequest::new);

@@ -89,25 +40,27 @@ public abstract class Proto {
            this.reader = reader;
        }

        void write(DataOutput out) throws IOException {
            out.writeByte(ordinal());
        }

        static RequestType read(DataInput in) throws IOException {
            byte b = in.readByte();
            try {
                return values()[b];
            } catch (ArrayIndexOutOfBoundsException e) {
                throw new IllegalArgumentException("Unknown response type [" + b + "]", e);
                throw new IllegalArgumentException("Unknown request type [" + b + "]", e);
            }
        }

        @Override
        public void write(DataOutput out) throws IOException {
            out.writeByte(ordinal());
        }

        @Override
        public RequestReader reader() {
            return reader;
        }
    }

    @FunctionalInterface
    interface ResponseReader {
        Response read(DataInput in) throws IOException;
    }
    enum ResponseType {
    enum ResponseType implements AbstractProto.ResponseType {
        EXCEPTION(ExceptionResponse::new),
        ERROR(ErrorResponse::new),
        INFO(InfoResponse::new),

@@ -119,10 +72,6 @@ public abstract class Proto {
            this.reader = reader;
        }

        void write(DataOutput out) throws IOException {
            out.writeByte(ordinal());
        }

        static ResponseType read(DataInput in) throws IOException {
            byte b = in.readByte();
            try {

@@ -131,5 +80,15 @@ public abstract class Proto {
                throw new IllegalArgumentException("Unknown response type [" + b + "]", e);
            }
        }

        @Override
        public void write(DataOutput out) throws IOException {
            out.writeByte(ordinal());
        }

        @Override
        public ResponseReader reader() {
            return reader;
        }
    }
}
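For reference, the removed `writeRequest`/`writeHeader`/`readHeader` code above spells out the wire framing that presumably now lives in the shared `AbstractProto` (not shown here): a magic int, the protocol version int, a one-byte request or response type, then the type-specific payload. A minimal sketch of a request frame, reusing the constants from the old code (the payload encoding of `CommandRequest` is assumed for illustration, not shown in the diff):

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FrameSketch {
    static final int MAGIC_NUMBER = 0x0C0DEC110;  // from the removed Proto code above
    static final int CURRENT_VERSION = 1;         // the old 000_000_001 octal literal

    /** Lay out one request frame: header, type discriminator, then the payload. */
    static byte[] frameRequest(byte requestTypeOrdinal, String payload) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeInt(MAGIC_NUMBER);        // header: magic number
            out.writeInt(CURRENT_VERSION);     // header: client version
            out.writeByte(requestTypeOrdinal); // RequestType.write() writes its ordinal
            out.writeUTF(payload);             // request body (writeUTF assumed for the sketch)
        }
        return bytes.toByteArray();
    }
}
```

On the read side the old code validated the magic number and rejected clients newer than the server; the NOCOMMIT above questions whether that direction of the version check is right.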
@@ -5,6 +5,8 @@
 */
package org.elasticsearch.xpack.sql.cli.net.protocol;

import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.elasticsearch.xpack.sql.test.RoundTripTestUtils;

import java.io.IOException;

@@ -15,12 +17,12 @@ public final class CliRoundTripTestUtils {
    }

    static void assertRoundTripCurrentVersion(Request request) throws IOException {
        RoundTripTestUtils.assertRoundTrip(request, Proto::writeRequest, Proto::readRequest);
        RoundTripTestUtils.assertRoundTrip(request, Proto.INSTANCE::writeRequest, Proto.INSTANCE::readRequest);
    }

    static void assertRoundTripCurrentVersion(Response response) throws IOException {
    static void assertRoundTripCurrentVersion(Request request, Response response) throws IOException {
        RoundTripTestUtils.assertRoundTrip(response,
                (r, out) -> Proto.writeResponse(r, Proto.CURRENT_VERSION, out),
                in -> Proto.readResponse(response.requestType(), in));
                (r, out) -> Proto.INSTANCE.writeResponse(r, Proto.CURRENT_VERSION, out),
                in -> Proto.INSTANCE.readResponse(request, in));
    }
}
@@ -10,6 +10,8 @@ import org.elasticsearch.test.ESTestCase;
import java.io.IOException;

import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.cli.net.protocol.CommandRequestTests.randomCommandRequest;

public class CommandResponseTests extends ESTestCase {
    static CommandResponse randomCommandResponse() {
        long start = randomNonNegativeLong();

@@ -18,7 +20,7 @@ public class CommandResponseTests extends ESTestCase {
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomCommandResponse());
        assertRoundTripCurrentVersion(randomCommandRequest(), randomCommandResponse());
    }

    public void testToString() {
@@ -11,14 +11,16 @@ import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType;
import java.io.IOException;

import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.cli.net.protocol.CommandRequestTests.randomCommandRequest;


public class ErrorResponseTests extends ESTestCase {
    static ErrorResponse randomErrorResponse() {
        return new ErrorResponse(randomFrom(RequestType.values()), randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5));
        return new ErrorResponse(RequestType.COMMAND, randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5));
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomErrorResponse());
        assertRoundTripCurrentVersion(randomCommandRequest(), randomErrorResponse());
    }

    public void testToString() {
@@ -7,22 +7,26 @@ package org.elasticsearch.xpack.sql.cli.net.protocol;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto.SqlExceptionType;

import java.io.IOException;

import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.cli.net.protocol.CommandRequestTests.randomCommandRequest;


public class ExceptionResponseTests extends ESTestCase {
    static ExceptionResponse randomExceptionResponse() {
        return new ExceptionResponse(randomFrom(RequestType.values()), randomAlphaOfLength(5), randomAlphaOfLength(5));
        return new ExceptionResponse(RequestType.COMMAND, randomAlphaOfLength(5), randomAlphaOfLength(5),
                randomFrom(SqlExceptionType.values()));
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomExceptionResponse());
        assertRoundTripCurrentVersion(randomCommandRequest(), randomExceptionResponse());
    }

    public void testToString() {
        assertEquals("ExceptionResponse<request=[COMMAND] message=[test] cause=[test]>",
                new ExceptionResponse(RequestType.COMMAND, "test", "test").toString());
        assertEquals("ExceptionResponse<request=[COMMAND] message=[test] cause=[test] type=[SYNTAX]>",
                new ExceptionResponse(RequestType.COMMAND, "test", "test", SqlExceptionType.SYNTAX).toString());
    }
}
@@ -10,6 +10,7 @@ import org.elasticsearch.test.ESTestCase;
import java.io.IOException;

import static org.elasticsearch.xpack.sql.cli.net.protocol.CliRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.cli.net.protocol.InfoRequestTests.randomInfoRequest;

public class InfoResponseTests extends ESTestCase {
    static InfoResponse randomInfoResponse() {

@@ -18,7 +19,7 @@ public class InfoResponseTests extends ESTestCase {
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomInfoResponse());
        assertRoundTripCurrentVersion(randomInfoRequest(), randomInfoResponse());
    }

    public void testToString() {
@@ -10,22 +10,23 @@ dependencies {
    compile "org.jline:jline:3.3.1"
    compile project(':x-pack-elasticsearch:sql:net-client')
    compile project(':x-pack-elasticsearch:sql:cli-proto')
    compile project(':x-pack-elasticsearch:sql:shared-proto')

    testCompile project(":x-pack-elasticsearch:transport-client") // NOCOMMIT probably can remove this
    testCompile project(path: ':x-pack-elasticsearch:plugin', configuration: 'testArtifacts')
    // Used by embedded sql instance
    testCompile project(":x-pack-elasticsearch:transport-client")
    testCompile project(path: ':x-pack-elasticsearch:plugin', configuration: 'testArtifacts') // NOCOMMIT remove this?
    testCompile project(':x-pack-elasticsearch:sql:test-utils')

    // Used by the hack to run InternalTestCluster if not running against a gradle-started cluster.
    testCompile project(path: ':modules:lang-painless', configuration: 'runtime')

    runtime "org.fusesource.jansi:jansi:1.16"
    runtime "org.elasticsearch:jna:4.4.0-1"
}

dependencyLicenses {
    mapping from: /cli-proto.*/, to: 'elasticsearch'
    mapping from: /net-client.*/, to: 'elasticsearch'
    mapping from: /shared-proto.*/, to: 'elasticsearch'
    mapping from: /net.*/, to: 'elasticsearch'
    ignoreSha 'cli-proto'
    ignoreSha 'shared-proto'
    ignoreSha 'net-client'
}
@@ -9,8 +9,8 @@ import org.elasticsearch.xpack.sql.cli.net.protocol.CommandResponse;
import org.elasticsearch.xpack.sql.cli.net.protocol.ErrorResponse;
import org.elasticsearch.xpack.sql.cli.net.protocol.ExceptionResponse;
import org.elasticsearch.xpack.sql.cli.net.protocol.InfoResponse;
import org.elasticsearch.xpack.sql.cli.net.protocol.Response;
import org.elasticsearch.xpack.sql.net.client.SuppressForbidden;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.jline.utils.AttributedStringBuilder;

import java.awt.Desktop;
@@ -10,9 +10,8 @@ import org.elasticsearch.xpack.sql.cli.CliException;
import org.elasticsearch.xpack.sql.cli.net.protocol.CommandRequest;
import org.elasticsearch.xpack.sql.cli.net.protocol.InfoRequest;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.cli.net.protocol.Response;
import org.elasticsearch.xpack.sql.net.client.util.Bytes;
import org.elasticsearch.xpack.sql.protocol.shared.Response;

import java.io.ByteArrayInputStream;
import java.io.DataInput;

@@ -27,13 +26,15 @@ public class CliHttpClient implements AutoCloseable {
    }

    public Response serverInfo() {
        Bytes ba = http.put(out -> Proto.writeRequest(new InfoRequest(), out));
        return doIO(ba, in -> Proto.readResponse(RequestType.INFO, in));
        InfoRequest request = new InfoRequest();
        Bytes ba = http.post(out -> Proto.INSTANCE.writeRequest(request, out));
        return doIO(ba, in -> Proto.INSTANCE.readResponse(request, in));
    }

    public Response command(String command, String requestId) {
        Bytes ba = http.put(out -> Proto.writeRequest(new CommandRequest(command), out));
        return doIO(ba, in -> Proto.readResponse(RequestType.COMMAND, in));
        CommandRequest request = new CommandRequest(command);
        Bytes ba = http.post(out -> Proto.INSTANCE.writeRequest(request, out));
        return doIO(ba, in -> Proto.INSTANCE.readResponse(request, in));
    }

    private static <T> T doIO(Bytes ba, DataInputFunction<T> action) {
@@ -13,8 +13,6 @@ import org.elasticsearch.xpack.sql.net.client.util.CheckedConsumer;

import java.io.DataOutput;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.security.AccessController;
import java.security.PrivilegedAction;

@@ -26,32 +24,10 @@ class HttpClient {
        this.cfg = cfg;
    }

    private URL url(String subPath) {
        try {
            return new URL(cfg.asUrl(), subPath);
        } catch (MalformedURLException ex) {
            throw new ClientException(ex, "Invalid subpath %s", subPath);
        }
    }

    boolean head(String path) {
        try {
            return AccessController.doPrivileged((PrivilegedAction<Boolean>) () -> {
                return JreHttpUrlConnection.http(url(path), cfg, JreHttpUrlConnection::head);
            });
        } catch (ClientException ex) {
            throw new RuntimeException("Transport failure", ex);
        }
    }

    Bytes put(CheckedConsumer<DataOutput, IOException> os) {
        return put("", os);
    }

    Bytes put(String path, CheckedConsumer<DataOutput, IOException> os) {
    Bytes post(CheckedConsumer<DataOutput, IOException> os) {
        try {
            return AccessController.doPrivileged((PrivilegedAction<Bytes>) () -> {
                return JreHttpUrlConnection.http(url(path), cfg, con -> {
                return JreHttpUrlConnection.http(cfg.asUrl(), cfg, con -> {
                    return con.post(os);
                });
            });
@@ -6,7 +6,7 @@
package org.elasticsearch.xpack.sql.cli;

import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.sql.cli.net.protocol.Response;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.elasticsearch.xpack.sql.test.server.ProtoHttpServer;

/**
@@ -10,10 +10,11 @@ import com.sun.net.httpserver.HttpExchange;
import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.sql.TestUtils;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto;
import org.elasticsearch.xpack.sql.cli.net.protocol.Request;
import org.elasticsearch.xpack.sql.cli.net.protocol.Response;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.elasticsearch.xpack.sql.server.AbstractSqlServer;
import org.elasticsearch.xpack.sql.server.cli.CliServer;
import org.elasticsearch.xpack.sql.server.cli.CliServerProtoUtils;
import org.elasticsearch.xpack.sql.test.server.ProtoHandler;

import java.io.DataInput;

@@ -26,14 +27,14 @@ class CliProtoHandler extends ProtoHandler<Response> {
    private final CliServer server;

    CliProtoHandler(Client client) {
        super(client, in -> null, CliServerProtoUtils::write);
        super(client, response -> AbstractSqlServer.write(AbstractProto.CURRENT_VERSION, response));
        this.server = new CliServer(TestUtils.planExecutor(client), clusterName, () -> info.getNode().getName(), info.getVersion(),
                info.getBuild());
    }

    @Override
    protected void handle(HttpExchange http, DataInput in) throws IOException {
        Request req = Proto.readRequest(in);
        Request req = Proto.INSTANCE.readRequest(in);
        server.handle(req, wrap(resp -> sendHttpResponse(http, resp), ex -> fail(http, ex)));
    }
}
@@ -9,6 +9,7 @@ import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.cli.net.protocol.CommandResponse;
import org.elasticsearch.xpack.sql.cli.net.protocol.ExceptionResponse;
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto.SqlExceptionType;
import org.jline.terminal.Terminal;
import org.jline.utils.AttributedStringBuilder;

@@ -23,7 +24,8 @@ public class ResponseToStringTests extends ESTestCase {
    }

    public void testExceptionResponse() {
        AttributedStringBuilder s = ResponseToString.toAnsi(new ExceptionResponse(RequestType.INFO, "test message", "test cause"));
        AttributedStringBuilder s = ResponseToString.toAnsi(new ExceptionResponse(RequestType.INFO, "test message", "test cause",
                randomFrom(SqlExceptionType.values())));
        assertEquals("test message", unstyled(s));
        assertEquals("[1;36mtest message[0m", fullyStyled(s));
    }
@@ -16,8 +16,8 @@ import org.elasticsearch.xpack.sql.cli.net.client.CliHttpClient;
import org.elasticsearch.xpack.sql.cli.net.protocol.CommandResponse;
import org.elasticsearch.xpack.sql.cli.net.protocol.ExceptionResponse;
import org.elasticsearch.xpack.sql.cli.net.protocol.InfoResponse;
import org.elasticsearch.xpack.sql.cli.net.protocol.Response;
import org.elasticsearch.xpack.sql.net.client.SuppressForbidden;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -1,9 +1,8 @@
apply plugin: 'elasticsearch.build'

description = 'Request and response objects shared by the jdbc driver and ' +
        'its backend in :sql:server'

dependencies {
    compile project(':x-pack-elasticsearch:sql:shared-proto')
    testCompile project(':x-pack-elasticsearch:sql:test-utils')
}

@@ -11,3 +10,8 @@ forbiddenApisMain {
    // does not depend on core, so only jdk and http signatures should be checked
    signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')]
}

dependencyLicenses {
    mapping from: /shared-proto.*/, to: 'elasticsearch'
    ignoreSha 'shared-proto'
}
@@ -0,0 +1,202 @@
[new file: the standard Apache License, Version 2.0 text, added in full]
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@@ -0,0 +1,5 @@
Elasticsearch
Copyright 2009-2017 Elasticsearch

This product includes software developed by The Apache Software
Foundation (http://www.apache.org/).
@@ -5,32 +5,101 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.sql.JDBCType;
|
||||
import java.util.Objects;
|
||||
|
||||
public class ColumnInfo {
|
||||
|
||||
public String catalog, schema, table, label, name;
|
||||
public int type;
|
||||
public JDBCType type;
|
||||
|
||||
public ColumnInfo() {}
|
||||
public ColumnInfo(String name, JDBCType type, String table, String catalog, String schema, String label) {
|
||||
if (name == null) {
|
||||
throw new IllegalArgumentException("[name] must not be null");
|
||||
}
|
||||
if (type == null) {
|
||||
throw new IllegalArgumentException("[type] must not be null");
|
||||
}
|
||||
if (table == null) {
|
||||
throw new IllegalArgumentException("[table] must not be null");
|
||||
}
|
||||
if (catalog == null) {
|
||||
throw new IllegalArgumentException("[catalog] must not be null");
|
||||
}
|
||||
if (schema == null) {
|
||||
throw new IllegalArgumentException("[schema] must not be null");
|
||||
}
|
||||
if (label == null) {
|
||||
throw new IllegalArgumentException("[label] must not be null");
|
||||
}
|
||||
this.name = name;
|
||||
this.type = type;
|
||||
this.table = table;
|
||||
this.catalog = catalog;
|
||||
this.schema = schema;
|
||||
this.label = label;
|
||||
}
|
||||
|
||||
public ColumnInfo(String columnName, int columnType,
|
||||
String tableName,
|
||||
String catalogName,
|
||||
String schemaName,
|
||||
String columnLabel) {
|
||||
this.type = columnType;
|
||||
this.catalog = catalogName;
|
||||
this.table = tableName;
|
||||
this.label = columnLabel;
|
||||
this.name = columnName;
|
||||
this.schema = schemaName;
|
||||
ColumnInfo(DataInput in) throws IOException {
|
||||
name = in.readUTF();
|
||||
type = JDBCType.valueOf(in.readInt());
|
||||
table = in.readUTF();
|
||||
catalog = in.readUTF();
|
||||
schema = in.readUTF();
|
||||
label = in.readUTF();
|
||||
}
|
||||
|
||||
void write(DataOutput out) throws IOException {
|
||||
out.writeUTF(name);
|
||||
out.writeInt(type.getVendorTypeNumber());
|
||||
out.writeUTF(table);
|
||||
out.writeUTF(catalog);
|
||||
out.writeUTF(schema);
|
||||
out.writeUTF(label);
|
||||
}
|
||||
|
||||
public int displaySize() {
|
||||
// NOCOMMIT look at this one.....
|
||||
return -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return name;
|
||||
StringBuilder b = new StringBuilder();
|
||||
if (false == "".equals(table)) {
|
||||
b.append(table).append('.');
|
||||
}
|
||||
b.append(name).append("<type=[").append(type).append(']');
|
||||
if (false == "".equals(catalog)) {
|
||||
b.append(" catalog=[").append(catalog).append(']');
|
||||
}
|
||||
if (false == "".equals(schema)) {
|
||||
b.append(" schema=[").append(schema).append(']');
|
||||
}
|
||||
if (false == "".equals(label)) {
|
||||
b.append(" label=[").append(label).append(']');
|
||||
}
|
||||
return b.append('>').toString();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || obj.getClass() != getClass()) {
|
||||
return false;
|
||||
}
|
||||
ColumnInfo other = (ColumnInfo) obj;
|
||||
return name.equals(other.name)
|
||||
&& type.equals(other.type)
|
||||
&& table.equals(other.table)
|
||||
&& catalog.equals(other.catalog)
|
||||
&& schema.equals(other.schema)
|
||||
&& label.equals(other.label);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(name, type, table, catalog, schema, label);
|
||||
}
|
||||
}
|
||||
|
|
|
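The reworked ColumnInfo above validates its fields, serializes itself over DataOutput/DataInput, and gains equals/hashCode, which is what makes whole-message round-trip testing practical. Below is a minimal round-trip sketch, not one of the tests added by this commit; it assumes it sits in the same org.elasticsearch.xpack.sql.jdbc.net.protocol package, since write and the DataInput constructor are package-private.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.sql.JDBCType;

    public class ColumnInfoRoundTripSketch {
        public static void main(String[] args) throws Exception {
            // name, type, table, catalog, schema, label -- none may be null.
            ColumnInfo original = new ColumnInfo("age", JDBCType.INTEGER, "emp.emp", "", "", "age");

            // Serialize through the package-private write(DataOutput) from the diff.
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            original.write(new DataOutputStream(bytes));

            // Read it back through the package-private ColumnInfo(DataInput) constructor and compare.
            ColumnInfo copy = new ColumnInfo(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
            if (false == original.equals(copy)) {
                throw new AssertionError("round trip lost information: " + copy);
            }
        }
    }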
@@ -1,20 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;

public abstract class DataResponse extends Response {

    // there is no type for this field since depending on where it is used, the data is represented differently
    // on the server it is a RowSetCursor (before being sent to the wire), on the client a Page (after being read from the wire)
    public final Object data;

    public DataResponse(Action action, Object data) {
        super(action);
        this.data = data;
    }
}
@@ -5,36 +5,28 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.AbstractErrorResponse;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Status;
|
||||
/**
|
||||
* Response sent when there is a server side error.
|
||||
*/
|
||||
public class ErrorResponse extends AbstractErrorResponse<RequestType> {
|
||||
public ErrorResponse(RequestType requestType, String message, String cause, String stack) {
|
||||
super(requestType, message, cause, stack);
|
||||
}
|
||||
|
||||
public class ErrorResponse extends Response {
|
||||
|
||||
public final String message, cause, stack;
|
||||
|
||||
public ErrorResponse(Action requestedAction, String message, String cause, String stack) {
|
||||
super(requestedAction);
|
||||
this.message = message;
|
||||
this.cause = cause;
|
||||
this.stack = stack;
|
||||
ErrorResponse(Request request, DataInput in) throws IOException {
|
||||
super((RequestType) request.requestType(), in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void encode(DataOutput out) throws IOException {
|
||||
out.writeInt(Status.toError(action));
|
||||
out.writeUTF(message);
|
||||
out.writeUTF(cause);
|
||||
out.writeUTF(stack);
|
||||
}
|
||||
|
||||
public static ErrorResponse decode(DataInput in, Action action) throws IOException {
|
||||
String message = in.readUTF();
|
||||
String cause = in.readUTF();
|
||||
String stack = in.readUTF();
|
||||
return new ErrorResponse(action, message, cause, stack);
|
||||
public ResponseType responseType() {
|
||||
return ResponseType.ERROR;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -5,38 +5,29 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.AbstractExceptionResponse;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto.SqlExceptionType;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.SqlExceptionType;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Status;
|
||||
/**
|
||||
* Response sent when there is a client side error.
|
||||
*/
|
||||
public class ExceptionResponse extends AbstractExceptionResponse<RequestType> {
|
||||
public ExceptionResponse(RequestType requestType, String message, String cause, SqlExceptionType exceptionType) {
|
||||
super(requestType, message, cause, exceptionType);
|
||||
}
|
||||
|
||||
public class ExceptionResponse extends Response {
|
||||
|
||||
public final SqlExceptionType asSql;
|
||||
public final String message, cause;
|
||||
|
||||
public ExceptionResponse(Action requestedAction, String message, String cause, SqlExceptionType asSql) {
|
||||
super(requestedAction);
|
||||
this.message = message;
|
||||
this.cause = cause;
|
||||
this.asSql = asSql;
|
||||
ExceptionResponse(Request request, DataInput in) throws IOException {
|
||||
super((RequestType) request.requestType(), in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void encode(DataOutput out) throws IOException {
|
||||
out.writeInt(Status.toException(action));
|
||||
out.writeUTF(message);
|
||||
out.writeUTF(cause);
|
||||
out.writeInt(asSql.value());
|
||||
}
|
||||
|
||||
public static ExceptionResponse decode(DataInput in, Action action) throws IOException {
|
||||
String message = in.readUTF();
|
||||
String cause = in.readUTF();
|
||||
int sqlType = in.readInt();
|
||||
return new ExceptionResponse(action, message, cause, SqlExceptionType.from(sqlType));
|
||||
public ResponseType responseType() {
|
||||
return ResponseType.EXCEPTION;
|
||||
}
|
||||
}
|
||||
|
|
|
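ExceptionResponse now only carries the request type plus the shared message/cause/exception-type fields; turning the wire-level exception type back into a concrete java.sql exception is the client's job. The old Proto.java removed further down in this diff does exactly that in SqlExceptionType.asException, and the sketch below restates a subset of that mapping with stand-in names (the real SqlExceptionType now lives in :sql:shared-proto, which is not part of this diff).

    import java.sql.SQLClientInfoException;
    import java.sql.SQLDataException;
    import java.sql.SQLException;
    import java.sql.SQLRecoverableException;
    import java.sql.SQLSyntaxErrorException;
    import java.sql.SQLTimeoutException;

    import static java.util.Collections.emptyMap;

    // Illustrative subset of the wire-level-kind -> java.sql exception mapping done on the client.
    enum SqlExceptionKind {
        UNKNOWN, CLIENT_INFO, DATA, SYNTAX, RECOVERABLE, TIMEOUT;

        SQLException asException(String message) {
            String m = message == null ? "" : message;
            switch (this) {
                case CLIENT_INFO: return new SQLClientInfoException(m, emptyMap());
                case DATA:        return new SQLDataException(m);
                case SYNTAX:      return new SQLSyntaxErrorException(m);
                case RECOVERABLE: return new SQLRecoverableException(m);
                case TIMEOUT:     return new SQLTimeoutException(m);
                default:          return new SQLException("Unexpected 'expected' exception " + this);
            }
        }
    }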
@@ -5,55 +5,33 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.AbstractInfoRequest;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.StringUtils.EMPTY;
|
||||
|
||||
public class InfoRequest extends Request {
|
||||
public final String jvmVersion, jvmVendor, jvmClassPath, osName, osVersion;
|
||||
|
||||
/**
|
||||
* Request general information about the server.
|
||||
*/
|
||||
public class InfoRequest extends AbstractInfoRequest {
|
||||
/**
|
||||
* Build the info request containing information about the current JVM.
|
||||
*/
|
||||
public InfoRequest() {
|
||||
super(Action.INFO);
|
||||
jvmVersion = System.getProperty("java.version", EMPTY);
|
||||
jvmVendor = System.getProperty("java.vendor", EMPTY);
|
||||
jvmClassPath = System.getProperty("java.class.path", EMPTY);
|
||||
osName = System.getProperty("os.name", EMPTY);
|
||||
osVersion = System.getProperty("os.version", EMPTY);
|
||||
super();
|
||||
}
|
||||
|
||||
public InfoRequest(String jvmVersion, String jvmVendor, String jvmClassPath, String osName, String osVersion) {
|
||||
super(Action.INFO);
|
||||
this.jvmVersion = jvmVersion;
|
||||
this.jvmVendor = jvmVendor;
|
||||
this.jvmClassPath = jvmClassPath;
|
||||
this.osName = osName;
|
||||
this.osVersion = osVersion;
|
||||
InfoRequest(String jvmVersion, String jvmVendor, String jvmClassPath, String osName, String osVersion) {
|
||||
super(jvmVersion, jvmVendor, jvmClassPath, osName, osVersion);
|
||||
}
|
||||
|
||||
InfoRequest(int clientVersion, DataInput in) throws IOException {
|
||||
super(clientVersion, in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void encode(DataOutput out) throws IOException {
|
||||
out.writeInt(action.value());
|
||||
out.writeUTF(jvmVersion);
|
||||
out.writeUTF(jvmVendor);
|
||||
out.writeUTF(jvmClassPath);
|
||||
out.writeUTF(osName);
|
||||
out.writeUTF(osVersion);
|
||||
}
|
||||
|
||||
public static InfoRequest decode(DataInput in) throws IOException {
|
||||
String jvmVersion = in.readUTF();
|
||||
String jvmVendor = in.readUTF();
|
||||
String jvmClassPath = in.readUTF();
|
||||
String osName = in.readUTF();
|
||||
String osVersion = in.readUTF();
|
||||
|
||||
return new InfoRequest(jvmVersion, jvmVendor, jvmClassPath, osName, osVersion);
|
||||
public RequestType requestType() {
|
||||
return RequestType.INFO;
|
||||
}
|
||||
}
|
||||
|
|
|
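InfoRequest now inherits its fields from AbstractInfoRequest, but the idea is unchanged from the removed constructor above: the client describes itself through a handful of system properties, defaulting to the empty string when one is missing. A small, self-contained sketch of that gathering step (class and method names here are illustrative, not the shared-proto implementation):

    import java.util.LinkedHashMap;
    import java.util.Map;

    final class ClientInfoSketch {
        // Reads the same properties the removed InfoRequest constructor read,
        // falling back to "" (StringUtils.EMPTY in the old code) when one is not set.
        static Map<String, String> clientInfo() {
            Map<String, String> info = new LinkedHashMap<>();
            for (String key : new String[] {"java.version", "java.vendor", "java.class.path", "os.name", "os.version"}) {
                info.put(key, System.getProperty(key, ""));
            }
            return info;
        }

        public static void main(String[] args) {
            clientInfo().forEach((k, v) -> System.out.println(k + " = " + v));
        }
    }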
@@ -5,51 +5,34 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.AbstractInfoResponse;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Status;
|
||||
|
||||
public class InfoResponse extends Response {
|
||||
|
||||
public final String node, cluster, versionString, versionHash, versionDate;
|
||||
public final int majorVersion, minorVersion;
|
||||
|
||||
public InfoResponse(String nodeName, String clusterName, byte versionMajor, byte versionMinor, String version, String versionHash, String versionDate) {
|
||||
super(Action.INFO);
|
||||
|
||||
this.node = nodeName;
|
||||
this.cluster = clusterName;
|
||||
this.versionString = version;
|
||||
this.versionHash = versionHash;
|
||||
this.versionDate = versionDate;
|
||||
|
||||
this.majorVersion = versionMajor;
|
||||
this.minorVersion = versionMinor;
|
||||
/**
|
||||
* General information about the server.
|
||||
*/
|
||||
public class InfoResponse extends AbstractInfoResponse {
|
||||
public InfoResponse(String nodeName, String clusterName, byte versionMajor, byte versionMinor, String version,
|
||||
String versionHash, String versionDate) {
|
||||
super(nodeName, clusterName, versionMajor, versionMinor, version, versionHash, versionDate);
|
||||
}
|
||||
|
||||
public void encode(DataOutput out) throws IOException {
|
||||
out.writeInt(Status.toSuccess(action));
|
||||
out.writeUTF(node);
|
||||
out.writeUTF(cluster);
|
||||
out.writeByte(majorVersion);
|
||||
out.writeByte(minorVersion);
|
||||
out.writeUTF(versionString);
|
||||
out.writeUTF(versionHash);
|
||||
out.writeUTF(versionDate);
|
||||
InfoResponse(Request request, DataInput in) throws IOException {
|
||||
super(request, in);
|
||||
}
|
||||
|
||||
public static InfoResponse decode(DataInput in) throws IOException {
|
||||
String node = in.readUTF();
|
||||
String cluster = in.readUTF();
|
||||
byte versionMajor = in.readByte();
|
||||
byte versionMinor = in.readByte();
|
||||
String version = in.readUTF();
|
||||
String versionHash = in.readUTF();
|
||||
String versionBuild = in.readUTF();
|
||||
@Override
|
||||
public RequestType requestType() {
|
||||
return RequestType.INFO;
|
||||
}
|
||||
|
||||
return new InfoResponse(node, cluster, versionMajor, versionMinor, version, versionHash, versionBuild);
|
||||
@Override
|
||||
public ResponseType responseType() {
|
||||
return ResponseType.INFO;
|
||||
}
|
||||
}
|
|
@@ -1,27 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import java.io.DataOutput;
import java.io.IOException;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;

abstract class Message {

    public final Action action;

    protected Message(Action action) {
        this.action = action;
    }

    @Override
    public String toString() {
        return action.name();
    }

    public abstract void encode(DataOutput out) throws IOException;
}
@@ -5,31 +5,73 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.sql.JDBCType;
|
||||
import java.util.Locale;
|
||||
|
||||
import static java.lang.String.format;
|
||||
import java.util.Objects;
|
||||
|
||||
public class MetaColumnInfo {
|
||||
public final String table, name;
|
||||
public final JDBCType type;
|
||||
public final int size, position;
|
||||
|
||||
// column.name - string - column name
|
||||
// table.name - string - index.type
|
||||
// data.type - int - data type
|
||||
// column.size - int
|
||||
// ordinal.position - int - position inside table
|
||||
public final String name, table;
|
||||
public final int type, size, position;
|
||||
|
||||
public MetaColumnInfo(String name, String table, int type, int size, int position) {
|
||||
this.name = name;
|
||||
public MetaColumnInfo(String table, String name, JDBCType type, int size, int position) {
|
||||
if (table == null) {
|
||||
throw new IllegalArgumentException("[table] must not be null");
|
||||
}
|
||||
if (name == null) {
|
||||
throw new IllegalArgumentException("[name] must not be null");
|
||||
}
|
||||
if (type == null) {
|
||||
throw new IllegalArgumentException("[type] must not be null");
|
||||
}
|
||||
this.table = table;
|
||||
this.name = name;
|
||||
this.type = type;
|
||||
this.size = size;
|
||||
this.position = position;
|
||||
}
|
||||
|
||||
MetaColumnInfo(DataInput in) throws IOException {
|
||||
table = in.readUTF();
|
||||
name = in.readUTF();
|
||||
type = JDBCType.valueOf(in.readInt());
|
||||
size = in.readInt();
|
||||
position = in.readInt();
|
||||
}
|
||||
|
||||
void write(DataOutput out) throws IOException {
|
||||
out.writeUTF(table);
|
||||
out.writeUTF(name);
|
||||
out.writeInt(type.getVendorTypeNumber());
|
||||
out.writeInt(size);
|
||||
out.writeInt(position);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return format(Locale.ROOT, "%s,%s,%s,%d,%d", name, table, JDBCType.valueOf(type), size, position);
|
||||
return table + "." + name
|
||||
+ "<type=[" + type
|
||||
+ "] size=[" + size
|
||||
+ "] position=[" + position + "]>";
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || obj.getClass() != getClass()) {
|
||||
return false;
|
||||
}
|
||||
MetaColumnInfo other = (MetaColumnInfo) obj;
|
||||
return table.equals(other.table)
|
||||
&& name.equals(other.name)
|
||||
&& type.equals(other.type)
|
||||
&& size == other.size
|
||||
&& position == other.position;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(table, name, type, size, position);
|
||||
}
|
||||
}
|
||||
|
|
|
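Like ColumnInfo, MetaColumnInfo now keeps a java.sql.JDBCType in memory but writes only the vendor type number, so the wire format stays a plain int. A tiny sketch of that encode/decode pair, using the same JDBCType calls as the diff:

    import java.sql.JDBCType;

    final class JdbcTypeCodecSketch {
        // Encode: JDBCType -> the java.sql.Types integer, as MetaColumnInfo.write does.
        static int encode(JDBCType type) {
            return type.getVendorTypeNumber();
        }

        // Decode: the integer back to a JDBCType, as the MetaColumnInfo(DataInput) constructor does.
        static JDBCType decode(int vendorTypeNumber) {
            return JDBCType.valueOf(vendorTypeNumber);
        }

        public static void main(String[] args) {
            int onTheWire = encode(JDBCType.VARCHAR);  // 12, the java.sql.Types.VARCHAR constant
            System.out.println(decode(onTheWire));     // prints VARCHAR
        }
    }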
@@ -5,48 +5,64 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.StringUtils.nullAsEmpty;
|
||||
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.StringUtils.splitToIndexAndType;
|
||||
import java.util.Objects;
|
||||
|
||||
public class MetaColumnRequest extends Request {
|
||||
|
||||
private final String tablePattern, columnPattern;
|
||||
public final String index, type, column;
|
||||
|
||||
public MetaColumnRequest(String tablePattern, String columnPattern) {
|
||||
super(Action.META_COLUMN);
|
||||
this.tablePattern = tablePattern == null ? "" : tablePattern;
|
||||
this.columnPattern = columnPattern == null ? "" : columnPattern;
|
||||
}
|
||||
|
||||
this.tablePattern = nullAsEmpty(tablePattern);
|
||||
this.columnPattern = nullAsEmpty(columnPattern);
|
||||
|
||||
String[] split = splitToIndexAndType(tablePattern);
|
||||
|
||||
this.index = split[0];
|
||||
this.type = split[1];
|
||||
this.column = nullAsEmpty(columnPattern);
|
||||
MetaColumnRequest(int clientVersion, DataInput in) throws IOException {
|
||||
tablePattern = in.readUTF();
|
||||
columnPattern = in.readUTF();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void encode(DataOutput out) throws IOException {
|
||||
out.writeInt(action.value());
|
||||
protected void write(DataOutput out) throws IOException {
|
||||
out.writeUTF(tablePattern);
|
||||
out.writeUTF(columnPattern);
|
||||
}
|
||||
|
||||
public static MetaColumnRequest decode(DataInput in) throws IOException {
|
||||
String tablePattern = in.readUTF();
|
||||
String columnPattern = in.readUTF();
|
||||
return new MetaColumnRequest(tablePattern, columnPattern);
|
||||
public String tablePattern() {
|
||||
return tablePattern;
|
||||
}
|
||||
|
||||
public String columnPattern() {
|
||||
return columnPattern;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "MetaColumn[index=" + index + ", type=" + type + " column=" + column + "]";
|
||||
protected String toStringBody() {
|
||||
return "table=[" + tablePattern
|
||||
+ "] column=[" + columnPattern + "]";
|
||||
}
|
||||
|
||||
@Override
|
||||
public RequestType requestType() {
|
||||
return RequestType.META_COLUMN;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || obj.getClass() != getClass()) {
|
||||
return false;
|
||||
}
|
||||
MetaColumnRequest other = (MetaColumnRequest) obj;
|
||||
return tablePattern.equals(other.tablePattern)
|
||||
&& columnPattern.equals(other.columnPattern);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(tablePattern, columnPattern);
|
||||
}
|
||||
}
|
|
@@ -5,65 +5,74 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Response;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Status;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static java.util.Collections.unmodifiableList;
|
||||
import static java.util.stream.Collectors.joining;
|
||||
|
||||
public class MetaColumnResponse extends Response {
|
||||
|
||||
public static final MetaColumnResponse EMPTY = new MetaColumnResponse(emptyList());
|
||||
|
||||
public final List<MetaColumnInfo> columns;
|
||||
|
||||
public MetaColumnResponse(List<MetaColumnInfo> columns) {
|
||||
super(Action.META_COLUMN);
|
||||
if (columns == null) {
|
||||
throw new IllegalArgumentException("[columns] must not be null");
|
||||
}
|
||||
this.columns = columns;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void encode(DataOutput out) throws IOException {
|
||||
out.writeInt(Status.toSuccess(action));
|
||||
out.writeInt(columns.size());
|
||||
|
||||
for (MetaColumnInfo info : columns) {
|
||||
// NOCOMMIT core would make MetaColumnInfo know how to read and write itself which feels cleaner.
|
||||
out.writeUTF(info.name);
|
||||
out.writeUTF(info.table);
|
||||
out.writeInt(info.type);
|
||||
out.writeInt(info.size);
|
||||
out.writeInt(info.position);
|
||||
}
|
||||
}
|
||||
|
||||
public static MetaColumnResponse decode(DataInput in) throws IOException {
|
||||
public MetaColumnResponse(Request request, DataInput in) throws IOException {
|
||||
int length = in.readInt();
|
||||
|
||||
if (length < 1) {
|
||||
return MetaColumnResponse.EMPTY;
|
||||
}
|
||||
List<MetaColumnInfo> list = new ArrayList<>(length);
|
||||
|
||||
for (int i = 0; i < length; i++) {
|
||||
String name = in.readUTF();
|
||||
String table = in.readUTF();
|
||||
int type = in.readInt();
|
||||
int size = in.readInt();
|
||||
int pos = in.readInt();
|
||||
list.add(new MetaColumnInfo(name, table, type, size, pos));
|
||||
list.add(new MetaColumnInfo(in));
|
||||
}
|
||||
|
||||
return new MetaColumnResponse(list);
|
||||
columns = unmodifiableList(list);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return columns.toString();
|
||||
protected void write(int clientVersion, DataOutput out) throws IOException {
|
||||
out.writeInt(columns.size());
|
||||
for (MetaColumnInfo info : columns) {
|
||||
info.write(out);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String toStringBody() {
|
||||
return columns.stream().map(Object::toString).collect(joining(", "));
|
||||
}
|
||||
|
||||
@Override
|
||||
public RequestType requestType() {
|
||||
return RequestType.META_COLUMN;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ResponseType responseType() {
|
||||
return ResponseType.META_COLUMN;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || obj.getClass() != getClass()) {
|
||||
return false;
|
||||
}
|
||||
MetaColumnResponse other = (MetaColumnResponse) obj;
|
||||
return columns.equals(other.columns);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return columns.hashCode();
|
||||
}
|
||||
}
|
|
@@ -5,45 +5,57 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.Locale;
|
||||
|
||||
import static java.lang.String.format;
|
||||
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.StringUtils.splitToIndexAndType;
|
||||
|
||||
public class MetaTableRequest extends Request {
|
||||
|
||||
private final String pattern;
|
||||
public final String index;
|
||||
public final String type;
|
||||
|
||||
public MetaTableRequest(String pattern) {
|
||||
super(Action.META_TABLE);
|
||||
|
||||
if (pattern == null) {
|
||||
throw new IllegalArgumentException("[pattern] must not be null");
|
||||
}
|
||||
this.pattern = pattern;
|
||||
String[] split = splitToIndexAndType(pattern);
|
||||
}
|
||||
|
||||
this.index = split[0];
|
||||
this.type = split[1];
|
||||
MetaTableRequest(int clientVersion, DataInput in) throws IOException {
|
||||
this.pattern = in.readUTF();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return format(Locale.ROOT, "MetaTable[index=%s, type=%s]", index, type);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void encode(DataOutput out) throws IOException {
|
||||
out.writeInt(action.value());
|
||||
public void write(DataOutput out) throws IOException {
|
||||
out.writeUTF(pattern);
|
||||
}
|
||||
|
||||
public static MetaTableRequest decode(DataInput in) throws IOException {
|
||||
String pattern = in.readUTF();
|
||||
return new MetaTableRequest(pattern);
|
||||
public String pattern() {
|
||||
return pattern;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String toStringBody() {
|
||||
return pattern;
|
||||
}
|
||||
|
||||
@Override
|
||||
public RequestType requestType() {
|
||||
return RequestType.META_TABLE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || obj.getClass() != getClass()) {
|
||||
return false;
|
||||
}
|
||||
MetaTableRequest other = (MetaTableRequest) obj;
|
||||
return pattern.equals(other.pattern);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return pattern.hashCode();
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -5,53 +5,73 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Response;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Status;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static java.util.Collections.unmodifiableList;
|
||||
|
||||
public class MetaTableResponse extends Response {
|
||||
|
||||
public static final MetaTableResponse EMPTY = new MetaTableResponse(emptyList());
|
||||
|
||||
public final List<String> tables;
|
||||
|
||||
public MetaTableResponse(List<String> tables) {
|
||||
super(Action.META_TABLE);
|
||||
if (tables == null) {
|
||||
throw new IllegalArgumentException("[tables] must not be null");
|
||||
}
|
||||
this.tables = tables;
|
||||
}
|
||||
|
||||
MetaTableResponse(Request request, DataInput in) throws IOException {
|
||||
int length = in.readInt();
|
||||
List<String> list = new ArrayList<>(length);
|
||||
for (int i = 0; i < length; i++) {
|
||||
list.add(in.readUTF());
|
||||
}
|
||||
tables = unmodifiableList(list);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void encode(DataOutput out) throws IOException {
|
||||
out.writeInt(Status.toSuccess(action));
|
||||
public void write(int clientVersion, DataOutput out) throws IOException {
|
||||
out.writeInt(tables.size());
|
||||
for (String t : tables) {
|
||||
out.writeUTF(t);
|
||||
}
|
||||
}
|
||||
|
||||
public static MetaTableResponse decode(DataInput in) throws IOException {
|
||||
int length = in.readInt();
|
||||
if (length < 1) {
|
||||
return MetaTableResponse.EMPTY;
|
||||
}
|
||||
|
||||
List<String> list = new ArrayList<>(length);
|
||||
|
||||
for (int i = 0; i < length; i++) {
|
||||
list.add(in.readUTF());
|
||||
}
|
||||
return new MetaTableResponse(list);
|
||||
@Override
|
||||
protected String toStringBody() {
|
||||
return String.join(", ", tables);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return tables.toString();
|
||||
public RequestType requestType() {
|
||||
return RequestType.META_TABLE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ResponseType responseType() {
|
||||
return ResponseType.META_TABLE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || obj.getClass() != getClass()) {
|
||||
return false;
|
||||
}
|
||||
MetaTableResponse other = (MetaTableResponse) obj;
|
||||
return tables.equals(other.tables);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return tables.hashCode();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
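MetaTableResponse writes its table names as a count followed by one UTF string each, and the new DataInput constructor reads them back in the same order. The same length-prefixed pattern in isolation, as a runnable sketch with made-up names:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    final class StringListCodecSketch {
        // Same shape as MetaTableResponse.write above: a count, then one UTF string per table.
        static void write(DataOutput out, List<String> tables) throws IOException {
            out.writeInt(tables.size());
            for (String t : tables) {
                out.writeUTF(t);
            }
        }

        static List<String> read(DataInput in) throws IOException {
            int length = in.readInt();
            List<String> tables = new ArrayList<>(length);
            for (int i = 0; i < length; i++) {
                tables.add(in.readUTF());
            }
            return tables;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            write(new DataOutputStream(bytes), Arrays.asList("emp.emp", "library.book"));
            System.out.println(read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))));
        }
    }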
@@ -0,0 +1,175 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.lang.reflect.Array;
|
||||
import java.sql.JDBCType;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Stores a page of data in a columnar format.
|
||||
*/
|
||||
public class Page extends ResultPage {
|
||||
private final List<ColumnInfo> columnInfo;
|
||||
|
||||
/**
|
||||
* The actual data, one array per column.
|
||||
*/
|
||||
private final Object[][] data;
|
||||
|
||||
/**
|
||||
* The number of rows in this page. The {@link #data} arrays may be larger
|
||||
* but data after the end of the arrays is garbage.
|
||||
*/
|
||||
private int rows;
|
||||
|
||||
private int maxRows;
|
||||
|
||||
/**
|
||||
* Build empty, call {@link #read(DataInput)} after to fill it.
|
||||
*/
|
||||
Page(List<ColumnInfo> columnInfo) {
|
||||
this.columnInfo = columnInfo;
|
||||
data = new Object[columnInfo.size()][];
|
||||
}
|
||||
|
||||
/**
|
||||
* Build with a particular set of rows. Use this for testing.
|
||||
*/
|
||||
Page(List<ColumnInfo> columnInfo, Object[][] rows) {
|
||||
this(columnInfo);
|
||||
makeRoomFor(rows.length);
|
||||
this.rows = rows.length;
|
||||
for (int row = 0; row < rows.length; row++) {
|
||||
if (columnInfo.size() != rows[row].length) {
|
||||
throw new IllegalArgumentException("Column count mismatch. Got [" + columnInfo.size()
|
||||
+ "] ColumnInfos but [" + rows.length + "] columns on the [" + row + "] row.");
|
||||
}
|
||||
}
|
||||
for (int column = 0; column < columnInfo.size(); column++) {
|
||||
for (int row = 0; row < rows.length; row++) {
|
||||
data[column][row] = rows[row][column];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public int rows() {
|
||||
return rows;
|
||||
}
|
||||
|
||||
public List<ColumnInfo> columnInfo() {
|
||||
return columnInfo;
|
||||
}
|
||||
|
||||
Object[] column(int index) {
|
||||
if (index < 0 || index >= data.length) {
|
||||
// NOCOMMIT this was once JdbcException. Make sure it isn't now busted
|
||||
throw new IllegalArgumentException("Invalid column [" + index + "] (max is [" + (data.length - 1) + "])");
|
||||
}
|
||||
|
||||
return data[index];
|
||||
}
|
||||
|
||||
public Object entry(int row, int column) {
|
||||
if (row < 0 || row >= rows) {
|
||||
// NOCOMMIT this was once JdbcException. Make sure it isn't now busted
|
||||
throw new IllegalArgumentException("Invalid row [" + row + "] (max is [" + (rows -1) + "])");
|
||||
}
|
||||
return column(column)[row];
|
||||
}
|
||||
|
||||
/**
|
||||
* Read a value from the stream
|
||||
*/
|
||||
void read(DataInput in) throws IOException {
|
||||
int rows = in.readInt();
|
||||
// this.rows may be less than the number of rows we have space for
|
||||
if (rows > maxRows) {
|
||||
makeRoomFor(rows);
|
||||
}
|
||||
this.rows = rows;
|
||||
|
||||
for (int row = 0; row < rows; row++) {
|
||||
for (int column = 0; column < columnInfo.size(); column++) {
|
||||
data[column][row] = readValue(in, columnInfo.get(column).type);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(DataOutput out) throws IOException {
|
||||
int rows = rows();
|
||||
out.writeInt(rows);
|
||||
for (int row = 0; row < rows; row++) {
|
||||
for (int column = 0; column < columnInfo.size(); column++) {
|
||||
JDBCType columnType = columnInfo.get(column).type;
|
||||
writeValue(out, entry(row, column), columnType);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder b = new StringBuilder();
|
||||
for (int row = 0; row < rows(); row++) {
|
||||
for (int column = 0; column < columnInfo.size(); column++) {
|
||||
if (column > 0) {
|
||||
b.append(", ");
|
||||
}
|
||||
b.append(entry(row, column));
|
||||
}
|
||||
b.append('\n');
|
||||
}
|
||||
return b.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || obj instanceof ResultPage == false) {
|
||||
return false;
|
||||
}
|
||||
Page other = (Page) obj;
|
||||
if (rows != other.rows) {
|
||||
return false;
|
||||
}
|
||||
if (false == columnInfo.equals(other.columnInfo)) {
|
||||
return false;
|
||||
}
|
||||
for (int row = 0; row < rows(); row++) {
|
||||
for (int column = 0; column < columnInfo.size(); column++) {
|
||||
if (false == Objects.equals(entry(row, column), other.entry(row, column))) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = Objects.hash(rows(), columnInfo.size());
|
||||
for (int row = 0; row < rows(); row++) {
|
||||
for (int column = 0; column < columnInfo.size(); column++) {
|
||||
Object entry = entry(row, column);
|
||||
result = result * 31 + (entry == null ? 0 : entry.hashCode());
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
private void makeRoomFor(int rows) {
|
||||
maxRows = rows;
|
||||
for (int i = 0; i < columnInfo.size(); i++) {
|
||||
Class<?> type = classOf(columnInfo.get(i).type);
|
||||
data[i] = (Object[]) Array.newInstance(type, rows);
|
||||
}
|
||||
}
|
||||
}
|
|
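Page keeps a page of results column-major: one Object[] per column, sized by makeRoomFor and reused across pages, with rows marking how much of each array is live. A stand-alone sketch of that layout (it avoids the Page class itself, whose constructors are package-private):

    final class ColumnMajorSketch {
        public static void main(String[] args) {
            // Two columns ("name" and "age"), three live rows, stored column-major as Page does.
            Object[][] data = new Object[2][];
            data[0] = new Object[] {"alice", "bob", "carol"};  // column 0: name
            data[1] = new Object[] {31, 42, 27};               // column 1: age
            int rows = 3;  // the arrays may be longer; anything past 'rows' is garbage

            // Page.entry(row, column) boils down to data[column][row].
            for (int row = 0; row < rows; row++) {
                System.out.println(data[0][row] + " is " + data[1][row]);
            }
        }
    }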
@@ -5,295 +5,100 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import java.sql.SQLClientInfoException;
|
||||
import java.sql.SQLDataException;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.SQLRecoverableException;
|
||||
import java.sql.SQLSyntaxErrorException;
|
||||
import java.sql.SQLTimeoutException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
import java.util.function.Function;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto;
|
||||
|
||||
import javax.sql.rowset.serial.SerialException;
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.stream.Collectors.toMap;
|
||||
/**
|
||||
* Binary protocol for the JDBC. All backwards compatibility is done using the
|
||||
* version number sent in the header.
|
||||
*/
|
||||
public final class Proto extends AbstractProto {
|
||||
public static final Proto INSTANCE = new Proto();
|
||||
|
||||
//
|
||||
// Basic tabular messaging for the JDBC driver
|
||||
//
|
||||
// Note this message is transmitted through HTTP and thus things like transport error codes
|
||||
// are handled through that.
|
||||
private Proto() {}
|
||||
|
||||
// The proto is based around a simple, single request-response model.
|
||||
// Note the field order is _important_.
|
||||
// To simplify things, the protocol is not meant to be backwards compatible.
|
||||
//
|
||||
public interface Proto {
|
||||
|
||||
// All requests start with
|
||||
// magic_number - int - just because
|
||||
// version - int - the version the client understands
|
||||
// action - int - action to perform
|
||||
// <action-param> (see below)
|
||||
|
||||
int MAGIC_NUMBER = 0x0C0DE1DBC;
|
||||
int VERSION = 000_000_001;
|
||||
|
||||
public interface Header {
|
||||
int value();
|
||||
@Override
|
||||
protected RequestType readRequestType(DataInput in) throws IOException {
|
||||
return RequestType.read(in);
|
||||
}
|
||||
|
||||
// The response start with a similar pattern
|
||||
// magic_number
|
||||
// version
|
||||
// action reply (status)
|
||||
// payload
|
||||
|
||||
enum Status implements Header {
|
||||
// If successful, each method has its own params (describe for each method)
|
||||
SUCCESS (0x5000000),
|
||||
|
||||
// Expected exceptions contain
|
||||
// message - string - exception message
|
||||
// exception - string - exception class
|
||||
// sql exception - int - to what SqlException type this maps to (see below)
|
||||
EXCEPTION(0x3000000),
|
||||
|
||||
// Unexpected error contains the following fields
|
||||
|
||||
// message - string - exception message
|
||||
// exception - string - exception class
|
||||
// stacktrace - string - exception stacktrace (should be massaged)
|
||||
ERROR (0xF000000);
|
||||
|
||||
private static final Map<Integer, Status> MAP = Arrays.stream(Status.class.getEnumConstants())
|
||||
.collect(toMap(Status::value, Function.identity()));
|
||||
|
||||
private final int value;
|
||||
|
||||
Status(int value) {
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int value() {
|
||||
return value;
|
||||
}
|
||||
|
||||
public static Status from(int value) {
|
||||
return MAP.get(value & 0xF000000);
|
||||
}
|
||||
|
||||
public static int toSuccess(Action action) {
|
||||
return action.value() | SUCCESS.value();
|
||||
}
|
||||
|
||||
public static int toException(Action action) {
|
||||
return action.value() | EXCEPTION.value();
|
||||
}
|
||||
|
||||
public static int toError(Action action) {
|
||||
return action.value() | ERROR.value();
|
||||
}
|
||||
@Override
|
||||
protected ResponseType readResponseType(DataInput in) throws IOException {
|
||||
return ResponseType.read(in);
|
||||
}
|
||||
|
||||
enum SqlExceptionType {
|
||||
UNKNOWN (0x001),
|
||||
SERIAL (0x010),
|
||||
CLIENT_INFO(0x020),
|
||||
DATA (0x100),
|
||||
SYNTAX (0x200),
|
||||
|
||||
RECOVERABLE(0x300),
|
||||
TIMEOUT (0x400);
|
||||
|
||||
public enum RequestType implements AbstractProto.RequestType {
|
||||
INFO(InfoRequest::new),
|
||||
META_TABLE(MetaTableRequest::new),
|
||||
META_COLUMN(MetaColumnRequest::new),
|
||||
QUERY_INIT(QueryInitRequest::new),
|
||||
QUERY_PAGE(QueryPageRequest::new),
|
||||
// QUERY_CLOSE(QueryClosenRequest::new), TODO implement me
|
||||
;
|
||||
|
||||
private static final Map<Integer, SqlExceptionType> MAP = Arrays.stream(SqlExceptionType.class.getEnumConstants())
|
||||
.collect(toMap(SqlExceptionType::value, Function.identity()));
|
||||
private final RequestReader reader;
|
||||
|
||||
private final int value;
|
||||
|
||||
SqlExceptionType(int value) {
|
||||
this.value = value;
|
||||
RequestType(RequestReader reader) {
|
||||
this.reader = reader;
|
||||
}
|
||||
|
||||
public int value() {
|
||||
return value;
|
||||
}
|
||||
|
||||
public static SqlExceptionType from(int value) {
|
||||
return MAP.get(value);
|
||||
}
|
||||
|
||||
public static SQLException asException(SqlExceptionType type, String message) {
|
||||
if (message == null) {
|
||||
message = "";
|
||||
static RequestType read(DataInput in) throws IOException {
|
||||
byte b = in.readByte();
|
||||
try {
|
||||
return values()[b];
|
||||
} catch (ArrayIndexOutOfBoundsException e) {
|
||||
throw new IllegalArgumentException("Unknown response type [" + b + "]", e);
|
||||
}
|
||||
if (type == SERIAL) {
|
||||
return new SerialException(message);
|
||||
}
|
||||
if (type == CLIENT_INFO) {
|
||||
return new SQLClientInfoException(message, emptyMap());
|
||||
}
|
||||
if (type == DATA) {
|
||||
return new SQLDataException(message);
|
||||
}
|
||||
if (type == SYNTAX) {
|
||||
return new SQLSyntaxErrorException(message);
|
||||
}
|
||||
if (type == RECOVERABLE) {
|
||||
return new SQLRecoverableException(message);
|
||||
}
|
||||
if (type == TIMEOUT) {
|
||||
return new SQLTimeoutException(message);
|
||||
}
|
||||
|
||||
return new SQLException("Unexpected 'expected' exception " + type);
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// RPC
|
||||
//
|
||||
|
||||
enum Action implements Header {
|
||||
|
||||
//
|
||||
// Retrieves information about the server
|
||||
//
|
||||
//
|
||||
// java.version - string
|
||||
// java.vendor - string
|
||||
// java.class.path - string
|
||||
// os.name - string
|
||||
// os.version - string
|
||||
//
|
||||
//
|
||||
// node.name - string
|
||||
// cluster.name - string
|
||||
// version.major - byte
|
||||
// version.minor - byte
|
||||
// version.number - string
|
||||
// version.hash - string
|
||||
// version.build - string
|
||||
// # nodes - fall back nodes to connect to
|
||||
// for each node
|
||||
// node.name - string
|
||||
// node.address - string
|
||||
//
|
||||
|
||||
INFO(0x01),
|
||||
|
||||
|
||||
//
|
||||
// Retrieves metadata about tables
|
||||
//
|
||||
// Request:
|
||||
//
|
||||
// name pattern - string
|
||||
//
|
||||
// Response:
|
||||
//
|
||||
// # tables - int - index.type
|
||||
// for each table
|
||||
// name - string - table name
|
||||
//
|
||||
|
||||
META_TABLE(0x04),
|
||||
|
||||
//
|
||||
// Retrieves metadata about columns
|
||||
//
|
||||
// Request:
|
||||
//
|
||||
// table pattern - string
|
||||
// column pattern - string
|
||||
//
|
||||
// Response:
|
||||
//
|
||||
// # columns - int - columns that match
|
||||
// for each column (MetaColumnInfo):
|
||||
// table.name - string - index.type
|
||||
// column.name - string - column name
|
||||
// data.type - int - data type
|
||||
// column.size - int
|
||||
// ordinal.position - int - position inside table
|
||||
|
||||
META_COLUMN(0x05),
|
||||
|
||||
|
||||
// Request (QueryInfo):
|
||||
// Contains several _header_ fields
|
||||
//
|
||||
// fetch-size - int - the number of results returned in a response by the server
|
||||
// (TimeoutInfo)
|
||||
// client_time - long - milliseconds since the epoch (in UTC)
|
||||
// timeout - long - how much time (in ms) the server has to deliver an answer.
|
||||
// request_timeout - long - how much time (in ms) a scroll/cursor needs to be kept alive between requests
|
||||
|
||||
// And the actual payload.
|
||||
//
|
||||
// query - string - the actual SQL query
|
||||
//
|
||||
|
||||
// Response:
|
||||
// Header fields (ResultInfo):
|
||||
//
|
||||
// time_received - long - (in UTC)
|
||||
// time_sent - long - (in UTC)
|
||||
// request_id - string - id for this page; if it's null it means there are no more results
|
||||
// # columns - int - number of columns
|
||||
// row schema
|
||||
// for each column (ColumnInfo):
|
||||
// name - string - name of the column
|
||||
// alias - string - if the column has an alias or label
|
||||
// table - string - index.type
|
||||
// schema - string - TBD (could be user)
|
||||
// catalog - string - TBD (could be cluster/node id)
|
||||
// type - int - JDBC type
|
||||
// # rows - int - number of rows
|
||||
// for each row, the actual values (the schema is not sent anymore)
|
||||
|
||||
QUERY_INIT(0x10),
|
||||
|
||||
QUERY_PAGE(0x15),
|
||||
|
||||
// Request (PageInfo):
|
||||
|
||||
// request_id - string - the request/scroll id
|
||||
// (TimeoutInfo):
|
||||
// client_time - long - ms since the epoch (in UTC)
|
||||
// timeout - long - how much time (in ms) the server has to deliver an answer.
|
||||
// request_timeout - long - how much time (in ms) the request needs to be kept alive until the next request
|
||||
|
||||
|
||||
// Returns (ResultPageInfo):
|
||||
// request_id - string - id for this page; if it's null it means there are no more results
|
||||
// # rows - int - number of rows
|
||||
// for each row, the actual values (the schema is not sent anymore)
|
||||
|
||||
// TODO: needs implementing
|
||||
QUERY_CLOSE(0x19);
|
||||
|
||||
|
||||
private static final Map<Integer, Action> MAP = Arrays.stream(Action.class.getEnumConstants())
|
||||
.collect(toMap(Action::value, Function.identity()));
|
||||
|
||||
private final int value;
|
||||
|
||||
Action(int value) {
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int value() {
|
||||
return value;
|
||||
public void write(DataOutput out) throws IOException {
|
||||
out.writeByte(ordinal());
|
||||
}
|
||||
|
||||
public static Action from(int value) {
|
||||
return MAP.get(value & 0x00000FF);
|
||||
@Override
|
||||
public RequestReader reader() {
|
||||
return reader;
|
||||
}
|
||||
}
|
||||
|
||||
public enum ResponseType implements AbstractProto.ResponseType {
|
||||
EXCEPTION(ExceptionResponse::new),
|
||||
ERROR(ErrorResponse::new),
|
||||
INFO(InfoResponse::new),
|
||||
META_TABLE(MetaTableResponse::new),
|
||||
META_COLUMN(MetaColumnResponse::new),
|
||||
QUERY_INIT(QueryInitResponse::new),
|
||||
QUERY_PAGE(QueryPageResponse::new),
|
||||
// QUERY_CLOSE(QueryClosenResponse::new) TODO implement me
|
||||
;
|
||||
|
||||
private final ResponseReader reader;
|
||||
|
||||
ResponseType(ResponseReader reader) {
|
||||
this.reader = reader;
|
||||
}
|
||||
|
||||
static ResponseType read(DataInput in) throws IOException {
|
||||
byte b = in.readByte();
|
||||
try {
|
||||
return values()[b];
|
||||
} catch (ArrayIndexOutOfBoundsException e) {
|
||||
throw new IllegalArgumentException("Unknown response type [" + b + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(DataOutput out) throws IOException {
|
||||
out.writeByte(ordinal());
|
||||
}
|
||||
|
||||
@Override
|
||||
public ResponseReader reader() {
|
||||
return reader;
|
||||
}
|
||||
}
|
||||
}
|
|
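The new Proto drops the magic-number/status bit masking and frames every message with a single type byte: RequestType and ResponseType write their ordinal() and are read back with values()[b], and each enum constant carries the reader for its body. A simplified, self-contained sketch of that dispatch pattern (the shared RequestReader/ResponseReader interfaces are replaced by a stand-in here):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class TypeByteDispatchSketch {
        @FunctionalInterface
        interface BodyReader {                 // stand-in for the shared RequestReader/ResponseReader
            Object read(DataInput in) throws IOException;
        }

        enum MessageType {
            INFO(in -> "info body"),           // each constant knows how to decode its own body
            META_TABLE(in -> "meta table body");

            final BodyReader reader;
            MessageType(BodyReader reader) { this.reader = reader; }

            void write(DataOutput out) throws IOException {
                out.writeByte(ordinal());      // a single type byte on the wire
            }

            static MessageType read(DataInput in) throws IOException {
                byte b = in.readByte();
                try {
                    return values()[b];        // ordinal back to the enum constant
                } catch (ArrayIndexOutOfBoundsException e) {
                    throw new IllegalArgumentException("Unknown message type [" + b + "]", e);
                }
            }
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            MessageType.META_TABLE.write(new DataOutputStream(bytes));

            DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            MessageType type = MessageType.read(in);
            System.out.println(type + " -> " + type.reader.read(in));
        }
    }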
@@ -5,7 +5,8 @@
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.Request;

import java.io.DataInput;
import java.io.DataOutput;

@@ -20,15 +21,13 @@ public class QueryInitRequest extends Request {
    public final TimeoutInfo timeout;

    public QueryInitRequest(int fetchSize, String query, TimeZone timeZone, TimeoutInfo timeout) {
        super(Action.QUERY_INIT);
        this.fetchSize = fetchSize;
        this.query = query;
        this.timeZone = timeZone;
        this.timeout = timeout;
    }

    QueryInitRequest(DataInput in) throws IOException {
        super(Action.QUERY_INIT);
    QueryInitRequest(int clientVersion, DataInput in) throws IOException {
        fetchSize = in.readInt();
        query = in.readUTF();
        timeZone = TimeZone.getTimeZone(in.readUTF());

@@ -36,24 +35,28 @@ public class QueryInitRequest extends Request {
    }

    @Override
    public void encode(DataOutput out) throws IOException {
        out.writeInt(action.value()); // NOCOMMIT this should be written by the caller
    public void write(DataOutput out) throws IOException {
        out.writeInt(fetchSize);
        out.writeUTF(query);
        out.writeUTF(timeZone.getID());
        timeout.encode(out);
        timeout.write(out);
    }

    @Override
    public String toString() {
    protected String toStringBody() {
        StringBuilder b = new StringBuilder();
        b.append("SqlInitReq[").append(query).append(']');
        b.append("query=[").append(query).append(']');
        if (false == timeZone.getID().equals("UTC")) {
            b.append('[').append(timeZone.getID()).append(']');
            b.append(" timeZone=[").append(timeZone.getID()).append(']');
        }
        return b.toString();
    }

    @Override
    public RequestType requestType() {
        return RequestType.QUERY_INIT;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
@@ -5,78 +5,96 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.protocol;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Response;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Objects;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Status;
|
||||
import static java.util.Collections.unmodifiableList;
|
||||
|
||||
import static java.lang.String.format;
|
||||
import static java.util.Collections.emptyList;
|
||||
|
||||
public class QueryInitResponse extends DataResponse {
|
||||
|
||||
public final long serverTimeQueryReceived, serverTimeResponseSent, timeSpent;
|
||||
public class QueryInitResponse extends Response {
|
||||
public final long serverTimeQueryReceived, serverTimeResponseSent;
|
||||
public final String requestId;
|
||||
public final List<ColumnInfo> columns;
|
||||
public final ResultPage data;
|
||||
|
||||
public QueryInitResponse(long serverTimeQueryReceived, long serverTimeResponseSent, String requestId, List<ColumnInfo> columns, Object data) {
|
||||
super(Action.QUERY_INIT, data);
|
||||
public QueryInitResponse(long serverTimeQueryReceived, long serverTimeResponseSent, String requestId, List<ColumnInfo> columns,
|
||||
ResultPage data) {
|
||||
this.serverTimeQueryReceived = serverTimeQueryReceived;
|
||||
this.serverTimeResponseSent = serverTimeResponseSent;
|
||||
this.timeSpent = serverTimeQueryReceived - serverTimeResponseSent;
|
||||
this.requestId = requestId;
|
||||
this.columns = columns;
|
||||
this.data = data;
|
||||
}
|
||||
|
||||
QueryInitResponse(Request request, DataInput in) throws IOException {
|
||||
serverTimeQueryReceived = in.readLong();
|
||||
serverTimeResponseSent = in.readLong();
|
||||
requestId = in.readUTF();
|
||||
int size = in.readInt();
|
||||
List<ColumnInfo> columns = new ArrayList<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
columns.add(new ColumnInfo(in));
|
||||
}
|
||||
this.columns = unmodifiableList(columns);
|
||||
Page data = new Page(columns);
|
||||
data.read(in);
|
||||
this.data = data;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return format(Locale.ROOT, "QueryInitRes[%s]", requestId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void encode(DataOutput out) throws IOException {
|
||||
out.writeInt(Status.toSuccess(action));
|
||||
|
||||
public void write(int clientVersion, DataOutput out) throws IOException {
|
||||
out.writeLong(serverTimeQueryReceived);
|
||||
out.writeLong(serverTimeResponseSent);
|
||||
out.writeUTF(requestId);
|
||||
|
||||
out.writeInt(columns.size());
|
||||
for (ColumnInfo c : columns) {
|
||||
out.writeUTF(c.name);
|
||||
out.writeUTF(c.label);
|
||||
out.writeUTF(c.table);
|
||||
out.writeUTF(c.schema);
|
||||
out.writeUTF(c.catalog);
|
||||
out.writeInt(c.type);
|
||||
c.write(out);
|
||||
}
|
||||
data.write(out);
|
||||
}
|
||||
|
||||
public static QueryInitResponse decode(DataInput in) throws IOException {
|
||||
long serverTimeQueryReceived = in.readLong();
|
||||
long serverTimeResponseSent = in.readLong();
|
||||
String requestId = in.readUTF();
|
||||
@Override
|
||||
protected String toStringBody() {
|
||||
return "timeReceived=[" + serverTimeQueryReceived
|
||||
+ "] timeSent=[" + serverTimeResponseSent
|
||||
+ "] requestId=[" + requestId
|
||||
+ "] columns=" + columns
|
||||
+ " data=[\n" + data + "]";
|
||||
}
|
||||
|
||||
@Override
|
||||
public RequestType requestType() {
|
||||
return RequestType.QUERY_INIT;
|
||||
}
|
||||
|
||||
int length = in.readInt();
|
||||
List<ColumnInfo> list = length < 1 ? emptyList() : new ArrayList<>(length);
|
||||
@Override
|
||||
public ResponseType responseType() {
|
||||
return ResponseType.QUERY_INIT;
|
||||
}
|
||||
|
||||
for (int i = 0; i < length; i++) {
|
||||
String name = in.readUTF();
|
||||
String label = in.readUTF();
|
||||
String table = in.readUTF();
|
||||
String schema = in.readUTF();
|
||||
String catalog = in.readUTF();
|
||||
int type = in.readInt();
|
||||
|
||||
list.add(new ColumnInfo(name, type, schema, catalog, table, label));
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || obj.getClass() != getClass()) {
|
||||
return false;
|
||||
}
|
||||
QueryInitResponse other = (QueryInitResponse) obj;
|
||||
return serverTimeQueryReceived == other.serverTimeQueryReceived
|
||||
&& serverTimeResponseSent == other.serverTimeResponseSent
|
||||
&& requestId.equals(other.requestId)
|
||||
&& columns.equals(other.columns);
|
||||
// NOCOMMIT data
|
||||
}
|
||||
|
||||
return new QueryInitResponse(serverTimeQueryReceived, serverTimeResponseSent, requestId, list, null);
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(serverTimeQueryReceived, serverTimeResponseSent, requestId, columns); // NOCOMMIT data
|
||||
}
|
||||
}
|
|
@@ -5,41 +5,72 @@
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.Nullable;
import org.elasticsearch.xpack.sql.protocol.shared.Request;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Locale;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;

import static java.lang.String.format;
import java.util.Objects;

public class QueryPageRequest extends Request {

    public final String requestId;
    public final TimeoutInfo timeout;
    private final transient Page data;

    public QueryPageRequest(String requestId, TimeoutInfo timeout) {
        super(Action.QUERY_PAGE);
    public QueryPageRequest(String requestId, TimeoutInfo timeout, @Nullable Page data) {
        if (requestId == null) {
            throw new IllegalArgumentException("[requestId] must not be null");
        }
        if (timeout == null) {
            throw new IllegalArgumentException("[timeout] must not be null");
        }
        this.requestId = requestId;
        this.timeout = timeout;
        this.data = data;
    }

    QueryPageRequest(int clientVersion, DataInput in) throws IOException {
        this.requestId = in.readUTF();
        this.timeout = new TimeoutInfo(in);
        this.data = null; // Data isn't used on the server side
    }

    @Override
    public void encode(DataOutput out) throws IOException {
        out.writeInt(action.value());
    public void write(DataOutput out) throws IOException {
        out.writeUTF(requestId);
        timeout.encode(out);
        timeout.write(out);
    }

    public static QueryPageRequest decode(DataInput in) throws IOException {
        String requestId = in.readUTF();
        TimeoutInfo timeout = new TimeoutInfo(in);
        return new QueryPageRequest(requestId, timeout);
    public Page data() {
        return data;
    }

    @Override
    public String toString() {
        return format(Locale.ROOT, "QueryPageReq[%s]", requestId);
    protected String toStringBody() {
        return requestId;
    }

    @Override
    public RequestType requestType() {
        return RequestType.QUERY_PAGE;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        QueryPageRequest other = (QueryPageRequest) obj;
        return requestId.equals(other.requestId)
            && timeout.equals(other.timeout);
        // data is intentionally ignored
    }

    @Override
    public int hashCode() {
        return Objects.hash(requestId, timeout);
        // data is intentionally ignored
    }
}
@@ -5,38 +5,72 @@
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Locale;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Status;

import static java.lang.String.format;

public class QueryPageResponse extends DataResponse {
import java.util.Objects;

public class QueryPageResponse extends Response {
    public final String requestId;
    private final ResultPage data;

    public QueryPageResponse(String requestId, Object data) {
        super(Action.QUERY_PAGE, data);
    public QueryPageResponse(String requestId, ResultPage data) {
        if (requestId == null) {
            throw new IllegalArgumentException("[requestId] must not be null");
        }
        if (data == null) {
            throw new IllegalArgumentException("[data] must not be null");
        }
        this.requestId = requestId;
        this.data = data;
    }

    QueryPageResponse(Request request, DataInput in) throws IOException {
        this.requestId = in.readUTF();
        QueryPageRequest queryPageRequest = (QueryPageRequest) request;
        data = queryPageRequest.data();
        queryPageRequest.data().read(in);
    }

    @Override
    public void encode(DataOutput out) throws IOException {
        out.writeInt(Status.toSuccess(action));
    public void write(int clientVersion, DataOutput out) throws IOException {
        out.writeUTF(requestId);
    }

    public static QueryPageResponse decode(DataInput in) throws IOException {
        String requestId = in.readUTF();
        return new QueryPageResponse(requestId, null);
        data.write(out);
    }

    @Override
    public String toString() {
        return format(Locale.ROOT, "QueryPageRes[%s]", requestId);
    protected String toStringBody() {
        return "requestId=[" + requestId
            + "] data=[\n" + data + "]";
    }

    @Override
    public RequestType requestType() {
        return RequestType.QUERY_PAGE;
    }

    @Override
    public ResponseType responseType() {
        return ResponseType.QUERY_PAGE;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        QueryPageResponse other = (QueryPageResponse) obj;
        return requestId.equals(other.requestId)
            && data.equals(other.data);
    }

    @Override
    public int hashCode() {
        return Objects.hash(requestId, data);
    }
}
@@ -1,15 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;

public abstract class Request extends Message {

    public Request(Action action) {
        super(action);
    }
}

@@ -1,15 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;

public abstract class Response extends Message {

    public Response(Action action) {
        super(action);
    }
}
@@ -5,115 +5,24 @@
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Status;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.JDBCType;
import java.util.Locale;

import static java.lang.String.format;
import static java.sql.Types.BIGINT;
import static java.sql.Types.BINARY;
import static java.sql.Types.BIT;
import static java.sql.Types.BOOLEAN;
import static java.sql.Types.CHAR;
import static java.sql.Types.DOUBLE;
import static java.sql.Types.FLOAT;
import static java.sql.Types.INTEGER;
import static java.sql.Types.LONGVARBINARY;
import static java.sql.Types.LONGVARCHAR;
import static java.sql.Types.NULL;
import static java.sql.Types.REAL;
import static java.sql.Types.SMALLINT;
import static java.sql.Types.TIMESTAMP;
import static java.sql.Types.TINYINT;
import static java.sql.Types.VARBINARY;
import static java.sql.Types.VARCHAR;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.MAGIC_NUMBER;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.VERSION;

public abstract class ProtoUtils {

    private static final byte[] EMPTY_BYTES = new byte[0];

    public static void write(DataOutput out, Message m) throws IOException {
        out.writeInt(MAGIC_NUMBER);
        out.writeInt(VERSION);
        m.encode(out);
    }

    public static Request readRequest(DataInput in) throws IOException {
        switch (Action.from(in.readInt())) {
        case INFO:
            return InfoRequest.decode(in);
        case META_TABLE:
            return MetaTableRequest.decode(in);
        case META_COLUMN:
            return MetaColumnRequest.decode(in);
        case QUERY_INIT:
            return new QueryInitRequest(in);
        case QUERY_PAGE:
            return QueryPageRequest.decode(in);
        default:
            // cannot find action type
            return null;
        }
    }

    public static Response readResponse(DataInput in, int header) throws IOException {
        Action action = Action.from(header);

        switch (Status.from(header)) {
        case EXCEPTION:
            return ExceptionResponse.decode(in, action);
        case ERROR:
            return ErrorResponse.decode(in, action);
        case SUCCESS:
            switch (action) {
            case INFO:
                return InfoResponse.decode(in);
            case META_TABLE:
                return MetaTableResponse.decode(in);
            case META_COLUMN:
                return MetaColumnResponse.decode(in);
            case QUERY_INIT:
                return QueryInitResponse.decode(in);
            case QUERY_PAGE:
                return QueryPageResponse.decode(in);
            default:
                // cannot find action type
                // NOCOMMIT it feels like this should throw *something*
                return null;
            }
        default:
            return null;
        }
    }

    public static String readHeader(DataInput in) throws IOException {
        // NOCOMMIT why not just throw?
        int magic = in.readInt();
        if (MAGIC_NUMBER != magic) {
            return "Invalid protocol";
        }
        int ver = in.readInt();
        if (VERSION != ver) {
            return format(Locale.ROOT, "Expected JDBC protocol version %s, found %s", VERSION, ver);
        }

        return null;
    }

    //
    // value read
    //
/**
 * Abstract base class for a page of results. The canonical implementation in {@link Page}
 * and implementation must write usings the same format as {@linkplain Page}.
 */
public abstract class ResultPage {
    public abstract void write(DataOutput out) throws IOException;

    // See Jdbc spec, appendix B
    @SuppressWarnings("unchecked")
    public static <T> T readValue(DataInput in, int type) throws IOException {
    protected static <T> T readValue(DataInput in, JDBCType type) throws IOException {
        // NOCOMMIT <T> feels slippery here
        Object result;
        byte hasNext = in.readByte();

@@ -124,6 +33,7 @@ public abstract class ProtoUtils {
        switch (type) {
        case NULL:
            // used to move the stream forward
            // NOCOMMIT why serialize NULL types at all?
            in.readBoolean();
            return null;
        case BIT:

@@ -153,14 +63,9 @@
        case VARBINARY:
        case LONGVARBINARY:
            int size = in.readInt();
            if (size == 0) {
                result = EMPTY_BYTES;
            }
            else {
                byte[] ar = new byte[size];
                in.readFully(ar, 0, size);
                result = ar;
            }
            byte[] ar = new byte[size];
            in.readFully(ar, 0, size);
            result = ar;
            break;
        case CHAR:
        case VARCHAR:

@@ -172,12 +77,12 @@
            result = in.readLong();
            break;
        default:
            throw new IOException("Don't know how to read type [" + type + " / " + JDBCType.valueOf(type) + "]");
            throw new IOException("Don't know how to read type [" + type + "]");
        }
        return (T) result;
    }

    public static void writeValue(DataOutput out, Object o, int type) throws IOException {
    protected static void writeValue(DataOutput out, Object o, JDBCType type) throws IOException {
        if (o == null) {
            out.writeByte(0);
            return;

@@ -233,6 +138,53 @@
            out.writeLong(((Number) o).longValue());
            return;
        default:
            throw new IOException("Don't know how to write type [" + type + "]");
        }
    }
    }

    /**
     * The type of the array used to store columns of this type.
     */
    protected static Class<?> classOf(JDBCType jdbcType) {
        switch (jdbcType) {
        case NUMERIC:
        case DECIMAL:
            return BigDecimal.class;
        case BOOLEAN:
        case BIT:
            return Boolean.class;
        case TINYINT:
            return Byte.class;
        case SMALLINT:
            return Short.class;
        case INTEGER:
            // NOCOMMIT should we be using primitives instead?
            return Integer.class;
        case BIGINT:
            return Long.class;
        case REAL:
            return Float.class;
        case FLOAT:
        case DOUBLE:
            return Double.class;
        case BINARY:
        case VARBINARY:
        case LONGVARBINARY:
            return byte[].class;
        case CHAR:
        case VARCHAR:
        case LONGVARCHAR:
            return String.class;
        case DATE:
        case TIME:
        case TIMESTAMP:
            return Long.class;
        case BLOB:
            return Blob.class;
        case CLOB:
            return Clob.class;
        default:
            throw new IllegalArgumentException("Unsupported JDBC type [" + jdbcType + "]");
        }
    }
}
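The readValue/writeValue pair above frames every value with a one-byte marker: writeValue emits 0 for null and presumably a non-zero marker before a non-null payload, and readValue consumes that marker first (the hasNext byte). A minimal, self-contained sketch of that convention, using illustrative helper names that are not part of the commit:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

class NullMarkerFramingSketch {
    static void writeNullableUTF(DataOutput out, String value) throws IOException {
        if (value == null) {
            out.writeByte(0);   // null marker, no payload follows
            return;
        }
        out.writeByte(1);       // assumed non-null marker
        out.writeUTF(value);
    }

    static String readNullableUTF(DataInput in) throws IOException {
        return in.readByte() == 0 ? null : in.readUTF();
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeNullableUTF(out, "test");
        writeNullableUTF(out, null);
        DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readNullableUTF(in));   // prints: test
        System.out.println(readNullableUTF(in));   // prints: null
    }
}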
@@ -1,76 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.StringTokenizer;

abstract class StringUtils {

    static final String EMPTY = "";

    static String nullAsEmpty(String string) {
        return string == null ? EMPTY : string;
    }

    static boolean hasText(CharSequence sequence) {
        if (!hasLength(sequence)) {
            return false;
        }
        int length = sequence.length();
        for (int i = 0; i < length; i++) {
            if (!Character.isWhitespace(sequence.charAt(i))) {
                return true;
            }
        }
        return false;
    }

    static boolean hasLength(CharSequence sequence) {
        return (sequence != null && sequence.length() > 0);
    }

    static String[] splitToIndexAndType(String pattern) {
        List<String> tokens = tokenize(pattern, ".");

        String[] results = new String[2];
        if (tokens.size() == 2) {
            results[0] = tokens.get(0);
            results[1] = tokens.get(1);
        }
        else {
            results[0] = nullAsEmpty(pattern);
            results[1] = EMPTY;
        }

        return results;
    }

    static List<String> tokenize(String string, String delimiters) {
        return tokenize(string, delimiters, true, true);
    }

    static List<String> tokenize(String string, String delimiters, boolean trimTokens, boolean ignoreEmptyTokens) {
        if (!hasText(string)) {
            return Collections.emptyList();
        }
        StringTokenizer st = new StringTokenizer(string, delimiters);
        List<String> tokens = new ArrayList<String>();
        while (st.hasMoreTokens()) {
            String token = st.nextToken();
            if (trimTokens) {
                token = token.trim();
            }
            if (!ignoreEmptyTokens || token.length() > 0) {
                tokens.add(token);
            }
        }
        return tokens;
    }

}
@@ -26,7 +26,7 @@ public class TimeoutInfo {
        requestTimeout = in.readLong();
    }

    void encode(DataOutput out) throws IOException {
    void write(DataOutput out) throws IOException {
        out.writeLong(clientTime);
        out.writeLong(timeout);
        out.writeLong(requestTimeout);
@@ -0,0 +1,56 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.sql.JDBCType;

import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.assertRoundTrip;

public class ColumnInfoTests extends ESTestCase {
    static ColumnInfo varcharInfo(String name) {
        return new ColumnInfo(name, JDBCType.VARCHAR, "", "", "", "");
    }

    static ColumnInfo intInfo(String name) {
        return new ColumnInfo(name, JDBCType.INTEGER, "", "", "", "");
    }

    static ColumnInfo doubleInfo(String name) {
        return new ColumnInfo(name, JDBCType.DOUBLE, "", "", "", "");
    }

    static Object randomValueFor(ColumnInfo info) {
        switch (info.type) {
        case VARCHAR: return randomAlphaOfLength(5);
        case INTEGER: return randomInt();
        case DOUBLE: return randomDouble();
        default:
            throw new IllegalArgumentException("Unsupported type [" + info.type + "]");
        }
    }

    static ColumnInfo randomColumnInfo() {
        return new ColumnInfo(randomAlphaOfLength(5), randomFrom(JDBCType.values()), randomAlphaOfLength(5), randomAlphaOfLength(5),
                randomAlphaOfLength(5), randomAlphaOfLength(5));
    }

    public void testRoundTrip() throws IOException {
        assertRoundTrip(randomColumnInfo(), ColumnInfo::write, ColumnInfo::new);
    }

    public void testToString() {
        assertEquals("test.doc.a<type=[VARCHAR] catalog=[as] schema=[ads] label=[lab]>",
                new ColumnInfo("a", JDBCType.VARCHAR, "test.doc", "as", "ads", "lab").toString());
        assertEquals("test.doc.a<type=[VARCHAR]>",
                new ColumnInfo("a", JDBCType.VARCHAR, "test.doc", "", "", "").toString());
        assertEquals("string<type=[VARCHAR]>", varcharInfo("string").toString());
        assertEquals("int<type=[INTEGER]>", intInfo("int").toString());
        assertEquals("d<type=[DOUBLE]>", doubleInfo("d").toString());
    }
}
@@ -0,0 +1,28 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;

import java.io.IOException;

import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;

public class ErrorResponseTests extends ESTestCase {
    static ErrorResponse randomErrorResponse() {
        return new ErrorResponse(RequestType.META_TABLE, randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5));
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(MetaTableRequestTests::randomMetaTableRequest, randomErrorResponse());
    }

    public void testToString() {
        assertEquals("ErrorResponse<request=[INFO] message=[test] cause=[test] stack=[stack\nstack]>",
                new ErrorResponse(RequestType.INFO, "test", "test", "stack\nstack").toString());
    }
}
@@ -0,0 +1,30 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto.SqlExceptionType;

import java.io.IOException;

import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;

public class ExceptionResponseTests extends ESTestCase {
    static ExceptionResponse randomExceptionResponse() {
        return new ExceptionResponse(RequestType.META_TABLE, randomAlphaOfLength(5), randomAlphaOfLength(5),
                randomFrom(SqlExceptionType.values()));
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(MetaTableRequestTests::randomMetaTableRequest, randomExceptionResponse());
    }

    public void testToString() {
        assertEquals("ExceptionResponse<request=[INFO] message=[test] cause=[test] type=[SYNTAX]>",
                new ExceptionResponse(RequestType.INFO, "test", "test", SqlExceptionType.SYNTAX).toString());
    }
}
@@ -0,0 +1,28 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;

public class InfoRequestTests extends ESTestCase {
    static InfoRequest randomInfoRequest() {
        return new InfoRequest(randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5),
                randomAlphaOfLength(5), randomAlphaOfLength(5));
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomInfoRequest());
    }

    public void testToString() {
        assertEquals("InfoRequest<jvm=[version=[1.8.0_131] vendor=[testvendor] classPath=[testcp]] os=[name=[Mac OS X] version=[10.12.5]]>",
                new InfoRequest("1.8.0_131", "testvendor", "testcp", "Mac OS X", "10.12.5").toString());
    }
}
@@ -0,0 +1,28 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;

public class InfoResponseTests extends ESTestCase {
    static InfoResponse randomInfoResponse() {
        return new InfoResponse(randomAlphaOfLength(5), randomAlphaOfLength(5), randomByte(), randomByte(),
                randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5));
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(InfoRequestTests::randomInfoRequest, randomInfoResponse());
    }

    public void testToString() {
        assertEquals("InfoResponse<node=[adsf] cluster=[test_cluster] version=[6.0.0]/[major=[6] minor=[0] hash=[feed] date=[date]]>",
                new InfoResponse("adsf", "test_cluster", (byte) 6, (byte) 0, "6.0.0", "feed", "date").toString());
    }
}
@@ -0,0 +1,29 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.elasticsearch.xpack.sql.test.RoundTripTestUtils;

import java.io.IOException;
import java.util.function.Supplier;

public final class JdbcRoundTripTestUtils {
    private JdbcRoundTripTestUtils() {
        // Just static utilities
    }

    static void assertRoundTripCurrentVersion(Request request) throws IOException {
        RoundTripTestUtils.assertRoundTrip(request, Proto.INSTANCE::writeRequest, Proto.INSTANCE::readRequest);
    }

    static void assertRoundTripCurrentVersion(Supplier<Request> request, Response response) throws IOException {
        RoundTripTestUtils.assertRoundTrip(response,
                (r, out) -> Proto.INSTANCE.writeResponse(r, Proto.CURRENT_VERSION, out),
                in -> Proto.INSTANCE.readResponse(request.get(), in));
    }
}
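The shared RoundTripTestUtils helper lives in :sql:test-utils and is not part of this diff; the sketch below is only an assumption about its shape, inferred from how it is called above: serialize the message into an in-memory buffer with the supplied writer, read it back with the supplied reader, and compare with equals(), which is exactly what the equals()/hashCode() methods added to every proto class make possible.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Objects;

final class RoundTripSketch {
    interface Writer<T> { void write(T value, DataOutput out) throws IOException; }
    interface Reader<T> { T read(DataInput in) throws IOException; }

    // Illustrative stand-in for the real assertRoundTrip, not the actual helper.
    static <T> void assertRoundTrip(T expected, Writer<T> writer, Reader<T> reader) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writer.write(expected, new DataOutputStream(bytes));
        T read = reader.read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        if (false == Objects.equals(expected, read)) {
            throw new AssertionError("round trip changed the message: expected [" + expected + "] but got [" + read + "]");
        }
    }
}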
@@ -0,0 +1,29 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.sql.JDBCType;

import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.assertRoundTrip;

public class MetaColumnInfoTests extends ESTestCase {
    static MetaColumnInfo randomMetaColumnInfo() {
        return new MetaColumnInfo(randomAlphaOfLength(5), randomAlphaOfLength(5), randomFrom(JDBCType.values()),
                between(1, Integer.MAX_VALUE), between(1, Integer.MAX_VALUE));
    }

    public void testRoundTrip() throws IOException {
        assertRoundTrip(randomMetaColumnInfo(), MetaColumnInfo::write, MetaColumnInfo::new);
    }

    public void testToString() {
        assertEquals("test.doc.col<type=[VARCHAR] size=[100] position=[1]>",
                new MetaColumnInfo("test.doc", "col", JDBCType.VARCHAR, 100, 1).toString());
    }
}
@@ -0,0 +1,26 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;

public class MetaColumnRequestTests extends ESTestCase {
    public static MetaColumnRequest randomMetaColumnRequest() {
        return new MetaColumnRequest(randomAlphaOfLength(10), randomAlphaOfLength(10));
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomMetaColumnRequest());
    }

    public void testToString() {
        assertEquals("MetaColumnRequest<table=[test.do%] column=[d%]>", new MetaColumnRequest("test.do%", "d%").toString());
    }
}
@@ -0,0 +1,44 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.sql.JDBCType;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import static java.util.Collections.emptyList;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnInfoTests.randomMetaColumnInfo;


public class MetaColumnResponseTests extends ESTestCase {
    static MetaColumnResponse randomMetaColumnResponse() {
        int size = between(0, 10);
        List<MetaColumnInfo> columns = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            columns.add(randomMetaColumnInfo());
        }
        return new MetaColumnResponse(columns);
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(MetaColumnRequestTests::randomMetaColumnRequest, randomMetaColumnResponse());
    }

    public void testToString() {
        assertEquals("MetaColumnResponse<>", new MetaColumnResponse(emptyList()).toString());
        assertEquals("MetaColumnResponse<a.doc.col1<type=[VARCHAR] size=[100] position=[1]>, "
                + "a.doc.col2<type=[INTEGER] size=[16] position=[2]>, "
                + "b.doc.col1<type=[VARCHAR] size=[100] position=[1]>>", new MetaColumnResponse(Arrays.asList(
                new MetaColumnInfo("a.doc", "col1", JDBCType.VARCHAR, 100, 1),
                new MetaColumnInfo("a.doc", "col2", JDBCType.INTEGER, 16, 2),
                new MetaColumnInfo("b.doc", "col1", JDBCType.VARCHAR, 100, 1))).toString());
    }
}
@@ -0,0 +1,26 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;

public class MetaTableRequestTests extends ESTestCase {
    public static MetaTableRequest randomMetaTableRequest() {
        return new MetaTableRequest(randomAlphaOfLength(10));
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomMetaTableRequest());
    }

    public void testToString() {
        assertEquals("MetaTableRequest<test.do%>", new MetaTableRequest("test.do%").toString());
    }
}
@@ -0,0 +1,36 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import static java.util.Collections.emptyList;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;

public class MetaTableResponseTests extends ESTestCase {
    static MetaTableResponse randomMetaTableResponse() {
        int size = between(0, 10);
        List<String> tables = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            tables.add(randomAlphaOfLength(5));
        }
        return new MetaTableResponse(tables);
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(MetaTableRequestTests::randomMetaTableRequest, randomMetaTableResponse());
    }

    public void testToString() {
        assertEquals("MetaTableResponse<>", new MetaTableResponse(emptyList()).toString());
        assertEquals("MetaTableResponse<a.doc, b.doc, c.doc>", new MetaTableResponse(Arrays.asList("a.doc", "b.doc", "c.doc")).toString());
    }
}
@@ -0,0 +1,92 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Supplier;

import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.doubleInfo;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.intInfo;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.randomValueFor;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.varcharInfo;
import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.assertRoundTrip;
import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.roundTrip;

public class PageTests extends ESTestCase {
    static Page randomPage() {
        int columns = between(0, 10);
        List<ColumnInfo> columnInfo = new ArrayList<>();
        for (int c = 0; c < columns; c++) {
            @SuppressWarnings("unchecked")
            Supplier<ColumnInfo> info = randomFrom(
                    () -> varcharInfo(randomAlphaOfLength(5)),
                    () -> intInfo(randomAlphaOfLength(5)),
                    () -> doubleInfo(randomAlphaOfLength(5)));
            columnInfo.add(info.get());
        }
        return randomPageContents(columnInfo);
    }

    static Page randomPageContents(List<ColumnInfo> columnInfo) {
        Object[][] rows = new Object[between(0, 10)][];
        for (int r = 0; r < rows.length; r++) {
            rows[r] = new Object[columnInfo.size()];
            for (int c = 0; c < columnInfo.size(); c++) {
                rows[r][c] = randomValueFor(columnInfo.get(c));
            }
        }
        return new Page(columnInfo, rows);
    }

    public void testRoundTripNoReuse() throws IOException {
        Page example = randomPage();
        assertRoundTrip(example, Page::write, in -> {
            Page page = new Page(example.columnInfo());
            page.read(in);
            return page;
        });
    }

    public void testRoundTripReuse() throws IOException {
        Page example = randomPage();
        Page target = new Page(example.columnInfo());
        roundTrip(example, Page::write, in -> {target.read(in); return null;});
        assertEquals(example, target);

        example = randomPageContents(example.columnInfo());
        roundTrip(example, Page::write, in -> {target.read(in); return null;});
        assertEquals(example, target);
    }

    public void testToString() {
        assertEquals("\n\n",
                new Page(emptyList(), new Object[][] {
                    new Object[] {},
                    new Object[] {},
                }).toString());
        assertEquals("test\n",
                new Page(singletonList(varcharInfo("a")), new Object[][] {
                    new Object[] {"test"}
                }).toString());
        assertEquals("test, 1\n",
                new Page(Arrays.asList(varcharInfo("a"), intInfo("b")), new Object[][] {
                    new Object[] {"test", 1}
                }).toString());
        assertEquals("test, 1\nbar, 7\n",
                new Page(Arrays.asList(varcharInfo("a"), intInfo("b")), new Object[][] {
                    new Object[] {"test", 1},
                    new Object[] {"bar", 7}
                }).toString());

    }
}
@@ -8,9 +8,11 @@ package org.elasticsearch.xpack.sql.jdbc.net.protocol;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.TimeZone;

import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.TimeoutInfoTests.randomTimeoutInfo;
import static org.elasticsearch.xpack.sql.test.RoundTripTestUtils.assertRoundTrip;


public class QueryInitRequestTests extends ESTestCase {
    public static QueryInitRequest randomQueryInitRequest() {

@@ -18,6 +20,13 @@ public class QueryInitRequestTests extends ESTestCase {
    }

    public void testRoundTrip() throws IOException {
        assertRoundTrip(randomQueryInitRequest(), QueryInitRequest::encode, in -> (QueryInitRequest) ProtoUtils.readRequest(in));
        assertRoundTripCurrentVersion(randomQueryInitRequest());
    }

    public void testToString() {
        assertEquals("QueryInitRequest<query=[SELECT * FROM test.doc]>",
                new QueryInitRequest(10, "SELECT * FROM test.doc", TimeZone.getTimeZone("UTC"), new TimeoutInfo(1, 1, 1)).toString());
        assertEquals("QueryInitRequest<query=[SELECT * FROM test.doc] timeZone=[GMT-05:00]>",
                new QueryInitRequest(10, "SELECT * FROM test.doc", TimeZone.getTimeZone("GMT-5"), new TimeoutInfo(1, 1, 1)).toString());
    }
}
@@ -0,0 +1,36 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static java.util.Collections.singletonList;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.varcharInfo;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.PageTests.randomPage;

public class QueryInitResponseTests extends ESTestCase {
    static QueryInitResponse randomQueryInitResponse() {
        Page page = randomPage();
        return new QueryInitResponse(randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(5), page.columnInfo(), page);
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(QueryInitRequestTests::randomQueryInitRequest, randomQueryInitResponse());
    }

    public void testToString() {
        Page page = new Page(singletonList(varcharInfo("a")), new Object[][] {
            new Object[] {"test"},
            new Object[] {"string"},
        });
        assertEquals("QueryInitResponse<timeReceived=[123] timeSent=[456] requestId=[test_id] columns=[a<type=[VARCHAR]>] data=["
                + "\ntest\nstring\n]>",
                new QueryInitResponse(123, 456, "test_id", page.columnInfo(), page).toString());
    }
}
@@ -0,0 +1,27 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.TimeoutInfoTests.randomTimeoutInfo;

public class QueryPageRequestTests extends ESTestCase {
    public static QueryPageRequest randomQueryPageRequest(Page page) {
        return new QueryPageRequest(randomAlphaOfLength(5), randomTimeoutInfo(), page);
    }

    public void testRoundTrip() throws IOException {
        assertRoundTripCurrentVersion(randomQueryPageRequest(null));
    }

    public void testToString() {
        assertEquals("QueryPageRequest<test_id>", new QueryPageRequest("test_id", new TimeoutInfo(1, 1, 1), null).toString());
    }
}
@@ -0,0 +1,34 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.jdbc.net.protocol;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static java.util.Collections.singletonList;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfoTests.varcharInfo;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.JdbcRoundTripTestUtils.assertRoundTripCurrentVersion;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.PageTests.randomPage;
import static org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageRequestTests.randomQueryPageRequest;

public class QueryPageResponseTests extends ESTestCase {
    static QueryPageResponse randomQueryPageResponse(Page page) {
        return new QueryPageResponse(randomAlphaOfLength(5), page);
    }

    public void testRoundTrip() throws IOException {
        Page page = randomPage();
        assertRoundTripCurrentVersion(() -> randomQueryPageRequest(new Page(page.columnInfo())), randomQueryPageResponse(page));
    }

    public void testToString() {
        Page results = new Page(singletonList(varcharInfo("a")), new Object[][] {
            new Object[] {"test"}
        });
        assertEquals("QueryPageResponse<requestId=[test_id] data=[\ntest\n]>", new QueryPageResponse("test_id", results).toString());
    }
}
@@ -17,6 +17,6 @@ public class TimeoutInfoTests extends ESTestCase {
    }

    public void testRoundTrip() throws IOException {
        assertRoundTrip(randomTimeoutInfo(), TimeoutInfo::encode, TimeoutInfo::new);
        assertRoundTrip(randomTimeoutInfo(), TimeoutInfo::write, TimeoutInfo::new);
    }
}
@@ -43,11 +43,13 @@ task generateGitHash {
dependencies {
    compile project(':x-pack-elasticsearch:sql:net-client')
    compile project(':x-pack-elasticsearch:sql:jdbc-proto')
    compile project(':x-pack-elasticsearch:sql:shared-proto')
    /* We want to limit these dependencies so do not add anything to this list
     * without serious consideration, and probably shading. */

    // Used by the embedded sql tests
    testCompile project(path: ':client:transport', configuration: 'runtime')
    testCompile project(path: ':x-pack-elasticsearch:plugin', configuration: 'testArtifacts')
    testCompile project(path: ':x-pack-elasticsearch:plugin', configuration: 'testArtifacts') // NOCOMMIT remove me?
    testCompile project(':x-pack-elasticsearch:sql:test-utils')
    testCompile "net.sourceforge.csvjdbc:csvjdbc:1.0.31"

@@ -56,8 +58,10 @@ dependencies {

dependencyLicenses {
    mapping from: /jdbc-proto.*/, to: 'elasticsearch'
    mapping from: /shared-proto.*/, to: 'elasticsearch'
    mapping from: /net-client.*/, to: 'elasticsearch'
    ignoreSha 'jdbc-proto'
    ignoreSha 'shared-proto'
    ignoreSha 'net-client'
}
@@ -5,22 +5,22 @@
 */
package org.elasticsearch.xpack.sql.jdbc.jdbc;

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.RowIdLifetime;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.sql.Types;
import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.xpack.sql.jdbc.net.client.Cursor;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnInfo;
import org.elasticsearch.xpack.sql.jdbc.util.Version;
import org.elasticsearch.xpack.sql.net.client.util.ObjectUtils;

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.JDBCType;
import java.sql.ResultSet;
import java.sql.RowIdLifetime;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.ArrayList;
import java.util.List;

import static org.elasticsearch.xpack.sql.net.client.util.StringUtils.EMPTY;
import static org.elasticsearch.xpack.sql.net.client.util.StringUtils.hasText;


@@ -824,8 +824,8 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
        row[ 1] = EMPTY;
        row[ 2] = col.table;
        row[ 3] = col.name;
        row[ 4] = col.type;
        row[ 5] = JdbcUtils.nameOf(col.type);
        row[ 4] = col.type.getVendorTypeNumber();
        row[ 5] = col.type.getName();
        row[ 6] = col.position; // NOCOMMIT this doesn't seem right
        row[ 7] = null;
        row[ 8] = null;

@@ -1186,11 +1186,11 @@
        Object obj = cols[i];
        if (obj instanceof String) {
            String name = obj.toString();
            int type = Types.VARCHAR;
            JDBCType type = JDBCType.VARCHAR;
            if (i + 1 < cols.length) {
                // check if the next item it's a type
                if (cols[i + 1] instanceof Class) {
                    type = JdbcUtils.fromClass((Class<?>) cols[i + 1]);
                    type = JDBCType.valueOf(JdbcUtils.fromClass((Class<?>) cols[i + 1]));
                    i++;
                }
                // it's not, use the default and move on
@@ -59,7 +59,7 @@ class JdbcParameterMetaData implements ParameterMetaData, JdbcWrapper {

    @Override
    public String getParameterClassName(int param) throws SQLException {
        return JdbcUtils.nameOf(paramInfo(param).type.getVendorTypeNumber().intValue());
        return paramInfo(param).type.name(); // NOCOMMIT this is almost certainly wrong
    }

    @Override
@@ -16,6 +16,7 @@ import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.JDBCType;
import java.sql.NClob;
import java.sql.Ref;
import java.sql.ResultSet;

@@ -316,14 +317,14 @@ class JdbcResultSet implements ResultSet, JdbcWrapper {
            return type.cast(val);
        }

        int columnType = cursor.columns().get(columnIndex - 1).type;
        JDBCType columnType = cursor.columns().get(columnIndex - 1).type;

        T t = TypeConverter.convert(val, columnType, type);

        if (t != null) {
            return t;
        }
        throw new SQLException(format(Locale.ROOT, "Conversion from type %s to %s not supported", JdbcUtils.nameOf(columnType), type.getName()));
        throw new SQLException("Conversion from type [" + columnType + "] to [" + type.getName() + "] not supported");
    }

    @Override
@@ -109,12 +109,12 @@ class JdbcResultSetMetaData implements ResultSetMetaData, JdbcWrapper {

    @Override
    public int getColumnType(int column) throws SQLException {
        return column(column).type;
        return column(column).type.getVendorTypeNumber();
    }

    @Override
    public String getColumnTypeName(int column) throws SQLException {
        return JdbcUtils.typeName(column(column).type);
        return column(column).type.name();
    }

    @Override

@@ -137,7 +137,7 @@ class JdbcResultSetMetaData implements ResultSetMetaData, JdbcWrapper {

    @Override
    public String getColumnClassName(int column) throws SQLException {
        return JdbcUtils.nameOf(column(column).type);
        return column(column).type.getName(); // NOCOMMIT this is almost certainly wrong.
    }

    private void checkOpen() throws SQLException {
@@ -14,19 +14,14 @@ import java.sql.Time;
import java.sql.Timestamp;

import static java.sql.Types.BIGINT;
import static java.sql.Types.BINARY;
import static java.sql.Types.BIT;
import static java.sql.Types.BLOB;
import static java.sql.Types.BOOLEAN;
import static java.sql.Types.CHAR;
import static java.sql.Types.CLOB;
import static java.sql.Types.DATE;
import static java.sql.Types.DECIMAL;
import static java.sql.Types.DOUBLE;
import static java.sql.Types.FLOAT;
import static java.sql.Types.INTEGER;
import static java.sql.Types.LONGVARBINARY;
import static java.sql.Types.LONGVARCHAR;
import static java.sql.Types.NULL;
import static java.sql.Types.NUMERIC;
import static java.sql.Types.REAL;

@@ -71,57 +66,6 @@ public abstract class JdbcUtils {
        return wrapperClass;
    }

    public static String nameOf(int jdbcType) {
        return JDBCType.valueOf(jdbcType).getName();
    }

    // see javax.sql.rowset.RowSetMetaDataImpl
    // and https://db.apache.org/derby/docs/10.5/ref/rrefjdbc20377.html
    public static Class<?> classOf(int jdbcType) {

        switch (jdbcType) {
        case NUMERIC:
        case DECIMAL:
            return BigDecimal.class;
        case BOOLEAN:
        case BIT:
            return Boolean.class;
        case TINYINT:
            return Byte.class;
        case SMALLINT:
            return Short.class;
        case INTEGER:
            return Integer.class;
        case BIGINT:
            return Long.class;
        case REAL:
            return Float.class;
        case FLOAT:
        case DOUBLE:
            return Double.class;
        case BINARY:
        case VARBINARY:
        case LONGVARBINARY:
            return byte[].class;
        case CHAR:
        case VARCHAR:
        case LONGVARCHAR:
            return String.class;
        case DATE:
            return Date.class;
        case TIME:
            return Time.class;
        case TIMESTAMP:
            return Timestamp.class;
        case BLOB:
            return Blob.class;
        case CLOB:
            return Clob.class;
        default:
            throw new IllegalArgumentException("Unsupported JDBC type [" + jdbcType + "/" + nameOf(jdbcType) + "]");
        }
    }

    public static int fromClass(Class<?> clazz) {
        if (clazz == null) {
            return NULL;
@ -6,6 +6,7 @@
|
|||
package org.elasticsearch.xpack.sql.jdbc.jdbc;
|
||||
|
||||
import java.sql.Date;
|
||||
import java.sql.JDBCType;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Time;
|
||||
import java.sql.Timestamp;
|
||||
|
@ -21,25 +22,6 @@ import java.util.TimeZone;
|
|||
import java.util.function.Function;
|
||||
|
||||
import static java.lang.String.format;
|
||||
import static java.sql.Types.BIGINT;
|
||||
import static java.sql.Types.BINARY;
|
||||
import static java.sql.Types.BIT;
|
||||
import static java.sql.Types.BOOLEAN;
|
||||
import static java.sql.Types.CHAR;
|
||||
import static java.sql.Types.DATE;
|
||||
import static java.sql.Types.DOUBLE;
|
||||
import static java.sql.Types.FLOAT;
|
||||
import static java.sql.Types.INTEGER;
|
||||
import static java.sql.Types.LONGVARBINARY;
|
||||
import static java.sql.Types.LONGVARCHAR;
|
||||
import static java.sql.Types.REAL;
|
||||
import static java.sql.Types.SMALLINT;
|
||||
import static java.sql.Types.TIME;
|
||||
import static java.sql.Types.TIMESTAMP;
|
||||
import static java.sql.Types.TIMESTAMP_WITH_TIMEZONE;
|
||||
import static java.sql.Types.TINYINT;
|
||||
import static java.sql.Types.VARBINARY;
|
||||
import static java.sql.Types.VARCHAR;
|
||||
import static java.util.Calendar.DAY_OF_MONTH;
|
||||
import static java.util.Calendar.ERA;
|
||||
import static java.util.Calendar.HOUR_OF_DAY;
|
||||
|
@ -99,7 +81,7 @@ abstract class TypeConverter {
}

@SuppressWarnings("unchecked")
static <T> T convert(Object val, int columnType, Class<T> type) throws SQLException {
static <T> T convert(Object val, JDBCType columnType, Class<T> type) throws SQLException {
if (type == null) {
return (T) asNative(val, columnType);
}

@ -161,7 +143,7 @@ abstract class TypeConverter {
}

// keep in check with JdbcUtils#columnType
private static Object asNative(Object v, int columnType) {
private static Object asNative(Object v, JDBCType columnType) {
Object result = null;
switch (columnType) {
case BIT:

@ -198,7 +180,7 @@ abstract class TypeConverter {
return nativeValue == null ? null : String.valueOf(nativeValue);
}

private static Boolean asBoolean(Object val, int columnType) {
private static Boolean asBoolean(Object val, JDBCType columnType) {
switch (columnType) {
case BIT:
case BOOLEAN:

@ -215,7 +197,7 @@ abstract class TypeConverter {
}
}

private static Byte asByte(Object val, int columnType) throws SQLException {
private static Byte asByte(Object val, JDBCType columnType) throws SQLException {
switch (columnType) {
case BIT:
case BOOLEAN:

@ -235,7 +217,7 @@ abstract class TypeConverter {
return null;
}

private static Short asShort(Object val, int columnType) throws SQLException {
private static Short asShort(Object val, JDBCType columnType) throws SQLException {
switch (columnType) {
case BIT:
case BOOLEAN:

@ -255,7 +237,7 @@ abstract class TypeConverter {
return null;
}

private static Integer asInteger(Object val, int columnType) throws SQLException {
private static Integer asInteger(Object val, JDBCType columnType) throws SQLException {
switch (columnType) {
case BIT:
case BOOLEAN:

@ -275,7 +257,7 @@ abstract class TypeConverter {
return null;
}

private static Long asLong(Object val, int columnType) throws SQLException {
private static Long asLong(Object val, JDBCType columnType) throws SQLException {
switch (columnType) {
case BIT:
case BOOLEAN:

@ -302,7 +284,7 @@ abstract class TypeConverter {
return null;
}

private static Float asFloat(Object val, int columnType) throws SQLException {
private static Float asFloat(Object val, JDBCType columnType) throws SQLException {
switch (columnType) {
case BIT:
case BOOLEAN:

@ -322,7 +304,7 @@ abstract class TypeConverter {
return null;
}

private static Double asDouble(Object val, int columnType) throws SQLException {
private static Double asDouble(Object val, JDBCType columnType) throws SQLException {
switch (columnType) {
case BIT:
case BOOLEAN:

@ -342,7 +324,7 @@ abstract class TypeConverter {
return null;
}

private static Date asDate(Object val, int columnType) throws SQLException {
private static Date asDate(Object val, JDBCType columnType) throws SQLException {
switch (columnType) {
case TIME:
// time has no date component

@ -357,7 +339,7 @@ abstract class TypeConverter {
return null;
}

private static Time asTime(Object val, int columnType) throws SQLException {
private static Time asTime(Object val, JDBCType columnType) throws SQLException {
switch (columnType) {
case DATE:
// date has no time component

@ -372,7 +354,7 @@ abstract class TypeConverter {
return null;
}

private static Timestamp asTimestamp(Object val, int columnType) throws SQLException {
private static Timestamp asTimestamp(Object val, JDBCType columnType) throws SQLException {
switch (columnType) {
case DATE:
return new Timestamp(utcMillisRemoveTime(((Number) val).longValue()));

@ -387,26 +369,26 @@ abstract class TypeConverter {
return null;
}

private static byte[] asByteArray(Object val, int columnType) {
private static byte[] asByteArray(Object val, JDBCType columnType) {
throw new UnsupportedOperationException();
}
private static LocalDate asLocalDate(Object val, int columnType) {
private static LocalDate asLocalDate(Object val, JDBCType columnType) {
throw new UnsupportedOperationException();
}

private static LocalTime asLocalTime(Object val, int columnType) {
private static LocalTime asLocalTime(Object val, JDBCType columnType) {
throw new UnsupportedOperationException();
}

private static LocalDateTime asLocalDateTime(Object val, int columnType) {
private static LocalDateTime asLocalDateTime(Object val, JDBCType columnType) {
throw new UnsupportedOperationException();
}

private static OffsetTime asOffsetTime(Object val, int columnType) {
private static OffsetTime asOffsetTime(Object val, JDBCType columnType) {
throw new UnsupportedOperationException();
}

private static OffsetDateTime asOffsetDateTime(Object val, int columnType) {
private static OffsetDateTime asOffsetDateTime(Object val, JDBCType columnType) {
throw new UnsupportedOperationException();
}
@ -5,12 +5,13 @@
*/
package org.elasticsearch.xpack.sql.jdbc.net.client;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Page;
import org.elasticsearch.xpack.sql.net.client.util.StringUtils;

import java.sql.SQLException;
import java.util.List;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo;
import org.elasticsearch.xpack.sql.net.client.util.StringUtils;

class DefaultCursor implements Cursor {

private final JdbcHttpClient client;
@ -16,23 +16,23 @@ import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.SqlExceptionType;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ProtoUtils;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Page;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.ResponseType;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageRequest;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageResponse;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Response;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.TimeoutInfo;
import org.elasticsearch.xpack.sql.jdbc.util.BytesArray;
import org.elasticsearch.xpack.sql.jdbc.util.FastByteArrayInputStream;
import org.elasticsearch.xpack.sql.net.client.util.StringUtils;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;

import java.io.Closeable;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.SQLException;
import java.time.Instant;
@ -66,67 +66,21 @@ public class JdbcHttpClient implements Closeable {
}

public Cursor query(String sql, TimeZone timeZone, RequestMeta meta) throws SQLException {
BytesArray ba = http.put(out -> queryRequest(out, meta, sql, timeZone));
return doIO(ba, in -> queryResponse(in, meta));
}

private void queryRequest(DataOutput out, RequestMeta meta, String sql, TimeZone timeZone) throws IOException {
int fetch = meta.fetchSize() >= 0 ? meta.fetchSize() : conCfg.pageSize();
ProtoUtils.write(out, new QueryInitRequest(fetch, sql, timeZone, timeout(meta)));
QueryInitRequest request = new QueryInitRequest(fetch, sql, timeZone, timeout(meta));
BytesArray ba = http.put(out -> Proto.INSTANCE.writeRequest(request, out));
QueryInitResponse response = doIO(ba, in -> (QueryInitResponse) readResponse(request, in));
return new DefaultCursor(this, response.requestId, (Page) response.data, meta);
}

/**
* Read the next page of results, updating the {@link Page} and returning
* the scroll id to use to fetch the next page.
*/
public String nextPage(String requestId, Page page, RequestMeta meta) throws SQLException {
BytesArray ba = http.put(out -> ProtoUtils.write(out, new QueryPageRequest(requestId, timeout(meta))));
return doIO(ba, in -> pageResponse(in, page));
}

private TimeoutInfo timeout(RequestMeta meta) {
// client time
long clientTime = Instant.now().toEpochMilli();

// timeout (in ms)
long timeout = meta.timeoutInMs();
if (timeout == 0) {
timeout = conCfg.getQueryTimeout();
}
return new TimeoutInfo(clientTime, timeout, conCfg.getPageTimeout());
}

private Cursor queryResponse(DataInput in, RequestMeta meta) throws IOException, SQLException {
QueryInitResponse response = readResponse(in, Action.QUERY_INIT);

// finally read data
// allocate columns
int rows = in.readInt();
Page page = Page.of(response.columns, rows);
readData(in, page, rows);

return new DefaultCursor(this, response.requestId, page, meta);
}

private void readData(DataInput in, Page page, int rows) throws IOException {
page.resize(rows);
int[] jdbcTypes = page.columnInfo().stream()
.mapToInt(c -> c.type)
.toArray();

for (int row = 0; row < rows; row++) {
for (int column = 0; column < jdbcTypes.length; column++) {
page.column(column)[row] = ProtoUtils.readValue(in, jdbcTypes[column]);
}
}
}

private String pageResponse(DataInput in, Page page) throws IOException, SQLException {
QueryPageResponse response = readResponse(in, Action.QUERY_PAGE);

// read the data
// allocate columns
int rows = in.readInt();
page.resize(rows); // NOCOMMIT I believe this is duplicated with readData
readData(in, page, rows);

return response.requestId;
QueryPageRequest request = new QueryPageRequest(requestId, timeout(meta), page);
BytesArray ba = http.put(out -> Proto.INSTANCE.writeRequest(request, out));
return doIO(ba, in -> ((QueryPageResponse) readResponse(request, in)).requestId);
}

public InfoResponse serverInfo() throws SQLException {
@ -137,26 +91,21 @@ public class JdbcHttpClient implements Closeable {
}

private InfoResponse fetchServerInfo() throws SQLException {
BytesArray ba = http.put(out -> ProtoUtils.write(out, new InfoRequest()));
return doIO(ba, in -> readResponse(in, Action.INFO));
InfoRequest request = new InfoRequest();
BytesArray ba = http.put(out -> Proto.INSTANCE.writeRequest(request, out));
return doIO(ba, in -> (InfoResponse) readResponse(request, in));
}

public List<String> metaInfoTables(String pattern) throws SQLException {
BytesArray ba = http.put(out -> ProtoUtils.write(out, new MetaTableRequest(pattern)));

return doIO(ba, in -> {
MetaTableResponse res = readResponse(in, Action.META_TABLE);
return res.tables;
});
MetaTableRequest request = new MetaTableRequest(pattern);
BytesArray ba = http.put(out -> Proto.INSTANCE.writeRequest(request, out));
return doIO(ba, in -> ((MetaTableResponse) readResponse(request, in)).tables);
}

public List<MetaColumnInfo> metaInfoColumns(String tablePattern, String columnPattern) throws SQLException {
BytesArray ba = http.put(out -> ProtoUtils.write(out, new MetaColumnRequest(tablePattern, columnPattern)));

return doIO(ba, in -> {
MetaColumnResponse res = readResponse(in, Action.META_COLUMN);
return res.columns;
});
MetaColumnRequest request = new MetaColumnRequest(tablePattern, columnPattern);
BytesArray ba = http.put(out -> Proto.INSTANCE.writeRequest(request, out));
return doIO(ba, in -> ((MetaColumnResponse) readResponse(request, in)).columns);
}

public void close() {
@ -179,36 +128,29 @@ public class JdbcHttpClient implements Closeable {
}
}

@SuppressWarnings("unchecked")
private static <R extends Response> R readResponse(DataInput in, Action expected) throws IOException, SQLException {
String errorMessage = ProtoUtils.readHeader(in);
if (errorMessage != null) {
throw new JdbcException(errorMessage);
}

int header = in.readInt();
private static Response readResponse(Request request, DataInput in) throws IOException, SQLException {
Response response = Proto.INSTANCE.readResponse(request, in);

Action action = Action.from(header);
if (expected != action) {
throw new JdbcException("Expected response for %s, found %s", expected, action);
}

Response response = ProtoUtils.readResponse(in, header);

// NOCOMMIT why not move the throw login into readResponse?
if (response instanceof ExceptionResponse) {
if (response.responseType() == ResponseType.EXCEPTION) {
ExceptionResponse ex = (ExceptionResponse) response;
throw SqlExceptionType.asException(ex.asSql, ex.message);
throw ex.asException();
}
if (response instanceof ErrorResponse) {
if (response.responseType() == ResponseType.EXCEPTION) {
ErrorResponse error = (ErrorResponse) response;
throw new JdbcException("%s", error.stack);
}
if (response instanceof Response) {
// NOCOMMIT I'd feel more comfortable either returning Response and passing the class in and calling responseClass.cast(response)
return (R) response;
}
return response;
}

throw new JdbcException("Invalid response status %08X", header);
private TimeoutInfo timeout(RequestMeta meta) {
// client time
long clientTime = Instant.now().toEpochMilli();

// timeout (in ms)
long timeout = meta.timeoutInMs();
if (timeout == 0) {
timeout = conCfg.getQueryTimeout();
}
return new TimeoutInfo(clientTime, timeout, conCfg.getPageTimeout());
}
}
@ -1,86 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.net.client;
|
||||
|
||||
import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcException;
|
||||
import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcUtils;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo;
|
||||
|
||||
import java.lang.reflect.Array;
|
||||
import java.sql.Date;
|
||||
import java.sql.Time;
|
||||
import java.sql.Timestamp;
|
||||
import java.util.List;
|
||||
|
||||
// Stores a page of data in a columnar-format since:
|
||||
// * the structure does not change
|
||||
// * array allocation can be quite efficient
|
||||
// * array can be reallocated (especially since the pages have the same size)
|
||||
|
||||
// c1 c1 c1
|
||||
// c2 c2 c2 ...
|
||||
public class Page {
|
||||
|
||||
// logical limit
|
||||
private int rows;
|
||||
|
||||
private final List<ColumnInfo> columnInfo;
|
||||
private final Object[][] data;
|
||||
|
||||
private Page(int rows, List<ColumnInfo> columnInfo, Object[][] data) {
|
||||
this.rows = rows;
|
||||
this.columnInfo = columnInfo;
|
||||
this.data = data;
|
||||
}
|
||||
|
||||
void resize(int newLength) {
|
||||
// resize only when needed
|
||||
// the array is kept around so check its length not the logical limit
|
||||
if (newLength > data[0].length) {
|
||||
for (int i = 0; i < data.length; i++) {
|
||||
data[i] = (Object[]) Array.newInstance(data[i].getClass().getComponentType(), newLength);
|
||||
}
|
||||
}
|
||||
rows = newLength;
|
||||
}
|
||||
|
||||
int rows() {
|
||||
return rows;
|
||||
}
|
||||
|
||||
List<ColumnInfo> columnInfo() {
|
||||
return columnInfo;
|
||||
}
|
||||
|
||||
Object[] column(int index) {
|
||||
if (index < 0 || index >= data.length) {
|
||||
throw new JdbcException("Invalid column %d (max is %d)", index, data.length - 1);
|
||||
}
|
||||
|
||||
return data[index];
|
||||
}
|
||||
|
||||
Object entry(int row, int column) {
|
||||
if (row < 0 || row >= rows) {
|
||||
throw new JdbcException("Invalid row %d (max is %d)", row, rows - 1);
|
||||
}
|
||||
return column(column)[row];
|
||||
}
|
||||
|
||||
static Page of(List<ColumnInfo> columnInfo, int dataSize) {
|
||||
Object[][] data = new Object[columnInfo.size()][];
|
||||
|
||||
for (int i = 0; i < columnInfo.size(); i++) {
|
||||
Class<?> types = JdbcUtils.classOf(columnInfo.get(i).type);
|
||||
if (types == Timestamp.class || types == Date.class || types == Time.class) {
|
||||
types = Long.class;
|
||||
}
|
||||
data[i] = (Object[]) Array.newInstance(types, dataSize);
|
||||
}
|
||||
|
||||
return new Page(dataSize, columnInfo, data);
|
||||
}
|
||||
}
|
|
@ -5,6 +5,7 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.sql.jdbc.framework;
|
||||
|
||||
import java.sql.JDBCType;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.ResultSetMetaData;
|
||||
import java.sql.SQLException;
|
||||
|
@ -13,7 +14,6 @@ import java.util.Calendar;
|
|||
import java.util.Locale;
|
||||
import java.util.TimeZone;
|
||||
|
||||
import static org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcUtils.nameOf;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
|
@ -51,9 +51,8 @@ public class JdbcAssert {
|
|||
int expectedType = expectedMeta.getColumnType(column);
|
||||
int actualType = actualMeta.getColumnType(column);
|
||||
|
||||
assertEquals(
|
||||
"Different column type for column [" + expectedName + "] (" + nameOf(expectedType) + " != " + nameOf(actualType) + ")",
|
||||
expectedType, actualType);
|
||||
assertEquals("Different column type for column [" + expectedName + "] (" + JDBCType.valueOf(expectedType) + " != "
|
||||
+ JDBCType.valueOf(actualType) + ")", expectedType, actualType);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
package org.elasticsearch.xpack.sql.jdbc.framework;
|
||||
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Response;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Response;
|
||||
import org.elasticsearch.xpack.sql.test.server.ProtoHttpServer;
|
||||
|
||||
/**
|
||||
|
|
|
@ -9,11 +9,12 @@ import com.sun.net.httpserver.HttpExchange;
|
|||
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.xpack.sql.TestUtils;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ProtoUtils;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Request;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Response;
|
||||
import org.elasticsearch.xpack.sql.plugin.jdbc.server.JdbcServer;
|
||||
import org.elasticsearch.xpack.sql.plugin.jdbc.server.JdbcServerProtoUtils;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Response;
|
||||
import org.elasticsearch.xpack.sql.server.AbstractSqlServer;
|
||||
import org.elasticsearch.xpack.sql.server.jdbc.JdbcServer;
|
||||
import org.elasticsearch.xpack.sql.test.server.ProtoHandler;
|
||||
|
||||
import java.io.DataInput;
|
||||
|
@ -26,13 +27,13 @@ class SqlProtoHandler extends ProtoHandler<Response> {
|
|||
private final JdbcServer server;
|
||||
|
||||
SqlProtoHandler(Client client) {
|
||||
super(client, ProtoUtils::readHeader, JdbcServerProtoUtils::write);
|
||||
super(client, response -> AbstractSqlServer.write(AbstractProto.CURRENT_VERSION, response));
|
||||
this.server = new JdbcServer(TestUtils.planExecutor(client), clusterName, () -> info.getNode().getName(), info.getVersion(), info.getBuild());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void handle(HttpExchange http, DataInput in) throws IOException {
|
||||
Request req = ProtoUtils.readRequest(in);
|
||||
Request req = Proto.INSTANCE.readRequest(in);
|
||||
server.handle(req, wrap(resp -> sendHttpResponse(http, resp), ex -> fail(http, ex)));
|
||||
}
|
||||
}
|
|
@ -5,18 +5,21 @@ description = 'The server components of SQL for Elasticsearch'
|
|||
dependencies {
|
||||
compile project(':x-pack-elasticsearch:sql:jdbc-proto')
|
||||
compile project(':x-pack-elasticsearch:sql:cli-proto')
|
||||
compile project(':x-pack-elasticsearch:sql:shared-proto')
|
||||
provided "org.elasticsearch.plugin:aggs-matrix-stats-client:${project.versions.elasticsearch}"
|
||||
//NOCOMMIT - we should upgrade to the latest 4.5.x if not 4.7
|
||||
compile 'org.antlr:antlr4-runtime:4.5.1-1'
|
||||
provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
|
||||
|
||||
|
||||
}
|
||||
|
||||
dependencyLicenses {
|
||||
mapping from: /jdbc-proto.*/, to: 'elasticsearch'
|
||||
mapping from: /cli-proto.*/, to: 'elasticsearch'
|
||||
mapping from: /shared-proto.*/, to: 'elasticsearch'
|
||||
ignoreSha 'jdbc-proto'
|
||||
ignoreSha 'cli-proto'
|
||||
ignoreSha 'shared-proto'
|
||||
}
|
||||
|
||||
// NOCOMMIT probably not a good thing to rely on.....
|
||||
|
|
|
@ -24,15 +24,15 @@ import org.elasticsearch.script.ScriptService;
|
|||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.watcher.ResourceWatcherService;
|
||||
import org.elasticsearch.xpack.sql.execution.PlanExecutor;
|
||||
import org.elasticsearch.xpack.sql.plugin.jdbc.action.JdbcAction;
|
||||
import org.elasticsearch.xpack.sql.plugin.jdbc.action.TransportJdbcAction;
|
||||
import org.elasticsearch.xpack.sql.plugin.jdbc.http.JdbcHttpHandler;
|
||||
import org.elasticsearch.xpack.sql.plugin.sql.action.SqlAction;
|
||||
import org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction;
|
||||
import org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction;
|
||||
import org.elasticsearch.xpack.sql.server.cli.CliAction;
|
||||
import org.elasticsearch.xpack.sql.server.cli.CliHttpHandler;
|
||||
import org.elasticsearch.xpack.sql.server.cli.TransportCliAction;
|
||||
import org.elasticsearch.xpack.sql.server.jdbc.JdbcAction;
|
||||
import org.elasticsearch.xpack.sql.server.jdbc.JdbcHttpHandler;
|
||||
import org.elasticsearch.xpack.sql.server.jdbc.TransportJdbcAction;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
|
|
|
@ -1,112 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.sql.plugin.jdbc.server;
|
||||
|
||||
import org.elasticsearch.ResourceNotFoundException;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.xpack.sql.analysis.AnalysisException;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.DataResponse;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ErrorResponse;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ExceptionResponse;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.Action;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.SqlExceptionType;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ProtoUtils;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitResponse;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Response;
|
||||
import org.elasticsearch.xpack.sql.parser.ParsingException;
|
||||
import org.elasticsearch.xpack.sql.session.RowSet;
|
||||
import org.elasticsearch.xpack.sql.session.RowSetCursor;
|
||||
import org.joda.time.ReadableInstant;
|
||||
|
||||
import java.io.DataOutput;
|
||||
import java.io.DataOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintWriter;
|
||||
import java.io.StringWriter;
|
||||
import java.sql.Types;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
import static org.elasticsearch.xpack.sql.util.StringUtils.EMPTY;
|
||||
|
||||
public abstract class JdbcServerProtoUtils {
|
||||
|
||||
public static BytesReference write(Response response) throws IOException {
|
||||
try (BytesStreamOutput array = new BytesStreamOutput();
|
||||
DataOutputStream out = new DataOutputStream(array)) {
|
||||
ProtoUtils.write(out, response);
|
||||
|
||||
// serialize payload (if present)
|
||||
if (response instanceof DataResponse) { // NOCOMMIT why not implement an interface?
|
||||
RowSetCursor cursor = (RowSetCursor) ((QueryInitResponse) response).data;
|
||||
|
||||
if (cursor != null) {
|
||||
JdbcServerProtoUtils.write(out, cursor);
|
||||
}
|
||||
}
|
||||
out.flush();
|
||||
return array.bytes();
|
||||
}
|
||||
}
|
||||
|
||||
private static void write(DataOutput out, RowSet rowSet) throws IOException {
|
||||
out.writeInt(rowSet.size());
|
||||
int[] jdbcTypes = rowSet.schema().types().stream()
|
||||
.mapToInt(dt -> dt.sqlType().getVendorTypeNumber())
|
||||
.toArray();
|
||||
|
||||
// unroll forEach manually to avoid a Consumer + try/catch for each value...
|
||||
for (boolean hasRows = rowSet.hasCurrentRow(); hasRows; hasRows = rowSet.advanceRow()) {
|
||||
for (int i = 0; i < rowSet.rowSize(); i++) {
|
||||
Object value = rowSet.column(i);
|
||||
// unpack Joda classes on the server-side to not 'pollute' the common project and thus the client
|
||||
if (jdbcTypes[i] == Types.TIMESTAMP && value instanceof ReadableInstant) {
|
||||
// NOCOMMIT feels like a hack that'd be better cleaned up another way.
|
||||
value = ((ReadableInstant) value).getMillis();
|
||||
}
|
||||
ProtoUtils.writeValue(out, value, jdbcTypes[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static Response exception(Throwable cause, Action action) {
|
||||
SqlExceptionType sqlExceptionType = sqlExceptionType(cause);
|
||||
|
||||
String message = EMPTY;
|
||||
String cs = EMPTY;
|
||||
if (cause != null) {
|
||||
if (Strings.hasText(cause.getMessage())) {
|
||||
message = cause.getMessage();
|
||||
}
|
||||
cs = cause.getClass().getName();
|
||||
}
|
||||
|
||||
if (sqlExceptionType != null) {
|
||||
return new ExceptionResponse(action, message, cs, sqlExceptionType);
|
||||
}
|
||||
else {
|
||||
// TODO: might want to 'massage' this
|
||||
StringWriter sw = new StringWriter();
|
||||
cause.printStackTrace(new PrintWriter(sw));
|
||||
return new ErrorResponse(action, message, cs, sw.toString());
|
||||
}
|
||||
}
|
||||
|
||||
private static SqlExceptionType sqlExceptionType(Throwable cause) {
|
||||
if (cause instanceof AnalysisException || cause instanceof ResourceNotFoundException) {
|
||||
return SqlExceptionType.DATA;
|
||||
}
|
||||
if (cause instanceof ParsingException) {
|
||||
return SqlExceptionType.SYNTAX;
|
||||
}
|
||||
if (cause instanceof TimeoutException) {
|
||||
return SqlExceptionType.TIMEOUT;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,86 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.server;

import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.xpack.sql.analysis.AnalysisException;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto;
import org.elasticsearch.xpack.sql.parser.ParsingException;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractErrorResponse;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractExceptionResponse;
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto.SqlExceptionType;
import org.elasticsearch.xpack.sql.protocol.shared.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Response;

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.concurrent.TimeoutException;

import static org.elasticsearch.xpack.sql.util.StringUtils.EMPTY;

public abstract class AbstractSqlServer {
public final void handle(Request req, ActionListener<Response> listener) {
try {
innerHandle(req, listener);
} catch (Exception e) {
listener.onResponse(exceptionResponse(req, e));
}
}

protected final Response exceptionResponse(Request req, Exception e) {
// NOCOMMIT I wonder why we don't just teach the servers to handle ES's normal exception response.....
SqlExceptionType exceptionType = sqlExceptionType(e);

String message = EMPTY;
String cs = EMPTY;
if (Strings.hasText(e.getMessage())) {
message = e.getMessage();
}
cs = e.getClass().getName();

if (exceptionType != null) {
return buildExceptionResponse(req, message, cs, exceptionType);
} else {
StringWriter sw = new StringWriter();
e.printStackTrace(new PrintWriter(sw));
return buildErrorResponse(req, message, cs, sw.toString());
}
}

protected abstract void innerHandle(Request req, ActionListener<Response> listener);
protected abstract AbstractExceptionResponse<?> buildExceptionResponse(Request request, String message, String cause,
SqlExceptionType exceptionType);
protected abstract AbstractErrorResponse<?> buildErrorResponse(Request request, String message, String cause, String stack);

public static BytesReference write(int clientVersion, Response response) throws IOException {
try (BytesStreamOutput array = new BytesStreamOutput();
DataOutputStream out = new DataOutputStream(array)) {
Proto.INSTANCE.writeResponse(response, clientVersion, out);
out.flush();
return array.bytes();
}
}

private static SqlExceptionType sqlExceptionType(Throwable cause) {
if (cause instanceof AnalysisException || cause instanceof ResourceNotFoundException) {
return SqlExceptionType.DATA;
}
if (cause instanceof ParsingException) {
return SqlExceptionType.SYNTAX;
}
if (cause instanceof TimeoutException) {
return SqlExceptionType.TIMEOUT;
}

return null;
}
}
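The new AbstractSqlServer above centralizes the error handling the CLI and JDBC servers previously duplicated. Purely as an illustration of the contract it introduces (this sketch is not part of the commit; the real subclasses, CliServer and JdbcServer, appear further down in this diff), a concrete server built on it takes roughly this shape, where ExampleSqlServer, RequestType, ExceptionResponse and ErrorResponse stand in for the protocol-specific types:

// Hypothetical subclass sketch: innerHandle dispatches on the request type, while the
// two build* hooks let the shared exceptionResponse(...) produce protocol-specific error payloads.
public class ExampleSqlServer extends AbstractSqlServer {
    @Override
    protected void innerHandle(Request req, ActionListener<Response> listener) {
        // switch on req.requestType() and reply through listener.onResponse(...)
    }

    @Override
    protected AbstractExceptionResponse<?> buildExceptionResponse(Request request, String message, String cause,
            SqlExceptionType exceptionType) {
        return new ExceptionResponse((RequestType) request.requestType(), message, cause, exceptionType);
    }

    @Override
    protected AbstractErrorResponse<?> buildErrorResponse(Request request, String message, String cause, String stack) {
        return new ErrorResponse((RequestType) request.requestType(), message, cause, stack);
    }
}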
@ -15,6 +15,8 @@ import org.elasticsearch.rest.RestChannel;
|
|||
import org.elasticsearch.rest.RestController;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto;
|
||||
import org.elasticsearch.xpack.sql.server.AbstractSqlServer;
|
||||
import org.elasticsearch.xpack.sql.util.StringUtils;
|
||||
|
||||
import java.io.DataInputStream;
|
||||
|
@ -39,7 +41,7 @@ public class CliHttpHandler extends BaseRestHandler {
|
|||
}
|
||||
|
||||
try (DataInputStream in = new DataInputStream(request.content().streamInput())) {
|
||||
CliRequest cliRequest = new CliRequest(Proto.readRequest(in));
|
||||
CliRequest cliRequest = new CliRequest(Proto.INSTANCE.readRequest(in));
|
||||
return c -> client.executeLocally(CliAction.INSTANCE, cliRequest,
|
||||
ActionListener.wrap(response -> cliResponse(c, response), ex -> error(c, ex)));
|
||||
}
|
||||
|
@ -49,7 +51,9 @@ public class CliHttpHandler extends BaseRestHandler {
|
|||
BytesRestResponse restResponse = null;
|
||||
|
||||
try {
|
||||
restResponse = new BytesRestResponse(OK, TEXT_CONTENT_TYPE, CliServerProtoUtils.write(response.response()));
|
||||
// NOCOMMIT use a real version
|
||||
restResponse = new BytesRestResponse(OK, TEXT_CONTENT_TYPE,
|
||||
AbstractSqlServer.write(AbstractProto.CURRENT_VERSION, response.response()));
|
||||
} catch (IOException ex) {
|
||||
restResponse = new BytesRestResponse(INTERNAL_SERVER_ERROR, TEXT_CONTENT_TYPE, StringUtils.EMPTY);
|
||||
}
|
||||
|
|
|
@ -5,12 +5,12 @@
*/
package org.elasticsearch.xpack.sql.server.cli;

import java.util.Objects;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.xpack.sql.cli.net.protocol.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Request;

import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;
@ -7,7 +7,7 @@ package org.elasticsearch.xpack.sql.server.cli;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.sql.cli.net.protocol.Request;
import org.elasticsearch.xpack.sql.protocol.shared.Request;

public class CliRequestBuilder extends ActionRequestBuilder<CliRequest, CliResponse, CliRequestBuilder> {
@ -6,13 +6,11 @@
package org.elasticsearch.xpack.sql.server.cli;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.xpack.sql.cli.net.protocol.Response;
import org.elasticsearch.xpack.sql.protocol.shared.Response;
import org.elasticsearch.xpack.sql.session.RowSetCursor;

public class CliResponse extends ActionResponse {

private Response response;
private RowSetCursor cursor;

public CliResponse() {}

@ -22,14 +20,9 @@ public class CliResponse extends ActionResponse {

public CliResponse(Response response, RowSetCursor cursor) {
this.response = response;
this.cursor = cursor;
}

public Response response() {
return response;
}

public RowSetCursor cursor() {
return cursor;
}
}
@ -10,14 +10,17 @@ import org.elasticsearch.Version;
|
|||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.CommandRequest;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.CommandResponse;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.ErrorResponse;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.ExceptionResponse;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.InfoRequest;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.InfoResponse;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.Request;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.Response;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType;
|
||||
import org.elasticsearch.xpack.sql.execution.PlanExecutor;
|
||||
import org.elasticsearch.xpack.sql.execution.search.SearchHitRowSetCursor;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageRequest;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto.SqlExceptionType;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Response;
|
||||
import org.elasticsearch.xpack.sql.server.AbstractSqlServer;
|
||||
import org.elasticsearch.xpack.sql.util.StringUtils;
|
||||
|
||||
import java.util.TimeZone;
|
||||
|
@ -26,7 +29,7 @@ import java.util.function.Supplier;
|
|||
import static org.elasticsearch.action.ActionListener.wrap;
|
||||
import static org.elasticsearch.xpack.sql.util.StringUtils.EMPTY;
|
||||
|
||||
public class CliServer {
|
||||
public class CliServer extends AbstractSqlServer {
|
||||
|
||||
private final PlanExecutor executor;
|
||||
private final Supplier<InfoResponse> infoResponse;
|
||||
|
@ -37,23 +40,37 @@ public class CliServer {
|
|||
this.infoResponse = () -> new InfoResponse(nodeName.get(), clusterName, version.major, version.minor, version.toString(),
|
||||
build.shortHash(), build.date());
|
||||
}
|
||||
|
||||
public void handle(Request req, ActionListener<Response> listener) {
|
||||
|
||||
@Override
|
||||
protected void innerHandle(Request req, ActionListener<Response> listener) {
|
||||
RequestType requestType = (RequestType) req.requestType();
|
||||
try {
|
||||
if (req instanceof InfoRequest) {
|
||||
switch (requestType) {
|
||||
case INFO:
|
||||
listener.onResponse(info((InfoRequest) req));
|
||||
}
|
||||
else if (req instanceof CommandRequest) {
|
||||
break;
|
||||
case COMMAND:
|
||||
command((CommandRequest) req, listener);
|
||||
}
|
||||
else {
|
||||
listener.onResponse(new ExceptionResponse(req.requestType(), "Invalid requested", null));
|
||||
break;
|
||||
default:
|
||||
throw new IllegalArgumentException("Unsupported action [" + requestType + "]");
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
listener.onResponse(CliServerProtoUtils.exception(ex, req.requestType()));
|
||||
listener.onResponse(exceptionResponse(req, ex));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ErrorResponse buildErrorResponse(Request request, String message, String cause, String stack) {
|
||||
return new ErrorResponse((RequestType) request.requestType(), message, cause, stack);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ExceptionResponse buildExceptionResponse(Request request, String message, String cause,
|
||||
SqlExceptionType exceptionType) {
|
||||
return new ExceptionResponse((RequestType) request.requestType(), message, cause, exceptionType);
|
||||
}
|
||||
|
||||
public InfoResponse info(InfoRequest req) {
|
||||
return infoResponse.get();
|
||||
}
|
||||
|
@ -70,13 +87,10 @@ public class CliServer {
|
|||
requestId = StringUtils.nullAsEmpty(((SearchHitRowSetCursor) c).scrollId());
|
||||
}
|
||||
|
||||
// NOCOMMIT it looks like this tries to buffer the entire response in memory before returning it which is going to OOM some po
|
||||
// NOCOMMIT also this blocks the current thread while it iterates the cursor
|
||||
listener.onResponse(new CommandResponse(start, stop, requestId, CliUtils.toString(c)));
|
||||
},
|
||||
ex -> listener.onResponse(CliServerProtoUtils.exception(ex, req.requestType()))));
|
||||
ex -> listener.onResponse(exceptionResponse(req, ex))));
|
||||
}
|
||||
|
||||
public void queryPage(QueryPageRequest req, ActionListener<Response> listener) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
}
|
|
@ -1,60 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.sql.server.cli;
|
||||
|
||||
import org.elasticsearch.ResourceNotFoundException;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.xpack.sql.SqlException;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.ErrorResponse;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.ExceptionResponse;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.Proto.RequestType;
|
||||
import org.elasticsearch.xpack.sql.cli.net.protocol.Response;
|
||||
|
||||
import java.io.DataOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintWriter;
|
||||
import java.io.StringWriter;
|
||||
|
||||
import static org.elasticsearch.xpack.sql.util.StringUtils.EMPTY;
|
||||
|
||||
public abstract class CliServerProtoUtils {
|
||||
|
||||
public static BytesReference write(Response response) throws IOException {
|
||||
try (BytesStreamOutput array = new BytesStreamOutput();
|
||||
DataOutputStream out = new DataOutputStream(array)) {
|
||||
Proto.writeResponse(response, Proto.CURRENT_VERSION, out);
|
||||
out.flush();
|
||||
return array.bytes();
|
||||
}
|
||||
}
|
||||
|
||||
public static Response exception(Throwable cause, RequestType requestType) {
|
||||
String message = EMPTY;
|
||||
String cs = EMPTY;
|
||||
if (cause != null) {
|
||||
if (Strings.hasText(cause.getMessage())) {
|
||||
message = cause.getMessage();
|
||||
}
|
||||
cs = cause.getClass().getName();
|
||||
}
|
||||
|
||||
if (expectedException(cause)) {
|
||||
return new ExceptionResponse(requestType, message, cs);
|
||||
}
|
||||
else {
|
||||
StringWriter sw = new StringWriter();
|
||||
cause.printStackTrace(new PrintWriter(sw));
|
||||
return new ErrorResponse(requestType, message, cs, sw.toString());
|
||||
}
|
||||
}
|
||||
|
||||
private static boolean expectedException(Throwable cause) {
|
||||
return (cause instanceof SqlException || cause instanceof ResourceNotFoundException);
|
||||
}
|
||||
}
|
|
@ -3,7 +3,7 @@
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.plugin.jdbc.action;
package org.elasticsearch.xpack.sql.server.jdbc;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
@ -3,7 +3,7 @@
|
|||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.sql.plugin.jdbc.http;
|
||||
package org.elasticsearch.xpack.sql.server.jdbc;
|
||||
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.client.node.NodeClient;
|
||||
|
@ -13,11 +13,9 @@ import org.elasticsearch.rest.BytesRestResponse;
|
|||
import org.elasticsearch.rest.RestChannel;
|
||||
import org.elasticsearch.rest.RestController;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ProtoUtils;
|
||||
import org.elasticsearch.xpack.sql.plugin.jdbc.action.JdbcAction;
|
||||
import org.elasticsearch.xpack.sql.plugin.jdbc.action.JdbcRequest;
|
||||
import org.elasticsearch.xpack.sql.plugin.jdbc.action.JdbcResponse;
|
||||
import org.elasticsearch.xpack.sql.plugin.jdbc.server.JdbcServerProtoUtils;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto;
|
||||
import org.elasticsearch.xpack.sql.server.AbstractSqlServer;
|
||||
import org.elasticsearch.xpack.sql.util.StringUtils;
|
||||
|
||||
import java.io.DataInputStream;
|
||||
|
@ -44,13 +42,8 @@ public class JdbcHttpHandler extends BaseRestHandler { // NOCOMMIT these are cal
|
|||
}
|
||||
|
||||
try (DataInputStream in = new DataInputStream(request.content().streamInput())) {
|
||||
String msg = ProtoUtils.readHeader(in);
|
||||
if (msg != null) {
|
||||
return badProto(msg);
|
||||
}
|
||||
|
||||
try {
|
||||
return c -> client.executeLocally(JdbcAction.INSTANCE, new JdbcRequest(ProtoUtils.readRequest(in)),
|
||||
return c -> client.executeLocally(JdbcAction.INSTANCE, new JdbcRequest(Proto.INSTANCE.readRequest(in)),
|
||||
wrap(response -> jdbcResponse(c, response), ex -> error(c, ex)));
|
||||
|
||||
} catch (Exception ex) {
|
||||
|
@ -67,7 +60,9 @@ public class JdbcHttpHandler extends BaseRestHandler { // NOCOMMIT these are cal
|
|||
BytesRestResponse restResponse = null;
|
||||
|
||||
try {
|
||||
restResponse = new BytesRestResponse(OK, TEXT_CONTENT_TYPE, JdbcServerProtoUtils.write(response.response()));
|
||||
// NOCOMMIT use the real version
|
||||
restResponse = new BytesRestResponse(OK, TEXT_CONTENT_TYPE,
|
||||
AbstractSqlServer.write(AbstractProto.CURRENT_VERSION, response.response()));
|
||||
} catch (IOException ex) {
|
||||
logger.error("error building jdbc response", ex);
|
||||
restResponse = new BytesRestResponse(INTERNAL_SERVER_ERROR, TEXT_CONTENT_TYPE, StringUtils.EMPTY);
|
|
@ -3,14 +3,14 @@
|
|||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.sql.plugin.jdbc.action;
|
||||
|
||||
import java.util.Objects;
|
||||
package org.elasticsearch.xpack.sql.server.jdbc;
|
||||
|
||||
import org.elasticsearch.action.ActionRequest;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.CompositeIndicesRequest;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Request;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
|
@ -3,11 +3,11 @@
|
|||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.sql.plugin.jdbc.action;
|
||||
package org.elasticsearch.xpack.sql.server.jdbc;
|
||||
|
||||
import org.elasticsearch.action.ActionRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Request;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
|
||||
public class JdbcRequestBuilder extends ActionRequestBuilder<JdbcRequest, JdbcResponse, JdbcRequestBuilder> {
|
||||
|
|
@ -3,10 +3,10 @@
|
|||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.sql.plugin.jdbc.action;
|
||||
package org.elasticsearch.xpack.sql.server.jdbc;
|
||||
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Response;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Response;
|
||||
import org.elasticsearch.xpack.sql.session.RowSetCursor;
|
||||
|
||||
public class JdbcResponse extends ActionResponse {
|
|
@ -3,7 +3,7 @@
|
|||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.sql.plugin.jdbc.server;
|
||||
package org.elasticsearch.xpack.sql.server.jdbc;
|
||||
|
||||
import org.elasticsearch.Build;
|
||||
import org.elasticsearch.Version;
|
||||
|
@ -13,6 +13,8 @@ import org.elasticsearch.xpack.sql.analysis.catalog.EsType;
|
|||
import org.elasticsearch.xpack.sql.execution.PlanExecutor;
|
||||
import org.elasticsearch.xpack.sql.execution.search.SearchHitRowSetCursor;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ErrorResponse;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ExceptionResponse;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoRequest;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoResponse;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnInfo;
|
||||
|
@ -20,14 +22,18 @@ import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnRequest;
|
|||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaColumnResponse;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableRequest;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.MetaTableResponse;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Proto.RequestType;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitRequest;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryInitResponse;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.QueryPageRequest;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Request;
|
||||
import org.elasticsearch.xpack.sql.jdbc.net.protocol.Response;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.AbstractProto.SqlExceptionType;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Request;
|
||||
import org.elasticsearch.xpack.sql.protocol.shared.Response;
|
||||
import org.elasticsearch.xpack.sql.server.AbstractSqlServer;
|
||||
import org.elasticsearch.xpack.sql.type.DataType;
|
||||
import org.elasticsearch.xpack.sql.util.StringUtils;
|
||||
|
||||
import java.sql.JDBCType;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
|
@ -37,10 +43,11 @@ import java.util.regex.Pattern;
|
|||
|
||||
import static java.util.stream.Collectors.toList;
|
||||
import static org.elasticsearch.action.ActionListener.wrap;
|
||||
import static org.elasticsearch.xpack.sql.util.StringUtils.EMPTY;
|
||||
|
||||
public class JdbcServer {
|
||||
import static org.elasticsearch.common.Strings.coalesceToEmpty;
|
||||
import static org.elasticsearch.common.Strings.hasText;
|
||||
import static org.elasticsearch.common.Strings.tokenizeToStringArray;
|
||||
|
||||
public class JdbcServer extends AbstractSqlServer {
|
||||
private final PlanExecutor executor;
|
||||
private final Supplier<InfoResponse> infoResponse;
|
||||
|
||||
|
@ -49,58 +56,78 @@ public class JdbcServer {
|
|||
// Delay building the response until runtime because the node name is not available at startup
|
||||
this.infoResponse = () -> new InfoResponse(nodeName.get(), clusterName, version.major, version.minor, version.toString(), build.shortHash(), build.date());
|
||||
}
|
||||
|
||||
public void handle(Request req, ActionListener<Response> listener) {
|
||||
try {
|
||||
if (req instanceof InfoRequest) {
|
||||
listener.onResponse(info((InfoRequest) req));
|
||||
}
|
||||
else if (req instanceof MetaTableRequest) {
|
||||
listener.onResponse(metaTable((MetaTableRequest) req));
|
||||
}
|
||||
else if (req instanceof MetaColumnRequest) {
|
||||
listener.onResponse(metaColumn((MetaColumnRequest) req));
|
||||
}
|
||||
else if (req instanceof QueryInitRequest) {
|
||||
queryInit((QueryInitRequest) req, listener);
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
listener.onResponse(JdbcServerProtoUtils.exception(ex, req.action));
|
||||
|
||||
@Override
|
||||
protected void innerHandle(Request req, ActionListener<Response> listener) {
|
||||
RequestType requestType = (RequestType) req.requestType();
|
||||
switch (requestType) {
|
||||
case INFO:
|
||||
listener.onResponse(info((InfoRequest) req));
|
||||
break;
|
||||
case META_TABLE:
|
||||
listener.onResponse(metaTable((MetaTableRequest) req));
|
||||
break;
|
||||
case META_COLUMN:
|
||||
listener.onResponse(metaColumn((MetaColumnRequest) req));
|
||||
break;
|
||||
case QUERY_INIT:
|
||||
queryInit((QueryInitRequest) req, listener);
|
||||
break;
|
||||
case QUERY_PAGE:
|
||||
// TODO implement me
|
||||
default:
|
||||
throw new IllegalArgumentException("Unsupported action [" + requestType + "]");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected ErrorResponse buildErrorResponse(Request request, String message, String cause, String stack) {
|
||||
return new ErrorResponse((RequestType) request.requestType(), message, cause, stack);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ExceptionResponse buildExceptionResponse(Request request, String message, String cause,
|
||||
SqlExceptionType exceptionType) {
|
||||
return new ExceptionResponse((RequestType) request.requestType(), message, cause, exceptionType);
|
||||
}
|
||||
|
||||
public InfoResponse info(InfoRequest req) {
|
||||
return infoResponse.get();
|
||||
}
|
||||
|
||||
public MetaTableResponse metaTable(MetaTableRequest req) {
|
||||
String indexPattern = Strings.hasText(req.index) ? StringUtils.jdbcToEsPattern(req.index) : "*";
|
||||
String[] split = splitToIndexAndType(req.pattern());
|
||||
String index = split[0];
|
||||
String type = split[1];
|
||||
String indexPattern = hasText(index) ? StringUtils.jdbcToEsPattern(index) : "*";
|
||||
|
||||
Collection<EsType> types = executor.catalog().listTypes(indexPattern, req.type);
|
||||
Collection<EsType> types = executor.catalog().listTypes(indexPattern, type);
|
||||
return new MetaTableResponse(types.stream()
|
||||
.map(t -> t.index() + "." + t.name())
|
||||
.collect(toList()));
|
||||
}
|
||||
|
||||
public MetaColumnResponse metaColumn(MetaColumnRequest req) {
|
||||
String indexPattern = Strings.hasText(req.index) ? StringUtils.jdbcToEsPattern(req.index) : "*";
|
||||
String[] split = splitToIndexAndType(req.tablePattern());
|
||||
String index = split[0];
|
||||
String type = split[1];
|
||||
String indexPattern = Strings.hasText(index) ? StringUtils.jdbcToEsPattern(index) : "*";
|
||||
|
||||
Collection<EsType> types = executor.catalog().listTypes(indexPattern, req.type);
|
||||
Collection<EsType> types = executor.catalog().listTypes(indexPattern, type);
|
||||
|
||||
Pattern columnMatcher = Strings.hasText(req.column) ? StringUtils.likeRegex(req.column) : null;
|
||||
Pattern columnMatcher = hasText(req.columnPattern()) ? StringUtils.likeRegex(req.columnPattern()) : null;
|
||||
|
||||
List<MetaColumnInfo> resp = new ArrayList<>();
|
||||
for (EsType type : types) {
|
||||
for (EsType esType : types) {
|
||||
int pos = 0;
|
||||
for (Entry<String, DataType> entry : type.mapping().entrySet()) {
|
||||
for (Entry<String, DataType> entry : esType.mapping().entrySet()) {
|
||||
pos++;
|
||||
if (columnMatcher == null || columnMatcher.matcher(entry.getKey()).matches()) {
|
||||
String name = entry.getKey();
|
||||
String table = type.index() + "." + type.name();
|
||||
int tp = entry.getValue().sqlType().getVendorTypeNumber().intValue();
|
||||
String table = esType.index() + "." + esType.name();
|
||||
JDBCType tp = entry.getValue().sqlType();
|
||||
int size = entry.getValue().precision();
|
||||
resp.add(new MetaColumnInfo(name, table, tp, size, pos));
|
||||
resp.add(new MetaColumnInfo(table, name, tp, size, pos));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -114,20 +141,33 @@ public class JdbcServer {
|
|||
|
||||
executor.sql(req.query, req.timeZone, wrap(c -> {
|
||||
long stop = System.currentTimeMillis();
|
||||
String requestId = EMPTY;
|
||||
String requestId = "";
|
||||
if (c.hasNextSet() && c instanceof SearchHitRowSetCursor) {
|
||||
requestId = StringUtils.nullAsEmpty(((SearchHitRowSetCursor) c).scrollId());
|
||||
}
|
||||
|
||||
List<ColumnInfo> list = c.schema().stream()
|
||||
.map(e -> new ColumnInfo(e.name(), e.type().sqlType().getVendorTypeNumber().intValue(), EMPTY, EMPTY, EMPTY, EMPTY))
|
||||
List<ColumnInfo> columnInfo = c.schema().stream()
|
||||
.map(e -> new ColumnInfo(e.name(), e.type().sqlType(), "", "", "", ""))
|
||||
.collect(toList());
|
||||
|
||||
listener.onResponse(new QueryInitResponse(start, stop, requestId, list, c));
|
||||
}, ex -> listener.onResponse(JdbcServerProtoUtils.exception(ex, req.action))));
|
||||
listener.onResponse(new QueryInitResponse(start, stop, requestId, columnInfo, new RowSetCursorResultPage(c)));
|
||||
}, ex -> listener.onResponse(exceptionResponse(req, ex))));
|
||||
}
|
||||
|
||||
public void queryPage(QueryPageRequest req, ActionListener<Response> listener) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
static String[] splitToIndexAndType(String pattern) {
|
||||
String[] tokens = tokenizeToStringArray(pattern, ".");
|
||||
|
||||
if (tokens.length == 2) {
|
||||
return tokens;
|
||||
}
|
||||
if (tokens.length != 1) {
|
||||
throw new IllegalArgumentException("bad pattern: [" + pattern + "]");
|
||||
}
|
||||
|
||||
return new String[] {coalesceToEmpty(pattern), ""};
|
||||
}
|
||||
}
|
|
@ -0,0 +1,46 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.server.jdbc;

import org.elasticsearch.xpack.sql.jdbc.net.protocol.ResultPage;
import org.elasticsearch.xpack.sql.session.RowSet;
import org.joda.time.ReadableInstant;

import java.io.DataOutput;
import java.io.IOException;
import java.sql.JDBCType;

/**
* Adapts {@link RowSet} into a {@link ResultPage} so it can be serialized.
* Note that we are careful not to read the {@linkplain RowSet} more then
* once.
*/
public class RowSetCursorResultPage extends ResultPage {
private final RowSet rowSet;

public RowSetCursorResultPage(RowSet rowSet) {
this.rowSet = rowSet;
}

public void write(DataOutput out) throws IOException {
int rows = rowSet.size();
out.writeInt(rows);
if (rows == 0) {
return;
}
do {
for (int column = 0; column < rowSet.rowSize(); column++) {
JDBCType columnType = rowSet.schema().types().get(column).sqlType();
Object value = rowSet.column(column);
if (columnType == JDBCType.TIMESTAMP && value instanceof ReadableInstant) {
// TODO it feels like there should be a better way to do this
value = ((ReadableInstant) value).getMillis();
}
writeValue(out, value, columnType);
}
} while (rowSet.advanceRow());
}
}
@ -3,7 +3,7 @@
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.plugin.jdbc.action;
package org.elasticsearch.xpack.sql.server.jdbc;

import org.elasticsearch.Build;
import org.elasticsearch.Version;

@ -18,7 +18,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.sql.analysis.catalog.EsCatalog;
import org.elasticsearch.xpack.sql.execution.PlanExecutor;
import org.elasticsearch.xpack.sql.plugin.jdbc.server.JdbcServer;

import static org.elasticsearch.xpack.sql.util.ActionUtils.chain;
Some files were not shown because too many files have changed in this diff.