Remove checkstyle suppressions for sql/jdbc* (elastic/x-pack-elasticsearch#2379)

Removes the line length checkstyle suppression for `sql/jdbc*` and
fixes all the checkstyle violations. Also removes a few unused files
that had violations.

Original commit: elastic/x-pack-elasticsearch@031c2ba8e3
Nik Everett 2017-08-30 16:40:29 -04:00 committed by GitHub
parent fd13c54cdc
commit 7a2722e74a
13 changed files with 46 additions and 309 deletions

View File

@@ -8,8 +8,6 @@
<!-- These files are generated by ANTLR so its silly to hold them to our rules. -->
<suppress files="sql[/\\]server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]xpack[/\\]sql[/\\]parser[/\\]SqlBase(Base(Listener|Visitor)|Lexer|Listener|Parser|Visitor).java" checks="." />
<!-- NOCOMMIT Temporary-->
<suppress files="sql[/\\]jdbc[/\\].*.java" checks="LineLength" />
<suppress files="sql[/\\]server[/\\].*.java" checks="LineLength" />
<suppress files="sql[/\\]server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]xpack[/\\]sql[/\\]expression[/\\].*.java" checks="EqualsHashCode" />

View File

@@ -66,7 +66,8 @@ public class ProtoUtils {
case LONGVARCHAR:
result = in.readUTF();
break;
// NB: date/time is kept in its raw form since the JdbcDriver has to do calendar/timezone conversion anyway and thus the long value is relevant
// NB: date/time is kept in its raw form since the JdbcDriver has to do calendar/timezone
// conversion anyway and thus the long value is relevant
case TIMESTAMP:
result = in.readLong();
break;
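
As an aside, here is a minimal sketch (with a hypothetical raw value) of the calendar/timezone conversion the comment refers to; the JdbcDriver applies it at access time, which is why the protocol keeps the plain long:

```java
import java.util.Calendar;
import java.util.TimeZone;

public class TimestampConversionDemo {
    public static void main(String[] args) {
        long raw = 1504125629000L; // hypothetical value as read by in.readLong()
        // The caller-supplied Calendar/TimeZone is applied later by the driver,
        // so no conversion happens while decoding the response.
        Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("America/New_York"));
        cal.setTimeInMillis(raw);
        System.out.printf("%tF %<tT%n", cal); // 2017-08-30 16:40:29
    }
}
```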

View File

@@ -25,7 +25,8 @@ public class QueryInitResponse extends Response {
public final List<ColumnInfo> columns;
public final Payload data;
public QueryInitResponse(long serverTimeQueryReceived, long serverTimeResponseSent, String requestId, List<ColumnInfo> columns, Payload data) {
public QueryInitResponse(long serverTimeQueryReceived, long serverTimeResponseSent, String requestId, List<ColumnInfo> columns,
Payload data) {
this.serverTimeQueryReceived = serverTimeQueryReceived;
this.serverTimeResponseSent = serverTimeResponseSent;
this.requestId = requestId;

View File

@@ -7,7 +7,6 @@ package org.elasticsearch.xpack.sql.jdbc.debug;
import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration;
import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcException;
import org.elasticsearch.xpack.sql.jdbc.util.IOUtils;
import org.elasticsearch.xpack.sql.net.client.SuppressForbidden;
import java.io.OutputStreamWriter;
@@ -166,7 +165,9 @@ public final class Debug {
OUTPUT_REFS.remove(out);
DebugLog d = OUTPUT_CACHE.remove(out);
if (d != null) {
IOUtils.close(d.print);
if (d.print != null) {
d.print.close();
}
}
}
else {
@@ -182,7 +183,9 @@ public final class Debug {
// clear the streams
for (DebugLog d : OUTPUT_CACHE.values()) {
IOUtils.close(d.print);
if (d.print != null) {
d.print.close();
}
}
OUTPUT_CACHE.clear();

View File

@@ -31,6 +31,9 @@ import org.elasticsearch.xpack.sql.jdbc.debug.Debug;
import org.elasticsearch.xpack.sql.jdbc.net.client.JdbcHttpClient;
import org.elasticsearch.xpack.sql.net.client.util.StringUtils;
/**
* Implementation of {@link Connection} for Elasticsearch.
*/
public class JdbcConnection implements Connection, JdbcWrapper {
private final String url, userName;
@@ -254,14 +257,16 @@ public class JdbcConnection implements Connection, JdbcWrapper {
}
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability)
throws SQLException {
checkOpen();
checkHoldability(resultSetHoldability);
return prepareStatement(sql, resultSetType, resultSetConcurrency);
}
@Override
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability)
throws SQLException {
checkOpen();
checkHoldability(resultSetHoldability);
return prepareCall(sql, resultSetType, resultSetConcurrency);

View File

@@ -24,11 +24,12 @@ import java.util.List;
import static org.elasticsearch.xpack.sql.net.client.util.StringUtils.EMPTY;
import static org.elasticsearch.xpack.sql.net.client.util.StringUtils.hasText;
// Schema Information based on Postgres
// https://www.postgresql.org/docs/9.0/static/information-schema.html
// Currently virtually/synthetic tables are not supported so the client returns
// empty data instead of creating a query
/**
* Implementation of {@link DatabaseMetaData} for Elasticsearch. Draws inspiration
* from <a href="https://www.postgresql.org/docs/9.0/static/information-schema.html">
* PostgreSQL</a>. Virtual/synthetic tables are not supported so the client returns
* empty data instead of creating a query.
*/
class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
private final JdbcConnection con;
@@ -650,7 +651,8 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
}
@Override
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException {
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern)
throws SQLException {
return emptySet(con.cfg,
"PARAMETERS",
"PROCEDURE_CAT",
@@ -781,7 +783,8 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
}
@Override
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)
throws SQLException {
List<ColumnInfo> info = columnInfo("COLUMNS",
"TABLE_CAT",
"TABLE_SCHEM",
@@ -815,7 +818,8 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
}
String cat = defaultCatalog();
List<MetaColumnInfo> columns = con.client.metaInfoColumns(sqlWildcardToSimplePattern(tableNamePattern), sqlWildcardToRegexPattern(columnNamePattern));
List<MetaColumnInfo> columns = con.client.metaInfoColumns(
sqlWildcardToSimplePattern(tableNamePattern), sqlWildcardToRegexPattern(columnNamePattern));
Object[][] data = new Object[columns.size()][];
for (int i = 0; i < data.length; i++) {
data[i] = new Object[24];
@@ -886,7 +890,8 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
}
@Override
public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException {
public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, String foreignCatalog,
String foreignSchema, String foreignTable) throws SQLException {
throw new SQLFeatureNotSupportedException("Cross reference not supported");
}
@@ -1041,7 +1046,8 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
}
@Override
public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) throws SQLException {
public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern)
throws SQLException {
return emptySet(con.cfg,
"ATTRIBUTES",
"TYPE_CAT",
@@ -1145,7 +1151,8 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
}
@Override
public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) throws SQLException {
public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern)
throws SQLException {
return emptySet(con.cfg,
"FUNCTION_COLUMNS",
"FUNCTION_CAT",
@@ -1167,7 +1174,8 @@ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper {
}
@Override
public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)
throws SQLException {
return emptySet(con.cfg,
"PSEUDO_COLUMNS",
"TABLE_CAT",

View File

@@ -38,10 +38,6 @@ public class BytesArray {
this.size = size;
}
public BytesArray(String source) {
bytes(source);
}
public BytesArray(Bytes bytes) {
this(bytes.bytes(), 0, bytes.size());
}
@@ -84,12 +80,6 @@ public class BytesArray {
this.offset = ba.offset;
}
public void bytes(String from) {
size = 0;
offset = 0;
UnicodeUtil.UTF16toUTF8(from, 0, from.length(), this.bytes, 0);
}
public void size(int size) {
this.size = size;
}

View File

@@ -1,122 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.util;
import java.io.IOException;
import java.io.OutputStream;
/**
* Taken from Elasticsearch - copy of org.elasticsearch.common.io.FastByteArrayOutputStream with some enhancements, mainly in allowing access to the underlying byte[].
*
* Similar to {@link java.io.ByteArrayOutputStream} just not synced.
*/
public class FastByteArrayOutputStream extends OutputStream {
private BytesArray data;
/**
* Creates a new byte array output stream. The buffer capacity is
* initially 1024 bytes, though its size increases if necessary.
* <p>
* ES: We use 1024 bytes since we mainly use this to build json/smile
* content in memory, and rarely does the 32 byte default in ByteArrayOutputStream fits...
*/
public FastByteArrayOutputStream() {
this(1024);
}
/**
* Creates a new byte array output stream, with a buffer capacity of
* the specified size, in bytes.
*
* @param size the initial size.
*/
public FastByteArrayOutputStream(int size) {
Assert.isTrue(size >= 0, "Negative initial size: " + size);
data = new BytesArray(size);
}
public FastByteArrayOutputStream(BytesArray data) {
this.data = data;
}
/**
* Writes the specified byte to this byte array output stream.
*
* @param b the byte to be written.
*/
public void write(int b) {
data.add(b);
}
/**
* Writes <code>len</code> bytes from the specified byte array
* starting at offset <code>off</code> to this byte array output stream.
* <p>
* <b>NO checks for bounds, parameters must be ok!</b>
*
* @param b the data.
* @param off the start offset in the data.
* @param len the number of bytes to write.
*/
public void write(byte b[], int off, int len) {
data.add(b, off, len);
}
/**
* Writes the complete contents of this byte array output stream to
* the specified output stream argument, as if by calling the output
* stream's write method using <code>out.write(buf, 0, count)</code>.
*
* @param out the output stream to which to write the data.
* @throws IOException if an I/O error occurs.
*/
public void writeTo(OutputStream out) throws IOException {
out.write(data.bytes, 0, data.size);
}
public BytesArray bytes() {
return data;
}
public void setBytes(byte[] data, int size) {
this.data.bytes(data, size);
}
/**
* Returns the current size of the buffer.
*
* @return the value of the <code>count</code> field, which is the number
* of valid bytes in this output stream.
* @see java.io.ByteArrayOutputStream#count
*/
public long size() {
return data.length();
}
/**
* Resets the <code>count</code> field of this byte array output
* stream to zero, so that all currently accumulated output in the
* output stream is discarded. The output stream can be used again,
* reusing the already capacity buffer space.
*
* @see java.io.ByteArrayInputStream#count
*/
public void reset() {
data.reset();
}
public String toString() {
return data.toString();
}
/**
* Closing a <tt>ByteArrayOutputStream</tt> has no effect. The methods in
* this class can be called after the stream has been closed without
* generating an <tt>IOException</tt>.
*/
public void close() throws IOException {}
}
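
Since the class was unused, nothing replaces it; for reference, java.io.ByteArrayOutputStream (the synchronized JDK counterpart it copied) offers the same surface. A minimal sketch:

```java
import java.io.ByteArrayOutputStream;

public class BufferDemo {
    public static void main(String[] args) {
        // Same growable-buffer behavior, just with synchronized methods
        ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
        bos.write(42);
        bos.write(new byte[] {1, 2, 3}, 0, 3);
        System.out.println(bos.size()); // 4
        bos.reset(); // discard output, keep the allocated capacity
    }
}
```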

View File

@@ -1,79 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.util;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringReader;
import java.util.Properties;
public abstract class IOUtils {
public static Properties propsFromString(String source) {
Properties copy = new Properties();
if (source != null) {
try {
copy.load(new StringReader(source));
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
return copy;
}
public static void close(Closeable closable) {
if (closable != null) {
try {
closable.close();
} catch (IOException e) {
// silently ignore
}
}
}
public static String asString(InputStream in) throws IOException {
return asBytes(in).toString();
}
public static BytesArray asBytes(InputStream in) throws IOException {
BytesArray ba = unwrapStreamBuffer(in);
if (ba != null) {
return ba;
}
return asBytes(new BytesArray(in.available()), in);
}
public static BytesArray asBytes(BytesArray ba, InputStream input) throws IOException {
BytesArray buf = unwrapStreamBuffer(input);
if (buf != null) {
ba.bytes(buf);
return ba;
}
FastByteArrayOutputStream bos = new FastByteArrayOutputStream(ba);
byte[] buffer = new byte[1024];
int read = 0;
try (InputStream in = input) {
while ((read = in.read(buffer)) != -1) {
bos.write(buffer, 0, read);
}
} finally {
// non needed but used to avoid the warnings
bos.close();
}
return bos.bytes();
}
private static BytesArray unwrapStreamBuffer(InputStream in) {
if (in instanceof FastByteArrayInputStream) {
return ((FastByteArrayInputStream) in).data;
}
return null;
}
}
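
The only surviving caller was Debug, which now inlines the null-guarded close shown earlier. For new code, try-with-resources gives a near equivalent; this sketch uses PrintWriter as a stand-in for the DebugLog.print field, which is an assumption about its type:

```java
import java.io.PrintWriter;

public class CloseIdiomDemo {
    // Stand-in for the cached DebugLog writer; may legitimately be null
    static PrintWriter maybeWriter() {
        return null;
    }

    public static void main(String[] args) {
        // try-with-resources performs the same null check IOUtils.close did:
        // close() is only invoked on a non-null resource. Unlike IOUtils.close,
        // a checked exception from close() would propagate rather than be swallowed.
        try (PrintWriter print = maybeWriter()) {
            if (print != null) {
                print.println("debug line");
            }
        }
    }
}
```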

View File

@@ -1,72 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.jdbc.util;
public class UnicodeUtil {
public static final int UNI_SUR_HIGH_START = 0xD800;
public static final int UNI_SUR_HIGH_END = 0xDBFF;
public static final int UNI_SUR_LOW_START = 0xDC00;
public static final int UNI_SUR_LOW_END = 0xDFFF;
public static final int UNI_REPLACEMENT_CHAR = 0xFFFD;
private static final long HALF_SHIFT = 10;
private static final int SURROGATE_OFFSET = Character.MIN_SUPPLEMENTARY_CODE_POINT - (UNI_SUR_HIGH_START << HALF_SHIFT) - UNI_SUR_LOW_START;
/** Encode characters from this String, starting at offset
* for length characters. Output to the destination array
* will begin at {@code outOffset}. It is the responsibility of the
* caller to make sure that the destination array is large enough.
* <p>
* note this method returns the final output offset (outOffset + number of bytes written)
*/
public static int UTF16toUTF8(final CharSequence s, final int offset, final int length, byte[] out, int outOffset) {
final int end = offset + length;
int upto = outOffset;
for (int i = offset; i < end; i++) {
final int code = (int) s.charAt(i);
if (code < 0x80)
out[upto++] = (byte) code;
else if (code < 0x800) {
out[upto++] = (byte) (0xC0 | (code >> 6));
out[upto++] = (byte) (0x80 | (code & 0x3F));
}
else if (code < 0xD800 || code > 0xDFFF) {
out[upto++] = (byte) (0xE0 | (code >> 12));
out[upto++] = (byte) (0x80 | ((code >> 6) & 0x3F));
out[upto++] = (byte) (0x80 | (code & 0x3F));
}
else {
// surrogate pair
// confirm valid high surrogate
if (code < 0xDC00 && (i < end - 1)) {
int utf32 = (int) s.charAt(i + 1);
// confirm valid low surrogate and write pair
if (utf32 >= 0xDC00 && utf32 <= 0xDFFF) {
utf32 = (code << 10) + utf32 + SURROGATE_OFFSET;
i++;
out[upto++] = (byte) (0xF0 | (utf32 >> 18));
out[upto++] = (byte) (0x80 | ((utf32 >> 12) & 0x3F));
out[upto++] = (byte) (0x80 | ((utf32 >> 6) & 0x3F));
out[upto++] = (byte) (0x80 | (utf32 & 0x3F));
continue;
}
}
// replace unpaired surrogate or out-of-order low surrogate
// with substitution character
out[upto++] = (byte) 0xEF;
out[upto++] = (byte) 0xBF;
out[upto++] = (byte) 0xBD;
}
}
//assert matches(s, offset, length, out, upto);
return upto;
}
}
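
The deleted encoder duplicated what the JDK provides; for well-formed input, String.getBytes yields the same UTF-8 bytes (unpaired surrogates are replaced differently). A small sketch covering the 1-, 3-, and 4-byte paths:

```java
import java.nio.charset.StandardCharsets;

public class Utf16ToUtf8Demo {
    public static void main(String[] args) {
        // U+1D11E (musical G clef) is the surrogate pair D834 DD1E in UTF-16
        String s = "a\u20AC\uD834\uDD1E";
        for (byte b : s.getBytes(StandardCharsets.UTF_8)) {
            System.out.printf("%02X ", b & 0xFF); // 61 E2 82 AC F0 9D 84 9E
        }
        System.out.println();
    }
}
```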

View File

@@ -72,8 +72,8 @@ public class DataLoader {
}
bulk.append("}\n");
});
client.performRequest("POST", "/test_emp/emp/_bulk", singletonMap("refresh", "true"), new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
client.performRequest("POST", "/test_emp/emp/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
}
private static void csvToLines(String name, CheckedBiConsumer<List<String>, List<String>, Exception> consumeLine) throws Exception {

View File

@@ -81,8 +81,11 @@ public abstract class SpecBaseIntegrationTestCase extends JdbcIntegrationTestCas
}
/**
* Implementations should pay attention on using {@link #executeJdbcQuery(Connection, String)} (typically for ES connections)
* and {@link #assertResults(ResultSet, ResultSet)} which takes into account logging/debugging results (through {@link #logEsResultSet()}.
* Implementations should pay attention on using
* {@link #executeJdbcQuery(Connection, String)} (typically for
* ES connections) and {@link #assertResults(ResultSet, ResultSet)}
* which takes into account logging/debugging results (through
* {@link #logEsResultSet()}.
*/
protected abstract void doTest() throws Throwable;

View File

@@ -28,7 +28,8 @@ class SqlProtoHandler extends ProtoHandler<Response> {
SqlProtoHandler(Client client) {
super(client, response -> AbstractSqlServer.write(AbstractProto.CURRENT_VERSION, response));
this.server = new JdbcServer(TestUtils.planExecutor(client), clusterName, () -> info.getNode().getName(), info.getVersion(), info.getBuild());
this.server = new JdbcServer(TestUtils.planExecutor(client), clusterName, () -> info.getNode().getName(), info.getVersion(),
info.getBuild());
}
@Override