mirror of https://github.com/apache/druid.git
Explain plan for custom insert syntax (#12243)
* Initial commit, explain plan for custom insert syntax working
* Cleanup separate SqlInsert handling
parent eae163a797
commit 8fc0e5c95c
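
As a rough illustration of the feature (a sketch: "dst" is the target table used in the test changes below, and "source" stands in for the real source relation), the parser now accepts EXPLAIN over Druid's custom INSERT syntax:

    EXPLAIN PLAN FOR
    INSERT INTO dst
    SELECT * FROM source
    PARTITIONED BY DAY
    CLUSTERED BY 1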
@@ -383,6 +383,7 @@ data: {
   # Example: SqlShowDatabases(), SqlShowTables().
   statementParserMethods: [
     "DruidSqlInsert()"
+    "DruidSqlExplain()"
   ]

   # List of methods for parsing custom literals.
@@ -433,6 +434,7 @@ data: {
   # "dataTypeParserMethods".
   implementationFiles: [
     "insert.ftl"
+    "explain.ftl"
   ]

   includePosixOperators: false
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Parses an EXPLAIN PLAN statement, allowing Druid's custom statements as well.
+ * The main change from the SqlExplain() rule is that the statement following the EXPLAIN keywords can now also be
+ * a custom Druid statement, as reflected in the DruidQueryOrSqlQueryOrDml() production rule.
+ *
+ * Since this copies directly from SqlExplain(), it will need to be revisited when updating Calcite, to pick up
+ * any changes and improvements (e.g. adding another format, apart from JSON or XML, in which the explain plan
+ * output can be specified).
+ */
+SqlNode DruidSqlExplain() :
+{
+    SqlNode stmt;
+    SqlExplainLevel detailLevel = SqlExplainLevel.EXPPLAN_ATTRIBUTES;
+    SqlExplain.Depth depth;
+    final SqlExplainFormat format;
+}
+{
+    <EXPLAIN> <PLAN>
+    [ detailLevel = ExplainDetailLevel() ]
+    depth = ExplainDepth()
+    (
+        LOOKAHEAD(2)
+        <AS> <XML> { format = SqlExplainFormat.XML; }
+    |
+        <AS> <JSON> { format = SqlExplainFormat.JSON; }
+    |
+        { format = SqlExplainFormat.TEXT; }
+    )
+    <FOR> stmt = DruidQueryOrSqlQueryOrDml() {
+        return new SqlExplain(getPos(),
+            stmt,
+            detailLevel.symbol(SqlParserPos.ZERO),
+            depth.symbol(SqlParserPos.ZERO),
+            format.symbol(SqlParserPos.ZERO),
+            nDynamicParams);
+    }
+}
+
+SqlNode DruidQueryOrSqlQueryOrDml() :
+{
+    SqlNode stmt;
+}
+{
+    (
+        stmt = DruidSqlInsert()
+    |
+        stmt = SqlQueryOrDml()
+    )
+    {
+        return stmt;
+    }
+}
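
The DruidQueryOrSqlQueryOrDml() production first tries Druid's custom INSERT rule and then falls back to Calcite's standard SqlQueryOrDml(), so both of the following forms should now parse (a sketch assuming tables named foo and dst exist):

    EXPLAIN PLAN FOR SELECT * FROM foo

    EXPLAIN PLAN AS JSON FOR INSERT INTO dst SELECT * FROM foo PARTITIONED BY DAY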
@@ -758,7 +758,7 @@ public class DruidPlanner implements Closeable
     private final SqlExplain explain;

     @Nullable
-    private final SqlInsert insert;
+    private final DruidSqlInsert insert;

     private final SqlNode query;
@@ -767,7 +767,7 @@ public class DruidPlanner implements Closeable

     private ParsedNodes(
         @Nullable SqlExplain explain,
-        @Nullable SqlInsert insert,
+        @Nullable DruidSqlInsert insert,
         SqlNode query,
         @Nullable Granularity ingestionGranularity
     )
@@ -781,7 +781,7 @@ public class DruidPlanner implements Closeable
    static ParsedNodes create(final SqlNode node) throws ValidationException
    {
      SqlExplain explain = null;
-     SqlInsert insert = null;
+     DruidSqlInsert druidSqlInsert = null;
      SqlNode query = node;
      Granularity ingestionGranularity = null;

@@ -791,8 +791,8 @@ public class DruidPlanner implements Closeable
      }

      if (query.getKind() == SqlKind.INSERT) {
-       insert = (SqlInsert) query;
-       query = insert.getSource();
+       druidSqlInsert = (DruidSqlInsert) query;
+       query = druidSqlInsert.getSource();

        // Check if ORDER BY clause is not provided to the underlying query
        if (query instanceof SqlOrderBy) {
@@ -803,37 +803,31 @@ public class DruidPlanner implements Closeable
        }
      }

-     // Processing to be done when the original query has either of the PARTITIONED BY or CLUSTERED BY clause
-     // The following condition should always be true however added defensively
-     if (insert instanceof DruidSqlInsert) {
-       DruidSqlInsert druidSqlInsert = (DruidSqlInsert) insert;
-       ingestionGranularity = druidSqlInsert.getPartitionedBy();
+     ingestionGranularity = druidSqlInsert.getPartitionedBy();

-       if (druidSqlInsert.getClusteredBy() != null) {
-         // If we have a CLUSTERED BY clause, extract the information in that CLUSTERED BY and create a new SqlOrderBy
-         // node
-         SqlNode offset = null;
-         SqlNode fetch = null;
+     if (druidSqlInsert.getClusteredBy() != null) {
+       // If we have a CLUSTERED BY clause, extract the information in that CLUSTERED BY and create a new SqlOrderBy
+       // node
+       SqlNode offset = null;
+       SqlNode fetch = null;

-         if (query instanceof SqlOrderBy) {
-           SqlOrderBy sqlOrderBy = (SqlOrderBy) query;
-           // This represents the underlying query free of OFFSET, FETCH and ORDER BY clauses
-           // For a sqlOrderBy.query like "SELECT dim1, sum(dim2) FROM foo OFFSET 10 FETCH 30 ORDER BY dim1 GROUP BY dim1
-           // this would represent the "SELECT dim1, sum(dim2) from foo GROUP BY dim1
-           query = sqlOrderBy.query;
-           offset = sqlOrderBy.offset;
-           fetch = sqlOrderBy.fetch;
-         }
-         // Creates a new SqlOrderBy query, which may have our CLUSTERED BY overwritten
-         query = new SqlOrderBy(
-             query.getParserPosition(),
-             query,
-             druidSqlInsert.getClusteredBy(),
-             offset,
-             fetch
-         );
-       }
-     }
+       if (query instanceof SqlOrderBy) {
+         SqlOrderBy sqlOrderBy = (SqlOrderBy) query;
+         // This represents the underlying query, free of OFFSET, FETCH and ORDER BY clauses
+         // For a sqlOrderBy.query like "SELECT dim1, sum(dim2) FROM foo OFFSET 10 FETCH 30 ORDER BY dim1 GROUP BY dim1",
+         // this would represent "SELECT dim1, sum(dim2) FROM foo GROUP BY dim1"
+         query = sqlOrderBy.query;
+         offset = sqlOrderBy.offset;
+         fetch = sqlOrderBy.fetch;
+       }
+       // Creates a new SqlOrderBy query, which may have our CLUSTERED BY overwritten
+       query = new SqlOrderBy(
+           query.getParserPosition(),
+           query,
+           druidSqlInsert.getClusteredBy(),
+           offset,
+           fetch
+       );
+     }

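In effect, this block folds the CLUSTERED BY columns into an ORDER BY on the INSERT's source query, while PARTITIONED BY is carried separately as the ingestion granularity. As a sketch with hypothetical columns, a statement like

    INSERT INTO dst SELECT dim1, dim2 FROM foo PARTITIONED BY DAY CLUSTERED BY dim1

is planned as if its source query were

    SELECT dim1, dim2 FROM foo ORDER BY dim1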
@@ -841,7 +835,7 @@ public class DruidPlanner implements Closeable
        throw new ValidationException(StringUtils.format("Cannot execute [%s].", query.getKind()));
      }

-     return new ParsedNodes(explain, insert, query, ingestionGranularity);
+     return new ParsedNodes(explain, druidSqlInsert, query, ingestionGranularity);
    }

    @Nullable
@@ -851,7 +845,7 @@ public class DruidPlanner implements Closeable
    }

    @Nullable
-   public SqlInsert getInsertNode()
+   public DruidSqlInsert getInsertNode()
    {
      return insert;
    }
@@ -62,7 +62,6 @@ import org.hamcrest.Matcher;
 import org.hamcrest.MatcherAssert;
 import org.junit.After;
 import org.junit.Assert;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.internal.matchers.ThrowableMessageMatcher;

@@ -559,27 +558,6 @@ public class CalciteInsertDmlTest extends BaseCalciteQueryTest
     didTest = true;
   }

-  // Currently EXPLAIN PLAN FOR doesn't work with the modified syntax
-  @Ignore
-  @Test
-  public void testExplainInsertWithPartitionedByAndClusteredBy()
-  {
-    Assert.assertThrows(
-        SqlPlanningException.class,
-        () ->
-            testQuery(
-                StringUtils.format(
-                    "EXPLAIN PLAN FOR INSERT INTO dst SELECT * FROM %s PARTITIONED BY DAY CLUSTERED BY 1",
-                    externSql(externalDataSource)
-                ),
-                ImmutableList.of(),
-                ImmutableList.of()
-            )
-    );
-    didTest = true;
-  }
-
-  @Ignore
   @Test
   public void testExplainInsertFromExternal() throws Exception
   {
@@ -592,7 +570,7 @@ public class CalciteInsertDmlTest extends BaseCalciteQueryTest
                 .columns("x", "y", "z")
                 .context(
                     queryJsonMapper.readValue(
-                        "{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\",\"vectorize\":\"false\",\"vectorizeVirtualColumns\":\"false\"}",
+                        "{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlInsertSegmentGranularity\":\"{\\\"type\\\":\\\"all\\\"}\",\"sqlQueryId\":\"dummy\",\"vectorize\":\"false\",\"vectorizeVirtualColumns\":\"false\"}",
                         JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT
                     )
                 )
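
The expected query context in this test now also carries the serialized ingestion granularity under the sqlInsertSegmentGranularity key; unescaped, the added entry reads

    "sqlInsertSegmentGranularity": "{\"type\":\"all\"}"

where {"type":"all"} is the JSON form of Druid's ALL granularity.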
@@ -624,7 +602,6 @@ public class CalciteInsertDmlTest extends BaseCalciteQueryTest
     didTest = true;
   }

-  @Ignore
   @Test
   public void testExplainInsertFromExternalUnauthorized()
   {