mirror of https://github.com/apache/druid.git
Explain plan for custom insert syntax (#12243)
* Initial commit, explain plan for custom insert syntax working
* Cleanup separate SqlInsert handling
parent eae163a797
commit 8fc0e5c95c
@@ -383,6 +383,7 @@ data: {
   # Example: SqlShowDatabases(), SqlShowTables().
   statementParserMethods: [
     "DruidSqlInsert()"
+    "DruidSqlExplain()"
   ]

   # List of methods for parsing custom literals.
@@ -433,6 +434,7 @@ data: {
   # "dataTypeParserMethods".
   implementationFiles: [
     "insert.ftl"
+    "explain.ftl"
   ]

   includePosixOperators: false
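Registering "DruidSqlExplain()" in statementParserMethods and "explain.ftl" in implementationFiles is what wires the new rule into the generated parser. As a rough illustration of the statement shapes this enables (the table and source names below are placeholders, not identifiers from this commit; the INSERT form mirrors the statement exercised in CalciteInsertDmlTest later in this diff):

    // Sketch only: example statements the extended grammar is meant to accept.
    // "dst" and "foo" are placeholder names.
    public class ExplainSyntaxExamples
    {
      public static void main(String[] args)
      {
        // Plain query, handled by the SqlQueryOrDml() branch of the new grammar.
        String explainQuery = "EXPLAIN PLAN FOR SELECT * FROM foo";
        // Custom Druid INSERT, handled by the DruidSqlInsert() branch.
        String explainInsert =
            "EXPLAIN PLAN FOR INSERT INTO dst SELECT * FROM foo PARTITIONED BY DAY";
        System.out.println(explainQuery);
        System.out.println(explainInsert);
      }
    }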
explain.ftl (new file):

@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Parses an EXPLAIN PLAN statement. Also allows Druid's custom statements to be explained.
+ * The main change from the SqlExplain() rule is that the statement being explained can now also be a
+ * custom Druid statement, as reflected in the DruidQueryOrSqlQueryOrDml() production rule.
+ *
+ * Since this copies directly from SqlExplain(), it will need to be revisited when upgrading Calcite to pick up
+ * any changes and improvements (e.g. adding another format besides JSON or XML in which the
+ * explain plan output can be specified).
+ */
+SqlNode DruidSqlExplain() :
+{
+    SqlNode stmt;
+    SqlExplainLevel detailLevel = SqlExplainLevel.EXPPLAN_ATTRIBUTES;
+    SqlExplain.Depth depth;
+    final SqlExplainFormat format;
+}
+{
+    <EXPLAIN> <PLAN>
+    [ detailLevel = ExplainDetailLevel() ]
+    depth = ExplainDepth()
+    (
+        LOOKAHEAD(2)
+        <AS> <XML> { format = SqlExplainFormat.XML; }
+    |
+        <AS> <JSON> { format = SqlExplainFormat.JSON; }
+    |
+        { format = SqlExplainFormat.TEXT; }
+    )
+    <FOR> stmt = DruidQueryOrSqlQueryOrDml() {
+        return new SqlExplain(getPos(),
+            stmt,
+            detailLevel.symbol(SqlParserPos.ZERO),
+            depth.symbol(SqlParserPos.ZERO),
+            format.symbol(SqlParserPos.ZERO),
+            nDynamicParams);
+    }
+}
+
+SqlNode DruidQueryOrSqlQueryOrDml() :
+{
+    SqlNode stmt;
+}
+{
+    (
+        stmt = DruidSqlInsert()
+    |
+        stmt = SqlQueryOrDml()
+    )
+    {
+        return stmt;
+    }
+}
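The grammar keeps Calcite's detail-level, depth, and format handling and only swaps the production after FOR, trying DruidSqlInsert() before falling back to SqlQueryOrDml(). A minimal sketch (not code from this commit, using only Calcite's public SqlExplain API) of how a consumer can get at the statement being explained:

    // Minimal sketch, assuming only Calcite's public API; not part of this commit.
    import org.apache.calcite.sql.SqlExplain;
    import org.apache.calcite.sql.SqlKind;
    import org.apache.calcite.sql.SqlNode;

    final class ExplainUnwrapSketch
    {
      // Returns the statement after FOR (e.g. a DruidSqlInsert) when given an EXPLAIN node.
      static SqlNode statementBeingExplained(SqlNode parsed)
      {
        if (parsed.getKind() == SqlKind.EXPLAIN) {
          return ((SqlExplain) parsed).getExplicandum();
        }
        return parsed;
      }
    }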
@@ -758,7 +758,7 @@ public class DruidPlanner implements Closeable
     private final SqlExplain explain;

     @Nullable
-    private final SqlInsert insert;
+    private final DruidSqlInsert insert;

     private final SqlNode query;

@@ -767,7 +767,7 @@ public class DruidPlanner implements Closeable

     private ParsedNodes(
         @Nullable SqlExplain explain,
-        @Nullable SqlInsert insert,
+        @Nullable DruidSqlInsert insert,
         SqlNode query,
         @Nullable Granularity ingestionGranularity
     )

@@ -781,7 +781,7 @@ public class DruidPlanner implements Closeable
     static ParsedNodes create(final SqlNode node) throws ValidationException
     {
       SqlExplain explain = null;
-      SqlInsert insert = null;
+      DruidSqlInsert druidSqlInsert = null;
       SqlNode query = node;
       Granularity ingestionGranularity = null;

@@ -791,8 +791,8 @@ public class DruidPlanner implements Closeable
       }

       if (query.getKind() == SqlKind.INSERT) {
-        insert = (SqlInsert) query;
-        query = insert.getSource();
+        druidSqlInsert = (DruidSqlInsert) query;
+        query = druidSqlInsert.getSource();

         // Check if ORDER BY clause is not provided to the underlying query
         if (query instanceof SqlOrderBy) {

@@ -803,11 +803,6 @@ public class DruidPlanner implements Closeable
         }
       }

-      // Processing to be done when the original query has either of the PARTITIONED BY or CLUSTERED BY clause
-      // The following condition should always be true however added defensively
-      if (insert instanceof DruidSqlInsert) {
-        DruidSqlInsert druidSqlInsert = (DruidSqlInsert) insert;
-
       ingestionGranularity = druidSqlInsert.getPartitionedBy();

       if (druidSqlInsert.getClusteredBy() != null) {

@@ -835,13 +830,12 @@ public class DruidPlanner implements Closeable
           );
         }
       }
-      }

       if (!query.isA(SqlKind.QUERY)) {
         throw new ValidationException(StringUtils.format("Cannot execute [%s].", query.getKind()));
       }

-      return new ParsedNodes(explain, insert, query, ingestionGranularity);
+      return new ParsedNodes(explain, druidSqlInsert, query, ingestionGranularity);
     }

     @Nullable

@@ -851,7 +845,7 @@ public class DruidPlanner implements Closeable
     }

     @Nullable
-    public SqlInsert getInsertNode()
+    public DruidSqlInsert getInsertNode()
     {
       return insert;
     }
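With the insert field and getInsertNode() now typed as DruidSqlInsert, callers can read the PARTITIONED BY and CLUSTERED BY details directly, which is why the defensive instanceof/cast block above could be dropped. A hypothetical caller sketch (the package names in the imports are assumptions, not taken from this diff):

    // Hypothetical usage sketch; the imports below are assumed package locations.
    import org.apache.druid.java.util.common.granularity.Granularity;
    import org.apache.druid.sql.calcite.parser.DruidSqlInsert;

    final class InsertNodeSketch
    {
      // Reads the ingestion granularity straight off the typed insert node.
      static Granularity segmentGranularityOf(DruidSqlInsert insert)
      {
        // getPartitionedBy() carries the PARTITIONED BY granularity; getClusteredBy()
        // is null when no CLUSTERED BY clause was given (both per the planner code above).
        return insert == null ? null : insert.getPartitionedBy();
      }
    }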
@@ -62,7 +62,6 @@ import org.hamcrest.Matcher;
 import org.hamcrest.MatcherAssert;
 import org.junit.After;
 import org.junit.Assert;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.internal.matchers.ThrowableMessageMatcher;

@@ -559,27 +558,6 @@ public class CalciteInsertDmlTest extends BaseCalciteQueryTest
     didTest = true;
   }

-  // Currently EXPLAIN PLAN FOR doesn't work with the modified syntax
-  @Ignore
-  @Test
-  public void testExplainInsertWithPartitionedByAndClusteredBy()
-  {
-    Assert.assertThrows(
-        SqlPlanningException.class,
-        () ->
-            testQuery(
-                StringUtils.format(
-                    "EXPLAIN PLAN FOR INSERT INTO dst SELECT * FROM %s PARTITIONED BY DAY CLUSTERED BY 1",
-                    externSql(externalDataSource)
-                ),
-                ImmutableList.of(),
-                ImmutableList.of()
-            )
-    );
-    didTest = true;
-  }
-
-  @Ignore
   @Test
   public void testExplainInsertFromExternal() throws Exception
   {

@@ -592,7 +570,7 @@ public class CalciteInsertDmlTest extends BaseCalciteQueryTest
         .columns("x", "y", "z")
         .context(
             queryJsonMapper.readValue(
-                "{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\",\"vectorize\":\"false\",\"vectorizeVirtualColumns\":\"false\"}",
+                "{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlInsertSegmentGranularity\":\"{\\\"type\\\":\\\"all\\\"}\",\"sqlQueryId\":\"dummy\",\"vectorize\":\"false\",\"vectorizeVirtualColumns\":\"false\"}",
                 JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT
             )
         )

@@ -624,7 +602,6 @@ public class CalciteInsertDmlTest extends BaseCalciteQueryTest
     didTest = true;
   }

-  @Ignore
   @Test
   public void testExplainInsertFromExternalUnauthorized()
   {
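The updated expectation in testExplainInsertFromExternal shows the user-visible effect: the PARTITIONED BY granularity now travels in the query context under sqlInsertSegmentGranularity, serialized as a nested JSON string. A small sketch of producing that context value with Jackson (generic mapper usage, not Druid's internal plumbing):

    // Sketch: building the context entry the test expects ({"type":"all"} as a nested JSON string).
    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class SegmentGranularityContextSketch
    {
      public static void main(String[] args) throws Exception
      {
        ObjectMapper mapper = new ObjectMapper();
        Map<String, Object> context = new LinkedHashMap<>();
        // The "all" granularity serializes to {"type":"all"}; written literally here for brevity.
        context.put("sqlInsertSegmentGranularity", "{\"type\":\"all\"}");
        // Prints: {"sqlInsertSegmentGranularity":"{\"type\":\"all\"}"}
        System.out.println(mapper.writeValueAsString(context));
      }
    }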