mirror of https://github.com/apache/druid.git
SQL INSERT planner support. (#11959)
* SQL INSERT planner support. The main changes are:

1) DruidPlanner is able to validate and authorize INSERT queries. They require WRITE permission on the target datasource.
2) QueryMaker is now an interface, and there is a QueryMakerFactory that creates instances of it. There is only one production implementation of each (NativeQueryMaker and NativeQueryMakerFactory), which together behave the same way as the former QueryMaker class. But this opens the door to executing queries in ways other than the Druid query stack, and is used by unit tests (CalciteInsertDmlTest) to test the INSERT planning functionality.
3) Adds an EXTERN table macro that allows referencing external data using InputSource and InputFormat from Druid's batch ingestion API. This is not exposed in production yet, but is used by unit tests.
4) Adds a QueryFeature concept that enables the planner to change its behavior slightly depending on the capabilities of the execution system.
5) Adds an "AuthorizableOperator" concept that enables SqlOperators to require additional permissions. This is used by the EXTERN table macro.

Related odds and ends:
- Add equals, hashCode, toString methods to InlineInputSource. Aids in the "from external" tests in CalciteInsertDmlTest.
- Add JSON-serializability to RowSignature.
- Move the SQL string inside PlannerContext so it is "baked into" the planner when the planner is created. Cleans up the code a bit, since in practice, the same query is passed in every time to the same planner anyway.

* Fix up calls to CalciteTests.createMockQueryLifecycleFactory.
* Fix checkstyle issues.
* Adjustments for CI.
* Adjust DruidAvaticaHandlerTest for stricter test authorizations.
This commit is contained in:
parent bb3d2a433a
commit 0354407655
|
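To make the new flow concrete, here is an illustrative sketch of the kind of statement the planner can now validate, together with the updated planner API. It mirrors the benchmark and CalciteInsertDmlTest-style code in this diff; the JSON payloads are only examples, and plannerFactory/context are assumed to be set up as in the benchmark code below.

// Illustrative only. The EXTERN macro takes three JSON strings: an InputSource,
// an InputFormat, and a RowSignature.
final String sql =
    "INSERT INTO dst SELECT * FROM TABLE(EXTERN("
    + "'{\"type\":\"inline\",\"data\":\"a,b,1\\nc,d,2\"}', "    // inputSource
    + "'{\"type\":\"csv\",\"columns\":[\"x\",\"y\",\"z\"]}', "  // inputFormat
    + "'[{\"name\":\"x\",\"type\":\"STRING\"},{\"name\":\"y\",\"type\":\"STRING\"},{\"name\":\"z\",\"type\":\"LONG\"}]'"  // signature
    + "))";

// The SQL string is now baked into the planner at creation time, so plan() takes no argument.
try (final DruidPlanner planner = plannerFactory.createPlannerForTesting(context, sql)) {
  final PlannerResult plannerResult = planner.plan();
  final Sequence<Object[]> resultSequence = plannerResult.run();
}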
@ -425,7 +425,7 @@ public class SqlBenchmark
|
|||
CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
|
||||
plannerFactory = new PlannerFactory(
|
||||
rootSchema,
|
||||
CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
|
||||
CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
|
||||
createOperatorTable(),
|
||||
CalciteTests.createExprMacroTable(),
|
||||
plannerConfig,
|
||||
|
@ -467,7 +467,7 @@ public class SqlBenchmark
|
|||
);
|
||||
final String sql = QUERIES.get(Integer.parseInt(query));
|
||||
try (final DruidPlanner planner = plannerFactory.createPlannerForTesting(context, sql)) {
|
||||
final PlannerResult plannerResult = planner.plan(sql);
|
||||
final PlannerResult plannerResult = planner.plan();
|
||||
final Sequence<Object[]> resultSequence = plannerResult.run();
|
||||
final Object[] lastRow = resultSequence.accumulate(null, (accumulated, in) -> in);
|
||||
blackhole.consume(lastRow);
|
||||
|
@ -485,7 +485,7 @@ public class SqlBenchmark
|
|||
);
|
||||
final String sql = QUERIES.get(Integer.parseInt(query));
|
||||
try (final DruidPlanner planner = plannerFactory.createPlannerForTesting(context, sql)) {
|
||||
final PlannerResult plannerResult = planner.plan(sql);
|
||||
final PlannerResult plannerResult = planner.plan();
|
||||
blackhole.consume(plannerResult);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -268,7 +268,7 @@ public class SqlExpressionBenchmark
|
|||
CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
|
||||
plannerFactory = new PlannerFactory(
|
||||
rootSchema,
|
||||
CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
|
||||
CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
|
||||
CalciteTests.createOperatorTable(),
|
||||
CalciteTests.createExprMacroTable(),
|
||||
plannerConfig,
|
||||
|
@ -305,7 +305,7 @@ public class SqlExpressionBenchmark
|
|||
);
|
||||
final String sql = QUERIES.get(Integer.parseInt(query));
|
||||
try (final DruidPlanner planner = plannerFactory.createPlannerForTesting(context, sql)) {
|
||||
final PlannerResult plannerResult = planner.plan(sql);
|
||||
final PlannerResult plannerResult = planner.plan();
|
||||
final Sequence<Object[]> resultSequence = plannerResult.run();
|
||||
final Object[] lastRow = resultSequence.accumulate(null, (accumulated, in) -> in);
|
||||
blackhole.consume(lastRow);
|
||||
|
|
|
@ -115,7 +115,7 @@ public class SqlVsNativeBenchmark
|
|||
CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
|
||||
plannerFactory = new PlannerFactory(
|
||||
rootSchema,
|
||||
CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
|
||||
CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
|
||||
CalciteTests.createOperatorTable(),
|
||||
CalciteTests.createExprMacroTable(),
|
||||
plannerConfig,
|
||||
|
@ -162,7 +162,7 @@ public class SqlVsNativeBenchmark
|
|||
public void queryPlanner(Blackhole blackhole) throws Exception
|
||||
{
|
||||
try (final DruidPlanner planner = plannerFactory.createPlannerForTesting(null, sqlQuery)) {
|
||||
final PlannerResult plannerResult = planner.plan(sqlQuery);
|
||||
final PlannerResult plannerResult = planner.plan();
|
||||
final Sequence<Object[]> resultSequence = plannerResult.run();
|
||||
final Object[] lastRow = resultSequence.accumulate(null, (accumulated, in) -> in);
|
||||
blackhole.consume(lastRow);
|
||||
|
|
|
@ -30,6 +30,7 @@ import org.apache.druid.java.util.common.StringUtils;
|
|||
|
||||
import javax.annotation.Nullable;
|
||||
import java.io.File;
|
||||
import java.util.Objects;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
public class InlineInputSource extends AbstractInputSource
|
||||
|
@ -75,4 +76,31 @@ public class InlineInputSource extends AbstractInputSource
|
|||
temporaryDirectory
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o)
|
||||
{
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
return false;
|
||||
}
|
||||
InlineInputSource that = (InlineInputSource) o;
|
||||
return Objects.equals(data, that.data);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode()
|
||||
{
|
||||
return Objects.hash(data);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString()
|
||||
{
|
||||
return "InlineInputSource{" +
|
||||
"data='" + data + '\'' +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,79 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.druid.segment.column;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonCreator;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
import org.apache.druid.java.util.common.IAE;
|
||||
|
||||
import javax.annotation.Nullable;
|
||||
|
||||
/**
|
||||
* Class used by {@link RowSignature} for serialization.
|
||||
*
|
||||
* Package-private since it is not intended to be used outside that narrow use case. In other cases where passing
|
||||
* around information about column types is important, use {@link ColumnType} instead.
|
||||
*/
|
||||
class ColumnSignature
|
||||
{
|
||||
private final String name;
|
||||
|
||||
@Nullable
|
||||
private final ColumnType type;
|
||||
|
||||
@JsonCreator
|
||||
ColumnSignature(
|
||||
@JsonProperty("name") String name,
|
||||
@JsonProperty("type") @Nullable ColumnType type
|
||||
)
|
||||
{
|
||||
this.name = name;
|
||||
this.type = type;
|
||||
|
||||
// Name must be nonnull, but type can be null (if the type is unknown)
|
||||
if (name == null || name.isEmpty()) {
|
||||
throw new IAE("Column name must be non-empty");
|
||||
}
|
||||
}
|
||||
|
||||
@JsonProperty("name")
|
||||
String name()
|
||||
{
|
||||
return name;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
@JsonProperty("type")
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
ColumnType type()
|
||||
{
|
||||
return type;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString()
|
||||
{
|
||||
return "ColumnSignature{" +
|
||||
"name='" + name + '\'' +
|
||||
", type=" + type +
|
||||
'}';
|
||||
}
|
||||
}
|
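For reference, each ColumnSignature serializes to a small JSON object, and a column of unknown type simply omits the type field (the same shapes appear in RowSignatureTest later in this diff):

{"name":"s","type":"STRING"}
{"name":"u"}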
|
@ -19,12 +19,13 @@
|
|||
|
||||
package org.apache.druid.segment.column;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonCreator;
|
||||
import com.fasterxml.jackson.annotation.JsonValue;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import it.unimi.dsi.fastutil.objects.Object2IntMap;
|
||||
import it.unimi.dsi.fastutil.objects.Object2IntOpenHashMap;
|
||||
import org.apache.druid.java.util.common.IAE;
|
||||
import org.apache.druid.java.util.common.Pair;
|
||||
import org.apache.druid.query.aggregation.AggregatorFactory;
|
||||
import org.apache.druid.query.aggregation.PostAggregator;
|
||||
import org.apache.druid.query.dimension.DimensionSpec;
|
||||
|
@ -40,9 +41,7 @@ import java.util.Objects;
|
|||
import java.util.Optional;
|
||||
|
||||
/**
|
||||
* Type signature for a row in a Druid datasource or query result. Rows have an ordering and every
|
||||
* column has a defined type. This is a little bit of a fiction in the Druid world (where rows do not _actually_ have
|
||||
* well defined types) but we do impose types for the SQL layer.
|
||||
* Type signature for a row in a Druid datasource or query result.
|
||||
*
|
||||
* @see org.apache.druid.query.QueryToolChest#resultArraySignature which returns signatures for query results
|
||||
* @see org.apache.druid.query.InlineDataSource#getRowSignature which returns signatures for inline datasources
|
||||
|
@ -55,30 +54,42 @@ public class RowSignature implements ColumnInspector
|
|||
private final Object2IntMap<String> columnPositions = new Object2IntOpenHashMap<>();
|
||||
private final List<String> columnNames;
|
||||
|
||||
private RowSignature(final List<Pair<String, ColumnType>> columnTypeList)
|
||||
private RowSignature(final List<ColumnSignature> columnTypeList)
|
||||
{
|
||||
this.columnPositions.defaultReturnValue(-1);
|
||||
|
||||
final ImmutableList.Builder<String> columnNamesBuilder = ImmutableList.builder();
|
||||
|
||||
for (int i = 0; i < columnTypeList.size(); i++) {
|
||||
final Pair<String, ColumnType> pair = columnTypeList.get(i);
|
||||
final ColumnType existingType = columnTypes.get(pair.lhs);
|
||||
final ColumnSignature sig = columnTypeList.get(i);
|
||||
final ColumnType existingType = columnTypes.get(sig.name());
|
||||
|
||||
if (columnTypes.containsKey(pair.lhs) && existingType != pair.rhs) {
|
||||
if (columnTypes.containsKey(sig.name()) && !Objects.equals(existingType, sig.type())) {
|
||||
// It's ok to add the same column twice as long as the type is consistent.
|
||||
// Note: we need the containsKey because the existingType might be present, but null.
|
||||
throw new IAE("Column[%s] has conflicting types [%s] and [%s]", pair.lhs, existingType, pair.rhs);
|
||||
throw new IAE("Column[%s] has conflicting types [%s] and [%s]", sig.name(), existingType, sig.type());
|
||||
}
|
||||
|
||||
columnTypes.put(pair.lhs, pair.rhs);
|
||||
columnPositions.put(pair.lhs, i);
|
||||
columnNamesBuilder.add(pair.lhs);
|
||||
columnTypes.put(sig.name(), sig.type());
|
||||
columnPositions.put(sig.name(), i);
|
||||
columnNamesBuilder.add(sig.name());
|
||||
}
|
||||
|
||||
this.columnNames = columnNamesBuilder.build();
|
||||
}
|
||||
|
||||
@JsonCreator
|
||||
static RowSignature fromColumnSignatures(final List<ColumnSignature> columnSignatures)
|
||||
{
|
||||
final Builder builder = builder();
|
||||
|
||||
for (final ColumnSignature columnSignature : columnSignatures) {
|
||||
builder.add(columnSignature.name(), columnSignature.type());
|
||||
}
|
||||
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
public static Builder builder()
|
||||
{
|
||||
return new Builder();
|
||||
|
@ -158,6 +169,19 @@ public class RowSignature implements ColumnInspector
|
|||
return columnPositions.applyAsInt(columnName);
|
||||
}
|
||||
|
||||
@JsonValue
|
||||
private List<ColumnSignature> asColumnSignatures()
|
||||
{
|
||||
final List<ColumnSignature> retVal = new ArrayList<>();
|
||||
|
||||
for (String columnName : columnNames) {
|
||||
final ColumnType type = columnTypes.get(columnName);
|
||||
retVal.add(new ColumnSignature(columnName, type));
|
||||
}
|
||||
|
||||
return retVal;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o)
|
||||
{
|
||||
|
@ -207,7 +231,7 @@ public class RowSignature implements ColumnInspector
|
|||
|
||||
public static class Builder
|
||||
{
|
||||
private final List<Pair<String, ColumnType>> columnTypeList;
|
||||
private final List<ColumnSignature> columnTypeList;
|
||||
|
||||
private Builder()
|
||||
{
|
||||
|
@ -216,21 +240,21 @@ public class RowSignature implements ColumnInspector
|
|||
|
||||
/**
|
||||
* Add a column to this signature.
|
||||
* @param columnName name, must be nonnull
|
||||
*
|
||||
* @param columnName name, must be nonnull
|
||||
* @param columnType type, may be null if unknown
|
||||
*/
|
||||
public Builder add(final String columnName, @Nullable final ColumnType columnType)
|
||||
{
|
||||
// Name must be nonnull, but type can be null (if the type is unknown)
|
||||
Preconditions.checkNotNull(columnName, "'columnName' must be non-null");
|
||||
columnTypeList.add(Pair.of(columnName, columnType));
|
||||
columnTypeList.add(new ColumnSignature(columnName, columnType));
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder addAll(final RowSignature other)
|
||||
{
|
||||
for (String columnName : other.getColumnNames()) {
|
||||
add(columnName, other.getColumnType(columnName).orElse(null));
|
||||
final List<String> names = other.getColumnNames();
|
||||
for (int i = 0; i < names.size(); i++) {
|
||||
add(names.get(i), other.getColumnType(i).orElse(null));
|
||||
}
|
||||
|
||||
return this;
|
||||
|
|
|
@ -0,0 +1,133 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.druid.segment.column;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import org.apache.druid.segment.TestHelper;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class RowSignatureTest
|
||||
{
|
||||
@Test
|
||||
public void test_add_withConflict()
|
||||
{
|
||||
final RowSignature.Builder builder =
|
||||
RowSignature.builder()
|
||||
.add("s", ColumnType.STRING)
|
||||
.add("d", ColumnType.DOUBLE)
|
||||
.add("d", ColumnType.LONG);
|
||||
|
||||
Assert.assertThrows(
|
||||
"Column [d] has conflicting types",
|
||||
IllegalArgumentException.class,
|
||||
builder::build
|
||||
);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void test_addAll()
|
||||
{
|
||||
final RowSignature expectedSignature =
|
||||
RowSignature.builder()
|
||||
.add("s", ColumnType.STRING)
|
||||
.add("d", ColumnType.DOUBLE)
|
||||
.add("l", ColumnType.LONG)
|
||||
.build();
|
||||
|
||||
final RowSignature signature =
|
||||
RowSignature.builder()
|
||||
.addAll(RowSignature.builder().add("s", ColumnType.STRING).add("d", ColumnType.DOUBLE).build())
|
||||
.addAll(RowSignature.builder().add("l", ColumnType.LONG).build())
|
||||
.build();
|
||||
|
||||
Assert.assertEquals(expectedSignature, signature);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void test_addAll_withOverlap()
|
||||
{
|
||||
final RowSignature expectedSignature =
|
||||
RowSignature.builder()
|
||||
.add("s", ColumnType.STRING)
|
||||
.add("d", ColumnType.DOUBLE)
|
||||
.add("d", ColumnType.DOUBLE)
|
||||
.build();
|
||||
|
||||
final RowSignature signature =
|
||||
RowSignature.builder()
|
||||
.addAll(RowSignature.builder().add("s", ColumnType.STRING).add("d", ColumnType.DOUBLE).build())
|
||||
.addAll(RowSignature.builder().add("d", ColumnType.DOUBLE).build())
|
||||
.build();
|
||||
|
||||
Assert.assertEquals(ImmutableList.of("s", "d", "d"), expectedSignature.getColumnNames());
|
||||
Assert.assertEquals(expectedSignature, signature);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void test_json() throws IOException
|
||||
{
|
||||
final String signatureString =
|
||||
"[{\"name\":\"s\",\"type\":\"STRING\"},"
|
||||
+ "{\"name\":\"d\",\"type\":\"DOUBLE\"},"
|
||||
+ "{\"name\":\"f\",\"type\":\"FLOAT\"},"
|
||||
+ "{\"name\":\"l\",\"type\":\"LONG\"},"
|
||||
+ "{\"name\":\"u\"},"
|
||||
+ "{\"name\":\"c\",\"type\":\"COMPLEX\"},"
|
||||
+ "{\"name\":\"cf\",\"type\":\"COMPLEX<foo>\"},"
|
||||
+ "{\"name\":\"as\",\"type\":\"ARRAY<STRING>\"}"
|
||||
+ "]";
|
||||
|
||||
final ObjectMapper mapper = TestHelper.makeJsonMapper();
|
||||
final RowSignature signature = mapper.readValue(signatureString, RowSignature.class);
|
||||
Assert.assertEquals(signatureString, mapper.writeValueAsString(signature));
|
||||
Assert.assertEquals(
|
||||
RowSignature.builder()
|
||||
.add("s", ColumnType.STRING)
|
||||
.add("d", ColumnType.DOUBLE)
|
||||
.add("f", ColumnType.FLOAT)
|
||||
.add("l", ColumnType.LONG)
|
||||
.add("u", null)
|
||||
.add("c", ColumnType.UNKNOWN_COMPLEX)
|
||||
.add("cf", ColumnType.ofComplex("foo"))
|
||||
.add("as", ColumnType.ofArray(ColumnType.STRING))
|
||||
.build(),
|
||||
signature
|
||||
);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void test_json_missingName()
|
||||
{
|
||||
final String signatureString =
|
||||
"[{\"name\":\"s\",\"type\":\"STRING\"},"
|
||||
+ "{\"type\":\"DOUBLE\"}]";
|
||||
|
||||
final ObjectMapper mapper = TestHelper.makeJsonMapper();
|
||||
Assert.assertThrows(
|
||||
"Column name must be non-empty",
|
||||
IOException.class,
|
||||
() -> mapper.readValue(signatureString, RowSignature.class)
|
||||
);
|
||||
}
|
||||
}
|
|
@ -21,7 +21,6 @@ package org.apache.druid.sql;
|
|||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.google.errorprone.annotations.concurrent.GuardedBy;
|
||||
import org.apache.calcite.avatica.remote.TypedValue;
|
||||
import org.apache.calcite.sql.parser.SqlParseException;
|
||||
|
@ -47,7 +46,7 @@ import org.apache.druid.server.security.Access;
|
|||
import org.apache.druid.server.security.AuthenticationResult;
|
||||
import org.apache.druid.server.security.AuthorizationUtils;
|
||||
import org.apache.druid.server.security.ForbiddenException;
|
||||
import org.apache.druid.server.security.Resource;
|
||||
import org.apache.druid.server.security.ResourceAction;
|
||||
import org.apache.druid.sql.calcite.planner.DruidPlanner;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerContext;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerFactory;
|
||||
|
@ -194,7 +193,7 @@ public class SqlLifecycle
|
|||
Access access = doAuthorize(
|
||||
AuthorizationUtils.authorizeAllResourceActions(
|
||||
authenticationResult,
|
||||
Iterables.transform(validationResult.getResources(), AuthorizationUtils.RESOURCE_READ_RA_GENERATOR),
|
||||
validationResult.getResourceActions(),
|
||||
plannerFactory.getAuthorizerMapper()
|
||||
)
|
||||
);
|
||||
|
@ -216,7 +215,7 @@ public class SqlLifecycle
|
|||
Access access = doAuthorize(
|
||||
AuthorizationUtils.authorizeAllResourceActions(
|
||||
req,
|
||||
Iterables.transform(validationResult.getResources(), AuthorizationUtils.RESOURCE_READ_RA_GENERATOR),
|
||||
validationResult.getResourceActions(),
|
||||
plannerFactory.getAuthorizerMapper()
|
||||
)
|
||||
);
|
||||
|
@ -225,13 +224,13 @@ public class SqlLifecycle
|
|||
|
||||
private ValidationResult validate(AuthenticationResult authenticationResult)
|
||||
{
|
||||
try (DruidPlanner planner = plannerFactory.createPlanner(queryContext)) {
|
||||
try (DruidPlanner planner = plannerFactory.createPlanner(sql, queryContext)) {
|
||||
// set planner context for logs/metrics in case something explodes early
|
||||
this.plannerContext = planner.getPlannerContext();
|
||||
this.plannerContext.setAuthenticationResult(authenticationResult);
|
||||
// set parameters on planner context, if parameters have already been set
|
||||
this.plannerContext.setParameters(parameters);
|
||||
this.validationResult = planner.validate(sql);
|
||||
this.validationResult = planner.validate();
|
||||
return validationResult;
|
||||
}
|
||||
// we can't collapse catch clauses since SqlPlanningException has type-sensitive constructors.
|
||||
|
@ -266,7 +265,6 @@ public class SqlLifecycle
|
|||
* Prepare the query lifecycle for execution, without completely planning into something that is executable, but
|
||||
* including some initial parsing and validation and any dynamic parameter type resolution, to support prepared
|
||||
* statements via JDBC.
|
||||
*
|
||||
*/
|
||||
public PrepareResult prepare() throws RelConversionException
|
||||
{
|
||||
|
@ -277,7 +275,7 @@ public class SqlLifecycle
|
|||
}
|
||||
Preconditions.checkNotNull(plannerContext, "Cannot prepare, plannerContext is null");
|
||||
try (DruidPlanner planner = plannerFactory.createPlannerWithContext(plannerContext)) {
|
||||
this.prepareResult = planner.prepare(sql);
|
||||
this.prepareResult = planner.prepare();
|
||||
return prepareResult;
|
||||
}
|
||||
// we can't collapse catch clauses since SqlPlanningException has type-sensitive constructors.
|
||||
|
@ -299,7 +297,7 @@ public class SqlLifecycle
|
|||
transition(State.AUTHORIZED, State.PLANNED);
|
||||
Preconditions.checkNotNull(plannerContext, "Cannot plan, plannerContext is null");
|
||||
try (DruidPlanner planner = plannerFactory.createPlannerWithContext(plannerContext)) {
|
||||
this.plannerResult = planner.plan(sql);
|
||||
this.plannerResult = planner.plan();
|
||||
}
|
||||
// we can't collapse catch clauses since SqlPlanningException has type-sensitive constructors.
|
||||
catch (SqlParseException e) {
|
||||
|
@ -379,10 +377,9 @@ public class SqlLifecycle
|
|||
return validate(authenticationResult);
|
||||
}
|
||||
|
||||
public Set<Resource> getAuthorizedResources()
|
||||
public Set<ResourceAction> getRequiredResourceActions()
|
||||
{
|
||||
assert validationResult != null;
|
||||
return validationResult.getResources();
|
||||
return Preconditions.checkNotNull(validationResult, "validationResult").getResourceActions();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -449,7 +446,11 @@ public class SqlLifecycle
|
|||
if (validationResult != null) {
|
||||
metricBuilder.setDimension(
|
||||
"dataSource",
|
||||
validationResult.getResources().stream().map(Resource::getName).collect(Collectors.toList()).toString()
|
||||
validationResult.getResourceActions()
|
||||
.stream()
|
||||
.map(action -> action.getResource().getName())
|
||||
.collect(Collectors.toList())
|
||||
.toString()
|
||||
);
|
||||
}
|
||||
metricBuilder.setDimension("remoteAddress", StringUtils.nullToEmptyNonDruidDataString(remoteAddress));
|
||||
|
|
|
@ -40,9 +40,9 @@ import org.apache.druid.server.security.ForbiddenException;
|
|||
import org.apache.druid.sql.SqlLifecycle;
|
||||
import org.apache.druid.sql.calcite.planner.Calcites;
|
||||
import org.apache.druid.sql.calcite.planner.PrepareResult;
|
||||
import org.apache.druid.sql.calcite.rel.QueryMaker;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.sql.Array;
|
||||
import java.sql.DatabaseMetaData;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
@ -119,20 +119,20 @@ public class DruidStatement implements Closeable
|
|||
|
||||
final ColumnMetaData.AvaticaType columnType;
|
||||
if (field.getType().getSqlTypeName() == SqlTypeName.ARRAY) {
|
||||
final ColumnMetaData.Rep elementRep = QueryMaker.rep(field.getType().getComponentType().getSqlTypeName());
|
||||
final ColumnMetaData.Rep elementRep = rep(field.getType().getComponentType().getSqlTypeName());
|
||||
final ColumnMetaData.ScalarType elementType = ColumnMetaData.scalar(
|
||||
field.getType().getComponentType().getSqlTypeName().getJdbcOrdinal(),
|
||||
field.getType().getComponentType().getSqlTypeName().getName(),
|
||||
elementRep
|
||||
);
|
||||
final ColumnMetaData.Rep arrayRep = QueryMaker.rep(field.getType().getSqlTypeName());
|
||||
final ColumnMetaData.Rep arrayRep = rep(field.getType().getSqlTypeName());
|
||||
columnType = ColumnMetaData.array(
|
||||
elementType,
|
||||
field.getType().getSqlTypeName().getName(),
|
||||
arrayRep
|
||||
);
|
||||
} else {
|
||||
final ColumnMetaData.Rep rep = QueryMaker.rep(field.getType().getSqlTypeName());
|
||||
final ColumnMetaData.Rep rep = rep(field.getType().getSqlTypeName());
|
||||
columnType = ColumnMetaData.scalar(
|
||||
field.getType().getSqlTypeName().getJdbcOrdinal(),
|
||||
field.getType().getSqlTypeName().getName(),
|
||||
|
@ -411,6 +411,35 @@ public class DruidStatement implements Closeable
|
|||
throw new ISE("Invalid action for state[%s]", state);
|
||||
}
|
||||
|
||||
private static ColumnMetaData.Rep rep(final SqlTypeName sqlType)
|
||||
{
|
||||
if (SqlTypeName.CHAR_TYPES.contains(sqlType)) {
|
||||
return ColumnMetaData.Rep.of(String.class);
|
||||
} else if (sqlType == SqlTypeName.TIMESTAMP) {
|
||||
return ColumnMetaData.Rep.of(Long.class);
|
||||
} else if (sqlType == SqlTypeName.DATE) {
|
||||
return ColumnMetaData.Rep.of(Integer.class);
|
||||
} else if (sqlType == SqlTypeName.INTEGER) {
|
||||
// use Number.class for exact numeric types since JSON transport might switch longs to integers
|
||||
return ColumnMetaData.Rep.of(Number.class);
|
||||
} else if (sqlType == SqlTypeName.BIGINT) {
|
||||
// use Number.class for exact numeric types since JSON transport might switch longs to integers
|
||||
return ColumnMetaData.Rep.of(Number.class);
|
||||
} else if (sqlType == SqlTypeName.FLOAT) {
|
||||
return ColumnMetaData.Rep.of(Float.class);
|
||||
} else if (sqlType == SqlTypeName.DOUBLE || sqlType == SqlTypeName.DECIMAL) {
|
||||
return ColumnMetaData.Rep.of(Double.class);
|
||||
} else if (sqlType == SqlTypeName.BOOLEAN) {
|
||||
return ColumnMetaData.Rep.of(Boolean.class);
|
||||
} else if (sqlType == SqlTypeName.OTHER) {
|
||||
return ColumnMetaData.Rep.of(Object.class);
|
||||
} else if (sqlType == SqlTypeName.ARRAY) {
|
||||
return ColumnMetaData.Rep.of(Array.class);
|
||||
} else {
|
||||
throw new ISE("No rep for SQL type[%s]", sqlType);
|
||||
}
|
||||
}
|
||||
|
||||
enum State
|
||||
{
|
||||
NEW,
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.druid.sql.calcite.expression;
|
||||
|
||||
import org.apache.calcite.sql.SqlCall;
|
||||
import org.apache.druid.server.security.ResourceAction;
|
||||
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* Interface for {@link org.apache.calcite.sql.SqlOperator} implementations that need authorization in order to execute.
|
||||
*
|
||||
* Checked by {@link org.apache.druid.sql.calcite.planner.SqlResourceCollectorShuttle}.
|
||||
*/
|
||||
public interface AuthorizableOperator
|
||||
{
|
||||
Set<ResourceAction> computeResources(SqlCall call);
|
||||
}
|
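As a minimal sketch (not part of this commit; the class name, function name, and guarded resource are hypothetical), a Calcite SqlFunction can declare extra permissions by implementing this interface; SqlResourceCollectorShuttle then picks them up during validation. The EXTERN operator defined later in this diff does exactly this.

// Hypothetical example: a niladic SQL function that requires READ permission on the
// CONFIG resource before the planner will authorize a query that uses it.
import org.apache.calcite.sql.SqlCall;
import org.apache.calcite.sql.SqlFunction;
import org.apache.calcite.sql.SqlFunctionCategory;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.ReturnTypes;
import org.apache.druid.server.security.Action;
import org.apache.druid.server.security.Resource;
import org.apache.druid.server.security.ResourceAction;
import org.apache.druid.server.security.ResourceType;
import org.apache.druid.sql.calcite.expression.AuthorizableOperator;

import java.util.Collections;
import java.util.Set;

public class GuardedFunction extends SqlFunction implements AuthorizableOperator
{
  public GuardedFunction()
  {
    super(
        "GUARDED_FN",
        SqlKind.OTHER_FUNCTION,
        ReturnTypes.BIGINT,
        null,
        OperandTypes.NILADIC,
        SqlFunctionCategory.USER_DEFINED_FUNCTION
    );
  }

  @Override
  public Set<ResourceAction> computeResources(final SqlCall call)
  {
    // Collected during validation and added to the set of resource actions
    // that the caller must be authorized for.
    return Collections.singleton(new ResourceAction(new Resource("CONFIG", ResourceType.CONFIG), Action.READ));
  }
}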
sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalDataSource.java (new file, 152 lines)
|
@ -0,0 +1,152 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.druid.sql.calcite.external;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonCreator;
|
||||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
import com.fasterxml.jackson.annotation.JsonTypeName;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.druid.data.input.InputFormat;
|
||||
import org.apache.druid.data.input.InputSource;
|
||||
import org.apache.druid.java.util.common.IAE;
|
||||
import org.apache.druid.query.DataSource;
|
||||
import org.apache.druid.segment.column.RowSignature;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* Represents external data for INSERT queries. Only used by the SQL layer, not by the query stack.
|
||||
*
|
||||
* Includes an {@link InputSource} and {@link InputFormat}, plus a {@link RowSignature} so the SQL planner has
|
||||
* the type information necessary to validate and plan the query.
|
||||
*
|
||||
* This class is exercised in CalciteInsertDmlTest but is not currently exposed to end users.
|
||||
*/
|
||||
@JsonTypeName("external")
|
||||
public class ExternalDataSource implements DataSource
|
||||
{
|
||||
private final InputSource inputSource;
|
||||
private final InputFormat inputFormat;
|
||||
private final RowSignature signature;
|
||||
|
||||
@JsonCreator
|
||||
public ExternalDataSource(
|
||||
@JsonProperty("inputSource") final InputSource inputSource,
|
||||
@JsonProperty("inputFormat") final InputFormat inputFormat,
|
||||
@JsonProperty("signature") final RowSignature signature
|
||||
)
|
||||
{
|
||||
this.inputSource = Preconditions.checkNotNull(inputSource, "inputSource");
|
||||
this.inputFormat = Preconditions.checkNotNull(inputFormat, "inputFormat");
|
||||
this.signature = Preconditions.checkNotNull(signature, "signature");
|
||||
}
|
||||
|
||||
@JsonProperty
|
||||
public InputSource getInputSource()
|
||||
{
|
||||
return inputSource;
|
||||
}
|
||||
|
||||
@JsonProperty
|
||||
public InputFormat getInputFormat()
|
||||
{
|
||||
return inputFormat;
|
||||
}
|
||||
|
||||
@JsonProperty
|
||||
public RowSignature getSignature()
|
||||
{
|
||||
return signature;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<String> getTableNames()
|
||||
{
|
||||
return Collections.emptySet();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<DataSource> getChildren()
|
||||
{
|
||||
return Collections.emptyList();
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataSource withChildren(final List<DataSource> children)
|
||||
{
|
||||
if (!children.isEmpty()) {
|
||||
throw new IAE("Cannot accept children");
|
||||
}
|
||||
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCacheable(boolean isBroker)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isGlobal()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isConcrete()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o)
|
||||
{
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
return false;
|
||||
}
|
||||
ExternalDataSource that = (ExternalDataSource) o;
|
||||
return Objects.equals(inputSource, that.inputSource)
|
||||
&& Objects.equals(inputFormat, that.inputFormat)
|
||||
&& Objects.equals(signature, that.signature);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode()
|
||||
{
|
||||
return Objects.hash(inputSource, inputFormat, signature);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString()
|
||||
{
|
||||
return "ExternalDataSource{" +
|
||||
"inputSource=" + inputSource +
|
||||
", inputFormat=" + inputFormat +
|
||||
", signature=" + signature +
|
||||
'}';
|
||||
}
|
||||
}
|
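For orientation, a serialized ExternalDataSource would look roughly like the following. The field names come from the class above and the signature format from RowSignatureTest; the inline/CSV payloads are illustrative.

{
  "type": "external",
  "inputSource": {"type": "inline", "data": "a,b,1\nc,d,2"},
  "inputFormat": {"type": "csv", "columns": ["x", "y", "z"]},
  "signature": [
    {"name": "x", "type": "STRING"},
    {"name": "y", "type": "STRING"},
    {"name": "z", "type": "LONG"}
  ]
}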
sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalOperatorConversion.java (new file, 115 lines)
|
@ -0,0 +1,115 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.druid.sql.calcite.external;
|
||||
|
||||
import com.google.inject.Inject;
|
||||
import org.apache.calcite.rel.type.RelDataTypeFactory;
|
||||
import org.apache.calcite.rex.RexNode;
|
||||
import org.apache.calcite.sql.SqlCall;
|
||||
import org.apache.calcite.sql.SqlIdentifier;
|
||||
import org.apache.calcite.sql.SqlOperator;
|
||||
import org.apache.calcite.sql.parser.SqlParserPos;
|
||||
import org.apache.calcite.sql.type.OperandTypes;
|
||||
import org.apache.calcite.sql.type.ReturnTypes;
|
||||
import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
|
||||
import org.apache.calcite.sql.type.SqlTypeFamily;
|
||||
import org.apache.calcite.sql.validate.SqlUserDefinedTableMacro;
|
||||
import org.apache.druid.segment.column.RowSignature;
|
||||
import org.apache.druid.server.security.Action;
|
||||
import org.apache.druid.server.security.Resource;
|
||||
import org.apache.druid.server.security.ResourceAction;
|
||||
import org.apache.druid.sql.calcite.expression.AuthorizableOperator;
|
||||
import org.apache.druid.sql.calcite.expression.DruidExpression;
|
||||
import org.apache.druid.sql.calcite.expression.SqlOperatorConversion;
|
||||
import org.apache.druid.sql.calcite.planner.DruidTypeSystem;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerContext;
|
||||
|
||||
import javax.annotation.Nullable;
|
||||
import java.util.Collections;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Registers the "EXTERN" operator, which is used in queries like "INSERT INTO dst SELECT * FROM TABLE(EXTERN(...))".
|
||||
*
|
||||
* This class is exercised in CalciteInsertDmlTest but is not currently exposed to end users.
|
||||
*/
|
||||
public class ExternalOperatorConversion implements SqlOperatorConversion
|
||||
{
|
||||
public static final String FUNCTION_NAME = "EXTERN";
|
||||
|
||||
// EXTERNAL is not an "official" resource type (it doesn't appear as a constant in the ResourceType class).
|
||||
// It is here so we can write tests that check that authorization works as expected, like CalciteInsertDmlTest.
|
||||
// This should be rethought before the functionality is exposed to end users.
|
||||
public static final ResourceAction EXTERNAL_RESOURCE_ACTION =
|
||||
new ResourceAction(new Resource("EXTERNAL", "EXTERNAL"), Action.READ);
|
||||
|
||||
private static final RelDataTypeFactory TYPE_FACTORY = new SqlTypeFactoryImpl(DruidTypeSystem.INSTANCE);
|
||||
|
||||
private final SqlUserDefinedTableMacro operator;
|
||||
|
||||
@Inject
|
||||
public ExternalOperatorConversion(final ExternalTableMacro macro)
|
||||
{
|
||||
this.operator = new ExternalOperator(macro);
|
||||
}
|
||||
|
||||
@Override
|
||||
public SqlOperator calciteOperator()
|
||||
{
|
||||
return operator;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
@Override
|
||||
public DruidExpression toDruidExpression(PlannerContext plannerContext, RowSignature rowSignature, RexNode rexNode)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
private static class ExternalOperator extends SqlUserDefinedTableMacro implements AuthorizableOperator
|
||||
{
|
||||
public ExternalOperator(final ExternalTableMacro macro)
|
||||
{
|
||||
super(
|
||||
new SqlIdentifier(FUNCTION_NAME, SqlParserPos.ZERO),
|
||||
ReturnTypes.CURSOR,
|
||||
null,
|
||||
OperandTypes.sequence(
|
||||
"(inputSource, inputFormat, signature)",
|
||||
OperandTypes.family(SqlTypeFamily.STRING),
|
||||
OperandTypes.family(SqlTypeFamily.STRING),
|
||||
OperandTypes.family(SqlTypeFamily.STRING)
|
||||
),
|
||||
macro.getParameters()
|
||||
.stream()
|
||||
.map(parameter -> parameter.getType(TYPE_FACTORY))
|
||||
.collect(Collectors.toList()),
|
||||
macro
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<ResourceAction> computeResources(final SqlCall call)
|
||||
{
|
||||
return Collections.singleton(EXTERNAL_RESOURCE_ACTION);
|
||||
}
|
||||
}
|
||||
}
|
sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableMacro.java (new file, 159 lines)
|
@ -0,0 +1,159 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.druid.sql.calcite.external;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.inject.Inject;
|
||||
import org.apache.calcite.rel.type.RelDataType;
|
||||
import org.apache.calcite.rel.type.RelDataTypeFactory;
|
||||
import org.apache.calcite.schema.FunctionParameter;
|
||||
import org.apache.calcite.schema.TableMacro;
|
||||
import org.apache.calcite.schema.TranslatableTable;
|
||||
import org.apache.druid.data.input.InputFormat;
|
||||
import org.apache.druid.data.input.InputSource;
|
||||
import org.apache.druid.guice.annotations.Json;
|
||||
import org.apache.druid.segment.column.RowSignature;
|
||||
import org.apache.druid.sql.calcite.table.DruidTable;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Used by {@link ExternalOperatorConversion} to generate a {@link DruidTable} that references an {@link ExternalDataSource}.
|
||||
*
|
||||
* This class is exercised in CalciteInsertDmlTest but is not currently exposed to end users.
|
||||
*/
|
||||
public class ExternalTableMacro implements TableMacro
|
||||
{
|
||||
private final ObjectMapper jsonMapper;
|
||||
|
||||
@Inject
|
||||
public ExternalTableMacro(@Json final ObjectMapper jsonMapper)
|
||||
{
|
||||
this.jsonMapper = jsonMapper;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TranslatableTable apply(final List<Object> arguments)
|
||||
{
|
||||
try {
|
||||
final InputSource inputSource = jsonMapper.readValue((String) arguments.get(0), InputSource.class);
|
||||
final InputFormat inputFormat = jsonMapper.readValue((String) arguments.get(1), InputFormat.class);
|
||||
final RowSignature signature = jsonMapper.readValue((String) arguments.get(2), RowSignature.class);
|
||||
|
||||
return new DruidTable(
|
||||
new ExternalDataSource(inputSource, inputFormat, signature),
|
||||
signature,
|
||||
jsonMapper,
|
||||
false,
|
||||
false
|
||||
);
|
||||
}
|
||||
catch (JsonProcessingException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<FunctionParameter> getParameters()
|
||||
{
|
||||
return ImmutableList.of(
|
||||
new FunctionParameter()
|
||||
{
|
||||
@Override
|
||||
public int getOrdinal()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName()
|
||||
{
|
||||
return "inputSource";
|
||||
}
|
||||
|
||||
@Override
|
||||
public RelDataType getType(RelDataTypeFactory typeFactory)
|
||||
{
|
||||
return typeFactory.createJavaType(String.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isOptional()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
},
|
||||
new FunctionParameter()
|
||||
{
|
||||
@Override
|
||||
public int getOrdinal()
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName()
|
||||
{
|
||||
return "inputFormat";
|
||||
}
|
||||
|
||||
@Override
|
||||
public RelDataType getType(RelDataTypeFactory typeFactory)
|
||||
{
|
||||
return typeFactory.createJavaType(String.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isOptional()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
},
|
||||
new FunctionParameter()
|
||||
{
|
||||
@Override
|
||||
public int getOrdinal()
|
||||
{
|
||||
return 2;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName()
|
||||
{
|
||||
return "signature";
|
||||
}
|
||||
|
||||
@Override
|
||||
public RelDataType getType(RelDataTypeFactory typeFactory)
|
||||
{
|
||||
return typeFactory.createJavaType(String.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isOptional()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableScan.java (new file, 78 lines)
|
@ -0,0 +1,78 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.druid.sql.calcite.external;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import org.apache.calcite.plan.Convention;
|
||||
import org.apache.calcite.plan.RelOptCluster;
|
||||
import org.apache.calcite.rel.AbstractRelNode;
|
||||
import org.apache.calcite.rel.RelWriter;
|
||||
import org.apache.calcite.rel.type.RelDataType;
|
||||
import org.apache.druid.sql.calcite.table.DruidTable;
|
||||
|
||||
/**
|
||||
* Represents a scan of an external table. Generated by {@link DruidTable} when its datasource is an
|
||||
* {@link ExternalDataSource}.
|
||||
*
|
||||
* This class is exercised in CalciteInsertDmlTest but is not currently exposed to end users.
|
||||
*/
|
||||
public class ExternalTableScan extends AbstractRelNode
|
||||
{
|
||||
private final ObjectMapper jsonMapper;
|
||||
private final DruidTable druidTable;
|
||||
|
||||
public ExternalTableScan(
|
||||
final RelOptCluster cluster,
|
||||
final ObjectMapper jsonMapper,
|
||||
final DruidTable druidTable
|
||||
)
|
||||
{
|
||||
super(cluster, cluster.traitSetOf(Convention.NONE));
|
||||
this.jsonMapper = jsonMapper;
|
||||
this.druidTable = druidTable;
|
||||
}
|
||||
|
||||
public DruidTable getDruidTable()
|
||||
{
|
||||
return druidTable;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected RelDataType deriveRowType()
|
||||
{
|
||||
return druidTable.getRowType(getCluster().getTypeFactory());
|
||||
}
|
||||
|
||||
@Override
|
||||
public RelWriter explainTerms(RelWriter pw)
|
||||
{
|
||||
final String dataSourceString;
|
||||
|
||||
try {
|
||||
dataSourceString = jsonMapper.writeValueAsString(druidTable.getDataSource());
|
||||
}
|
||||
catch (JsonProcessingException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
return pw.item("dataSource", dataSourceString);
|
||||
}
|
||||
}
|
sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableScanRule.java (new file, 59 lines)
|
@ -0,0 +1,59 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.druid.sql.calcite.external;
|
||||
|
||||
import org.apache.calcite.plan.RelOptRule;
|
||||
import org.apache.calcite.plan.RelOptRuleCall;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerContext;
|
||||
import org.apache.druid.sql.calcite.rel.DruidQueryRel;
|
||||
import org.apache.druid.sql.calcite.run.QueryFeature;
|
||||
|
||||
/**
|
||||
* Rule that converts an {@link ExternalTableScan} to a call to {@link DruidQueryRel#scanExternal}.
|
||||
*
|
||||
* This class is exercised in CalciteInsertDmlTest but is not currently exposed to end users.
|
||||
*/
|
||||
public class ExternalTableScanRule extends RelOptRule
|
||||
{
|
||||
private final PlannerContext plannerContext;
|
||||
|
||||
public ExternalTableScanRule(final PlannerContext plannerContext)
|
||||
{
|
||||
super(operand(ExternalTableScan.class, any()));
|
||||
this.plannerContext = plannerContext;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean matches(RelOptRuleCall call)
|
||||
{
|
||||
if (plannerContext.getQueryMaker().feature(QueryFeature.CAN_READ_EXTERNAL_DATA)) {
|
||||
return super.matches(call);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onMatch(final RelOptRuleCall call)
|
||||
{
|
||||
final ExternalTableScan scan = call.rel(0);
|
||||
call.transformTo(DruidQueryRel.scanExternal(scan, plannerContext));
|
||||
}
|
||||
}
|
|
@ -20,19 +20,39 @@
|
|||
package org.apache.druid.sql.calcite.planner;
|
||||
|
||||
import com.google.inject.Binder;
|
||||
import com.google.inject.Key;
|
||||
import com.google.inject.Module;
|
||||
import org.apache.druid.guice.JsonConfigProvider;
|
||||
import org.apache.druid.guice.LazySingleton;
|
||||
import org.apache.druid.guice.PolyBind;
|
||||
import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory;
|
||||
import org.apache.druid.sql.calcite.run.QueryMakerFactory;
|
||||
|
||||
/**
|
||||
* The module responsible for providing bindings for the Calcite Planner.
|
||||
*/
|
||||
public class CalcitePlannerModule implements Module
|
||||
{
|
||||
public static final String PROPERTY_SQL_EXECUTOR_TYPE = "druid.sql.executor.type";
|
||||
|
||||
@Override
|
||||
public void configure(Binder binder)
|
||||
{
|
||||
JsonConfigProvider.bind(binder, "druid.sql.planner", PlannerConfig.class);
|
||||
binder.bind(PlannerFactory.class);
|
||||
binder.bind(DruidOperatorTable.class);
|
||||
|
||||
PolyBind.optionBinder(binder, Key.get(QueryMakerFactory.class))
|
||||
.addBinding(NativeQueryMakerFactory.TYPE)
|
||||
.to(NativeQueryMakerFactory.class)
|
||||
.in(LazySingleton.class);
|
||||
|
||||
PolyBind.createChoiceWithDefault(
|
||||
binder,
|
||||
PROPERTY_SQL_EXECUTOR_TYPE,
|
||||
Key.get(QueryMakerFactory.class),
|
||||
NativeQueryMakerFactory.TYPE
|
||||
);
|
||||
|
||||
binder.bind(PlannerFactory.class).in(LazySingleton.class);
|
||||
binder.bind(DruidOperatorTable.class).in(LazySingleton.class);
|
||||
}
|
||||
}
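For illustration, another module could register an additional QueryMakerFactory under the same PolyBind key; which implementation is used is then selected by the druid.sql.executor.type runtime property. In the sketch below, MyExecutorModule, MyQueryMakerFactory, and the "my-executor" option are hypothetical; only the PolyBind/QueryMakerFactory wiring follows the module above.

import com.google.inject.Binder;
import com.google.inject.Key;
import com.google.inject.Module;
import org.apache.druid.guice.LazySingleton;
import org.apache.druid.guice.PolyBind;
import org.apache.druid.sql.calcite.run.QueryMakerFactory;

public class MyExecutorModule implements Module
{
  @Override
  public void configure(Binder binder)
  {
    // Registers a hypothetical alternative execution backend. Selecting it would require
    // setting druid.sql.executor.type=my-executor at runtime; the default remains the
    // native factory bound by CalcitePlannerModule above.
    PolyBind.optionBinder(binder, Key.get(QueryMakerFactory.class))
            .addBinding("my-executor")
            .to(MyQueryMakerFactory.class)
            .in(LazySingleton.class);
  }
}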
|
||||
|
|
|
@ -20,11 +20,11 @@
|
|||
package org.apache.druid.sql.calcite.planner;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.base.Supplier;
|
||||
import com.google.common.base.Suppliers;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.Iterables;
|
||||
import org.apache.calcite.DataContext;
|
||||
import org.apache.calcite.adapter.java.JavaTypeFactory;
|
||||
import org.apache.calcite.config.CalciteConnectionConfig;
|
||||
|
@ -49,6 +49,8 @@ import org.apache.calcite.rel.type.RelDataTypeFactory;
|
|||
import org.apache.calcite.rex.RexBuilder;
|
||||
import org.apache.calcite.rex.RexNode;
|
||||
import org.apache.calcite.sql.SqlExplain;
|
||||
import org.apache.calcite.sql.SqlIdentifier;
|
||||
import org.apache.calcite.sql.SqlInsert;
|
||||
import org.apache.calcite.sql.SqlKind;
|
||||
import org.apache.calcite.sql.SqlNode;
|
||||
import org.apache.calcite.sql.parser.SqlParseException;
|
||||
|
@ -61,20 +63,30 @@ import org.apache.calcite.tools.Planner;
|
|||
import org.apache.calcite.tools.RelConversionException;
|
||||
import org.apache.calcite.tools.ValidationException;
|
||||
import org.apache.calcite.util.Pair;
|
||||
import org.apache.druid.java.util.common.StringUtils;
|
||||
import org.apache.druid.java.util.common.guava.BaseSequence;
|
||||
import org.apache.druid.java.util.common.guava.Sequence;
|
||||
import org.apache.druid.java.util.common.guava.Sequences;
|
||||
import org.apache.druid.java.util.emitter.EmittingLogger;
|
||||
import org.apache.druid.segment.DimensionHandlerUtils;
|
||||
import org.apache.druid.server.security.Action;
|
||||
import org.apache.druid.server.security.Resource;
|
||||
import org.apache.druid.server.security.ResourceAction;
|
||||
import org.apache.druid.server.security.ResourceType;
|
||||
import org.apache.druid.sql.calcite.rel.DruidConvention;
|
||||
import org.apache.druid.sql.calcite.rel.DruidRel;
|
||||
import org.apache.druid.sql.calcite.run.QueryMaker;
|
||||
import org.apache.druid.sql.calcite.run.QueryMakerFactory;
|
||||
|
||||
import javax.annotation.Nullable;
|
||||
import java.io.Closeable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class DruidPlanner implements Closeable
|
||||
{
|
||||
|
@ -83,115 +95,120 @@ public class DruidPlanner implements Closeable
|
|||
private final FrameworkConfig frameworkConfig;
private final Planner planner;
private final PlannerContext plannerContext;
private final ObjectMapper jsonMapper;
private final QueryMakerFactory queryMakerFactory;

private RexBuilder rexBuilder;

public DruidPlanner(
DruidPlanner(
final FrameworkConfig frameworkConfig,
final PlannerContext plannerContext,
final ObjectMapper jsonMapper
final QueryMakerFactory queryMakerFactory
)
{
this.frameworkConfig = frameworkConfig;
this.planner = Frameworks.getPlanner(frameworkConfig);
this.plannerContext = plannerContext;
this.jsonMapper = jsonMapper;
this.queryMakerFactory = queryMakerFactory;
}

/**
* Validates an SQL query and collects a {@link ValidationResult} which contains a set of
* {@link org.apache.druid.server.security.Resource} corresponding to any Druid datasources or views which are taking
* part in the query
* Validates a SQL query and populates {@link PlannerContext#getResourceActions()}.
*
* @return set of {@link Resource} corresponding to any Druid datasources or views which are taking part in the query.
*/
public ValidationResult validate(final String sql) throws SqlParseException, ValidationException
public ValidationResult validate() throws SqlParseException, ValidationException
{
reset();
SqlNode parsed = planner.parse(sql);
if (parsed.getKind() == SqlKind.EXPLAIN) {
SqlExplain explain = (SqlExplain) parsed;
parsed = explain.getExplicandum();
}
SqlValidator validator = getValidator();
SqlNode validated;
resetPlanner();
final ParsedNodes parsed = ParsedNodes.create(planner.parse(plannerContext.getSql()));
final SqlValidator validator = getValidator();
final SqlNode validatedQueryNode;

try {
validated = validator.validate(rewriteDynamicParameters(parsed));
validatedQueryNode = validator.validate(rewriteDynamicParameters(parsed.getQueryNode()));
}
catch (RuntimeException e) {
throw new ValidationException(e);
}
SqlResourceCollectorShuttle resourceCollectorShuttle =
new SqlResourceCollectorShuttle(validator, plannerContext);
validated.accept(resourceCollectorShuttle);
plannerContext.setResources(resourceCollectorShuttle.getResources());
return new ValidationResult(resourceCollectorShuttle.getResources());

SqlResourceCollectorShuttle resourceCollectorShuttle = new SqlResourceCollectorShuttle(validator, plannerContext);
validatedQueryNode.accept(resourceCollectorShuttle);

final Set<ResourceAction> resourceActions = new HashSet<>(resourceCollectorShuttle.getResourceActions());

if (parsed.getInsertNode() != null) {
final String targetDataSource = validateAndGetDataSourceForInsert(parsed.getInsertNode());
resourceActions.add(new ResourceAction(new Resource(targetDataSource, ResourceType.DATASOURCE), Action.WRITE));
}

plannerContext.setResourceActions(resourceActions);
return new ValidationResult(resourceActions);
}
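For readers skimming the hunk above: validation now folds the INSERT target into the set of required permissions. A minimal sketch of that bookkeeping follows, using only the security classes this patch imports; the helper class and method names are illustrative and not part of the change.

import org.apache.druid.server.security.Action;
import org.apache.druid.server.security.Resource;
import org.apache.druid.server.security.ResourceAction;
import org.apache.druid.server.security.ResourceType;

import java.util.HashSet;
import java.util.Set;

class InsertPermissionsSketch
{
  // Mirrors validate() above: keep the READ actions gathered by SqlResourceCollectorShuttle and,
  // for an INSERT, add WRITE on the target datasource.
  static Set<ResourceAction> requiredActions(Set<ResourceAction> readActions, String insertTargetOrNull)
  {
    final Set<ResourceAction> actions = new HashSet<>(readActions);
    if (insertTargetOrNull != null) {
      actions.add(new ResourceAction(new Resource(insertTargetOrNull, ResourceType.DATASOURCE), Action.WRITE));
    }
    return actions;
  }
}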
/**
* Prepare an SQL query for execution, including some initial parsing and validation and any dynamic parameter type
* resolution, to support prepared statements via JDBC.
*
* In some future this could perhaps re-use some of the work done by {@link #validate(String)}
* In some future this could perhaps re-use some of the work done by {@link #validate()}
* instead of repeating it, but that day is not today.
*/
public PrepareResult prepare(final String sql) throws SqlParseException, ValidationException, RelConversionException
public PrepareResult prepare() throws SqlParseException, ValidationException, RelConversionException
{
reset();
SqlNode parsed = planner.parse(sql);
SqlExplain explain = null;
if (parsed.getKind() == SqlKind.EXPLAIN) {
explain = (SqlExplain) parsed;
parsed = explain.getExplicandum();
}
final SqlNode validated = planner.validate(parsed);
RelRoot root = planner.rel(validated);
RelDataType rowType = root.validatedRowType;
resetPlanner();

SqlValidator validator = getValidator();
RelDataType parameterTypes = validator.getParameterRowType(validator.validate(validated));
final ParsedNodes parsed = ParsedNodes.create(planner.parse(plannerContext.getSql()));
final SqlNode validatedQueryNode = planner.validate(parsed.getQueryNode());
final RelRoot rootQueryRel = planner.rel(validatedQueryNode);

if (explain != null) {
final RelDataTypeFactory typeFactory = root.rel.getCluster().getTypeFactory();
return new PrepareResult(getExplainStructType(typeFactory), parameterTypes);
final SqlValidator validator = getValidator();
final RelDataTypeFactory typeFactory = rootQueryRel.rel.getCluster().getTypeFactory();
final RelDataType parameterTypes = validator.getParameterRowType(validator.validate(validatedQueryNode));
final RelDataType returnedRowType;

if (parsed.getExplainNode() != null) {
returnedRowType = getExplainStructType(typeFactory);
} else {
returnedRowType = buildQueryMaker(rootQueryRel, parsed.getInsertNode()).getResultType();
}
return new PrepareResult(rowType, parameterTypes);

return new PrepareResult(returnedRowType, parameterTypes);
}

/**
* Plan an SQL query for execution, returning a {@link PlannerResult} which can be used to actually execute the query.
*
* Ideally, the query can be planned into a native Druid query, using
* {@link #planWithDruidConvention(SqlExplain, RelRoot)}, but will fall-back to
* {@link #planWithBindableConvention(SqlExplain, RelRoot)} if this is not possible.
* Ideally, the query can be planned into a native Druid query, using {@link #planWithDruidConvention}, but will
* fall-back to {@link #planWithBindableConvention} if this is not possible.
*
* In some future this could perhaps re-use some of the work done by {@link #validate(String)}
* In some future this could perhaps re-use some of the work done by {@link #validate()}
* instead of repeating it, but that day is not today.
*/
public PlannerResult plan(final String sql) throws SqlParseException, ValidationException, RelConversionException
public PlannerResult plan() throws SqlParseException, ValidationException, RelConversionException
{
reset();
SqlExplain explain = null;
SqlNode parsed = planner.parse(sql);
if (parsed.getKind() == SqlKind.EXPLAIN) {
explain = (SqlExplain) parsed;
parsed = explain.getExplicandum();
}
resetPlanner();

final ParsedNodes parsed = ParsedNodes.create(planner.parse(plannerContext.getSql()));

// the planner's type factory is not available until after parsing
this.rexBuilder = new RexBuilder(planner.getTypeFactory());
SqlNode parametized = rewriteDynamicParameters(parsed);

final SqlNode validated = planner.validate(parametized);
final RelRoot root = planner.rel(validated);
final SqlNode parameterizedQueryNode = rewriteDynamicParameters(parsed.getQueryNode());
final SqlNode validatedQueryNode = planner.validate(parameterizedQueryNode);
final RelRoot rootQueryRel = planner.rel(validatedQueryNode);

try {
return planWithDruidConvention(explain, root);
return planWithDruidConvention(rootQueryRel, parsed.getExplainNode(), parsed.getInsertNode());
}
catch (RelOptPlanner.CannotPlanException e) {
// Try again with BINDABLE convention. Used for querying Values and metadata tables.
try {
return planWithBindableConvention(explain, root);
}
catch (Exception e2) {
e.addSuppressed(e2);
if (parsed.getInsertNode() == null) {
// Try again with BINDABLE convention. Used for querying Values and metadata tables.
try {
return planWithBindableConvention(rootQueryRel, parsed.getExplainNode());
}
catch (Exception e2) {
e.addSuppressed(e2);
throw e;
}
} else {
// Cannot INSERT with BINDABLE.
throw e;
}
}
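Taken together, validate(), prepare() and plan() now operate on the SQL string baked into the planner at creation time. A rough usage sketch of that lifecycle follows; the query text, context map and authorization step are placeholders, imports are elided, and error handling is simplified.

PlannerResult planOneQuery(
    PlannerFactory plannerFactory,
    Map<String, Object> queryContext,
    AuthenticationResult identity
) throws SqlParseException, ValidationException, RelConversionException
{
  // The SQL is supplied once, when the planner is created.
  try (DruidPlanner planner = plannerFactory.createPlanner("INSERT INTO dst SELECT * FROM foo", queryContext)) {
    planner.getPlannerContext().setAuthenticationResult(identity);

    // validate() returns READ actions for every referenced table plus WRITE on "dst" for the INSERT.
    ValidationResult validationResult = planner.validate();
    // ... authorize validationResult.getResourceActions() against the identity and record the
    // resulting Access on the planner context before planning ...

    return planner.plan();
  }
}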
@ -223,7 +240,7 @@ public class DruidPlanner implements Closeable
|
|||
* closely with the state of {@link #planner}, instead of repeating parsing and validation between each of these
|
||||
* steps.
|
||||
*/
|
||||
private void reset()
|
||||
private void resetPlanner()
|
||||
{
|
||||
planner.close();
|
||||
planner.reset();
|
||||
|
@ -233,19 +250,23 @@ public class DruidPlanner implements Closeable
|
|||
* Construct a {@link PlannerResult} for a {@link RelNode} that is directly translatable to a native Druid query.
|
||||
*/
|
||||
private PlannerResult planWithDruidConvention(
|
||||
final SqlExplain explain,
|
||||
final RelRoot root
|
||||
) throws RelConversionException
|
||||
final RelRoot root,
|
||||
@Nullable final SqlExplain explain,
|
||||
@Nullable final SqlInsert insert
|
||||
) throws ValidationException, RelConversionException
|
||||
{
|
||||
final RelNode possiblyWrappedRootRel = possiblyWrapRootWithOuterLimitFromContext(root);
|
||||
final RelRoot possiblyLimitedRoot = possiblyWrapRootWithOuterLimitFromContext(root);
|
||||
|
||||
RelNode parametized = rewriteRelDynamicParameters(possiblyWrappedRootRel);
|
||||
final QueryMaker queryMaker = buildQueryMaker(root, insert);
|
||||
plannerContext.setQueryMaker(queryMaker);
|
||||
|
||||
RelNode parameterized = rewriteRelDynamicParameters(possiblyLimitedRoot.rel);
|
||||
final DruidRel<?> druidRel = (DruidRel<?>) planner.transform(
|
||||
Rules.DRUID_CONVENTION_RULES,
|
||||
planner.getEmptyTraitSet()
|
||||
.replace(DruidConvention.instance())
|
||||
.plus(root.collation),
|
||||
parametized
|
||||
parameterized
|
||||
);
|
||||
|
||||
if (explain != null) {
|
||||
|
@ -253,32 +274,25 @@ public class DruidPlanner implements Closeable
|
|||
} else {
|
||||
final Supplier<Sequence<Object[]>> resultsSupplier = () -> {
|
||||
// sanity check
|
||||
final Set<ResourceAction> readResourceActions =
|
||||
plannerContext.getResourceActions()
|
||||
.stream()
|
||||
.filter(action -> action.getAction() == Action.READ)
|
||||
.collect(Collectors.toSet());
|
||||
|
||||
Preconditions.checkState(
|
||||
plannerContext.getResources().isEmpty() == druidRel.getDataSourceNames().isEmpty()
|
||||
readResourceActions.isEmpty() == druidRel.getDataSourceNames().isEmpty()
|
||||
// The resources found in the plannerContext can be less than the datasources in
|
||||
// the query plan, because the query planner can eliminate empty tables by replacing
|
||||
// them with InlineDataSource of empty rows.
|
||||
|| plannerContext.getResources().size() >= druidRel.getDataSourceNames().size(),
|
||||
|| readResourceActions.size() >= druidRel.getDataSourceNames().size(),
|
||||
"Authorization sanity check failed"
|
||||
);
|
||||
if (root.isRefTrivial()) {
|
||||
return druidRel.runQuery();
|
||||
} else {
|
||||
// Add a mapping on top to accommodate root.fields.
|
||||
return Sequences.map(
|
||||
druidRel.runQuery(),
|
||||
input -> {
|
||||
final Object[] retVal = new Object[root.fields.size()];
|
||||
for (int i = 0; i < root.fields.size(); i++) {
|
||||
retVal[i] = input[root.fields.get(i).getKey()];
|
||||
}
|
||||
return retVal;
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
return druidRel.runQuery();
|
||||
};
|
||||
|
||||
return new PlannerResult(resultsSupplier, root.validatedRowType);
|
||||
return new PlannerResult(resultsSupplier, queryMaker.getResultType());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -286,12 +300,12 @@ public class DruidPlanner implements Closeable
|
|||
* Construct a {@link PlannerResult} for a fall-back 'bindable' rel, for things that are not directly translatable
|
||||
* to native Druid queries such as system tables and just a general purpose (but definitely not optimized) fall-back.
|
||||
*
|
||||
* See {@link #planWithDruidConvention(SqlExplain, RelRoot)} which will handle things which are directly translatable
|
||||
* See {@link #planWithDruidConvention} which will handle things which are directly translatable
|
||||
* to native Druid queries.
|
||||
*/
|
||||
private PlannerResult planWithBindableConvention(
|
||||
final SqlExplain explain,
|
||||
final RelRoot root
|
||||
final RelRoot root,
|
||||
@Nullable final SqlExplain explain
|
||||
) throws RelConversionException
|
||||
{
|
||||
BindableRel bindableRel = (BindableRel) planner.transform(
|
||||
|
@ -370,17 +384,19 @@ public class DruidPlanner implements Closeable
|
|||
)
|
||||
{
|
||||
final String explanation = RelOptUtil.dumpPlan("", rel, explain.getFormat(), explain.getDetailLevel());
|
||||
String resources;
|
||||
String resourcesString;
|
||||
try {
|
||||
resources = jsonMapper.writeValueAsString(plannerContext.getResources());
|
||||
final Set<Resource> resources =
|
||||
plannerContext.getResourceActions().stream().map(ResourceAction::getResource).collect(Collectors.toSet());
|
||||
resourcesString = plannerContext.getJsonMapper().writeValueAsString(resources);
|
||||
}
|
||||
catch (JsonProcessingException jpe) {
|
||||
// this should never happen, we create the Resources here, not a user
|
||||
log.error(jpe, "Encountered exception while serializing Resources for explain output");
|
||||
resources = null;
|
||||
resourcesString = null;
|
||||
}
|
||||
final Supplier<Sequence<Object[]>> resultsSupplier = Suppliers.ofInstance(
|
||||
Sequences.simple(ImmutableList.of(new Object[]{explanation, resources})));
|
||||
Sequences.simple(ImmutableList.of(new Object[]{explanation, resourcesString})));
|
||||
return new PlannerResult(resultsSupplier, getExplainStructType(rel.getCluster().getTypeFactory()));
|
||||
}
|
||||
|
||||
|
@ -397,14 +413,16 @@ public class DruidPlanner implements Closeable
|
|||
* @return root node wrapped with a limiting logical sort if a limit is specified in the query context.
|
||||
*/
|
||||
@Nullable
|
||||
private RelNode possiblyWrapRootWithOuterLimitFromContext(RelRoot root)
|
||||
private RelRoot possiblyWrapRootWithOuterLimitFromContext(RelRoot root)
|
||||
{
|
||||
Object outerLimitObj = plannerContext.getQueryContext().get(PlannerContext.CTX_SQL_OUTER_LIMIT);
|
||||
Long outerLimit = DimensionHandlerUtils.convertObjectToLong(outerLimitObj, true);
|
||||
if (outerLimit == null) {
|
||||
return root.rel;
|
||||
return root;
|
||||
}
|
||||
|
||||
final LogicalSort newRootRel;
|
||||
|
||||
if (root.rel instanceof Sort) {
|
||||
Sort sort = (Sort) root.rel;
|
||||
|
||||
|
@ -413,34 +431,25 @@ public class DruidPlanner implements Closeable
|
|||
|
||||
if (newOffsetLimit.equals(originalOffsetLimit)) {
|
||||
// nothing to do, don't bother to make a new sort
|
||||
return root.rel;
|
||||
return root;
|
||||
}
|
||||
|
||||
return LogicalSort.create(
|
||||
newRootRel = LogicalSort.create(
|
||||
sort.getInput(),
|
||||
sort.collation,
|
||||
newOffsetLimit.getOffsetAsRexNode(rexBuilder),
|
||||
newOffsetLimit.getLimitAsRexNode(rexBuilder)
|
||||
);
|
||||
} else {
|
||||
return LogicalSort.create(
|
||||
newRootRel = LogicalSort.create(
|
||||
root.rel,
|
||||
root.collation,
|
||||
null,
|
||||
new OffsetLimit(0, outerLimit).getLimitAsRexNode(rexBuilder)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private static RelDataType getExplainStructType(RelDataTypeFactory typeFactory)
|
||||
{
|
||||
return typeFactory.createStructType(
|
||||
ImmutableList.of(
|
||||
Calcites.createSqlType(typeFactory, SqlTypeName.VARCHAR),
|
||||
Calcites.createSqlType(typeFactory, SqlTypeName.VARCHAR)
|
||||
),
|
||||
ImmutableList.of("PLAN", "RESOURCES")
|
||||
);
|
||||
return new RelRoot(newRootRel, root.validatedRowType, root.kind, root.fields, root.collation);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -506,6 +515,67 @@ public class DruidPlanner implements Closeable
|
|||
return rootRel.accept(parameterizer);
|
||||
}
|
||||
|
||||
private QueryMaker buildQueryMaker(
final RelRoot rootQueryRel,
@Nullable final SqlInsert insert
) throws ValidationException
{
if (insert != null) {
final String targetDataSource = validateAndGetDataSourceForInsert(insert);
return queryMakerFactory.buildForInsert(targetDataSource, rootQueryRel, plannerContext);
} else {
return queryMakerFactory.buildForSelect(rootQueryRel, plannerContext);
}
}

private static RelDataType getExplainStructType(RelDataTypeFactory typeFactory)
{
return typeFactory.createStructType(
ImmutableList.of(
Calcites.createSqlType(typeFactory, SqlTypeName.VARCHAR),
Calcites.createSqlType(typeFactory, SqlTypeName.VARCHAR)
),
ImmutableList.of("PLAN", "RESOURCES")
);
}

/**
* Extract target datasource from a {@link SqlInsert}, and also validate that the INSERT is of a form we support.
* Expects the INSERT target to be either an unqualified name, or a name qualified by the default schema.
*/
private String validateAndGetDataSourceForInsert(final SqlInsert insert) throws ValidationException
{
if (insert.isUpsert()) {
throw new ValidationException("UPSERT is not supported.");
}

if (insert.getTargetColumnList() != null) {
throw new ValidationException("INSERT with target column list is not supported.");
}

final SqlIdentifier tableIdentifier = (SqlIdentifier) insert.getTargetTable();

if (tableIdentifier.names.isEmpty()) {
// I don't think this can happen, but include a branch for it just in case.
throw new ValidationException("INSERT requires target table.");
} else if (tableIdentifier.names.size() == 1) {
// Unqualified name.
return Iterables.getOnlyElement(tableIdentifier.names);
} else {
// Qualified name.
final String defaultSchemaName =
Iterables.getOnlyElement(CalciteSchema.from(frameworkConfig.getDefaultSchema()).path(null));

if (tableIdentifier.names.size() == 2 && defaultSchemaName.equals(tableIdentifier.names.get(0))) {
return tableIdentifier.names.get(1);
} else {
throw new ValidationException(
StringUtils.format("Cannot INSERT into [%s] because it is not a Druid datasource.", tableIdentifier)
);
}
}
}
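The acceptance rules in validateAndGetDataSourceForInsert are easiest to see as examples. Assuming the default schema resolves to "druid" (an assumption made here purely for illustration):

// INSERT INTO dst SELECT ...              -> target datasource "dst"
// INSERT INTO "druid".dst SELECT ...      -> target datasource "dst" (qualified by the default schema)
// INSERT INTO sys.dst SELECT ...          -> ValidationException: not a Druid datasource
// UPSERT INTO dst SELECT ...              -> ValidationException: UPSERT is not supported
// INSERT INTO dst (x, y) SELECT ...       -> ValidationException: target column list is not supported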
|
||||
private static class EnumeratorIterator<T> implements Iterator<T>
|
||||
{
|
||||
private final Iterator<T> it;
|
||||
|
@ -527,4 +597,62 @@ public class DruidPlanner implements Closeable
|
|||
return it.next();
|
||||
}
|
||||
}
|
||||
|
||||
private static class ParsedNodes
{
@Nullable
private SqlExplain explain;

@Nullable
private SqlInsert insert;

private SqlNode query;

private ParsedNodes(@Nullable SqlExplain explain, @Nullable SqlInsert insert, SqlNode query)
{
this.explain = explain;
this.insert = insert;
this.query = query;
}

static ParsedNodes create(final SqlNode node) throws ValidationException
{
SqlExplain explain = null;
SqlInsert insert = null;
SqlNode query = node;

if (query.getKind() == SqlKind.EXPLAIN) {
explain = (SqlExplain) query;
query = explain.getExplicandum();
}

if (query.getKind() == SqlKind.INSERT) {
insert = (SqlInsert) query;
query = insert.getSource();
}

if (!query.isA(SqlKind.QUERY)) {
throw new ValidationException(StringUtils.format("Cannot execute [%s].", query.getKind()));
}

return new ParsedNodes(explain, insert, query);
}

@Nullable
public SqlExplain getExplainNode()
{
return explain;
}

@Nullable
public SqlInsert getInsertNode()
{
return insert;
}

public SqlNode getQueryNode()
{
return query;
}
}
}
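ParsedNodes is the small decomposition step shared by all three public entry points. Roughly, for a few statement shapes (conceptual notes, not runnable code):

// SELECT ... FROM foo                          -> explain = null,        insert = null,        query = the SELECT
// INSERT INTO dst SELECT ... FROM foo          -> explain = null,        insert = the INSERT,  query = its SELECT source
// EXPLAIN PLAN FOR INSERT INTO dst SELECT ...  -> explain = the EXPLAIN, insert = the INSERT,  query = its SELECT source
// anything that is not a query                 -> ValidationException("Cannot execute [...]")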
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.apache.druid.sql.calcite.planner;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.base.Strings;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
|
@ -28,11 +29,13 @@ import org.apache.calcite.avatica.remote.TypedValue;
|
|||
import org.apache.calcite.linq4j.QueryProvider;
|
||||
import org.apache.calcite.schema.SchemaPlus;
|
||||
import org.apache.druid.java.util.common.DateTimes;
|
||||
import org.apache.druid.java.util.common.ISE;
|
||||
import org.apache.druid.java.util.common.Numbers;
|
||||
import org.apache.druid.math.expr.ExprMacroTable;
|
||||
import org.apache.druid.server.security.Access;
|
||||
import org.apache.druid.server.security.AuthenticationResult;
|
||||
import org.apache.druid.server.security.Resource;
|
||||
import org.apache.druid.server.security.ResourceAction;
|
||||
import org.apache.druid.sql.calcite.run.QueryMaker;
|
||||
import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
@ -66,8 +69,10 @@ public class PlannerContext
|
|||
// DataContext keys
|
||||
public static final String DATA_CTX_AUTHENTICATION_RESULT = "authenticationResult";
|
||||
|
||||
private final String sql;
|
||||
private final DruidOperatorTable operatorTable;
|
||||
private final ExprMacroTable macroTable;
|
||||
private final ObjectMapper jsonMapper;
|
||||
private final PlannerConfig plannerConfig;
|
||||
private final DateTime localNow;
|
||||
private final DruidSchemaCatalog rootSchema;
|
||||
|
@ -79,14 +84,17 @@ public class PlannerContext
|
|||
private List<TypedValue> parameters = Collections.emptyList();
|
||||
// result of authentication, providing identity to authorize set of resources produced by validation
|
||||
private AuthenticationResult authenticationResult;
|
||||
// set of datasources and views which must be authorized
|
||||
private Set<Resource> resources = Collections.emptySet();
|
||||
// set of datasources and views which must be authorized, initialized to null so we can detect if it has been set.
|
||||
private Set<ResourceAction> resourceActions = null;
|
||||
// result of authorizing set of resources against authentication identity
|
||||
private Access authorizationResult;
|
||||
private QueryMaker queryMaker;
|
||||
|
||||
private PlannerContext(
|
||||
final String sql,
|
||||
final DruidOperatorTable operatorTable,
|
||||
final ExprMacroTable macroTable,
|
||||
final ObjectMapper jsonMapper,
|
||||
final PlannerConfig plannerConfig,
|
||||
final DateTime localNow,
|
||||
final boolean stringifyArrays,
|
||||
|
@ -94,8 +102,10 @@ public class PlannerContext
|
|||
final Map<String, Object> queryContext
|
||||
)
|
||||
{
|
||||
this.sql = sql;
|
||||
this.operatorTable = operatorTable;
|
||||
this.macroTable = macroTable;
|
||||
this.jsonMapper = jsonMapper;
|
||||
this.plannerConfig = Preconditions.checkNotNull(plannerConfig, "plannerConfig");
|
||||
this.rootSchema = rootSchema;
|
||||
this.queryContext = queryContext != null ? new HashMap<>(queryContext) : new HashMap<>();
|
||||
|
@ -111,8 +121,10 @@ public class PlannerContext
|
|||
}
|
||||
|
||||
public static PlannerContext create(
|
||||
final String sql,
|
||||
final DruidOperatorTable operatorTable,
|
||||
final ExprMacroTable macroTable,
|
||||
final ObjectMapper jsonMapper,
|
||||
final PlannerConfig plannerConfig,
|
||||
final DruidSchemaCatalog rootSchema,
|
||||
final Map<String, Object> queryContext
|
||||
|
@ -151,8 +163,10 @@ public class PlannerContext
|
|||
}
|
||||
|
||||
return new PlannerContext(
|
||||
sql,
|
||||
operatorTable,
|
||||
macroTable,
|
||||
jsonMapper,
|
||||
plannerConfig.withOverrides(queryContext),
|
||||
utcNow.withZone(timeZone),
|
||||
stringifyArrays,
|
||||
|
@ -171,6 +185,11 @@ public class PlannerContext
|
|||
return macroTable;
|
||||
}
|
||||
|
||||
public ObjectMapper getJsonMapper()
|
||||
{
|
||||
return jsonMapper;
|
||||
}
|
||||
|
||||
public PlannerConfig getPlannerConfig()
|
||||
{
|
||||
return plannerConfig;
|
||||
|
@ -209,7 +228,12 @@ public class PlannerContext
|
|||
|
||||
public AuthenticationResult getAuthenticationResult()
|
||||
{
|
||||
return authenticationResult;
|
||||
return Preconditions.checkNotNull(authenticationResult, "Authentication result not available");
|
||||
}
|
||||
|
||||
public String getSql()
|
||||
{
|
||||
return sql;
|
||||
}
|
||||
|
||||
public String getSqlQueryId()
|
||||
|
@ -288,7 +312,7 @@ public class PlannerContext
|
|||
|
||||
public Access getAuthorizationResult()
|
||||
{
|
||||
return authorizationResult;
|
||||
return Preconditions.checkNotNull(authorizationResult, "Authorization result not available");
|
||||
}
|
||||
|
||||
public void setParameters(List<TypedValue> parameters)
|
||||
|
@ -298,21 +322,51 @@ public class PlannerContext
|
|||
|
||||
public void setAuthenticationResult(AuthenticationResult authenticationResult)
|
||||
{
|
||||
if (this.authenticationResult != null) {
|
||||
// It's a bug if this happens, because setAuthenticationResult should be called exactly once.
|
||||
throw new ISE("Authentication result has already been set");
|
||||
}
|
||||
|
||||
this.authenticationResult = Preconditions.checkNotNull(authenticationResult, "authenticationResult");
|
||||
}
|
||||
|
||||
public void setAuthorizationResult(Access access)
|
||||
{
|
||||
if (this.authorizationResult != null) {
|
||||
// It's a bug if this happens, because setAuthorizationResult should be called exactly once.
|
||||
throw new ISE("Authorization result has already been set");
|
||||
}
|
||||
|
||||
this.authorizationResult = Preconditions.checkNotNull(access, "authorizationResult");
|
||||
}
|
||||
|
||||
public Set<Resource> getResources()
|
||||
public Set<ResourceAction> getResourceActions()
|
||||
{
|
||||
return resources;
|
||||
return Preconditions.checkNotNull(resourceActions, "Resources not available");
|
||||
}
|
||||
|
||||
public void setResources(Set<Resource> resources)
|
||||
public void setResourceActions(Set<ResourceAction> resourceActions)
|
||||
{
|
||||
this.resources = Preconditions.checkNotNull(resources, "resources");
|
||||
if (this.resourceActions != null) {
|
||||
// It's a bug if this happens, because setResourceActions should be called exactly once.
|
||||
throw new ISE("Resources have already been set");
|
||||
}
|
||||
|
||||
this.resourceActions = Preconditions.checkNotNull(resourceActions, "resourceActions");
|
||||
}
|
||||
|
||||
public void setQueryMaker(QueryMaker queryMaker)
|
||||
{
|
||||
if (this.queryMaker != null) {
|
||||
// It's a bug if this happens, because setQueryMaker should be called exactly once.
|
||||
throw new ISE("QueryMaker has already been set");
|
||||
}
|
||||
|
||||
this.queryMaker = Preconditions.checkNotNull(queryMaker, "queryMaker");
|
||||
}
|
||||
|
||||
public QueryMaker getQueryMaker()
|
||||
{
|
||||
return Preconditions.checkNotNull(queryMaker, "QueryMaker not available");
|
||||
}
|
||||
}
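The new PlannerContext setters above (authentication result, authorization result, resource actions, query maker) all enforce a set-exactly-once discipline. A generic sketch of that pattern, not code from the patch:

import java.util.Objects;

final class SetOnce<T>
{
  private T value;

  void set(T newValue)
  {
    if (value != null) {
      // It's a bug if this happens; each planner-lifecycle value should be set exactly once.
      throw new IllegalStateException("Value has already been set");
    }
    value = Objects.requireNonNull(newValue, "newValue");
  }

  T get()
  {
    return Objects.requireNonNull(value, "Value not available");
  }
}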
|
||||
|
|
|
@ -38,11 +38,10 @@ import org.apache.calcite.tools.Frameworks;
|
|||
import org.apache.calcite.tools.ValidationException;
|
||||
import org.apache.druid.guice.annotations.Json;
|
||||
import org.apache.druid.math.expr.ExprMacroTable;
|
||||
import org.apache.druid.server.QueryLifecycleFactory;
|
||||
import org.apache.druid.server.security.Access;
|
||||
import org.apache.druid.server.security.AuthorizerMapper;
|
||||
import org.apache.druid.server.security.NoopEscalator;
|
||||
import org.apache.druid.sql.calcite.rel.QueryMaker;
|
||||
import org.apache.druid.sql.calcite.run.QueryMakerFactory;
|
||||
import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog;
|
||||
import org.apache.druid.sql.calcite.schema.DruidSchemaName;
|
||||
|
||||
|
@ -61,7 +60,7 @@ public class PlannerFactory
|
|||
.build();
|
||||
|
||||
private final DruidSchemaCatalog rootSchema;
|
||||
private final QueryLifecycleFactory queryLifecycleFactory;
|
||||
private final QueryMakerFactory queryMakerFactory;
|
||||
private final DruidOperatorTable operatorTable;
|
||||
private final ExprMacroTable macroTable;
|
||||
private final PlannerConfig plannerConfig;
|
||||
|
@ -72,7 +71,7 @@ public class PlannerFactory
|
|||
@Inject
|
||||
public PlannerFactory(
|
||||
final DruidSchemaCatalog rootSchema,
|
||||
final QueryLifecycleFactory queryLifecycleFactory,
|
||||
final QueryMakerFactory queryMakerFactory,
|
||||
final DruidOperatorTable operatorTable,
|
||||
final ExprMacroTable macroTable,
|
||||
final PlannerConfig plannerConfig,
|
||||
|
@ -82,7 +81,7 @@ public class PlannerFactory
|
|||
)
|
||||
{
|
||||
this.rootSchema = rootSchema;
|
||||
this.queryLifecycleFactory = queryLifecycleFactory;
|
||||
this.queryMakerFactory = queryMakerFactory;
|
||||
this.operatorTable = operatorTable;
|
||||
this.macroTable = macroTable;
|
||||
this.plannerConfig = plannerConfig;
|
||||
|
@ -94,41 +93,29 @@ public class PlannerFactory
|
|||
/**
|
||||
* Create a Druid query planner from an initial query context
|
||||
*/
|
||||
public DruidPlanner createPlanner(final Map<String, Object> queryContext)
|
||||
public DruidPlanner createPlanner(final String sql, final Map<String, Object> queryContext)
|
||||
{
|
||||
final PlannerContext plannerContext = PlannerContext.create(
|
||||
final PlannerContext context = PlannerContext.create(
|
||||
sql,
|
||||
operatorTable,
|
||||
macroTable,
|
||||
jsonMapper,
|
||||
plannerConfig,
|
||||
rootSchema,
|
||||
queryContext
|
||||
);
|
||||
final QueryMaker queryMaker = new QueryMaker(queryLifecycleFactory, plannerContext, jsonMapper);
|
||||
final FrameworkConfig frameworkConfig = buildFrameworkConfig(plannerContext, queryMaker);
|
||||
|
||||
return new DruidPlanner(
|
||||
frameworkConfig,
|
||||
plannerContext,
|
||||
jsonMapper
|
||||
);
|
||||
return createPlannerWithContext(context);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new Druid query planner, re-using a previous {@link PlannerContext}
|
||||
*/
|
||||
public DruidPlanner createPlannerWithContext(PlannerContext plannerContext)
|
||||
public DruidPlanner createPlannerWithContext(final PlannerContext plannerContext)
|
||||
{
|
||||
final QueryMaker queryMaker = new QueryMaker(queryLifecycleFactory, plannerContext, jsonMapper);
|
||||
final FrameworkConfig frameworkConfig = buildFrameworkConfig(plannerContext, queryMaker);
|
||||
|
||||
return new DruidPlanner(
|
||||
frameworkConfig,
|
||||
plannerContext,
|
||||
jsonMapper
|
||||
);
|
||||
return new DruidPlanner(buildFrameworkConfig(plannerContext), plannerContext, queryMakerFactory);
|
||||
}
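PlannerFactory now hands the planner a QueryMakerFactory instead of constructing a QueryMaker itself. The shape below is inferred only from how buildQueryMaker(...) calls the factory earlier in this commit; the real interface may differ in details (the throws clauses in particular are a guess), so the sketch uses a distinct name.

public interface QueryMakerFactorySketch
{
  QueryMaker buildForSelect(RelRoot relRoot, PlannerContext plannerContext) throws ValidationException;

  QueryMaker buildForInsert(String targetDataSource, RelRoot relRoot, PlannerContext plannerContext)
      throws ValidationException;
}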
|
||||
|
||||
|
||||
/**
|
||||
* Not just visible for, but only for testing. Create a planner pre-loaded with an escalated authentication result
|
||||
* and ready to go authorization result.
|
||||
|
@ -136,10 +123,11 @@ public class PlannerFactory
|
|||
@VisibleForTesting
|
||||
public DruidPlanner createPlannerForTesting(final Map<String, Object> queryContext, String query)
|
||||
{
|
||||
DruidPlanner thePlanner = createPlanner(queryContext);
|
||||
thePlanner.getPlannerContext().setAuthenticationResult(NoopEscalator.getInstance().createEscalatedAuthenticationResult());
|
||||
final DruidPlanner thePlanner = createPlanner(query, queryContext);
|
||||
thePlanner.getPlannerContext()
|
||||
.setAuthenticationResult(NoopEscalator.getInstance().createEscalatedAuthenticationResult());
|
||||
try {
|
||||
thePlanner.validate(query);
|
||||
thePlanner.validate();
|
||||
}
|
||||
catch (SqlParseException | ValidationException e) {
|
||||
throw new RuntimeException(e);
|
||||
|
@ -153,7 +141,7 @@ public class PlannerFactory
|
|||
return authorizerMapper;
|
||||
}
|
||||
|
||||
private FrameworkConfig buildFrameworkConfig(PlannerContext plannerContext, QueryMaker queryMaker)
|
||||
private FrameworkConfig buildFrameworkConfig(PlannerContext plannerContext)
|
||||
{
|
||||
final SqlToRelConverter.Config sqlToRelConverterConfig = SqlToRelConverter
|
||||
.configBuilder()
|
||||
|
@ -168,7 +156,7 @@ public class PlannerFactory
|
|||
.traitDefs(ConventionTraitDef.INSTANCE, RelCollationTraitDef.INSTANCE)
|
||||
.convertletTable(new DruidConvertletTable(plannerContext))
|
||||
.operatorTable(operatorTable)
|
||||
.programs(Rules.programs(plannerContext, queryMaker))
|
||||
.programs(Rules.programs(plannerContext))
|
||||
.executor(new DruidRexExecutor(plannerContext))
|
||||
.typeSystem(DruidTypeSystem.INSTANCE)
|
||||
.defaultSchema(rootSchema.getSubSchema(druidSchemaName))
|
||||
|
|
|
@ -73,7 +73,7 @@ import org.apache.calcite.sql2rel.RelFieldTrimmer;
|
|||
import org.apache.calcite.tools.Program;
|
||||
import org.apache.calcite.tools.Programs;
|
||||
import org.apache.calcite.tools.RelBuilder;
|
||||
import org.apache.druid.sql.calcite.rel.QueryMaker;
|
||||
import org.apache.druid.sql.calcite.external.ExternalTableScanRule;
|
||||
import org.apache.druid.sql.calcite.rule.DruidLogicalValuesRule;
|
||||
import org.apache.druid.sql.calcite.rule.DruidRelToDruidRule;
|
||||
import org.apache.druid.sql.calcite.rule.DruidRules;
|
||||
|
@ -203,7 +203,7 @@ public class Rules
|
|||
// No instantiation.
|
||||
}
|
||||
|
||||
public static List<Program> programs(final PlannerContext plannerContext, final QueryMaker queryMaker)
|
||||
public static List<Program> programs(final PlannerContext plannerContext)
|
||||
{
|
||||
|
||||
|
||||
|
@ -216,7 +216,7 @@ public class Rules
|
|||
);
|
||||
|
||||
return ImmutableList.of(
|
||||
Programs.sequence(preProgram, Programs.ofRules(druidConventionRuleSet(plannerContext, queryMaker))),
|
||||
Programs.sequence(preProgram, Programs.ofRules(druidConventionRuleSet(plannerContext))),
|
||||
Programs.sequence(preProgram, Programs.ofRules(bindableConventionRuleSet(plannerContext)))
|
||||
);
|
||||
}
|
||||
|
@ -234,16 +234,15 @@ public class Rules
|
|||
return Programs.of(builder.build(), noDag, metadataProvider);
|
||||
}
|
||||
|
||||
private static List<RelOptRule> druidConventionRuleSet(
|
||||
final PlannerContext plannerContext,
|
||||
final QueryMaker queryMaker
|
||||
)
|
||||
private static List<RelOptRule> druidConventionRuleSet(final PlannerContext plannerContext)
|
||||
{
|
||||
final ImmutableList.Builder<RelOptRule> retVal = ImmutableList.<RelOptRule>builder()
|
||||
final ImmutableList.Builder<RelOptRule> retVal = ImmutableList
|
||||
.<RelOptRule>builder()
|
||||
.addAll(baseRuleSet(plannerContext))
|
||||
.add(DruidRelToDruidRule.instance())
|
||||
.add(new DruidTableScanRule(queryMaker))
|
||||
.add(new DruidLogicalValuesRule(queryMaker))
|
||||
.add(new DruidTableScanRule(plannerContext))
|
||||
.add(new DruidLogicalValuesRule(plannerContext))
|
||||
.add(new ExternalTableScanRule(plannerContext))
|
||||
.addAll(DruidRules.rules(plannerContext));
|
||||
|
||||
return retVal.build();
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.apache.druid.sql.calcite.planner;
|
||||
|
||||
import org.apache.calcite.sql.SqlCall;
|
||||
import org.apache.calcite.sql.SqlIdentifier;
|
||||
import org.apache.calcite.sql.SqlNode;
|
||||
import org.apache.calcite.sql.util.SqlShuttle;
|
||||
|
@ -26,8 +27,12 @@ import org.apache.calcite.sql.validate.IdentifierNamespace;
|
|||
import org.apache.calcite.sql.validate.SqlValidator;
|
||||
import org.apache.calcite.sql.validate.SqlValidatorNamespace;
|
||||
import org.apache.calcite.sql.validate.SqlValidatorTable;
|
||||
import org.apache.druid.java.util.common.ISE;
|
||||
import org.apache.druid.server.security.Action;
|
||||
import org.apache.druid.server.security.Resource;
|
||||
import org.apache.druid.server.security.ResourceAction;
|
||||
import org.apache.druid.server.security.ResourceType;
|
||||
import org.apache.druid.sql.calcite.expression.AuthorizableOperator;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
|
@ -43,17 +48,27 @@ import java.util.Set;
|
|||
*/
|
||||
public class SqlResourceCollectorShuttle extends SqlShuttle
|
||||
{
|
||||
private final Set<Resource> resources;
|
||||
private final Set<ResourceAction> resourceActions;
|
||||
private final PlannerContext plannerContext;
|
||||
private final SqlValidator validator;
|
||||
|
||||
public SqlResourceCollectorShuttle(SqlValidator validator, PlannerContext plannerContext)
|
||||
{
|
||||
this.validator = validator;
|
||||
this.resources = new HashSet<>();
|
||||
this.resourceActions = new HashSet<>();
|
||||
this.plannerContext = plannerContext;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SqlNode visit(SqlCall call)
|
||||
{
|
||||
if (call.getOperator() instanceof AuthorizableOperator) {
|
||||
resourceActions.addAll(((AuthorizableOperator) call.getOperator()).computeResources(call));
|
||||
}
|
||||
|
||||
return super.visit(call);
|
||||
}
|
||||
|
||||
@Override
|
||||
public SqlNode visit(SqlIdentifier id)
|
||||
{
|
||||
|
@ -71,16 +86,19 @@ public class SqlResourceCollectorShuttle extends SqlShuttle
|
|||
final String resourceName = qualifiedNameParts.get(1);
|
||||
final String resourceType = plannerContext.getSchemaResourceType(schema, resourceName);
|
||||
if (resourceType != null) {
|
||||
resources.add(new Resource(resourceName, resourceType));
|
||||
resourceActions.add(new ResourceAction(new Resource(resourceName, resourceType), Action.READ));
|
||||
}
|
||||
} else if (qualifiedNameParts.size() > 2) {
|
||||
// Don't expect to see more than 2 names (catalog?).
|
||||
throw new ISE("Cannot analyze table idetifier %s", qualifiedNameParts);
|
||||
}
|
||||
}
|
||||
}
|
||||
return super.visit(id);
|
||||
}
|
||||
|
||||
public Set<Resource> getResources()
|
||||
public Set<ResourceAction> getResourceActions()
|
||||
{
|
||||
return resources;
|
||||
return resourceActions;
|
||||
}
|
||||
}
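The new visit(SqlCall) branch above is the hook for operators that demand extra permissions (the EXTERN table macro uses it). Inferred from that cast alone, the operator-side contract looks roughly like the sketch below; the real AuthorizableOperator interface may declare more, so a distinct name is used.

public interface AuthorizableOperatorSketch
{
  // Called during validation for every SqlCall whose operator opts in; the returned actions are
  // added to the query's required permissions.
  Set<ResourceAction> computeResources(SqlCall call);
}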
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.apache.druid.sql.calcite.planner;
|
|||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import org.apache.druid.server.security.Resource;
|
||||
import org.apache.druid.server.security.ResourceAction;
|
||||
|
||||
import java.util.Set;
|
||||
|
||||
|
@ -31,17 +32,17 @@ import java.util.Set;
|
|||
*/
|
||||
public class ValidationResult
{
private final Set<Resource> resources;
private final Set<ResourceAction> resourceActions;

public ValidationResult(
final Set<Resource> resources
final Set<ResourceAction> resourceActions
)
{
this.resources = ImmutableSet.copyOf(resources);
this.resourceActions = ImmutableSet.copyOf(resourceActions);
}

public Set<Resource> getResources()
public Set<ResourceAction> getResourceActions()
{
return resources;
return resourceActions;
}
}
|
||||
|
|
|
@ -42,7 +42,6 @@ import org.apache.druid.java.util.common.IAE;
|
|||
import org.apache.druid.java.util.common.ISE;
|
||||
import org.apache.druid.java.util.common.Pair;
|
||||
import org.apache.druid.java.util.common.StringUtils;
|
||||
import org.apache.druid.java.util.common.guava.Sequence;
|
||||
import org.apache.druid.query.DataSource;
|
||||
import org.apache.druid.query.JoinDataSource;
|
||||
import org.apache.druid.query.QueryDataSource;
|
||||
|
@ -83,16 +82,16 @@ public class DruidJoinQueryRel extends DruidRel<DruidJoinQueryRel>
|
|||
Join joinRel,
|
||||
Filter leftFilter,
|
||||
PartialDruidQuery partialQuery,
|
||||
QueryMaker queryMaker
|
||||
PlannerContext plannerContext
|
||||
)
|
||||
{
|
||||
super(cluster, traitSet, queryMaker);
|
||||
super(cluster, traitSet, plannerContext);
|
||||
this.joinRel = joinRel;
|
||||
this.left = joinRel.getLeft();
|
||||
this.right = joinRel.getRight();
|
||||
this.leftFilter = leftFilter;
|
||||
this.partialQuery = partialQuery;
|
||||
this.plannerConfig = queryMaker.getPlannerContext().getPlannerConfig();
|
||||
this.plannerConfig = plannerContext.getPlannerConfig();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -101,7 +100,7 @@ public class DruidJoinQueryRel extends DruidRel<DruidJoinQueryRel>
|
|||
public static DruidJoinQueryRel create(
|
||||
final Join joinRel,
|
||||
final Filter leftFilter,
|
||||
final QueryMaker queryMaker
|
||||
final PlannerContext plannerContext
|
||||
)
|
||||
{
|
||||
return new DruidJoinQueryRel(
|
||||
|
@ -110,7 +109,7 @@ public class DruidJoinQueryRel extends DruidRel<DruidJoinQueryRel>
|
|||
joinRel,
|
||||
leftFilter,
|
||||
PartialDruidQuery.create(joinRel),
|
||||
queryMaker
|
||||
plannerContext
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -120,17 +119,6 @@ public class DruidJoinQueryRel extends DruidRel<DruidJoinQueryRel>
|
|||
return partialQuery;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Sequence<Object[]> runQuery()
|
||||
{
|
||||
// runQuery doesn't need to finalize aggregations, because the fact that runQuery is happening suggests this
|
||||
// is the outermost query and it will actually get run as a native query. Druid's native query layer will
|
||||
// finalize aggregations for the outermost query even if we don't explicitly ask it to.
|
||||
|
||||
final DruidQuery query = toDruidQuery(false);
|
||||
return getQueryMaker().runQuery(query);
|
||||
}
|
||||
|
||||
@Override
|
||||
public DruidJoinQueryRel withPartialQuery(final PartialDruidQuery newQueryBuilder)
|
||||
{
|
||||
|
@ -140,7 +128,7 @@ public class DruidJoinQueryRel extends DruidRel<DruidJoinQueryRel>
|
|||
joinRel,
|
||||
leftFilter,
|
||||
newQueryBuilder,
|
||||
getQueryMaker()
|
||||
getPlannerContext()
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -234,7 +222,7 @@ public class DruidJoinQueryRel extends DruidRel<DruidJoinQueryRel>
|
|||
),
|
||||
leftFilter,
|
||||
partialQuery,
|
||||
getQueryMaker()
|
||||
getPlannerContext()
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -273,7 +261,7 @@ public class DruidJoinQueryRel extends DruidRel<DruidJoinQueryRel>
|
|||
joinRel.copy(joinRel.getTraitSet(), inputs),
|
||||
leftFilter,
|
||||
getPartialDruidQuery(),
|
||||
getQueryMaker()
|
||||
getPlannerContext()
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -293,7 +281,7 @@ public class DruidJoinQueryRel extends DruidRel<DruidJoinQueryRel>
|
|||
final DruidQuery druidQuery = toDruidQueryForExplaining();
|
||||
|
||||
try {
|
||||
queryString = getQueryMaker().getJsonMapper().writeValueAsString(druidQuery.getQuery());
|
||||
queryString = getPlannerContext().getJsonMapper().writeValueAsString(druidQuery.getQuery());
|
||||
}
|
||||
catch (JsonProcessingException e) {
|
||||
throw new RuntimeException(e);
|
||||
|
|
|
@ -32,11 +32,10 @@ import org.apache.calcite.rel.RelWriter;
|
|||
import org.apache.calcite.rel.metadata.RelMetadataQuery;
|
||||
import org.apache.calcite.rel.type.RelDataType;
|
||||
import org.apache.druid.java.util.common.StringUtils;
|
||||
import org.apache.druid.java.util.common.guava.Sequence;
|
||||
import org.apache.druid.java.util.common.guava.Sequences;
|
||||
import org.apache.druid.query.QueryDataSource;
|
||||
import org.apache.druid.query.TableDataSource;
|
||||
import org.apache.druid.segment.column.RowSignature;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerContext;
|
||||
import org.apache.druid.sql.calcite.table.RowSignatures;
|
||||
|
||||
import java.util.List;
|
||||
|
@ -57,10 +56,10 @@ public class DruidOuterQueryRel extends DruidRel<DruidOuterQueryRel>
|
|||
RelTraitSet traitSet,
|
||||
RelNode sourceRel,
|
||||
PartialDruidQuery partialQuery,
|
||||
QueryMaker queryMaker
|
||||
PlannerContext plannerContext
|
||||
)
|
||||
{
|
||||
super(cluster, traitSet, queryMaker);
|
||||
super(cluster, traitSet, plannerContext);
|
||||
this.sourceRel = sourceRel;
|
||||
this.partialQuery = partialQuery;
|
||||
}
|
||||
|
@ -75,7 +74,7 @@ public class DruidOuterQueryRel extends DruidRel<DruidOuterQueryRel>
|
|||
sourceRel.getTraitSet().plusAll(partialQuery.getRelTraits()),
|
||||
sourceRel,
|
||||
partialQuery,
|
||||
sourceRel.getQueryMaker()
|
||||
sourceRel.getPlannerContext()
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -85,21 +84,6 @@ public class DruidOuterQueryRel extends DruidRel<DruidOuterQueryRel>
|
|||
return partialQuery;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Sequence<Object[]> runQuery()
|
||||
{
|
||||
// runQuery doesn't need to finalize aggregations, because the fact that runQuery is happening suggests this
|
||||
// is the outermost query and it will actually get run as a native query. Druid's native query layer will
|
||||
// finalize aggregations for the outermost query even if we don't explicitly ask it to.
|
||||
|
||||
final DruidQuery query = toDruidQuery(false);
|
||||
if (query != null) {
|
||||
return getQueryMaker().runQuery(query);
|
||||
} else {
|
||||
return Sequences.empty();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public DruidOuterQueryRel withPartialQuery(final PartialDruidQuery newQueryBuilder)
|
||||
{
|
||||
|
@ -108,7 +92,7 @@ public class DruidOuterQueryRel extends DruidRel<DruidOuterQueryRel>
|
|||
getTraitSet().plusAll(newQueryBuilder.getRelTraits()),
|
||||
sourceRel,
|
||||
newQueryBuilder,
|
||||
getQueryMaker()
|
||||
getPlannerContext()
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -150,7 +134,7 @@ public class DruidOuterQueryRel extends DruidRel<DruidOuterQueryRel>
|
|||
getTraitSet().plus(DruidConvention.instance()),
|
||||
RelOptRule.convert(sourceRel, DruidConvention.instance()),
|
||||
partialQuery,
|
||||
getQueryMaker()
|
||||
getPlannerContext()
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -177,7 +161,7 @@ public class DruidOuterQueryRel extends DruidRel<DruidOuterQueryRel>
|
|||
traitSet,
|
||||
Iterables.getOnlyElement(inputs),
|
||||
getPartialDruidQuery(),
|
||||
getQueryMaker()
|
||||
getPlannerContext()
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -194,7 +178,7 @@ public class DruidOuterQueryRel extends DruidRel<DruidOuterQueryRel>
|
|||
final DruidQuery druidQuery = toDruidQueryForExplaining();
|
||||
|
||||
try {
|
||||
queryString = getQueryMaker().getJsonMapper().writeValueAsString(druidQuery.getQuery());
|
||||
queryString = getPlannerContext().getJsonMapper().writeValueAsString(druidQuery.getQuery());
|
||||
}
|
||||
catch (JsonProcessingException e) {
|
||||
throw new RuntimeException(e);
|
||||
|
|
|
@ -25,7 +25,6 @@ import com.google.common.collect.ImmutableList;
|
|||
import com.google.common.collect.ImmutableSortedMap;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.google.common.collect.Iterators;
|
||||
import com.google.common.collect.Ordering;
|
||||
import com.google.common.primitives.Ints;
|
||||
import it.unimi.dsi.fastutil.ints.IntArrayList;
|
||||
import it.unimi.dsi.fastutil.ints.IntList;
|
||||
|
@ -82,11 +81,14 @@ import org.apache.druid.sql.calcite.planner.Calcites;
|
|||
import org.apache.druid.sql.calcite.planner.OffsetLimit;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerContext;
|
||||
import org.apache.druid.sql.calcite.rule.GroupByRules;
|
||||
import org.apache.druid.sql.calcite.run.QueryFeature;
|
||||
import org.apache.druid.sql.calcite.run.QueryFeatureInspector;
|
||||
import org.apache.druid.sql.calcite.table.RowSignatures;
|
||||
|
||||
import javax.annotation.Nonnull;
|
||||
import javax.annotation.Nullable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
|
@ -94,6 +96,9 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.SortedSet;
|
||||
import java.util.TreeSet;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* A fully formed Druid query, built from a {@link PartialDruidQuery}. The work to develop this query is done
|
||||
|
@ -142,7 +147,7 @@ public class DruidQuery
|
|||
this.outputRowSignature = computeOutputRowSignature(sourceRowSignature, selectProjection, grouping, sorting);
|
||||
this.outputRowType = Preconditions.checkNotNull(outputRowType, "outputRowType");
|
||||
this.virtualColumnRegistry = Preconditions.checkNotNull(virtualColumnRegistry, "virtualColumnRegistry");
|
||||
this.query = computeQuery();
|
||||
this.query = computeQuery(plannerContext.getQueryMaker());
|
||||
}
|
||||
|
||||
public static DruidQuery fromPartialQuery(
|
||||
|
@ -732,35 +737,35 @@ public class DruidQuery
|
|||
*
|
||||
* @return Druid query
|
||||
*/
|
||||
private Query computeQuery()
|
||||
private Query computeQuery(final QueryFeatureInspector queryFeatureInspector)
|
||||
{
|
||||
if (dataSource instanceof QueryDataSource) {
|
||||
// If there is a subquery, then we prefer the outer query to be a groupBy if possible, since this potentially
|
||||
// enables more efficient execution. (The groupBy query toolchest can handle some subqueries by itself, without
|
||||
// requiring the Broker to inline results.)
|
||||
final GroupByQuery outerQuery = toGroupByQuery();
|
||||
final GroupByQuery outerQuery = toGroupByQuery(queryFeatureInspector);
|
||||
|
||||
if (outerQuery != null) {
|
||||
return outerQuery;
|
||||
}
|
||||
}
|
||||
|
||||
final TimeseriesQuery tsQuery = toTimeseriesQuery();
|
||||
final TimeseriesQuery tsQuery = toTimeseriesQuery(queryFeatureInspector);
|
||||
if (tsQuery != null) {
|
||||
return tsQuery;
|
||||
}
|
||||
|
||||
final TopNQuery topNQuery = toTopNQuery();
|
||||
final TopNQuery topNQuery = toTopNQuery(queryFeatureInspector);
|
||||
if (topNQuery != null) {
|
||||
return topNQuery;
|
||||
}
|
||||
|
||||
final GroupByQuery groupByQuery = toGroupByQuery();
|
||||
final GroupByQuery groupByQuery = toGroupByQuery(queryFeatureInspector);
|
||||
if (groupByQuery != null) {
|
||||
return groupByQuery;
|
||||
}
|
||||
|
||||
final ScanQuery scanQuery = toScanQuery();
|
||||
final ScanQuery scanQuery = toScanQuery(queryFeatureInspector);
|
||||
if (scanQuery != null) {
|
||||
return scanQuery;
|
||||
}
|
||||
|
@ -774,9 +779,10 @@ public class DruidQuery
|
|||
* @return query
|
||||
*/
|
||||
@Nullable
|
||||
public TimeseriesQuery toTimeseriesQuery()
|
||||
private TimeseriesQuery toTimeseriesQuery(final QueryFeatureInspector queryFeatureInspector)
|
||||
{
|
||||
if (grouping == null
|
||||
if (!queryFeatureInspector.feature(QueryFeature.CAN_RUN_TIMESERIES)
|
||||
|| grouping == null
|
||||
|| grouping.getSubtotals().hasEffect(grouping.getDimensionSpecs())
|
||||
|| grouping.getHavingFilter() != null) {
|
||||
return null;
|
||||
|
@ -821,7 +827,7 @@ public class DruidQuery
|
|||
timeseriesLimit = Ints.checkedCast(limit);
|
||||
}
|
||||
|
||||
switch (sorting.getSortKind(dimensionExpression.getOutputName())) {
|
||||
switch (sorting.getTimeSortKind(dimensionExpression.getOutputName())) {
|
||||
case UNORDERED:
|
||||
case TIME_ASCENDING:
|
||||
descending = false;
|
||||
|
@ -883,8 +889,13 @@ public class DruidQuery
|
|||
* @return query or null
|
||||
*/
|
||||
@Nullable
|
||||
public TopNQuery toTopNQuery()
|
||||
private TopNQuery toTopNQuery(final QueryFeatureInspector queryFeatureInspector)
|
||||
{
|
||||
// Must be allowed by the QueryMaker.
|
||||
if (!queryFeatureInspector.feature(QueryFeature.CAN_RUN_TOPN)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Must have GROUP BY one column, no GROUPING SETS, ORDER BY ≤ 1 column, LIMIT > 0 and ≤ maxTopNLimit,
|
||||
// no OFFSET, no HAVING.
|
||||
final boolean topNOk = grouping != null
|
||||
|
@ -969,7 +980,7 @@ public class DruidQuery
|
|||
* @return query or null
|
||||
*/
|
||||
@Nullable
|
||||
public GroupByQuery toGroupByQuery()
|
||||
private GroupByQuery toGroupByQuery(final QueryFeatureInspector queryFeatureInspector)
|
||||
{
|
||||
if (grouping == null) {
|
||||
return null;
|
||||
|
@ -1082,7 +1093,7 @@ public class DruidQuery
|
|||
* @return query or null
|
||||
*/
|
||||
@Nullable
|
||||
public ScanQuery toScanQuery()
|
||||
private ScanQuery toScanQuery(final QueryFeatureInspector queryFeatureInspector)
|
||||
{
|
||||
if (grouping != null) {
|
||||
// Scan cannot GROUP BY.
|
||||
|
@ -1102,7 +1113,7 @@ public class DruidQuery
|
|||
final DataSource newDataSource = dataSourceFiltrationPair.lhs;
|
||||
final Filtration filtration = dataSourceFiltrationPair.rhs;
|
||||
|
||||
final ScanQuery.Order order;
|
||||
final List<ScanQuery.OrderBy> orderByColumns;
|
||||
long scanOffset = 0L;
|
||||
long scanLimit = 0L;
|
||||
|
||||
|
@ -1120,31 +1131,31 @@ public class DruidQuery
|
|||
scanLimit = limit;
|
||||
}
|
||||
|
||||
final Sorting.SortKind sortKind = sorting.getSortKind(ColumnHolder.TIME_COLUMN_NAME);
|
||||
|
||||
if (sortKind == Sorting.SortKind.UNORDERED) {
|
||||
order = ScanQuery.Order.NONE;
|
||||
} else if (sortKind == Sorting.SortKind.TIME_ASCENDING) {
|
||||
order = ScanQuery.Order.ASCENDING;
|
||||
} else if (sortKind == Sorting.SortKind.TIME_DESCENDING) {
|
||||
order = ScanQuery.Order.DESCENDING;
|
||||
} else {
|
||||
assert sortKind == Sorting.SortKind.NON_TIME;
|
||||
|
||||
// Scan cannot ORDER BY non-time columns.
|
||||
return null;
|
||||
}
|
||||
orderByColumns = sorting.getOrderBys().stream().map(
|
||||
orderBy ->
|
||||
new ScanQuery.OrderBy(
|
||||
orderBy.getDimension(),
|
||||
orderBy.getDirection() == OrderByColumnSpec.Direction.DESCENDING
|
||||
? ScanQuery.Order.DESCENDING
|
||||
: ScanQuery.Order.ASCENDING
|
||||
)
|
||||
).collect(Collectors.toList());
|
||||
} else {
|
||||
order = ScanQuery.Order.NONE;
|
||||
orderByColumns = Collections.emptyList();
|
||||
}
|
||||
|
||||
// Compute the list of columns to select.
|
||||
final Set<String> columns = new HashSet<>(outputRowSignature.getColumnNames());
|
||||
|
||||
if (order != ScanQuery.Order.NONE) {
|
||||
columns.add(ColumnHolder.TIME_COLUMN_NAME);
|
||||
if (!queryFeatureInspector.feature(QueryFeature.SCAN_CAN_ORDER_BY_NON_TIME)
|
||||
&& (orderByColumns.size() > 1
|
||||
|| orderByColumns.stream()
|
||||
.anyMatch(orderBy -> !orderBy.getColumnName().equals(ColumnHolder.TIME_COLUMN_NAME)))) {
|
||||
// Cannot handle this ordering.
|
||||
return null;
|
||||
}
|
||||
|
||||
// Compute the list of columns to select, sorted and deduped.
|
||||
final SortedSet<String> scanColumns = new TreeSet<>(outputRowSignature.getColumnNames());
|
||||
orderByColumns.forEach(column -> scanColumns.add(column.getColumnName()));
|
||||
|
||||
return new ScanQuery(
|
||||
newDataSource,
|
||||
filtration.getQuerySegmentSpec(),
|
||||
|
@ -1153,10 +1164,10 @@ public class DruidQuery
|
|||
0,
|
||||
scanOffset,
|
||||
scanLimit,
|
||||
order,
|
||||
null,
|
||||
orderByColumns,
|
||||
filtration.getDimFilter(),
|
||||
Ordering.natural().sortedCopy(columns),
|
||||
ImmutableList.copyOf(scanColumns),
|
||||
false,
|
||||
ImmutableSortedMap.copyOf(plannerContext.getQueryContext())
|
||||
);
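A minimal, self-contained illustration of the column handling above: every ORDER BY column must appear in the scan's column list, so the ordering columns are folded into a sorted, de-duplicated set alongside the projected output columns. The column names are examples only.

import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

class ScanColumnsSketch
{
  static SortedSet<String> scanColumns(List<String> outputColumns, List<String> orderByColumns)
  {
    final SortedSet<String> columns = new TreeSet<>(outputColumns);
    // e.g. "__time" is added even if the projection did not include it
    columns.addAll(orderByColumns);
    return columns;
  }
}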
|
||||
|
|
|
@ -32,7 +32,8 @@ import org.apache.calcite.rel.logical.LogicalTableScan;
|
|||
import org.apache.calcite.rel.logical.LogicalValues;
|
||||
import org.apache.calcite.rel.metadata.RelMetadataQuery;
|
||||
import org.apache.calcite.rel.type.RelDataType;
|
||||
import org.apache.druid.java.util.common.guava.Sequence;
|
||||
import org.apache.druid.sql.calcite.external.ExternalTableScan;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerContext;
|
||||
import org.apache.druid.sql.calcite.table.DruidTable;
|
||||
|
||||
import javax.annotation.Nullable;
|
||||
|
@ -53,24 +54,24 @@ public class DruidQueryRel extends DruidRel<DruidQueryRel>
|
|||
final RelTraitSet traitSet,
|
||||
@Nullable final RelOptTable table,
|
||||
final DruidTable druidTable,
|
||||
final QueryMaker queryMaker,
|
||||
final PlannerContext plannerContext,
|
||||
final PartialDruidQuery partialQuery
|
||||
)
|
||||
{
|
||||
super(cluster, traitSet, queryMaker);
|
||||
super(cluster, traitSet, plannerContext);
|
||||
this.table = table;
|
||||
this.druidTable = Preconditions.checkNotNull(druidTable, "druidTable");
|
||||
this.partialQuery = Preconditions.checkNotNull(partialQuery, "partialQuery");
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a DruidQueryRel representing a full scan.
|
||||
* Create a DruidQueryRel representing a full scan of a builtin table or lookup.
|
||||
*/
|
||||
public static DruidQueryRel fullScan(
|
||||
public static DruidQueryRel scanTable(
|
||||
final LogicalTableScan scanRel,
|
||||
final RelOptTable table,
|
||||
final DruidTable druidTable,
|
||||
final QueryMaker queryMaker
|
||||
final PlannerContext plannerContext
|
||||
)
|
||||
{
|
||||
return new DruidQueryRel(
|
||||
|
@ -78,15 +79,36 @@ public class DruidQueryRel extends DruidRel<DruidQueryRel>
|
|||
scanRel.getCluster().traitSetOf(Convention.NONE),
|
||||
Preconditions.checkNotNull(table, "table"),
|
||||
druidTable,
|
||||
queryMaker,
|
||||
plannerContext,
|
||||
PartialDruidQuery.create(scanRel)
|
||||
);
|
||||
}
|
||||
|
||||
public static DruidQueryRel fullScan(
|
||||
/**
|
||||
* Create a DruidQueryRel representing a full scan of external data.
|
||||
*/
|
||||
public static DruidQueryRel scanExternal(
|
||||
final ExternalTableScan scanRel,
|
||||
final PlannerContext plannerContext
|
||||
)
|
||||
{
|
||||
return new DruidQueryRel(
|
||||
scanRel.getCluster(),
|
||||
scanRel.getCluster().traitSetOf(Convention.NONE),
|
||||
null,
|
||||
scanRel.getDruidTable(),
|
||||
plannerContext,
|
||||
PartialDruidQuery.create(scanRel)
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a DruidQueryRel representing a full scan of inline, literal values.
|
||||
*/
|
||||
public static DruidQueryRel scanValues(
|
||||
final LogicalValues valuesRel,
|
||||
final DruidTable druidTable,
|
||||
final QueryMaker queryMaker
|
||||
final PlannerContext plannerContext
|
||||
)
|
||||
{
|
||||
return new DruidQueryRel(
|
||||
|
@ -94,7 +116,7 @@ public class DruidQueryRel extends DruidRel<DruidQueryRel>
|
|||
valuesRel.getTraitSet(), // the traitSet of valuesRel should be kept
|
||||
null,
|
||||
druidTable,
|
||||
queryMaker,
|
||||
plannerContext,
|
||||
PartialDruidQuery.create(valuesRel)
|
||||
);
|
||||
}
|
||||
|
@ -125,7 +147,7 @@ public class DruidQueryRel extends DruidRel<DruidQueryRel>
|
|||
getTraitSet().replace(DruidConvention.instance()),
|
||||
table,
|
||||
druidTable,
|
||||
getQueryMaker(),
|
||||
getPlannerContext(),
|
||||
partialQuery
|
||||
);
|
||||
}
|
||||
|
@ -150,21 +172,11 @@ public class DruidQueryRel extends DruidRel<DruidQueryRel>
|
|||
getTraitSet().plusAll(newQueryBuilder.getRelTraits()),
|
||||
table,
|
||||
druidTable,
|
||||
getQueryMaker(),
|
||||
getPlannerContext(),
|
||||
newQueryBuilder
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Sequence<Object[]> runQuery()
|
||||
{
|
||||
// runQuery doesn't need to finalize aggregations, because the fact that runQuery is happening suggests this
|
||||
// is the outermost query and it will actually get run as a native query. Druid's native query layer will
|
||||
// finalize aggregations for the outermost query even if we don't explicitly ask it to.
|
||||
|
||||
return getQueryMaker().runQuery(toDruidQuery(false));
|
||||
}
|
||||
|
||||
public DruidTable getDruidTable()
|
||||
{
|
||||
return druidTable;
|
||||
|
@ -189,7 +201,7 @@ public class DruidQueryRel extends DruidRel<DruidQueryRel>
|
|||
final DruidQuery druidQuery = toDruidQueryForExplaining();
|
||||
|
||||
try {
|
||||
queryString = getQueryMaker().getJsonMapper().writeValueAsString(druidQuery.getQuery());
|
||||
queryString = getPlannerContext().getJsonMapper().writeValueAsString(druidQuery.getQuery());
|
||||
}
|
||||
catch (JsonProcessingException e) {
|
||||
throw new RuntimeException(e);
|
||||
|
|
|
@@ -30,12 +30,12 @@ import java.util.Set;

public abstract class DruidRel<T extends DruidRel> extends AbstractRelNode
{
  private final QueryMaker queryMaker;
  private final PlannerContext plannerContext;

  protected DruidRel(RelOptCluster cluster, RelTraitSet traitSet, QueryMaker queryMaker)
  protected DruidRel(RelOptCluster cluster, RelTraitSet traitSet, PlannerContext plannerContext)
  {
    super(cluster, traitSet);
    this.queryMaker = queryMaker;
    this.plannerContext = plannerContext;
  }

  /**

@@ -45,7 +45,14 @@ public abstract class DruidRel<T extends DruidRel> extends AbstractRelNode
  @Nullable
  public abstract PartialDruidQuery getPartialDruidQuery();

  public abstract Sequence<Object[]> runQuery();
  public Sequence<Object[]> runQuery()
  {
    // runQuery doesn't need to finalize aggregations, because the fact that runQuery is happening suggests this
    // is the outermost query, and it will actually get run as a native query. Druid's native query layer will
    // finalize aggregations for the outermost query even if we don't explicitly ask it to.

    return getPlannerContext().getQueryMaker().runQuery(toDruidQuery(false));
  }

  public abstract T withPartialQuery(PartialDruidQuery newQueryBuilder);

@@ -83,14 +90,9 @@ public abstract class DruidRel<T extends DruidRel> extends AbstractRelNode
   */
  public abstract DruidQuery toDruidQueryForExplaining();

  public QueryMaker getQueryMaker()
  {
    return queryMaker;
  }

  public PlannerContext getPlannerContext()
  {
    return queryMaker.getPlannerContext();
    return plannerContext;
  }

  public abstract T asDruidConvention();

@ -31,11 +31,11 @@ import org.apache.calcite.rel.core.Union;
|
|||
import org.apache.calcite.rel.metadata.RelMetadataQuery;
|
||||
import org.apache.calcite.rel.type.RelDataType;
|
||||
import org.apache.druid.java.util.common.StringUtils;
|
||||
import org.apache.druid.java.util.common.guava.Sequence;
|
||||
import org.apache.druid.query.DataSource;
|
||||
import org.apache.druid.query.TableDataSource;
|
||||
import org.apache.druid.query.UnionDataSource;
|
||||
import org.apache.druid.segment.column.RowSignature;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerContext;
|
||||
import org.apache.druid.sql.calcite.table.RowSignatures;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
@ -66,10 +66,10 @@ public class DruidUnionDataSourceRel extends DruidRel<DruidUnionDataSourceRel>
|
|||
final Union unionRel,
|
||||
final List<String> unionColumnNames,
|
||||
final PartialDruidQuery partialQuery,
|
||||
final QueryMaker queryMaker
|
||||
final PlannerContext plannerContext
|
||||
)
|
||||
{
|
||||
super(cluster, traitSet, queryMaker);
|
||||
super(cluster, traitSet, plannerContext);
|
||||
this.unionRel = unionRel;
|
||||
this.unionColumnNames = unionColumnNames;
|
||||
this.partialQuery = partialQuery;
|
||||
|
@ -78,7 +78,7 @@ public class DruidUnionDataSourceRel extends DruidRel<DruidUnionDataSourceRel>
|
|||
public static DruidUnionDataSourceRel create(
|
||||
final Union unionRel,
|
||||
final List<String> unionColumnNames,
|
||||
final QueryMaker queryMaker
|
||||
final PlannerContext plannerContext
|
||||
)
|
||||
{
|
||||
return new DruidUnionDataSourceRel(
|
||||
|
@ -87,7 +87,7 @@ public class DruidUnionDataSourceRel extends DruidRel<DruidUnionDataSourceRel>
|
|||
unionRel,
|
||||
unionColumnNames,
|
||||
PartialDruidQuery.create(unionRel),
|
||||
queryMaker
|
||||
plannerContext
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -111,20 +111,10 @@ public class DruidUnionDataSourceRel extends DruidRel<DruidUnionDataSourceRel>
|
|||
unionRel,
|
||||
unionColumnNames,
|
||||
newQueryBuilder,
|
||||
getQueryMaker()
|
||||
getPlannerContext()
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Sequence<Object[]> runQuery()
|
||||
{
|
||||
// runQuery doesn't need to finalize aggregations, because the fact that runQuery is happening suggests this
|
||||
// is the outermost query and it will actually get run as a native query. Druid's native query layer will
|
||||
// finalize aggregations for the outermost query even if we don't explicitly ask it to.
|
||||
|
||||
return getQueryMaker().runQuery(toDruidQuery(false));
|
||||
}
|
||||
|
||||
@Override
|
||||
public DruidQuery toDruidQuery(final boolean finalizeAggregations)
|
||||
{
|
||||
|
@ -204,7 +194,7 @@ public class DruidUnionDataSourceRel extends DruidRel<DruidUnionDataSourceRel>
|
|||
),
|
||||
unionColumnNames,
|
||||
partialQuery,
|
||||
getQueryMaker()
|
||||
getPlannerContext()
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -229,7 +219,7 @@ public class DruidUnionDataSourceRel extends DruidRel<DruidUnionDataSourceRel>
|
|||
(Union) unionRel.copy(unionRel.getTraitSet(), inputs),
|
||||
unionColumnNames,
|
||||
partialQuery,
|
||||
getQueryMaker()
|
||||
getPlannerContext()
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -252,7 +242,7 @@ public class DruidUnionDataSourceRel extends DruidRel<DruidUnionDataSourceRel>
|
|||
final DruidQuery druidQuery = toDruidQueryForExplaining();
|
||||
|
||||
try {
|
||||
queryString = getQueryMaker().getJsonMapper().writeValueAsString(druidQuery.getQuery());
|
||||
queryString = getPlannerContext().getJsonMapper().writeValueAsString(druidQuery.getQuery());
|
||||
}
|
||||
catch (JsonProcessingException e) {
|
||||
throw new RuntimeException(e);
|
||||
|
|
|
@ -34,6 +34,7 @@ import org.apache.druid.java.util.common.StringUtils;
|
|||
import org.apache.druid.java.util.common.guava.Sequence;
|
||||
import org.apache.druid.java.util.common.guava.Sequences;
|
||||
import org.apache.druid.query.UnionDataSource;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerContext;
|
||||
|
||||
import javax.annotation.Nullable;
|
||||
import java.util.ArrayList;
|
||||
|
@ -60,20 +61,20 @@ public class DruidUnionRel extends DruidRel<DruidUnionRel>
|
|||
private DruidUnionRel(
|
||||
final RelOptCluster cluster,
|
||||
final RelTraitSet traitSet,
|
||||
final QueryMaker queryMaker,
|
||||
final PlannerContext plannerContext,
|
||||
final RelDataType rowType,
|
||||
final List<RelNode> rels,
|
||||
final int limit
|
||||
)
|
||||
{
|
||||
super(cluster, traitSet, queryMaker);
|
||||
super(cluster, traitSet, plannerContext);
|
||||
this.rowType = rowType;
|
||||
this.rels = rels;
|
||||
this.limit = limit;
|
||||
}
|
||||
|
||||
public static DruidUnionRel create(
|
||||
final QueryMaker queryMaker,
|
||||
final PlannerContext plannerContext,
|
||||
final RelDataType rowType,
|
||||
final List<RelNode> rels,
|
||||
final int limit
|
||||
|
@ -84,7 +85,7 @@ public class DruidUnionRel extends DruidRel<DruidUnionRel>
|
|||
return new DruidUnionRel(
|
||||
rels.get(0).getCluster(),
|
||||
rels.get(0).getTraitSet(),
|
||||
queryMaker,
|
||||
plannerContext,
|
||||
rowType,
|
||||
new ArrayList<>(rels),
|
||||
limit
|
||||
|
@ -138,7 +139,7 @@ public class DruidUnionRel extends DruidRel<DruidUnionRel>
|
|||
return new DruidUnionRel(
|
||||
getCluster(),
|
||||
getTraitSet().replace(DruidConvention.instance()),
|
||||
getQueryMaker(),
|
||||
getPlannerContext(),
|
||||
rowType,
|
||||
rels.stream().map(rel -> RelOptRule.convert(rel, DruidConvention.instance())).collect(Collectors.toList()),
|
||||
limit
|
||||
|
@ -163,7 +164,7 @@ public class DruidUnionRel extends DruidRel<DruidUnionRel>
|
|||
return new DruidUnionRel(
|
||||
getCluster(),
|
||||
traitSet,
|
||||
getQueryMaker(),
|
||||
getPlannerContext(),
|
||||
rowType,
|
||||
inputs,
|
||||
limit
|
||||
|
|
|
@@ -84,7 +84,7 @@ public class Sorting
    return new Sorting(Collections.emptyList(), OffsetLimit.none(), null);
  }

  public SortKind getSortKind(final String timeColumn)
  public SortKind getTimeSortKind(final String timeColumn)
  {
    if (orderBys.isEmpty()) {
      return SortKind.UNORDERED;

@@ -186,7 +186,7 @@ public class DruidJoinRule extends RelOptRule
            join.isSemiJoinDone()
        ),
        leftFilter,
        left.getQueryMaker()
        left.getPlannerContext()
    );

    final RelBuilder relBuilder =

@@ -31,7 +31,6 @@ import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.DruidQueryRel;
import org.apache.druid.sql.calcite.rel.QueryMaker;
import org.apache.druid.sql.calcite.table.DruidTable;
import org.apache.druid.sql.calcite.table.RowSignatures;

@@ -50,12 +49,12 @@ import java.util.stream.Collectors;
 */
public class DruidLogicalValuesRule extends RelOptRule
{
  private final QueryMaker queryMaker;
  private final PlannerContext plannerContext;

  public DruidLogicalValuesRule(QueryMaker queryMaker)
  public DruidLogicalValuesRule(PlannerContext plannerContext)
  {
    super(operand(LogicalValues.class, any()));
    this.queryMaker = queryMaker;
    this.plannerContext = plannerContext;
  }

  @Override

@@ -67,7 +66,7 @@ public class DruidLogicalValuesRule extends RelOptRule
        .stream()
        .map(tuple -> tuple
            .stream()
            .map(v -> getValueFromLiteral(v, queryMaker.getPlannerContext()))
            .map(v -> getValueFromLiteral(v, plannerContext))
            .collect(Collectors.toList())
            .toArray(new Object[0])
        )

@@ -79,11 +78,12 @@ public class DruidLogicalValuesRule extends RelOptRule
    final DruidTable druidTable = new DruidTable(
        InlineDataSource.fromIterable(objectTuples, rowSignature),
        rowSignature,
        null,
        true,
        false
    );
    call.transformTo(
        DruidQueryRel.fullScan(values, druidTable, queryMaker)
        DruidQueryRel.scanValues(values, druidTable, plannerContext)
    );
  }

@@ -62,7 +62,7 @@ public class DruidSortUnionRule extends RelOptRule
    final int offset = sort.offset != null ? RexLiteral.intValue(sort.offset) : 0;

    final DruidUnionRel newUnionRel = DruidUnionRel.create(
        unionRel.getQueryMaker(),
        unionRel.getPlannerContext(),
        unionRel.getRowType(),
        unionRel.getInputs(),
        unionRel.getLimit() >= 0 ? Math.min(limit + offset, unionRel.getLimit()) : limit + offset

@@ -23,18 +23,18 @@ import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.plan.RelOptTable;
import org.apache.calcite.rel.logical.LogicalTableScan;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.DruidQueryRel;
import org.apache.druid.sql.calcite.rel.QueryMaker;
import org.apache.druid.sql.calcite.table.DruidTable;

public class DruidTableScanRule extends RelOptRule
{
  private final QueryMaker queryMaker;
  private final PlannerContext plannerContext;

  public DruidTableScanRule(final QueryMaker queryMaker)
  public DruidTableScanRule(final PlannerContext plannerContext)
  {
    super(operand(LogicalTableScan.class, any()));
    this.queryMaker = queryMaker;
    this.plannerContext = plannerContext;
  }

  @Override

@@ -45,7 +45,7 @@ public class DruidTableScanRule extends RelOptRule
    final DruidTable druidTable = table.unwrap(DruidTable.class);
    if (druidTable != null) {
      call.transformTo(
          DruidQueryRel.fullScan(scan, table, druidTable, queryMaker)
          DruidQueryRel.scanTable(scan, table, druidTable, plannerContext)
      );
    }
  }

@@ -91,7 +91,7 @@ public class DruidUnionDataSourceRule extends RelOptRule
          DruidUnionDataSourceRel.create(
              (Union) newUnionRel,
              getColumnNamesIfTableOrUnion(firstDruidRel).get(),
              firstDruidRel.getQueryMaker()
              firstDruidRel.getPlannerContext()
          )
      );
    } else {

@@ -104,7 +104,7 @@ public class DruidUnionDataSourceRule extends RelOptRule
          DruidUnionDataSourceRel.create(
              unionRel,
              getColumnNamesIfTableOrUnion(firstDruidRel).get(),
              firstDruidRel.getQueryMaker()
              firstDruidRel.getPlannerContext()
          )
      );
    }

@@ -72,7 +72,7 @@ public class DruidUnionRule extends RelOptRule
    if (unionRel.all) {
      call.transformTo(
          DruidUnionRel.create(
              someDruidRel.getQueryMaker(),
              someDruidRel.getPlannerContext(),
              unionRel.getRowType(),
              inputs,
              -1

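Each of the rules above now receives a PlannerContext instead of a QueryMaker, so only the context has to be threaded through when a rule set is assembled. A minimal, hypothetical sketch of wiring the two scan-related rules (the class and method names here are illustrative and the rule package path is assumed; only the constructor signatures come from this patch):

import com.google.common.collect.ImmutableList;
import java.util.List;
import org.apache.calcite.plan.RelOptRule;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rule.DruidLogicalValuesRule;  // package path assumed
import org.apache.druid.sql.calcite.rule.DruidTableScanRule;      // package path assumed

public class RuleSketch
{
  public static List<RelOptRule> scanRules(final PlannerContext plannerContext)
  {
    // Each rule pulls whatever else it needs (QueryMaker, ObjectMapper) from the PlannerContext when it fires.
    return ImmutableList.of(
        new DruidTableScanRule(plannerContext),
        new DruidLogicalValuesRule(plannerContext)
    );
  }
}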
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.druid.sql.calcite.rel;
|
||||
package org.apache.druid.sql.calcite.run;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
|
@ -25,12 +25,14 @@ import com.google.common.collect.Iterables;
|
|||
import com.google.common.primitives.Ints;
|
||||
import it.unimi.dsi.fastutil.objects.Object2IntMap;
|
||||
import it.unimi.dsi.fastutil.objects.Object2IntOpenHashMap;
|
||||
import org.apache.calcite.avatica.ColumnMetaData;
|
||||
import org.apache.calcite.rel.type.RelDataType;
|
||||
import org.apache.calcite.runtime.Hook;
|
||||
import org.apache.calcite.sql.type.SqlTypeName;
|
||||
import org.apache.calcite.util.NlsString;
|
||||
import org.apache.calcite.util.Pair;
|
||||
import org.apache.druid.common.config.NullHandling;
|
||||
import org.apache.druid.java.util.common.DateTimes;
|
||||
import org.apache.druid.java.util.common.IAE;
|
||||
import org.apache.druid.java.util.common.ISE;
|
||||
import org.apache.druid.java.util.common.Intervals;
|
||||
import org.apache.druid.java.util.common.guava.Sequence;
|
||||
|
@ -49,44 +51,64 @@ import org.apache.druid.server.security.Access;
|
|||
import org.apache.druid.server.security.AuthenticationResult;
|
||||
import org.apache.druid.sql.calcite.planner.Calcites;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerContext;
|
||||
import org.apache.druid.sql.calcite.rel.CannotBuildQueryException;
|
||||
import org.apache.druid.sql.calcite.rel.DruidQuery;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.Interval;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.sql.Array;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class QueryMaker
|
||||
public class NativeQueryMaker implements QueryMaker
|
||||
{
|
||||
private final QueryLifecycleFactory queryLifecycleFactory;
|
||||
private final PlannerContext plannerContext;
|
||||
private final ObjectMapper jsonMapper;
|
||||
private final List<Pair<Integer, String>> fieldMapping;
|
||||
private final RelDataType resultType;
|
||||
|
||||
public QueryMaker(
|
||||
public NativeQueryMaker(
|
||||
final QueryLifecycleFactory queryLifecycleFactory,
|
||||
final PlannerContext plannerContext,
|
||||
final ObjectMapper jsonMapper
|
||||
final ObjectMapper jsonMapper,
|
||||
final List<Pair<Integer, String>> fieldMapping,
|
||||
final RelDataType resultType
|
||||
)
|
||||
{
|
||||
this.queryLifecycleFactory = queryLifecycleFactory;
|
||||
this.plannerContext = plannerContext;
|
||||
this.jsonMapper = jsonMapper;
|
||||
this.fieldMapping = fieldMapping;
|
||||
this.resultType = resultType;
|
||||
}
|
||||
|
||||
public PlannerContext getPlannerContext()
|
||||
@Override
|
||||
public RelDataType getResultType()
|
||||
{
|
||||
return plannerContext;
|
||||
return resultType;
|
||||
}
|
||||
|
||||
public ObjectMapper getJsonMapper()
|
||||
@Override
|
||||
public boolean feature(QueryFeature feature)
|
||||
{
|
||||
return jsonMapper;
|
||||
switch (feature) {
|
||||
case CAN_RUN_TIMESERIES:
|
||||
case CAN_RUN_TOPN:
|
||||
return true;
|
||||
case CAN_READ_EXTERNAL_DATA:
|
||||
case SCAN_CAN_ORDER_BY_NON_TIME:
|
||||
return false;
|
||||
default:
|
||||
throw new IAE("Unrecognized feature: %s", feature);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Sequence<Object[]> runQuery(final DruidQuery druidQuery)
|
||||
{
|
||||
final Query<?> query = druidQuery.getQuery();
|
||||
|
@ -113,14 +135,17 @@ public class QueryMaker
|
|||
rowOrder = druidQuery.getOutputRowSignature().getColumnNames();
|
||||
}
|
||||
|
||||
return execute(
|
||||
query,
|
||||
rowOrder,
|
||||
final List<SqlTypeName> columnTypes =
|
||||
druidQuery.getOutputRowType()
|
||||
.getFieldList()
|
||||
.stream()
|
||||
.map(f -> f.getType().getSqlTypeName())
|
||||
.collect(Collectors.toList())
|
||||
.collect(Collectors.toList());
|
||||
|
||||
return execute(
|
||||
query,
|
||||
mapColumnList(rowOrder, fieldMapping),
|
||||
mapColumnList(columnTypes, fieldMapping)
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -159,10 +184,10 @@ public class QueryMaker
|
|||
final List<String> resultArrayFields = toolChest.resultArraySignature(query).getColumnNames();
|
||||
final Sequence<Object[]> resultArrays = toolChest.resultsAsArrays(query, results);
|
||||
|
||||
return remapFields(resultArrays, resultArrayFields, newFields, newTypes);
|
||||
return mapResultSequence(resultArrays, resultArrayFields, newFields, newTypes);
|
||||
}
|
||||
|
||||
private Sequence<Object[]> remapFields(
|
||||
private Sequence<Object[]> mapResultSequence(
|
||||
final Sequence<Object[]> sequence,
|
||||
final List<String> originalFields,
|
||||
final List<String> newFields,
|
||||
|
@ -204,35 +229,6 @@ public class QueryMaker
|
|||
);
|
||||
}
|
||||
|
||||
public static ColumnMetaData.Rep rep(final SqlTypeName sqlType)
|
||||
{
|
||||
if (SqlTypeName.CHAR_TYPES.contains(sqlType)) {
|
||||
return ColumnMetaData.Rep.of(String.class);
|
||||
} else if (sqlType == SqlTypeName.TIMESTAMP) {
|
||||
return ColumnMetaData.Rep.of(Long.class);
|
||||
} else if (sqlType == SqlTypeName.DATE) {
|
||||
return ColumnMetaData.Rep.of(Integer.class);
|
||||
} else if (sqlType == SqlTypeName.INTEGER) {
|
||||
// use Number.class for exact numeric types since JSON transport might switch longs to integers
|
||||
return ColumnMetaData.Rep.of(Number.class);
|
||||
} else if (sqlType == SqlTypeName.BIGINT) {
|
||||
// use Number.class for exact numeric types since JSON transport might switch longs to integers
|
||||
return ColumnMetaData.Rep.of(Number.class);
|
||||
} else if (sqlType == SqlTypeName.FLOAT) {
|
||||
return ColumnMetaData.Rep.of(Float.class);
|
||||
} else if (sqlType == SqlTypeName.DOUBLE || sqlType == SqlTypeName.DECIMAL) {
|
||||
return ColumnMetaData.Rep.of(Double.class);
|
||||
} else if (sqlType == SqlTypeName.BOOLEAN) {
|
||||
return ColumnMetaData.Rep.of(Boolean.class);
|
||||
} else if (sqlType == SqlTypeName.OTHER) {
|
||||
return ColumnMetaData.Rep.of(Object.class);
|
||||
} else if (sqlType == SqlTypeName.ARRAY) {
|
||||
return ColumnMetaData.Rep.of(Array.class);
|
||||
} else {
|
||||
throw new ISE("No rep for SQL type[%s]", sqlType);
|
||||
}
|
||||
}
|
||||
|
||||
private Object coerce(final Object value, final SqlTypeName sqlType)
|
||||
{
|
||||
final Object coercedValue;
|
||||
|
@ -368,4 +364,15 @@ public class QueryMaker
|
|||
}
|
||||
return dateTime;
|
||||
}
|
||||
|
||||
private static <T> List<T> mapColumnList(final List<T> in, final List<Pair<Integer, String>> fieldMapping)
|
||||
{
|
||||
final List<T> out = new ArrayList<>(fieldMapping.size());
|
||||
|
||||
for (final Pair<Integer, String> entry : fieldMapping) {
|
||||
out.add(in.get(entry.getKey()));
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,69 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.sql.calcite.run;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.Inject;
import org.apache.calcite.rel.RelRoot;
import org.apache.calcite.tools.ValidationException;
import org.apache.druid.guice.LazySingleton;
import org.apache.druid.server.QueryLifecycleFactory;
import org.apache.druid.sql.calcite.planner.PlannerContext;

@LazySingleton
public class NativeQueryMakerFactory implements QueryMakerFactory
{
  public static final String TYPE = "native";

  private final QueryLifecycleFactory queryLifecycleFactory;
  private final ObjectMapper jsonMapper;

  @Inject
  public NativeQueryMakerFactory(
      final QueryLifecycleFactory queryLifecycleFactory,
      final ObjectMapper jsonMapper
  )
  {
    this.queryLifecycleFactory = queryLifecycleFactory;
    this.jsonMapper = jsonMapper;
  }

  @Override
  public QueryMaker buildForSelect(final RelRoot relRoot, final PlannerContext plannerContext)
  {
    return new NativeQueryMaker(
        queryLifecycleFactory,
        plannerContext,
        jsonMapper,
        relRoot.fields,
        relRoot.validatedRowType
    );
  }

  @Override
  public QueryMaker buildForInsert(
      final String targetDataSource,
      final RelRoot relRoot,
      final PlannerContext plannerContext
  ) throws ValidationException
  {
    throw new ValidationException("Cannot execute INSERT queries.");
  }
}

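In production the planner obtains this factory through Guice; the binding that this commit adds to the Avatica test module further below can serve as a sketch (the module class name here is hypothetical):

import com.google.inject.Binder;
import com.google.inject.Module;
import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory;
import org.apache.druid.sql.calcite.run.QueryMakerFactory;

public class QueryMakerFactoryModule implements Module
{
  @Override
  public void configure(final Binder binder)
  {
    // NativeQueryMakerFactory is annotated @LazySingleton, so no extra scoping is needed here.
    binder.bind(QueryMakerFactory.class).to(NativeQueryMakerFactory.class);
  }
}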
@@ -0,0 +1,49 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.sql.calcite.run;

import org.apache.druid.sql.calcite.external.ExternalDataSource;

/**
 * Arguments to {@link QueryFeatureInspector#feature(QueryFeature)}.
 */
public enum QueryFeature
{
  /**
   * Queries of type {@link org.apache.druid.query.timeseries.TimeseriesQuery} are usable.
   */
  CAN_RUN_TIMESERIES,

  /**
   * Queries of type {@link org.apache.druid.query.topn.TopNQuery} are usable.
   */
  CAN_RUN_TOPN,

  /**
   * Queries can use {@link ExternalDataSource}.
   */
  CAN_READ_EXTERNAL_DATA,

  /**
   * Scan queries can use {@link org.apache.druid.query.scan.ScanQuery#getOrderBys()} that are based on something
   * other than the "__time" column.
   */
  SCAN_CAN_ORDER_BY_NON_TIME,
}

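As a rough illustration of how these flags are meant to be consumed (this helper is not part of the patch), planner code can ask the QueryFeatureInspector before committing to a plan shape, for example falling back from TopN when the executor cannot run it:

import org.apache.druid.sql.calcite.run.QueryFeature;
import org.apache.druid.sql.calcite.run.QueryFeatureInspector;

public class FeatureCheckSketch
{
  // Returns true when the executor can run TopN queries; a caller would otherwise
  // plan the same SQL as a GroupBy query.
  public static boolean canUseTopN(final QueryFeatureInspector inspector)
  {
    return inspector.feature(QueryFeature.CAN_RUN_TOPN);
  }
}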
@@ -0,0 +1,32 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.sql.calcite.run;

/**
 * Gives the SQL-to-Druid query translator information about what features are supported by the {@link QueryMaker}
 * that will execute the query.
 */
public interface QueryFeatureInspector
{
  /**
   * Returns whether a feature is present or not.
   */
  boolean feature(QueryFeature feature);
}

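Because the interface has a single abstract method, a test double is a one-liner; for example, an inspector that reports every feature as available (illustrative only, not part of the patch):

// Every feature reported as present; handy for planner-only tests.
QueryFeatureInspector everythingEnabled = feature -> true;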
@@ -0,0 +1,42 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.sql.calcite.run;

import org.apache.calcite.rel.type.RelDataType;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.sql.calcite.rel.DruidQuery;

/**
 * Interface for executing Druid queries. Each one is created by a {@link QueryMakerFactory} and is tied to a
 * specific SQL query. Extends {@link QueryFeatureInspector}, so calling code can tell what this executor supports.
 */
public interface QueryMaker extends QueryFeatureInspector
{
  /**
   * Returns the SQL row type for this query.
   */
  RelDataType getResultType();

  /**
   * Executes a given Druid query, which is expected to correspond to the SQL query that this QueryMaker was originally
   * created for. The returned arrays match the row type given by {@link #getResultType()}.
   */
  Sequence<Object[]> runQuery(DruidQuery druidQuery);
}

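A QueryMaker that never executes anything can be useful for planning-only tests. A minimal sketch against the interface above (the class name and behavior are hypothetical; Sequences.empty() is Druid's standard empty sequence):

import org.apache.calcite.rel.type.RelDataType;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.sql.calcite.rel.DruidQuery;

public class NoopQueryMaker implements QueryMaker
{
  private final RelDataType resultType;

  public NoopQueryMaker(final RelDataType resultType)
  {
    this.resultType = resultType;
  }

  @Override
  public RelDataType getResultType()
  {
    return resultType;
  }

  @Override
  public boolean feature(final QueryFeature feature)
  {
    // Advertise no optional features, so the planner sticks to the most conservative query shapes.
    return false;
  }

  @Override
  public Sequence<Object[]> runQuery(final DruidQuery druidQuery)
  {
    // Planning-only: nothing is ever executed.
    return Sequences.empty();
  }
}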
@@ -0,0 +1,60 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.sql.calcite.run;

import org.apache.calcite.rel.RelRoot;
import org.apache.calcite.tools.ValidationException;
import org.apache.druid.sql.calcite.planner.PlannerContext;

/**
 * Interface for creating {@link QueryMaker}, which in turn are used to execute Druid queries.
 */
public interface QueryMakerFactory
{
  /**
   * Create a {@link QueryMaker} for a SELECT query.
   *
   * @param relRoot        planned and validated rel
   * @param plannerContext context for this query
   *
   * @return an executor for the provided query
   *
   * @throws ValidationException if this factory cannot build an executor for the provided query
   */
  @SuppressWarnings("RedundantThrows")
  QueryMaker buildForSelect(RelRoot relRoot, PlannerContext plannerContext) throws ValidationException;

  /**
   * Create a {@link QueryMaker} for an INSERT ... SELECT query.
   *
   * @param targetDataSource datasource for the INSERT portion of the query
   * @param relRoot          planned and validated rel for the SELECT portion of the query
   * @param plannerContext   context for this query
   *
   * @return an executor for the provided query
   *
   * @throws ValidationException if this factory cannot build an executor for the provided query
   */
  QueryMaker buildForInsert(
      String targetDataSource,
      RelRoot relRoot,
      PlannerContext plannerContext
  ) throws ValidationException;
}

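A custom factory only has to cover these two methods. Continuing the hypothetical NoopQueryMaker sketch from above, a planning-only factory that refuses INSERT might look like this (illustrative, not part of the patch):

import org.apache.calcite.rel.RelRoot;
import org.apache.calcite.tools.ValidationException;
import org.apache.druid.sql.calcite.planner.PlannerContext;

public class NoopQueryMakerFactory implements QueryMakerFactory
{
  @Override
  public QueryMaker buildForSelect(final RelRoot relRoot, final PlannerContext plannerContext)
  {
    // relRoot.validatedRowType is the SQL row type the planner validated for this query.
    return new NoopQueryMaker(relRoot.validatedRowType);
  }

  @Override
  public QueryMaker buildForInsert(
      final String targetDataSource,
      final RelRoot relRoot,
      final PlannerContext plannerContext
  ) throws ValidationException
  {
    throw new ValidationException("INSERT is not supported by this planning-only factory.");
  }
}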
@@ -791,7 +791,7 @@ public class DruidSchema extends AbstractSchema
    } else {
      tableDataSource = new TableDataSource(dataSource);
    }
    return new DruidTable(tableDataSource, builder.build(), isJoinable, isBroadcast);
    return new DruidTable(tableDataSource, builder.build(), null, isJoinable, isBroadcast);
  }

  @VisibleForTesting

@@ -59,7 +59,10 @@ public class LookupSchema extends AbstractSchema
    for (final String lookupName : lookupProvider.getAllLookupNames()) {
      // all lookups should be also joinable through lookup joinable factory, and lookups are effectively broadcast
      // (if we ignore lookup tiers...)
      tableMapBuilder.put(lookupName, new DruidTable(new LookupDataSource(lookupName), ROW_SIGNATURE, true, true));
      tableMapBuilder.put(
          lookupName,
          new DruidTable(new LookupDataSource(lookupName), ROW_SIGNATURE, null, true, true)
      );
    }

    return tableMapBuilder.build();

@ -19,6 +19,7 @@
|
|||
|
||||
package org.apache.druid.sql.calcite.table;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.calcite.config.CalciteConnectionConfig;
|
||||
import org.apache.calcite.plan.RelOptTable;
|
||||
|
@ -34,27 +35,40 @@ import org.apache.calcite.sql.SqlCall;
|
|||
import org.apache.calcite.sql.SqlNode;
|
||||
import org.apache.druid.query.DataSource;
|
||||
import org.apache.druid.segment.column.RowSignature;
|
||||
import org.apache.druid.sql.calcite.external.ExternalDataSource;
|
||||
import org.apache.druid.sql.calcite.external.ExternalTableScan;
|
||||
|
||||
import javax.annotation.Nullable;
|
||||
import java.util.Objects;
|
||||
|
||||
public class DruidTable implements TranslatableTable
|
||||
{
|
||||
private final DataSource dataSource;
|
||||
private final RowSignature rowSignature;
|
||||
|
||||
@Nullable
|
||||
private final ObjectMapper objectMapper;
|
||||
private final boolean joinable;
|
||||
private final boolean broadcast;
|
||||
|
||||
public DruidTable(
|
||||
final DataSource dataSource,
|
||||
final RowSignature rowSignature,
|
||||
@Nullable final ObjectMapper objectMapper,
|
||||
final boolean isJoinable,
|
||||
final boolean isBroadcast
|
||||
)
|
||||
{
|
||||
this.dataSource = Preconditions.checkNotNull(dataSource, "dataSource");
|
||||
this.rowSignature = Preconditions.checkNotNull(rowSignature, "rowSignature");
|
||||
this.objectMapper = objectMapper;
|
||||
this.joinable = isJoinable;
|
||||
this.broadcast = isBroadcast;
|
||||
|
||||
if (dataSource instanceof ExternalDataSource && objectMapper == null) {
|
||||
// objectMapper is used by ExternalTableScan to generate its digest.
|
||||
throw new NullPointerException("ObjectMapper is required for external datasources");
|
||||
}
|
||||
}
|
||||
|
||||
public DataSource getDataSource()
|
||||
|
@ -115,7 +129,13 @@ public class DruidTable implements TranslatableTable
|
|||
@Override
|
||||
public RelNode toRel(final RelOptTable.ToRelContext context, final RelOptTable table)
|
||||
{
|
||||
return LogicalTableScan.create(context.getCluster(), table);
|
||||
if (dataSource instanceof ExternalDataSource) {
|
||||
// Cannot use LogicalTableScan here, because its digest is solely based on the name of the table macro.
|
||||
// Must use our own class that computes its own digest.
|
||||
return new ExternalTableScan(context.getCluster(), objectMapper, this);
|
||||
} else {
|
||||
return LogicalTableScan.create(context.getCluster(), table);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
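The DruidTable change above adds a nullable ObjectMapper parameter and rejects external datasources that arrive without one, since ExternalTableScan uses the mapper to compute its digest. A hedged sketch of constructing such a table (the variable names are placeholders and the joinable/broadcast flags are assumptions, not taken from the patch):

// Sketch only: 'externalDataSource', 'signature' and 'jsonMapper' stand in for values the caller already has.
final DruidTable externalTable = new DruidTable(
    externalDataSource,   // an ExternalDataSource; requires the non-null mapper below
    signature,            // RowSignature describing the external columns
    jsonMapper,           // used by ExternalTableScan to compute the scan's digest
    false,                // assumed: external data is not joinable
    false                 // assumed: external data is not broadcast
);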
@@ -56,9 +56,8 @@ public class DruidViewMacro implements TableMacro
  public TranslatableTable apply(final List<Object> arguments)
  {
    final RelDataType rowType;
    try (final DruidPlanner planner = plannerFactory.createPlanner(null)) {

      rowType = planner.plan(viewSql).rowType();
    try (final DruidPlanner planner = plannerFactory.createPlanner(viewSql, null)) {
      rowType = planner.plan().rowType();
    }
    catch (Exception e) {
      throw new RuntimeException(e);

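The DruidViewMacro change reflects the new planner calling convention in this commit: the SQL string is supplied when the planner is created, so plan() (and, as the test changes further below show, validate() and prepare()) take no arguments. A minimal sketch of the pattern, assuming a PlannerFactory, SQL string, and context map are already in hand:

// 'plannerFactory', 'sql' and 'context' are placeholders for values the caller already has.
try (final DruidPlanner planner = plannerFactory.createPlanner(sql, context)) {
  // The planner is bound to exactly one SQL string, so plan() needs no arguments.
  final PlannerResult plannerResult = planner.plan();
  final RelDataType rowType = plannerResult.rowType();
  // ... inspect or run the plan ...
}
catch (Exception e) {
  throw new RuntimeException(e);
}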
@ -22,7 +22,6 @@ package org.apache.druid.sql.http;
|
|||
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.google.common.io.CountingOutputStream;
|
||||
import com.google.inject.Inject;
|
||||
import org.apache.calcite.plan.RelOptPlanner;
|
||||
|
@ -45,7 +44,7 @@ import org.apache.druid.server.security.Access;
|
|||
import org.apache.druid.server.security.AuthorizationUtils;
|
||||
import org.apache.druid.server.security.AuthorizerMapper;
|
||||
import org.apache.druid.server.security.ForbiddenException;
|
||||
import org.apache.druid.server.security.Resource;
|
||||
import org.apache.druid.server.security.ResourceAction;
|
||||
import org.apache.druid.sql.SqlLifecycle;
|
||||
import org.apache.druid.sql.SqlLifecycleFactory;
|
||||
import org.apache.druid.sql.SqlLifecycleManager;
|
||||
|
@ -278,13 +277,13 @@ public class SqlResource
|
|||
if (lifecycles.isEmpty()) {
|
||||
return Response.status(Status.NOT_FOUND).build();
|
||||
}
|
||||
Set<Resource> resources = lifecycles
|
||||
Set<ResourceAction> resources = lifecycles
|
||||
.stream()
|
||||
.flatMap(lifecycle -> lifecycle.getAuthorizedResources().stream())
|
||||
.flatMap(lifecycle -> lifecycle.getRequiredResourceActions().stream())
|
||||
.collect(Collectors.toSet());
|
||||
Access access = AuthorizationUtils.authorizeAllResourceActions(
|
||||
req,
|
||||
Iterables.transform(resources, AuthorizationUtils.RESOURCE_READ_RA_GENERATOR),
|
||||
resources,
|
||||
authorizerMapper
|
||||
);
|
||||
|
||||
|
|
|
@ -109,7 +109,7 @@ public class SqlLifecycleTest
|
|||
DruidPlanner mockPlanner = EasyMock.createMock(DruidPlanner.class);
|
||||
PlannerContext mockPlannerContext = EasyMock.createMock(PlannerContext.class);
|
||||
ValidationResult validationResult = new ValidationResult(Collections.emptySet());
|
||||
EasyMock.expect(plannerFactory.createPlanner(EasyMock.anyObject())).andReturn(mockPlanner).once();
|
||||
EasyMock.expect(plannerFactory.createPlanner(EasyMock.eq(sql), EasyMock.anyObject())).andReturn(mockPlanner).once();
|
||||
EasyMock.expect(mockPlanner.getPlannerContext()).andReturn(mockPlannerContext).once();
|
||||
mockPlannerContext.setAuthenticationResult(CalciteTests.REGULAR_USER_AUTH_RESULT);
|
||||
EasyMock.expectLastCall();
|
||||
|
@ -118,7 +118,7 @@ public class SqlLifecycleTest
|
|||
EasyMock.expect(plannerFactory.getAuthorizerMapper()).andReturn(CalciteTests.TEST_AUTHORIZER_MAPPER).once();
|
||||
mockPlannerContext.setAuthorizationResult(Access.OK);
|
||||
EasyMock.expectLastCall();
|
||||
EasyMock.expect(mockPlanner.validate(sql)).andReturn(validationResult).once();
|
||||
EasyMock.expect(mockPlanner.validate()).andReturn(validationResult).once();
|
||||
mockPlanner.close();
|
||||
EasyMock.expectLastCall();
|
||||
|
||||
|
@ -132,7 +132,7 @@ public class SqlLifecycleTest
|
|||
// test prepare
|
||||
PrepareResult mockPrepareResult = EasyMock.createMock(PrepareResult.class);
|
||||
EasyMock.expect(plannerFactory.createPlannerWithContext(EasyMock.eq(mockPlannerContext))).andReturn(mockPlanner).once();
|
||||
EasyMock.expect(mockPlanner.prepare(sql)).andReturn(mockPrepareResult).once();
|
||||
EasyMock.expect(mockPlanner.prepare()).andReturn(mockPrepareResult).once();
|
||||
mockPlanner.close();
|
||||
EasyMock.expectLastCall();
|
||||
EasyMock.replay(plannerFactory, serviceEmitter, requestLogger, mockPlanner, mockPlannerContext, mockPrepareResult);
|
||||
|
@ -145,7 +145,7 @@ public class SqlLifecycleTest
|
|||
// test plan
|
||||
PlannerResult mockPlanResult = EasyMock.createMock(PlannerResult.class);
|
||||
EasyMock.expect(plannerFactory.createPlannerWithContext(EasyMock.eq(mockPlannerContext))).andReturn(mockPlanner).once();
|
||||
EasyMock.expect(mockPlanner.plan(sql)).andReturn(mockPlanResult).once();
|
||||
EasyMock.expect(mockPlanner.plan()).andReturn(mockPlanResult).once();
|
||||
mockPlanner.close();
|
||||
EasyMock.expectLastCall();
|
||||
EasyMock.replay(plannerFactory, serviceEmitter, requestLogger, mockPlanner, mockPlannerContext, mockPrepareResult, mockPlanResult);
|
||||
|
@ -206,7 +206,7 @@ public class SqlLifecycleTest
|
|||
DruidPlanner mockPlanner = EasyMock.createMock(DruidPlanner.class);
|
||||
PlannerContext mockPlannerContext = EasyMock.createMock(PlannerContext.class);
|
||||
ValidationResult validationResult = new ValidationResult(Collections.emptySet());
|
||||
EasyMock.expect(plannerFactory.createPlanner(EasyMock.anyObject())).andReturn(mockPlanner).once();
|
||||
EasyMock.expect(plannerFactory.createPlanner(EasyMock.eq(sql), EasyMock.anyObject())).andReturn(mockPlanner).once();
|
||||
EasyMock.expect(mockPlanner.getPlannerContext()).andReturn(mockPlannerContext).once();
|
||||
mockPlannerContext.setAuthenticationResult(CalciteTests.REGULAR_USER_AUTH_RESULT);
|
||||
EasyMock.expectLastCall();
|
||||
|
@ -215,7 +215,7 @@ public class SqlLifecycleTest
|
|||
EasyMock.expect(plannerFactory.getAuthorizerMapper()).andReturn(CalciteTests.TEST_AUTHORIZER_MAPPER).once();
|
||||
mockPlannerContext.setAuthorizationResult(Access.OK);
|
||||
EasyMock.expectLastCall();
|
||||
EasyMock.expect(mockPlanner.validate(sql)).andReturn(validationResult).once();
|
||||
EasyMock.expect(mockPlanner.validate()).andReturn(validationResult).once();
|
||||
mockPlanner.close();
|
||||
EasyMock.expectLastCall();
|
||||
|
||||
|
@ -235,7 +235,7 @@ public class SqlLifecycleTest
|
|||
// test prepare
|
||||
PrepareResult mockPrepareResult = EasyMock.createMock(PrepareResult.class);
|
||||
EasyMock.expect(plannerFactory.createPlannerWithContext(EasyMock.eq(mockPlannerContext))).andReturn(mockPlanner).once();
|
||||
EasyMock.expect(mockPlanner.prepare(sql)).andReturn(mockPrepareResult).once();
|
||||
EasyMock.expect(mockPlanner.prepare()).andReturn(mockPrepareResult).once();
|
||||
mockPlanner.close();
|
||||
EasyMock.expectLastCall();
|
||||
EasyMock.replay(plannerFactory, serviceEmitter, requestLogger, mockPlanner, mockPlannerContext, mockPrepareResult);
|
||||
|
@ -248,7 +248,7 @@ public class SqlLifecycleTest
|
|||
// test plan
|
||||
PlannerResult mockPlanResult = EasyMock.createMock(PlannerResult.class);
|
||||
EasyMock.expect(plannerFactory.createPlannerWithContext(EasyMock.eq(mockPlannerContext))).andReturn(mockPlanner).once();
|
||||
EasyMock.expect(mockPlanner.plan(sql)).andReturn(mockPlanResult).once();
|
||||
EasyMock.expect(mockPlanner.plan()).andReturn(mockPlanResult).once();
|
||||
mockPlanner.close();
|
||||
EasyMock.expectLastCall();
|
||||
EasyMock.replay(plannerFactory, serviceEmitter, requestLogger, mockPlanner, mockPlannerContext, mockPrepareResult, mockPlanResult);
|
||||
|
|
|
@ -33,6 +33,7 @@ import com.google.inject.Module;
|
|||
import com.google.inject.multibindings.Multibinder;
|
||||
import com.google.inject.name.Names;
|
||||
import org.apache.calcite.avatica.AvaticaClientRuntimeException;
|
||||
import org.apache.calcite.avatica.AvaticaSqlException;
|
||||
import org.apache.calcite.avatica.Meta;
|
||||
import org.apache.calcite.avatica.MissingResultsException;
|
||||
import org.apache.calcite.avatica.NoSuchStatementException;
|
||||
|
@ -65,6 +66,8 @@ import org.apache.druid.sql.calcite.planner.Calcites;
|
|||
import org.apache.druid.sql.calcite.planner.DruidOperatorTable;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerConfig;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerFactory;
|
||||
import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory;
|
||||
import org.apache.druid.sql.calcite.run.QueryMakerFactory;
|
||||
import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog;
|
||||
import org.apache.druid.sql.calcite.schema.DruidSchemaName;
|
||||
import org.apache.druid.sql.calcite.schema.NamedSchema;
|
||||
|
@ -209,6 +212,7 @@ public abstract class DruidAvaticaHandlerTest extends CalciteTestBase
|
|||
binder.bind(QueryScheduler.class)
|
||||
.toProvider(QuerySchedulerProvider.class)
|
||||
.in(LazySingleton.class);
|
||||
binder.bind(QueryMakerFactory.class).to(NativeQueryMakerFactory.class);
|
||||
}
|
||||
}
|
||||
)
|
||||
|
@ -890,7 +894,7 @@ public abstract class DruidAvaticaHandlerTest extends CalciteTestBase
|
|||
CalciteTests.createSqlLifecycleFactory(
|
||||
new PlannerFactory(
|
||||
rootSchema,
|
||||
CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
|
||||
CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
|
||||
operatorTable,
|
||||
macroTable,
|
||||
plannerConfig,
|
||||
|
@ -980,7 +984,7 @@ public abstract class DruidAvaticaHandlerTest extends CalciteTestBase
|
|||
CalciteTests.createSqlLifecycleFactory(
|
||||
new PlannerFactory(
|
||||
rootSchema,
|
||||
CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
|
||||
CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
|
||||
operatorTable,
|
||||
macroTable,
|
||||
plannerConfig,
|
||||
|
@ -1097,9 +1101,24 @@ public abstract class DruidAvaticaHandlerTest extends CalciteTestBase
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testSysTableParameterBinding() throws Exception
|
||||
public void testSysTableParameterBindingRegularUser() throws Exception
|
||||
{
|
||||
PreparedStatement statement = client.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?");
|
||||
PreparedStatement statement =
|
||||
client.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?");
|
||||
statement.setString(1, "dummy");
|
||||
|
||||
Assert.assertThrows(
|
||||
"Insufficient permission to view servers",
|
||||
AvaticaSqlException.class,
|
||||
statement::executeQuery
|
||||
);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSysTableParameterBindingSuperUser() throws Exception
|
||||
{
|
||||
PreparedStatement statement =
|
||||
superuserClient.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?");
|
||||
statement.setString(1, "dummy");
|
||||
final ResultSet resultSet = statement.executeQuery();
|
||||
final List<Map<String, Object>> rows = getRows(resultSet);
|
||||
|
|
|
@ -91,7 +91,7 @@ public class DruidStatementTest extends CalciteTestBase
|
|||
CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
|
||||
final PlannerFactory plannerFactory = new PlannerFactory(
|
||||
rootSchema,
|
||||
CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
|
||||
CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
|
||||
operatorTable,
|
||||
macroTable,
|
||||
plannerConfig,
|
||||
|
|
|
@ -23,6 +23,7 @@ import com.fasterxml.jackson.core.JsonProcessingException;
|
|||
import com.fasterxml.jackson.databind.InjectableValues;
|
||||
import com.fasterxml.jackson.databind.Module;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.fasterxml.jackson.databind.module.SimpleModule;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import org.apache.calcite.plan.RelOptPlanner;
|
||||
|
@ -76,10 +77,11 @@ import org.apache.druid.server.QueryStackTests;
|
|||
import org.apache.druid.server.security.AuthenticationResult;
|
||||
import org.apache.druid.server.security.AuthorizerMapper;
|
||||
import org.apache.druid.server.security.ForbiddenException;
|
||||
import org.apache.druid.server.security.Resource;
|
||||
import org.apache.druid.server.security.ResourceAction;
|
||||
import org.apache.druid.sql.SqlLifecycle;
|
||||
import org.apache.druid.sql.SqlLifecycleFactory;
|
||||
import org.apache.druid.sql.calcite.expression.DruidExpression;
|
||||
import org.apache.druid.sql.calcite.external.ExternalDataSource;
|
||||
import org.apache.druid.sql.calcite.planner.Calcites;
|
||||
import org.apache.druid.sql.calcite.planner.DruidOperatorTable;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerConfig;
|
||||
|
@ -478,7 +480,7 @@ public class BaseCalciteQueryTest extends CalciteTestBase
|
|||
@Rule
|
||||
public QueryLogHook getQueryLogHook()
|
||||
{
|
||||
return queryLogHook = QueryLogHook.create(queryJsonMapper);
|
||||
return queryLogHook = QueryLogHook.create(createQueryJsonMapper());
|
||||
}
|
||||
|
||||
@Before
|
||||
|
@ -555,7 +557,9 @@ public class BaseCalciteQueryTest extends CalciteTestBase
|
|||
|
||||
public Iterable<? extends Module> getJacksonModules()
|
||||
{
|
||||
return new LookupSerdeModule().getJacksonModules();
|
||||
final List<Module> modules = new ArrayList<>(new LookupSerdeModule().getJacksonModules());
|
||||
modules.add(new SimpleModule().registerSubtypes(ExternalDataSource.class));
|
||||
return modules;
|
||||
}
|
||||
|
||||
public void assertQueryIsUnplannable(final String sql)
|
||||
|
@ -698,7 +702,7 @@ public class BaseCalciteQueryTest extends CalciteTestBase
|
|||
/**
|
||||
* Override not just the outer query context, but also the contexts of all subqueries.
|
||||
*/
|
||||
private <T> Query<T> recursivelyOverrideContext(final Query<T> query, final Map<String, Object> context)
|
||||
public <T> Query<T> recursivelyOverrideContext(final Query<T> query, final Map<String, Object> context)
|
||||
{
|
||||
return query.withDataSource(recursivelyOverrideContext(query.getDataSource(), context))
|
||||
.withOverriddenContext(context);
|
||||
|
@ -945,7 +949,7 @@ public class BaseCalciteQueryTest extends CalciteTestBase
|
|||
}
|
||||
}
|
||||
|
||||
public Set<Resource> analyzeResources(
|
||||
public Set<ResourceAction> analyzeResources(
|
||||
PlannerConfig plannerConfig,
|
||||
String sql,
|
||||
AuthenticationResult authenticationResult
|
||||
|
@ -961,7 +965,7 @@ public class BaseCalciteQueryTest extends CalciteTestBase
|
|||
|
||||
SqlLifecycle lifecycle = lifecycleFactory.factorize();
|
||||
lifecycle.initialize(sql, ImmutableMap.of());
|
||||
return lifecycle.runAnalyzeResources(authenticationResult).getResources();
|
||||
return lifecycle.runAnalyzeResources(authenticationResult).getResourceActions();
|
||||
}
|
||||
|
||||
public SqlLifecycleFactory getSqlLifecycleFactory(
|
||||
|
@ -983,7 +987,10 @@ public class BaseCalciteQueryTest extends CalciteTestBase
|
|||
|
||||
final PlannerFactory plannerFactory = new PlannerFactory(
|
||||
rootSchema,
|
||||
CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
|
||||
new TestQueryMakerFactory(
|
||||
CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
|
||||
objectMapper
|
||||
),
|
||||
operatorTable,
|
||||
macroTable,
|
||||
plannerConfig,
|
||||
|
|
|
@ -0,0 +1,650 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.druid.sql.calcite;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import org.apache.druid.data.input.impl.CsvInputFormat;
|
||||
import org.apache.druid.data.input.impl.InlineInputSource;
|
||||
import org.apache.druid.java.util.common.ISE;
|
||||
import org.apache.druid.java.util.common.StringUtils;
|
||||
import org.apache.druid.java.util.common.granularity.Granularities;
|
||||
import org.apache.druid.java.util.common.jackson.JacksonUtils;
|
||||
import org.apache.druid.query.Query;
|
||||
import org.apache.druid.query.aggregation.CountAggregatorFactory;
|
||||
import org.apache.druid.query.aggregation.LongSumAggregatorFactory;
|
||||
import org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory;
|
||||
import org.apache.druid.query.dimension.DefaultDimensionSpec;
|
||||
import org.apache.druid.query.groupby.GroupByQuery;
|
||||
import org.apache.druid.query.scan.ScanQuery;
|
||||
import org.apache.druid.segment.column.ColumnType;
|
||||
import org.apache.druid.segment.column.RowSignature;
|
||||
import org.apache.druid.server.security.Action;
|
||||
import org.apache.druid.server.security.AuthenticationResult;
|
||||
import org.apache.druid.server.security.ForbiddenException;
|
||||
import org.apache.druid.server.security.Resource;
|
||||
import org.apache.druid.server.security.ResourceAction;
|
||||
import org.apache.druid.server.security.ResourceType;
|
||||
import org.apache.druid.sql.SqlLifecycle;
|
||||
import org.apache.druid.sql.SqlLifecycleFactory;
|
||||
import org.apache.druid.sql.SqlPlanningException;
|
||||
import org.apache.druid.sql.calcite.external.ExternalDataSource;
|
||||
import org.apache.druid.sql.calcite.external.ExternalOperatorConversion;
|
||||
import org.apache.druid.sql.calcite.filtration.Filtration;
|
||||
import org.apache.druid.sql.calcite.planner.Calcites;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerConfig;
|
||||
import org.apache.druid.sql.calcite.planner.PlannerContext;
|
||||
import org.apache.druid.sql.calcite.util.CalciteTests;
|
||||
import org.hamcrest.CoreMatchers;
|
||||
import org.hamcrest.Matcher;
|
||||
import org.hamcrest.MatcherAssert;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
import org.junit.internal.matchers.ThrowableMessageMatcher;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
public class CalciteInsertDmlTest extends BaseCalciteQueryTest
|
||||
{
|
||||
private static final Map<String, Object> DEFAULT_CONTEXT =
|
||||
ImmutableMap.<String, Object>builder()
|
||||
          .put(PlannerContext.CTX_SQL_QUERY_ID, DUMMY_SQL_ID)
          .build();

  private static final RowSignature FOO_TABLE_SIGNATURE =
      RowSignature.builder()
                  .addTimeColumn()
                  .add("cnt", ColumnType.LONG)
                  .add("dim1", ColumnType.STRING)
                  .add("dim2", ColumnType.STRING)
                  .add("dim3", ColumnType.STRING)
                  .add("m1", ColumnType.FLOAT)
                  .add("m2", ColumnType.DOUBLE)
                  .add("unique_dim1", HyperUniquesAggregatorFactory.TYPE)
                  .build();

  private final ExternalDataSource externalDataSource = new ExternalDataSource(
      new InlineInputSource("a,b,1\nc,d,2\n"),
      new CsvInputFormat(ImmutableList.of("x", "y", "z"), null, false, false, 0),
      RowSignature.builder()
                  .add("x", ColumnType.STRING)
                  .add("y", ColumnType.STRING)
                  .add("z", ColumnType.LONG)
                  .build()
  );

  private boolean didTest = false;

  @After
  @Override
  public void tearDown() throws Exception
  {
    super.tearDown();

    // Catch situations where tests forgot to call "verify" on their tester.
    if (!didTest) {
      throw new ISE("Test was not run; did you call verify() on a tester?");
    }
  }

  @Test
  public void testInsertFromTable()
  {
    testInsertQuery()
        .sql("INSERT INTO dst SELECT * FROM foo")
        .expectTarget("dst", FOO_TABLE_SIGNATURE)
        .expectResources(dataSourceRead("foo"), dataSourceWrite("dst"))
        .expectQuery(
            newScanQueryBuilder()
                .dataSource("foo")
                .intervals(querySegmentSpec(Filtration.eternity()))
                .columns("__time", "cnt", "dim1", "dim2", "dim3", "m1", "m2", "unique_dim1")
                .build()
        )
        .verify();
  }

  @Test
  public void testInsertFromView()
  {
    testInsertQuery()
        .sql("INSERT INTO dst SELECT * FROM view.aview")
        .expectTarget("dst", RowSignature.builder().add("dim1_firstchar", ColumnType.STRING).build())
        .expectResources(viewRead("aview"), dataSourceWrite("dst"))
        .expectQuery(
            newScanQueryBuilder()
                .dataSource("foo")
                .intervals(querySegmentSpec(Filtration.eternity()))
                .virtualColumns(expressionVirtualColumn("v0", "substring(\"dim1\", 0, 1)", ColumnType.STRING))
                .filters(selector("dim2", "a", null))
                .columns("v0")
                .build()
        )
        .verify();
  }

  @Test
  public void testInsertIntoExistingTable()
  {
    testInsertQuery()
        .sql("INSERT INTO foo SELECT * FROM foo")
        .expectTarget("foo", FOO_TABLE_SIGNATURE)
        .expectResources(dataSourceRead("foo"), dataSourceWrite("foo"))
        .expectQuery(
            newScanQueryBuilder()
                .dataSource("foo")
                .intervals(querySegmentSpec(Filtration.eternity()))
                .columns("__time", "cnt", "dim1", "dim2", "dim3", "m1", "m2", "unique_dim1")
                .build()
        )
        .verify();
  }

  @Test
  public void testInsertIntoQualifiedTable()
  {
    testInsertQuery()
        .sql("INSERT INTO druid.dst SELECT * FROM foo")
        .expectTarget("dst", FOO_TABLE_SIGNATURE)
        .expectResources(dataSourceRead("foo"), dataSourceWrite("dst"))
        .expectQuery(
            newScanQueryBuilder()
                .dataSource("foo")
                .intervals(querySegmentSpec(Filtration.eternity()))
                .columns("__time", "cnt", "dim1", "dim2", "dim3", "m1", "m2", "unique_dim1")
                .build()
        )
        .verify();
  }

  @Test
  public void testInsertUsingColumnList()
  {
    testInsertQuery()
        .sql("INSERT INTO dst (foo, bar) SELECT dim1, dim2 FROM foo")
        .expectValidationError(SqlPlanningException.class, "INSERT with target column list is not supported.")
        .verify();
  }

  @Test
  public void testUpsert()
  {
    testInsertQuery()
        .sql("UPSERT INTO dst SELECT * FROM foo")
        .expectValidationError(SqlPlanningException.class, "UPSERT is not supported.")
        .verify();
  }

  @Test
  public void testInsertIntoSystemTable()
  {
    testInsertQuery()
        .sql("INSERT INTO INFORMATION_SCHEMA.COLUMNS SELECT * FROM foo")
        .expectValidationError(
            SqlPlanningException.class,
            "Cannot INSERT into [INFORMATION_SCHEMA.COLUMNS] because it is not a Druid datasource."
        )
        .verify();
  }

  @Test
  public void testInsertIntoView()
  {
    testInsertQuery()
        .sql("INSERT INTO view.aview SELECT * FROM foo")
        .expectValidationError(
            SqlPlanningException.class,
            "Cannot INSERT into [view.aview] because it is not a Druid datasource."
        )
        .verify();
  }

  @Test
  public void testInsertFromUnauthorizedDataSource()
  {
    testInsertQuery()
        .sql("INSERT INTO dst SELECT * FROM \"%s\"", CalciteTests.FORBIDDEN_DATASOURCE)
        .expectValidationError(ForbiddenException.class)
        .verify();
  }

  @Test
  public void testInsertIntoUnauthorizedDataSource()
  {
    testInsertQuery()
        .sql("INSERT INTO \"%s\" SELECT * FROM foo", CalciteTests.FORBIDDEN_DATASOURCE)
        .expectValidationError(ForbiddenException.class)
        .verify();
  }

  @Test
  public void testInsertIntoNonexistentSchema()
  {
    testInsertQuery()
        .sql("INSERT INTO nonexistent.dst SELECT * FROM foo")
        .expectValidationError(
            SqlPlanningException.class,
            "Cannot INSERT into [nonexistent.dst] because it is not a Druid datasource."
        )
        .verify();
  }

  @Test
  public void testInsertFromExternal()
  {
    testInsertQuery()
        .sql("INSERT INTO dst SELECT * FROM %s", externSql(externalDataSource))
        .authentication(CalciteTests.SUPER_USER_AUTH_RESULT)
        .expectTarget("dst", externalDataSource.getSignature())
        .expectResources(dataSourceWrite("dst"), ExternalOperatorConversion.EXTERNAL_RESOURCE_ACTION)
        .expectQuery(
            newScanQueryBuilder()
                .dataSource(externalDataSource)
                .intervals(querySegmentSpec(Filtration.eternity()))
                .columns("x", "y", "z")
                .build()
        )
        .verify();
  }

  @Test
  public void testExplainInsertFromExternal() throws Exception
  {
    // Skip vectorization since otherwise the "context" will change for each subtest.
    skipVectorize();

    final ScanQuery expectedQuery = newScanQueryBuilder()
        .dataSource(externalDataSource)
        .intervals(querySegmentSpec(Filtration.eternity()))
        .columns("x", "y", "z")
        .context(
            queryJsonMapper.readValue(
                "{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\",\"vectorize\":\"false\",\"vectorizeVirtualColumns\":\"false\"}",
                JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT
            )
        )
        .build();

    final String expectedExplanation =
        "DruidQueryRel(query=["
        + queryJsonMapper.writeValueAsString(expectedQuery)
        + "], signature=[{x:STRING, y:STRING, z:LONG}])\n";

    // Use testQuery for EXPLAIN (not testInsertQuery).
    testQuery(
        new PlannerConfig(),
        StringUtils.format("EXPLAIN PLAN FOR INSERT INTO dst SELECT * FROM %s", externSql(externalDataSource)),
        CalciteTests.SUPER_USER_AUTH_RESULT,
        ImmutableList.of(),
        ImmutableList.of(
            new Object[]{
                expectedExplanation,
                "[{\"name\":\"EXTERNAL\",\"type\":\"EXTERNAL\"},{\"name\":\"dst\",\"type\":\"DATASOURCE\"}]"
            }
        )
    );

    // Not using testInsertQuery, so must set didTest manually to satisfy the check in tearDown.
    didTest = true;
  }

  @Test
  public void testExplainInsertFromExternalUnauthorized()
  {
    // Use testQuery for EXPLAIN (not testInsertQuery).
    Assert.assertThrows(
        ForbiddenException.class,
        () ->
            testQuery(
                StringUtils.format("EXPLAIN PLAN FOR INSERT INTO dst SELECT * FROM %s", externSql(externalDataSource)),
                ImmutableList.of(),
                ImmutableList.of()
            )
    );

    // Not using testInsertQuery, so must set didTest manually to satisfy the check in tearDown.
    didTest = true;
  }

  @Test
  public void testInsertFromExternalUnauthorized()
  {
    testInsertQuery()
        .sql("INSERT INTO dst SELECT * FROM %s", externSql(externalDataSource))
        .expectValidationError(ForbiddenException.class)
        .verify();
  }

  @Test
  public void testInsertFromExternalProjectSort()
  {
    // INSERT with a particular column ordering.

    testInsertQuery()
        .sql("INSERT INTO dst SELECT x || y AS xy, z FROM %s ORDER BY 1, 2", externSql(externalDataSource))
        .authentication(CalciteTests.SUPER_USER_AUTH_RESULT)
        .expectTarget("dst", RowSignature.builder().add("xy", ColumnType.STRING).add("z", ColumnType.LONG).build())
        .expectResources(dataSourceWrite("dst"), ExternalOperatorConversion.EXTERNAL_RESOURCE_ACTION)
        .expectQuery(
            newScanQueryBuilder()
                .dataSource(externalDataSource)
                .intervals(querySegmentSpec(Filtration.eternity()))
                .virtualColumns(expressionVirtualColumn("v0", "concat(\"x\",\"y\")", ColumnType.STRING))
                .columns("v0", "z")
                .orderBy(
                    ImmutableList.of(
                        new ScanQuery.OrderBy("v0", ScanQuery.Order.ASCENDING),
                        new ScanQuery.OrderBy("z", ScanQuery.Order.ASCENDING)
                    )
                )
                .build()
        )
        .verify();
  }

  @Test
  public void testInsertFromExternalAggregate()
  {
    // INSERT with rollup.

    testInsertQuery()
        .sql(
            "INSERT INTO dst SELECT x, SUM(z) AS sum_z, COUNT(*) AS cnt FROM %s GROUP BY 1",
            externSql(externalDataSource)
        )
        .authentication(CalciteTests.SUPER_USER_AUTH_RESULT)
        .expectTarget(
            "dst",
            RowSignature.builder()
                        .add("x", ColumnType.STRING)
                        .add("sum_z", ColumnType.LONG)
                        .add("cnt", ColumnType.LONG)
                        .build()
        )
        .expectResources(dataSourceWrite("dst"), ExternalOperatorConversion.EXTERNAL_RESOURCE_ACTION)
        .expectQuery(
            GroupByQuery.builder()
                        .setDataSource(externalDataSource)
                        .setInterval(querySegmentSpec(Filtration.eternity()))
                        .setGranularity(Granularities.ALL)
                        .setDimensions(dimensions(new DefaultDimensionSpec("x", "d0")))
                        .setAggregatorSpecs(
                            new LongSumAggregatorFactory("a0", "z"),
                            new CountAggregatorFactory("a1")
                        )
                        .build()
        )
        .verify();
  }

  @Test
  public void testInsertFromExternalAggregateAll()
  {
    // INSERT with rollup into a single row (no GROUP BY exprs).

    testInsertQuery()
        .sql(
            "INSERT INTO dst SELECT COUNT(*) AS cnt FROM %s",
            externSql(externalDataSource)
        )
        .authentication(CalciteTests.SUPER_USER_AUTH_RESULT)
        .expectTarget(
            "dst",
            RowSignature.builder()
                        .add("cnt", ColumnType.LONG)
                        .build()
        )
        .expectResources(dataSourceWrite("dst"), ExternalOperatorConversion.EXTERNAL_RESOURCE_ACTION)
        .expectQuery(
            GroupByQuery.builder()
                        .setDataSource(externalDataSource)
                        .setInterval(querySegmentSpec(Filtration.eternity()))
                        .setGranularity(Granularities.ALL)
                        .setAggregatorSpecs(new CountAggregatorFactory("a0"))
                        .build()
        )
        .verify();
  }

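  /**
   * Builds a "TABLE(extern(...))" SQL fragment for the given {@link ExternalDataSource} by JSON-serializing its
   * input source, input format, and row signature as the three arguments of the EXTERN table macro.
   */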
  private String externSql(final ExternalDataSource externalDataSource)
  {
    try {
      return StringUtils.format(
          "TABLE(extern(%s, %s, %s))",
          Calcites.escapeStringLiteral(queryJsonMapper.writeValueAsString(externalDataSource.getInputSource())),
          Calcites.escapeStringLiteral(queryJsonMapper.writeValueAsString(externalDataSource.getInputFormat())),
          Calcites.escapeStringLiteral(queryJsonMapper.writeValueAsString(externalDataSource.getSignature()))
      );
    }
    catch (JsonProcessingException e) {
      throw new RuntimeException(e);
    }
  }

  private InsertDmlTester testInsertQuery()
  {
    return new InsertDmlTester();
  }

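  /**
   * Fluent tester for INSERT planning: collects the SQL, expected target datasource and signature, expected
   * resource actions, and expected native query, then {@link #verify()} runs either the validation-error or the
   * success checks. Each test method should use exactly one tester and must call verify().
   */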
  public class InsertDmlTester
  {
    private String sql;
    private PlannerConfig plannerConfig = new PlannerConfig();
    private Map<String, Object> queryContext = DEFAULT_CONTEXT;
    private AuthenticationResult authenticationResult = CalciteTests.REGULAR_USER_AUTH_RESULT;
    private String expectedTargetDataSource;
    private RowSignature expectedTargetSignature;
    private List<ResourceAction> expectedResources;
    private Query expectedQuery;
    private Matcher<Throwable> validationErrorMatcher;

    private InsertDmlTester()
    {
      // Nothing to do.
    }

    public InsertDmlTester sql(final String sql)
    {
      this.sql = sql;
      return this;
    }

    private InsertDmlTester sql(final String sqlPattern, final Object arg, final Object... otherArgs)
    {
      final Object[] args = new Object[otherArgs.length + 1];
      args[0] = arg;
      System.arraycopy(otherArgs, 0, args, 1, otherArgs.length);
      this.sql = StringUtils.format(sqlPattern, args);
      return this;
    }

    public InsertDmlTester context(final Map<String, Object> context)
    {
      this.queryContext = context;
      return this;
    }

    public InsertDmlTester authentication(final AuthenticationResult authenticationResult)
    {
      this.authenticationResult = authenticationResult;
      return this;
    }

    public InsertDmlTester expectTarget(
        final String expectedTargetDataSource,
        final RowSignature expectedTargetSignature
    )
    {
      this.expectedTargetDataSource = Preconditions.checkNotNull(expectedTargetDataSource, "expectedTargetDataSource");
      this.expectedTargetSignature = Preconditions.checkNotNull(expectedTargetSignature, "expectedTargetSignature");
      return this;
    }

    public InsertDmlTester expectResources(final ResourceAction... expectedResources)
    {
      this.expectedResources = Arrays.asList(expectedResources);
      return this;
    }

    @SuppressWarnings("rawtypes")
    public InsertDmlTester expectQuery(final Query expectedQuery)
    {
      this.expectedQuery = expectedQuery;
      return this;
    }

    public InsertDmlTester expectValidationError(Matcher<Throwable> validationErrorMatcher)
    {
      this.validationErrorMatcher = validationErrorMatcher;
      return this;
    }

    public InsertDmlTester expectValidationError(Class<? extends Throwable> clazz)
    {
      return expectValidationError(CoreMatchers.instanceOf(clazz));
    }

    public InsertDmlTester expectValidationError(Class<? extends Throwable> clazz, String message)
    {
      return expectValidationError(
          CoreMatchers.allOf(
              CoreMatchers.instanceOf(clazz),
              ThrowableMessageMatcher.hasMessage(CoreMatchers.equalTo(message))
          )
      );
    }

    public void verify()
    {
      if (didTest) {
        // It's good form to only do one test per method.
        // This also helps us ensure that "verify" actually does get called.
        throw new ISE("Use one @Test method per tester");
      }

      didTest = true;

      if (sql == null) {
        throw new ISE("Test must have SQL statement");
      }

      try {
        log.info("SQL: %s", sql);
        queryLogHook.clearRecordedQueries();

        if (validationErrorMatcher != null) {
          verifyValidationError();
        } else {
          verifySuccess();
        }
      }
      catch (Exception e) {
        throw new RuntimeException(e);
      }
    }

    private void verifyValidationError()
    {
      if (expectedTargetDataSource != null) {
        throw new ISE("Test must not have expectedTargetDataSource");
      }

      if (expectedResources != null) {
        throw new ISE("Test must not have expectedResources");
      }

      if (expectedQuery != null) {
        throw new ISE("Test must not have expectedQuery");
      }

      final SqlLifecycleFactory sqlLifecycleFactory = getSqlLifecycleFactory(
          plannerConfig,
          createOperatorTable(),
          createMacroTable(),
          CalciteTests.TEST_AUTHORIZER_MAPPER,
          queryJsonMapper
      );

      final SqlLifecycle sqlLifecycle = sqlLifecycleFactory.factorize();
      sqlLifecycle.initialize(sql, queryContext);

      final Throwable e = Assert.assertThrows(
          Throwable.class,
          () -> sqlLifecycle.validateAndAuthorize(authenticationResult)
      );

      MatcherAssert.assertThat(e, validationErrorMatcher);
      Assert.assertTrue(queryLogHook.getRecordedQueries().isEmpty());
    }

    private void verifySuccess() throws Exception
    {
      if (expectedTargetDataSource == null) {
        throw new ISE("Test must have expectedTargetDataSource");
      }

      if (expectedResources == null) {
        throw new ISE("Test must have expectedResources");
      }

      final List<Query> expectedQueries =
          expectedQuery == null
          ? Collections.emptyList()
          : Collections.singletonList(recursivelyOverrideContext(expectedQuery, queryContext));

      Assert.assertEquals(
          ImmutableSet.copyOf(expectedResources),
          analyzeResources(plannerConfig, sql, authenticationResult)
      );

      final List<Object[]> results =
          getResults(plannerConfig, queryContext, Collections.emptyList(), sql, authenticationResult);

      verifyResults(
          sql,
          expectedQueries,
          Collections.singletonList(new Object[]{expectedTargetDataSource, expectedTargetSignature}),
          results
      );
    }
  }

  private static ResourceAction viewRead(final String viewName)
  {
    return new ResourceAction(new Resource(viewName, ResourceType.VIEW), Action.READ);
  }

  private static ResourceAction dataSourceRead(final String dataSource)
  {
    return new ResourceAction(new Resource(dataSource, ResourceType.DATASOURCE), Action.READ);
  }

  private static ResourceAction dataSourceWrite(final String dataSource)
  {
    return new ResourceAction(new Resource(dataSource, ResourceType.DATASOURCE), Action.WRITE);
  }
}
@@ -20,7 +20,9 @@
package org.apache.druid.sql.calcite;

import com.google.common.collect.ImmutableSet;
import org.apache.druid.server.security.Action;
import org.apache.druid.server.security.Resource;
import org.apache.druid.server.security.ResourceAction;
import org.apache.druid.server.security.ResourceType;
import org.apache.druid.sql.calcite.planner.PlannerConfig;
import org.apache.druid.sql.calcite.util.CalciteTests;
@@ -36,7 +38,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest
  {
    final String sql = "SELECT COUNT(*) FROM foo WHERE foo.dim1 <> 'z'";

    Set<Resource> requiredResources = analyzeResources(
    Set<ResourceAction> requiredResources = analyzeResources(
        PLANNER_CONFIG_DEFAULT,
        sql,
        CalciteTests.REGULAR_USER_AUTH_RESULT
@@ -44,7 +46,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest

    Assert.assertEquals(
        ImmutableSet.of(
            new Resource("foo", ResourceType.DATASOURCE)
            new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ)
        ),
        requiredResources
    );
@@ -55,7 +57,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest
  {
    final String sql = "SELECT COUNT(*) FROM foo as druid WHERE druid.dim1 <> 'z'";

    Set<Resource> requiredResources = analyzeResources(
    Set<ResourceAction> requiredResources = analyzeResources(
        PLANNER_CONFIG_DEFAULT,
        sql,
        CalciteTests.REGULAR_USER_AUTH_RESULT
@@ -63,7 +65,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest

    Assert.assertEquals(
        ImmutableSet.of(
            new Resource("foo", ResourceType.DATASOURCE)
            new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ)
        ),
        requiredResources
    );
@@ -81,7 +83,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest
        + " )\n"
        + ")";

    Set<Resource> requiredResources = analyzeResources(
    Set<ResourceAction> requiredResources = analyzeResources(
        PLANNER_CONFIG_DEFAULT,
        sql,
        CalciteTests.REGULAR_USER_AUTH_RESULT
@@ -89,8 +91,8 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest

    Assert.assertEquals(
        ImmutableSet.of(
            new Resource("foo", ResourceType.DATASOURCE),
            new Resource("numfoo", ResourceType.DATASOURCE)
            new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ),
            new ResourceAction(new Resource("numfoo", ResourceType.DATASOURCE), Action.READ)
        ),
        requiredResources
    );
@@ -107,7 +109,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest
        + " FROM (SELECT * FROM druid.foo UNION ALL SELECT * FROM druid.foo2)\n"
        + " GROUP BY dim2\n"
        + ")";
    Set<Resource> requiredResources = analyzeResources(
    Set<ResourceAction> requiredResources = analyzeResources(
        PLANNER_CONFIG_DEFAULT,
        sql,
        CalciteTests.REGULAR_USER_AUTH_RESULT
@@ -115,8 +117,8 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest

    Assert.assertEquals(
        ImmutableSet.of(
            new Resource("foo", ResourceType.DATASOURCE),
            new Resource("foo2", ResourceType.DATASOURCE)
            new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ),
            new ResourceAction(new Resource("foo2", ResourceType.DATASOURCE), Action.READ)
        ),
        requiredResources
    );
@@ -127,7 +129,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest
  {
    final String sql = "SELECT COUNT(*) FROM foo INNER JOIN numfoo ON foo.dim1 = numfoo.dim1 WHERE numfoo.dim1 <> 'z'";

    Set<Resource> requiredResources = analyzeResources(
    Set<ResourceAction> requiredResources = analyzeResources(
        PLANNER_CONFIG_DEFAULT,
        sql,
        CalciteTests.REGULAR_USER_AUTH_RESULT
@@ -135,8 +137,8 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest

    Assert.assertEquals(
        ImmutableSet.of(
            new Resource("foo", ResourceType.DATASOURCE),
            new Resource("numfoo", ResourceType.DATASOURCE)
            new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ),
            new ResourceAction(new Resource("numfoo", ResourceType.DATASOURCE), Action.READ)
        ),
        requiredResources
    );
@@ -147,7 +149,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest
  {
    final String sql = "SELECT COUNT(*) FROM view.aview as druid WHERE dim1_firstchar <> 'z'";

    Set<Resource> requiredResources = analyzeResources(
    Set<ResourceAction> requiredResources = analyzeResources(
        PLANNER_CONFIG_DEFAULT,
        sql,
        CalciteTests.REGULAR_USER_AUTH_RESULT
@@ -155,7 +157,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest

    Assert.assertEquals(
        ImmutableSet.of(
            new Resource("aview", ResourceType.VIEW)
            new ResourceAction(new Resource("aview", ResourceType.VIEW), Action.READ)
        ),
        requiredResources
    );
@@ -173,7 +175,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest
        + " )\n"
        + ")";

    Set<Resource> requiredResources = analyzeResources(
    Set<ResourceAction> requiredResources = analyzeResources(
        PLANNER_CONFIG_DEFAULT,
        sql,
        CalciteTests.REGULAR_USER_AUTH_RESULT
@@ -181,8 +183,8 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest

    Assert.assertEquals(
        ImmutableSet.of(
            new Resource("foo", ResourceType.DATASOURCE),
            new Resource("cview", ResourceType.VIEW)
            new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ),
            new ResourceAction(new Resource("cview", ResourceType.VIEW), Action.READ)
        ),
        requiredResources
    );
@@ -193,7 +195,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest
  {
    final String sql = "SELECT COUNT(*) FROM view.cview as aview INNER JOIN numfoo ON aview.dim2 = numfoo.dim2 WHERE numfoo.dim1 <> 'z'";

    Set<Resource> requiredResources = analyzeResources(
    Set<ResourceAction> requiredResources = analyzeResources(
        PLANNER_CONFIG_DEFAULT,
        sql,
        CalciteTests.REGULAR_USER_AUTH_RESULT
@@ -201,8 +203,8 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest

    Assert.assertEquals(
        ImmutableSet.of(
            new Resource("cview", ResourceType.VIEW),
            new Resource("numfoo", ResourceType.DATASOURCE)
            new ResourceAction(new Resource("cview", ResourceType.VIEW), Action.READ),
            new ResourceAction(new Resource("numfoo", ResourceType.DATASOURCE), Action.READ)
        ),
        requiredResources
    );
@@ -213,7 +215,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest
  {
    final String sql = "SELECT COUNT(*) FROM view.dview as druid WHERE druid.numfoo <> 'z'";

    Set<Resource> requiredResources = analyzeResources(
    Set<ResourceAction> requiredResources = analyzeResources(
        PLANNER_CONFIG_DEFAULT,
        sql,
        CalciteTests.REGULAR_USER_AUTH_RESULT
@@ -221,7 +223,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest

    Assert.assertEquals(
        ImmutableSet.of(
            new Resource("dview", ResourceType.VIEW)
            new ResourceAction(new Resource("dview", ResourceType.VIEW), Action.READ)
        ),
        requiredResources
    );
@@ -231,7 +233,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest
  public void testDynamicParameters()
  {
    final String sql = "SELECT SUBSTRING(dim2, CAST(? as BIGINT), CAST(? as BIGINT)) FROM druid.foo LIMIT ?";
    Set<Resource> requiredResources = analyzeResources(
    Set<ResourceAction> requiredResources = analyzeResources(
        PLANNER_CONFIG_DEFAULT,
        sql,
        CalciteTests.REGULAR_USER_AUTH_RESULT
@@ -239,7 +241,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest

    Assert.assertEquals(
        ImmutableSet.of(
            new Resource("foo", ResourceType.DATASOURCE)
            new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ)
        ),
        requiredResources
    );
@@ -263,7 +265,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest

  private void testSysTable(String sql, String name, PlannerConfig plannerConfig)
  {
    Set<Resource> requiredResources = analyzeResources(
    Set<ResourceAction> requiredResources = analyzeResources(
        plannerConfig,
        sql,
        CalciteTests.REGULAR_USER_AUTH_RESULT
@@ -273,7 +275,7 @@ public class DruidPlannerResourceAnalyzeTest extends BaseCalciteQueryTest
    } else {
      Assert.assertEquals(
          ImmutableSet.of(
              new Resource(name, ResourceType.SYSTEM_TABLE)
              new ResourceAction(new Resource(name, ResourceType.SYSTEM_TABLE), Action.READ)
          ),
          requiredResources
      );
@@ -132,7 +132,7 @@ public class SqlVectorizedExpressionSanityTest extends InitializedNullHandlingTe
        CalciteTests.createMockRootSchema(CONGLOMERATE, WALKER, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
    PLANNER_FACTORY = new PlannerFactory(
        rootSchema,
        CalciteTests.createMockQueryLifecycleFactory(WALKER, CONGLOMERATE),
        CalciteTests.createMockQueryMakerFactory(WALKER, CONGLOMERATE),
        CalciteTests.createOperatorTable(),
        CalciteTests.createExprMacroTable(),
        plannerConfig,
@@ -183,8 +183,8 @@ public class SqlVectorizedExpressionSanityTest extends InitializedNullHandlingTe
        final DruidPlanner vectorPlanner = plannerFactory.createPlannerForTesting(vector, query);
        final DruidPlanner nonVectorPlanner = plannerFactory.createPlannerForTesting(nonvector, query)
    ) {
      final PlannerResult vectorPlan = vectorPlanner.plan(query);
      final PlannerResult nonVectorPlan = nonVectorPlanner.plan(query);
      final PlannerResult vectorPlan = vectorPlanner.plan();
      final PlannerResult nonVectorPlan = nonVectorPlanner.plan();
      final Sequence<Object[]> vectorSequence = vectorPlan.run();
      final Sequence<Object[]> nonVectorSequence = nonVectorPlan.run();
      Yielder<Object[]> vectorizedYielder = Yielders.each(vectorSequence);
@@ -0,0 +1,100 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.sql.calcite;

import com.google.common.collect.ImmutableList;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.runtime.Hook;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.rel.DruidQuery;
import org.apache.druid.sql.calcite.run.QueryFeature;
import org.apache.druid.sql.calcite.run.QueryMaker;

/**
 * QueryMaker used by {@link CalciteInsertDmlTest}.
 */
public class TestInsertQueryMaker implements QueryMaker
{
  private final RelDataType resultType;
  private final String targetDataSource;
  private final RowSignature signature;

  public TestInsertQueryMaker(
      final RelDataTypeFactory typeFactory,
      final String targetDataSource,
      final RowSignature signature
  )
  {
    this.resultType = typeFactory.createStructType(
        ImmutableList.of(
            typeFactory.createSqlType(SqlTypeName.VARCHAR),
            typeFactory.createSqlType(SqlTypeName.OTHER)
        ),
        ImmutableList.of("dataSource", "signature")
    );
    this.targetDataSource = targetDataSource;
    this.signature = signature;
  }

  @Override
  public boolean feature(final QueryFeature feature)
  {
    switch (feature) {
      // INSERT queries should stick to groupBy, scan.
      case CAN_RUN_TIMESERIES:
      case CAN_RUN_TOPN:
        return false;

      // INSERT uses external data.
      case CAN_READ_EXTERNAL_DATA:
        return true;

      // INSERT uses Scan + ORDER BY.
      case SCAN_CAN_ORDER_BY_NON_TIME:
        return true;

      default:
        throw new IAE("Unrecognized feature: %s", feature);
    }
  }

  @Override
  public RelDataType getResultType()
  {
    return resultType;
  }

  @Override
  public Sequence<Object[]> runQuery(final DruidQuery druidQuery)
  {
    // Don't actually execute anything, but do record information that tests will check for.

    // 1) Add the query to Hook.QUERY_PLAN, so it gets picked up by QueryLogHook.
    Hook.QUERY_PLAN.run(druidQuery.getQuery());

    // 2) Return the dataSource and signature of the insert operation, so tests can confirm they are correct.
    return Sequences.simple(ImmutableList.of(new Object[]{targetDataSource, signature}));
  }
}
@@ -0,0 +1,62 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.sql.calcite;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.calcite.rel.RelRoot;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.server.QueryLifecycleFactory;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory;
import org.apache.druid.sql.calcite.run.QueryMaker;
import org.apache.druid.sql.calcite.run.QueryMakerFactory;
import org.apache.druid.sql.calcite.table.RowSignatures;

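/**
 * QueryMakerFactory used by the Calcite test framework: SELECT queries are delegated to
 * {@link NativeQueryMakerFactory}, while INSERT queries get a {@link TestInsertQueryMaker} so tests can verify the
 * target datasource and row signature without actually ingesting anything.
 */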
public class TestQueryMakerFactory implements QueryMakerFactory
{
  private final QueryLifecycleFactory queryLifecycleFactory;
  private final ObjectMapper jsonMapper;

  TestQueryMakerFactory(
      final QueryLifecycleFactory queryLifecycleFactory,
      final ObjectMapper jsonMapper
  )
  {
    this.queryLifecycleFactory = queryLifecycleFactory;
    this.jsonMapper = jsonMapper;
  }

  @Override
  public QueryMaker buildForSelect(RelRoot relRoot, PlannerContext plannerContext)
  {
    return new NativeQueryMakerFactory(queryLifecycleFactory, jsonMapper).buildForSelect(relRoot, plannerContext);
  }

  @Override
  public QueryMaker buildForInsert(String targetDataSource, RelRoot relRoot, PlannerContext plannerContext)
  {
    final RowSignature signature = RowSignatures.fromRelDataType(
        relRoot.validatedRowType.getFieldNames(),
        relRoot.validatedRowType
    );

    return new TestInsertQueryMaker(relRoot.rel.getCluster().getTypeFactory(), targetDataSource, signature);
  }
}
@@ -71,8 +71,10 @@ import java.util.stream.Collectors;
class ExpressionTestHelper
{
  private static final PlannerContext PLANNER_CONTEXT = PlannerContext.create(
      "SELECT 1", // The actual query isn't important for this test
      CalciteTests.createOperatorTable(),
      CalciteTests.createExprMacroTable(),
      CalciteTests.getJsonMapper(),
      new PlannerConfig(),
      new DruidSchemaCatalog(
          EasyMock.createMock(SchemaPlus.class),
@@ -115,8 +115,10 @@ public class CalcitePlannerModuleTest extends CalciteTestBase
  {
    DruidOperatorTable operatorTable = injector.getInstance(DruidOperatorTable.class);
    Assert.assertNotNull(operatorTable);

    // Should be a singleton.
    DruidOperatorTable other = injector.getInstance(DruidOperatorTable.class);
    Assert.assertNotSame(other, operatorTable);
    Assert.assertSame(other, operatorTable);
  }

  @Test
@@ -124,8 +126,10 @@ public class CalcitePlannerModuleTest extends CalciteTestBase
  {
    PlannerFactory plannerFactory = injector.getInstance(PlannerFactory.class);
    Assert.assertNotNull(PlannerFactory.class);

    // Should be a singleton.
    PlannerFactory other = injector.getInstance(PlannerFactory.class);
    Assert.assertNotSame(other, plannerFactory);
    Assert.assertSame(other, plannerFactory);
  }

  @Test
@@ -77,11 +77,13 @@ public class DruidRexExecutorTest extends InitializedNullHandlingTest
      .build();

  private static final PlannerContext PLANNER_CONTEXT = PlannerContext.create(
      "SELECT 1", // The actual query isn't important for this test
      new DruidOperatorTable(
          Collections.emptySet(),
          ImmutableSet.of(new DirectOperatorConversion(OPERATOR, "hyper_unique"))
      ),
      CalciteTests.createExprMacroTable(),
      CalciteTests.getJsonMapper(),
      new PlannerConfig(),
      new DruidSchemaCatalog(
          EasyMock.createMock(SchemaPlus.class),
@@ -50,6 +50,7 @@ public class DruidUnionDataSourceRuleTest
          .add("col1", ColumnType.STRING)
          .add("col2", ColumnType.LONG)
          .build(),
      null,
      false,
      false
  );
@@ -36,6 +36,7 @@ import org.apache.druid.discovery.DruidLeaderClient;
import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
import org.apache.druid.guice.LazySingleton;
import org.apache.druid.guice.LifecycleModule;
import org.apache.druid.guice.annotations.Json;
import org.apache.druid.query.lookup.LookupExtractorFactoryContainerProvider;
import org.apache.druid.query.lookup.LookupReferencesManager;
import org.apache.druid.segment.join.JoinableFactory;
@@ -118,7 +119,7 @@ public class DruidCalciteSchemaModuleTest extends CalciteTestBase
              .annotatedWith(IndexingService.class)
              .toInstance(overlordDruidLeaderClient);
          binder.bind(DruidNodeDiscoveryProvider.class).toInstance(druidNodeDiscoveryProvider);
          binder.bind(ObjectMapper.class).toInstance(objectMapper);
          binder.bind(ObjectMapper.class).annotatedWith(Json.class).toInstance(objectMapper);
          binder.bindScope(LazySingleton.class, Scopes.SINGLETON);
          binder.bind(LookupExtractorFactoryContainerProvider.class).toInstance(lookupReferencesManager);
        },
@@ -115,9 +115,12 @@ import org.apache.druid.server.security.ResourceType;
import org.apache.druid.sql.SqlLifecycleFactory;
import org.apache.druid.sql.calcite.aggregation.SqlAggregationModule;
import org.apache.druid.sql.calcite.expression.builtin.QueryLookupOperatorConversion;
import org.apache.druid.sql.calcite.external.ExternalOperatorConversion;
import org.apache.druid.sql.calcite.planner.DruidOperatorTable;
import org.apache.druid.sql.calcite.planner.PlannerConfig;
import org.apache.druid.sql.calcite.planner.PlannerFactory;
import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory;
import org.apache.druid.sql.calcite.run.QueryMakerFactory;
import org.apache.druid.sql.calcite.schema.DruidSchema;
import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog;
import org.apache.druid.sql.calcite.schema.InformationSchema;
@@ -131,7 +134,6 @@ import org.apache.druid.sql.calcite.schema.NamedViewSchema;
import org.apache.druid.sql.calcite.schema.SystemSchema;
import org.apache.druid.sql.calcite.schema.ViewSchema;
import org.apache.druid.sql.calcite.view.DruidViewMacroFactory;
import org.apache.druid.sql.calcite.view.NoopViewManager;
import org.apache.druid.sql.calcite.view.ViewManager;
import org.apache.druid.sql.guice.SqlBindings;
import org.apache.druid.timeline.DataSegment;
@@ -191,8 +193,10 @@ public class CalciteTests
        return new Access(false);
      } else if (ResourceType.VIEW.equals(resource.getType()) && resource.getName().equals("forbiddenView")) {
        return new Access(false);
      } else {
      } else if (ResourceType.DATASOURCE.equals(resource.getType()) || ResourceType.VIEW.equals(resource.getType())) {
        return Access.OK;
      } else {
        return new Access(false);
      }
    };
  }
@@ -259,10 +263,11 @@ public class CalciteTests
              new LookupSerdeModule().getJacksonModules()
          );
          mapper.setInjectableValues(
              new InjectableValues.Std().addValue(ExprMacroTable.class.getName(), TestExprMacroTable.INSTANCE)
                                        .addValue(ObjectMapper.class.getName(), mapper)
                                        .addValue(DataSegment.PruneSpecsHolder.class, DataSegment.PruneSpecsHolder.DEFAULT)
                                        .addValue(LookupExtractorFactoryContainerProvider.class.getName(), lookupProvider)
              new InjectableValues.Std()
                  .addValue(ExprMacroTable.class.getName(), TestExprMacroTable.INSTANCE)
                  .addValue(ObjectMapper.class.getName(), mapper)
                  .addValue(DataSegment.PruneSpecsHolder.class, DataSegment.PruneSpecsHolder.DEFAULT)
                  .addValue(LookupExtractorFactoryContainerProvider.class.getName(), lookupProvider)
          );
          binder.bind(Key.get(ObjectMapper.class, Json.class)).toInstance(
              mapper
@@ -271,6 +276,9 @@ public class CalciteTests
          // This Module is just to get a LookupExtractorFactoryContainerProvider with a usable "lookyloo" lookup.
          binder.bind(LookupExtractorFactoryContainerProvider.class).toInstance(lookupProvider);
          SqlBindings.addOperatorConversion(binder, QueryLookupOperatorConversion.class);

          // Add "EXTERN" table macro, for CalciteInsertDmlTest.
          SqlBindings.addOperatorConversion(binder, ExternalOperatorConversion.class);
        },
        new SqlAggregationModule()
    );
@@ -775,6 +783,14 @@ public class CalciteTests

  public static final DruidViewMacroFactory DRUID_VIEW_MACRO_FACTORY = new TestDruidViewMacroFactory();

  public static QueryMakerFactory createMockQueryMakerFactory(
      final QuerySegmentWalker walker,
      final QueryRunnerFactoryConglomerate conglomerate
  )
  {
    return new NativeQueryMakerFactory(createMockQueryLifecycleFactory(walker, conglomerate), getJsonMapper());
  }

  public static QueryLifecycleFactory createMockQueryLifecycleFactory(
      final QuerySegmentWalker walker,
      final QueryRunnerFactoryConglomerate conglomerate
@@ -1149,56 +1165,34 @@ public class CalciteTests
      final AuthorizerMapper authorizerMapper
  )
  {
    DruidSchema druidSchema = createMockSchema(conglomerate, walker, plannerConfig);
    SystemSchema systemSchema =
        CalciteTests.createMockSystemSchema(druidSchema, walker, plannerConfig, authorizerMapper);

    LookupSchema lookupSchema = CalciteTests.createMockLookupSchema();
    SchemaPlus rootSchema = CalciteSchema.createRootSchema(false, false).plus();
    Set<NamedSchema> namedSchemas = ImmutableSet.of(
        new NamedDruidSchema(druidSchema, CalciteTests.DRUID_SCHEMA_NAME),
        new NamedSystemSchema(plannerConfig, systemSchema),
        new NamedLookupSchema(lookupSchema)
    );
    DruidSchemaCatalog catalog = new DruidSchemaCatalog(
        rootSchema,
        namedSchemas.stream().collect(Collectors.toMap(NamedSchema::getSchemaName, x -> x))
    );
    InformationSchema informationSchema =
        new InformationSchema(
            catalog,
            authorizerMapper
        );
    rootSchema.add(CalciteTests.DRUID_SCHEMA_NAME, druidSchema);
    rootSchema.add(CalciteTests.INFORMATION_SCHEMA_NAME, informationSchema);
    rootSchema.add(NamedSystemSchema.NAME, systemSchema);
    rootSchema.add(NamedLookupSchema.NAME, lookupSchema);

    return catalog;
    return createMockRootSchema(conglomerate, walker, plannerConfig, null, authorizerMapper);
  }

  public static DruidSchemaCatalog createMockRootSchema(
      final QueryRunnerFactoryConglomerate conglomerate,
      final SpecificSegmentsQuerySegmentWalker walker,
      final PlannerConfig plannerConfig,
      final ViewManager viewManager,
      @Nullable final ViewManager viewManager,
      final AuthorizerMapper authorizerMapper
  )
  {
    DruidSchema druidSchema = createMockSchema(conglomerate, walker, plannerConfig, viewManager);
    DruidSchema druidSchema = createMockSchema(conglomerate, walker, plannerConfig);
    SystemSchema systemSchema =
        CalciteTests.createMockSystemSchema(druidSchema, walker, plannerConfig, authorizerMapper);

    LookupSchema lookupSchema = CalciteTests.createMockLookupSchema();
    ViewSchema viewSchema = new ViewSchema(viewManager);
    ViewSchema viewSchema = viewManager != null ? new ViewSchema(viewManager) : null;

    SchemaPlus rootSchema = CalciteSchema.createRootSchema(false, false).plus();
    Set<NamedSchema> namedSchemas = ImmutableSet.of(
        new NamedDruidSchema(druidSchema, CalciteTests.DRUID_SCHEMA_NAME),
        new NamedSystemSchema(plannerConfig, systemSchema),
        new NamedLookupSchema(lookupSchema),
        new NamedViewSchema(viewSchema)
    );
    Set<NamedSchema> namedSchemas = new HashSet<>();
    namedSchemas.add(new NamedDruidSchema(druidSchema, CalciteTests.DRUID_SCHEMA_NAME));
    namedSchemas.add(new NamedSystemSchema(plannerConfig, systemSchema));
    namedSchemas.add(new NamedLookupSchema(lookupSchema));

    if (viewSchema != null) {
      namedSchemas.add(new NamedViewSchema(viewSchema));
    }

    DruidSchemaCatalog catalog = new DruidSchemaCatalog(
        rootSchema,
        namedSchemas.stream().collect(Collectors.toMap(NamedSchema::getSchemaName, x -> x))
@@ -1212,7 +1206,11 @@ public class CalciteTests
    rootSchema.add(CalciteTests.INFORMATION_SCHEMA_NAME, informationSchema);
    rootSchema.add(NamedSystemSchema.NAME, systemSchema);
    rootSchema.add(NamedLookupSchema.NAME, lookupSchema);
    rootSchema.add(NamedViewSchema.NAME, viewSchema);

    if (viewSchema != null) {
      rootSchema.add(NamedViewSchema.NAME, viewSchema);
    }

    return catalog;
  }

@@ -1236,16 +1234,6 @@ public class CalciteTests
      final SpecificSegmentsQuerySegmentWalker walker,
      final PlannerConfig plannerConfig
  )
  {
    return createMockSchema(conglomerate, walker, plannerConfig, new NoopViewManager());
  }

  private static DruidSchema createMockSchema(
      final QueryRunnerFactoryConglomerate conglomerate,
      final SpecificSegmentsQuerySegmentWalker walker,
      final PlannerConfig plannerConfig,
      final ViewManager viewManager
  )
  {
    final DruidSchema schema = new DruidSchema(
        CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
@@ -230,7 +230,7 @@ public class SqlResourceTest extends CalciteTestBase

    final PlannerFactory plannerFactory = new PlannerFactory(
        rootSchema,
        CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
        CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
        operatorTable,
        macroTable,
        plannerConfig,