mirror of https://github.com/apache/druid.git

SQL: Add server-wide default time zone config. (#5993)

* SQL: Add server-wide default time zone config.
* Switch API.

commit cd8ea3da8d (parent 2f51a3c673)
@@ -416,10 +416,10 @@ Connection context can be specified as JDBC connection properties or as a "conte

|Parameter|Description|Default value|
|---------|-----------|-------------|
-|`sqlTimeZone`|Sets the time zone for this connection, which will affect how time functions and timestamp literals behave. Should be a time zone name like "America/Los_Angeles" or offset like "-08:00".|UTC|
-|`useApproximateCountDistinct`|Whether to use an approximate cardinality algorithm for `COUNT(DISTINCT foo)`.|druid.sql.planner.useApproximateCountDistinct on the broker|
-|`useApproximateTopN`|Whether to use approximate [TopN queries](topnquery.html) when a SQL query could be expressed as such. If false, exact [GroupBy queries](groupbyquery.html) will be used instead.|druid.sql.planner.useApproximateTopN on the broker|
-|`useFallback`|Whether to evaluate operations on the broker when they cannot be expressed as Druid queries. This option is not recommended for production since it can generate unscalable query plans. If false, SQL queries that cannot be translated to Druid queries will fail.|druid.sql.planner.useFallback on the broker|
+|`sqlTimeZone`|Sets the time zone for this connection, which will affect how time functions and timestamp literals behave. Should be a time zone name like "America/Los_Angeles" or offset like "-08:00".|druid.sql.planner.sqlTimeZone on the broker (default: UTC)|
+|`useApproximateCountDistinct`|Whether to use an approximate cardinality algorithm for `COUNT(DISTINCT foo)`.|druid.sql.planner.useApproximateCountDistinct on the broker (default: true)|
+|`useApproximateTopN`|Whether to use approximate [TopN queries](topnquery.html) when a SQL query could be expressed as such. If false, exact [GroupBy queries](groupbyquery.html) will be used instead.|druid.sql.planner.useApproximateTopN on the broker (default: true)|
+|`useFallback`|Whether to evaluate operations on the broker when they cannot be expressed as Druid queries. This option is not recommended for production since it can generate unscalable query plans. If false, SQL queries that cannot be translated to Druid queries will fail.|druid.sql.planner.useFallback on the broker (default: false)|
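As context for the connection parameters above, here is a minimal sketch of supplying the per-connection setting through the Avatica JDBC driver. The broker URL, datasource, and query are illustrative assumptions, not part of this change.

// Minimal sketch (not part of this commit): connect through the Avatica JDBC driver
// and pass "sqlTimeZone" as a connection property. Broker host/port, datasource, and
// query below are assumptions for illustration only.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public class SqlTimeZoneConnectionExample
{
  public static void main(String[] args) throws Exception
  {
    final String url = "jdbc:avatica:remote:url=http://localhost:8082/druid/v2/sql/avatica/";

    final Properties connectionProperties = new Properties();
    // Per-connection override; if omitted, the broker-wide
    // druid.sql.planner.sqlTimeZone (default UTC) applies.
    connectionProperties.setProperty("sqlTimeZone", "America/Los_Angeles");

    try (Connection connection = DriverManager.getConnection(url, connectionProperties);
         Statement statement = connection.createStatement();
         ResultSet resultSet = statement.executeQuery(
             "SELECT FLOOR(__time TO DAY) AS day, COUNT(*) AS cnt FROM druid.foo GROUP BY 1"
         )) {
      while (resultSet.next()) {
        // Timestamps are bucketed and rendered in the connection's time zone.
        System.out.println(resultSet.getString("day") + " -> " + resultSet.getLong("cnt"));
      }
    }
  }
}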
### Retrieving metadata
@@ -500,3 +500,4 @@ The Druid SQL server is configured through the following properties on the broke

|`druid.sql.planner.useApproximateCountDistinct`|Whether to use an approximate cardinality algorithm for `COUNT(DISTINCT foo)`.|true|
|`druid.sql.planner.useApproximateTopN`|Whether to use approximate [TopN queries](../querying/topnquery.html) when a SQL query could be expressed as such. If false, exact [GroupBy queries](../querying/groupbyquery.html) will be used instead.|true|
|`druid.sql.planner.useFallback`|Whether to evaluate operations on the broker when they cannot be expressed as Druid queries. This option is not recommended for production since it can generate unscalable query plans. If false, SQL queries that cannot be translated to Druid queries will fail.|false|
+|`druid.sql.planner.sqlTimeZone`|Sets the default time zone for the server, which will affect how time functions and timestamp literals behave. Should be a time zone name like "America/Los_Angeles" or offset like "-08:00".|UTC|
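In practice the server-wide default described in the new row would presumably be set on the broker, for example with a line like `druid.sql.planner.sqlTimeZone=America/Los_Angeles` in its runtime.properties; the exact properties file depends on how the cluster's configuration is laid out.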
@@ -21,9 +21,11 @@ package io.druid.sql.calcite.planner;

import com.fasterxml.jackson.annotation.JsonProperty;
import io.druid.java.util.common.IAE;
+import org.joda.time.DateTimeZone;
import org.joda.time.Period;

import java.util.Map;
+import java.util.Objects;

public class PlannerConfig
{
@@ -55,6 +57,9 @@ public class PlannerConfig

  @JsonProperty
  private boolean useFallback = false;

+  @JsonProperty
+  private DateTimeZone sqlTimeZone = DateTimeZone.UTC;
+
  public Period getMetadataRefreshPeriod()
  {
    return metadataRefreshPeriod;
@@ -95,6 +100,11 @@ public class PlannerConfig

    return useFallback;
  }

+  public DateTimeZone getSqlTimeZone()
+  {
+    return sqlTimeZone;
+  }
+
  public PlannerConfig withOverrides(final Map<String, Object> context)
  {
    if (context == null) {
@@ -122,6 +132,7 @@ public class PlannerConfig

        CTX_KEY_USE_FALLBACK,
        isUseFallback()
    );
+    newConfig.sqlTimeZone = getSqlTimeZone();
    return newConfig;
  }

@@ -152,47 +163,33 @@ public class PlannerConfig

    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    final PlannerConfig that = (PlannerConfig) o;

-    if (maxSemiJoinRowsInMemory != that.maxSemiJoinRowsInMemory) {
-      return false;
-    }
-    if (maxTopNLimit != that.maxTopNLimit) {
-      return false;
-    }
-    if (maxQueryCount != that.maxQueryCount) {
-      return false;
-    }
-    if (selectThreshold != that.selectThreshold) {
-      return false;
-    }
-    if (useApproximateCountDistinct != that.useApproximateCountDistinct) {
-      return false;
-    }
-    if (useApproximateTopN != that.useApproximateTopN) {
-      return false;
-    }
-    if (useFallback != that.useFallback) {
-      return false;
-    }
-    return metadataRefreshPeriod != null
-           ? metadataRefreshPeriod.equals(that.metadataRefreshPeriod)
-           : that.metadataRefreshPeriod == null;
+    return maxSemiJoinRowsInMemory == that.maxSemiJoinRowsInMemory &&
+           maxTopNLimit == that.maxTopNLimit &&
+           maxQueryCount == that.maxQueryCount &&
+           selectThreshold == that.selectThreshold &&
+           useApproximateCountDistinct == that.useApproximateCountDistinct &&
+           useApproximateTopN == that.useApproximateTopN &&
+           useFallback == that.useFallback &&
+           Objects.equals(metadataRefreshPeriod, that.metadataRefreshPeriod) &&
+           Objects.equals(sqlTimeZone, that.sqlTimeZone);
  }

  @Override
  public int hashCode()
  {
-    int result = metadataRefreshPeriod != null ? metadataRefreshPeriod.hashCode() : 0;
-    result = 31 * result + maxSemiJoinRowsInMemory;
-    result = 31 * result + maxTopNLimit;
-    result = 31 * result + maxQueryCount;
-    result = 31 * result + selectThreshold;
-    result = 31 * result + (useApproximateCountDistinct ? 1 : 0);
-    result = 31 * result + (useApproximateTopN ? 1 : 0);
-    result = 31 * result + (useFallback ? 1 : 0);
-    return result;
+    return Objects.hash(
+        metadataRefreshPeriod,
+        maxSemiJoinRowsInMemory,
+        maxTopNLimit,
+        maxQueryCount,
+        selectThreshold,
+        useApproximateCountDistinct,
+        useApproximateTopN,
+        useFallback,
+        sqlTimeZone
+    );
  }

  @Override
@@ -207,6 +204,7 @@ public class PlannerConfig

           ", useApproximateCountDistinct=" + useApproximateCountDistinct +
           ", useApproximateTopN=" + useApproximateTopN +
           ", useFallback=" + useFallback +
+           ", sqlTimeZone=" + sqlTimeZone +
           '}';
  }
}
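Since `druid.sql.planner.*` properties are bound onto PlannerConfig through Jackson, the new `@JsonProperty` field should accept a plain zone ID. A rough sketch of that binding, assuming an ObjectMapper with the Joda datatype module registered; the class name and standalone main are illustrative only, not anything added by this commit.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.joda.JodaModule;
import io.druid.sql.calcite.planner.PlannerConfig;

public class PlannerConfigBindingSketch
{
  public static void main(String[] args) throws Exception
  {
    // The Joda module supplies the DateTimeZone (de)serializer used for sqlTimeZone.
    final ObjectMapper mapper = new ObjectMapper().registerModule(new JodaModule());

    // Roughly what druid.sql.planner.sqlTimeZone=America/Los_Angeles would bind to.
    final PlannerConfig config = mapper.readValue(
        "{\"sqlTimeZone\": \"America/Los_Angeles\"}",
        PlannerConfig.class
    );

    System.out.println(config.getSqlTimeZone()); // America/Los_Angeles
  }
}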
@@ -25,7 +25,6 @@ import com.google.common.collect.Maps;

import io.druid.java.util.common.DateTimes;
import io.druid.math.expr.ExprMacroTable;
import io.druid.server.security.AuthenticationResult;
import io.druid.server.security.AuthorizerMapper;
import org.apache.calcite.DataContext;
import org.apache.calcite.adapter.java.JavaTypeFactory;
import org.apache.calcite.linq4j.QueryProvider;
@@ -53,9 +52,7 @@ public class PlannerContext

  private final ExprMacroTable macroTable;
  private final PlannerConfig plannerConfig;
  private final DateTime localNow;
  private final long queryStartTimeMillis;
  private final Map<String, Object> queryContext;
  private final AuthorizerMapper authorizerMapper;

  private AuthenticationResult authenticationResult;

@@ -64,7 +61,6 @@ public class PlannerContext

      final ExprMacroTable macroTable,
      final PlannerConfig plannerConfig,
      final DateTime localNow,
      final AuthorizerMapper authorizerMapper,
      final Map<String, Object> queryContext
  )
  {
@@ -73,15 +69,12 @@ public class PlannerContext

    this.plannerConfig = Preconditions.checkNotNull(plannerConfig, "plannerConfig");
    this.queryContext = queryContext != null ? Maps.newHashMap(queryContext) : Maps.newHashMap();
    this.localNow = Preconditions.checkNotNull(localNow, "localNow");
    this.queryStartTimeMillis = System.currentTimeMillis();
    this.authorizerMapper = authorizerMapper;
  }

  public static PlannerContext create(
      final DruidOperatorTable operatorTable,
      final ExprMacroTable macroTable,
      final PlannerConfig plannerConfig,
      final AuthorizerMapper authorizerMapper,
      final Map<String, Object> queryContext
  )
  {
@@ -101,11 +94,11 @@ public class PlannerContext

      if (tzParam != null) {
        timeZone = DateTimes.inferTzfromString(String.valueOf(tzParam));
      } else {
-        timeZone = DateTimeZone.UTC;
+        timeZone = plannerConfig.getSqlTimeZone();
      }
    } else {
      utcNow = new DateTime(DateTimeZone.UTC);
-      timeZone = DateTimeZone.UTC;
+      timeZone = plannerConfig.getSqlTimeZone();
    }

    return new PlannerContext(
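Taken together with the doc changes above, the resulting precedence is: a per-connection or per-query `sqlTimeZone` context value wins, otherwise the broker-wide `druid.sql.planner.sqlTimeZone` applies, and that in turn defaults to UTC. A standalone restatement of that branch under those assumptions, with a hypothetical helper name and `DateTimeZone.forID` standing in for Druid's `DateTimes.inferTzfromString`:

import java.util.Collections;
import java.util.Map;

import org.joda.time.DateTimeZone;

public final class SqlTimeZoneResolution
{
  // Hypothetical helper, not Druid API: mirrors the branch changed in the hunk above.
  public static DateTimeZone resolve(final Map<String, Object> queryContext, final DateTimeZone serverDefault)
  {
    final Object tzParam = queryContext == null ? null : queryContext.get("sqlTimeZone");
    if (tzParam != null) {
      // Per-connection / per-query override from the context.
      return DateTimeZone.forID(String.valueOf(tzParam));
    }
    // Otherwise the server-wide default from PlannerConfig#getSqlTimeZone() (UTC if unset).
    return serverDefault;
  }

  public static void main(String[] args)
  {
    System.out.println(resolve(null, DateTimeZone.UTC)); // UTC
    System.out.println(resolve(
        Collections.singletonMap("sqlTimeZone", "America/Los_Angeles"),
        DateTimeZone.UTC
    )); // America/Los_Angeles
  }
}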
@@ -113,7 +106,6 @@ public class PlannerContext

        macroTable,
        plannerConfig.withOverrides(queryContext),
        utcNow.withZone(timeZone),
        authorizerMapper,
        queryContext
    );
  }
@@ -148,11 +140,6 @@ public class PlannerContext

    return queryContext;
  }

  public long getQueryStartTimeMillis()
  {
    return queryStartTimeMillis;
  }

  public AuthenticationResult getAuthenticationResult()
  {
    return authenticationResult;
@@ -91,7 +91,6 @@ public class PlannerFactory

        operatorTable,
        macroTable,
        plannerConfig,
        authorizerMapper,
        queryContext
    );
    final QueryMaker queryMaker = new QueryMaker(queryLifecycleFactory, plannerContext, jsonMapper);
@@ -169,6 +169,14 @@ public class CalciteQueryTest extends CalciteTestBase

      return 1;
    }
  };
+  private static final PlannerConfig PLANNER_CONFIG_LOS_ANGELES = new PlannerConfig()
+  {
+    @Override
+    public DateTimeZone getSqlTimeZone()
+    {
+      return DateTimes.inferTzfromString("America/Los_Angeles");
+    }
+  };

  private static final String LOS_ANGELES = "America/Los_Angeles";

@@ -5562,7 +5570,7 @@ public class CalciteQueryTest extends CalciteTestBase

  }

  @Test
-  public void testTimeseriesLosAngeles() throws Exception
+  public void testTimeseriesLosAngelesViaQueryContext() throws Exception
  {
    testQuery(
        PLANNER_CONFIG_DEFAULT,
@@ -5592,6 +5600,37 @@ public class CalciteQueryTest extends CalciteTestBase

    );
  }

+  @Test
+  public void testTimeseriesLosAngelesViaPlannerConfig() throws Exception
+  {
+    testQuery(
+        PLANNER_CONFIG_LOS_ANGELES,
+        QUERY_CONTEXT_DEFAULT,
+        "SELECT SUM(cnt), gran FROM (\n"
+        + " SELECT FLOOR(__time TO MONTH) AS gran,\n"
+        + " cnt FROM druid.foo\n"
+        + ") AS x\n"
+        + "GROUP BY gran\n"
+        + "ORDER BY gran",
+        CalciteTests.REGULAR_USER_AUTH_RESULT,
+        ImmutableList.of(
+            Druids.newTimeseriesQueryBuilder()
+                  .dataSource(CalciteTests.DATASOURCE1)
+                  .intervals(QSS(Filtration.eternity()))
+                  .granularity(new PeriodGranularity(Period.months(1), null, DateTimes.inferTzfromString(LOS_ANGELES)))
+                  .aggregators(AGGS(new LongSumAggregatorFactory("a0", "cnt")))
+                  .context(TIMESERIES_CONTEXT_DEFAULT)
+                  .build()
+        ),
+        ImmutableList.of(
+            new Object[]{1L, T("1999-12-01", LOS_ANGELES)},
+            new Object[]{2L, T("2000-01-01", LOS_ANGELES)},
+            new Object[]{1L, T("2000-12-01", LOS_ANGELES)},
+            new Object[]{2L, T("2001-01-01", LOS_ANGELES)}
+        )
+    );
+  }

  @Test
  public void testTimeseriesUsingTimeFloor() throws Exception
  {
@@ -26,7 +26,6 @@ import io.druid.math.expr.ExprEval;

import io.druid.math.expr.Parser;
import io.druid.query.extraction.RegexDimExtractionFn;
import io.druid.segment.column.ValueType;
import io.druid.server.security.AuthTestUtils;
import io.druid.sql.calcite.expression.builtin.DateTruncOperatorConversion;
import io.druid.sql.calcite.expression.builtin.RegexpExtractOperatorConversion;
import io.druid.sql.calcite.expression.builtin.StrposOperatorConversion;
@@ -72,7 +71,6 @@ public class ExpressionsTest extends CalciteTestBase

      CalciteTests.createOperatorTable(),
      CalciteTests.createExprMacroTable(),
      new PlannerConfig(),
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      ImmutableMap.of()
  );
  private final RowSignature rowSignature = RowSignature