Cluster-wide configuration for query vectorization (#8657)

* Cluster-wide configuration for query vectorization

* add doc

* fix build

* fix doc

* rename to QueryConfig and add javadoc

* fix checkstyle

* fix variable names
This commit is contained in:
Jihoon Son 2019-10-23 06:44:28 -07:00 committed by Benedict Jin
parent b453fda251
commit f5b9bf5525
22 changed files with 247 additions and 22 deletions

View File

@ -43,6 +43,7 @@ import org.apache.druid.offheap.OffheapBufferGenerator;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@ -394,6 +395,7 @@ public class GroupByTypeInterfaceBenchmark
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
QueryConfig::new,
bufferPool,
mergePool,
new ObjectMapper(new SmileFactory()),

View File

@ -65,6 +65,7 @@ import org.apache.druid.query.Druids;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.FluentQueryRunnerBuilder;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@ -336,6 +337,7 @@ public class CachingClusteredClientBenchmark
new GroupByStrategyV2(
processingConfig,
configSupplier,
QueryConfig::new,
bufferPool,
mergeBufferPool,
mapper,

View File

@ -45,6 +45,7 @@ import org.apache.druid.offheap.OffheapBufferGenerator;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@ -522,6 +523,7 @@ public class GroupByBenchmark
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
QueryConfig::new,
bufferPool,
mergePool,
new ObjectMapper(new SmileFactory()),

View File

@ -1663,6 +1663,15 @@ If there is an L1 miss and L2 hit, it will also populate L1.
This section describes configurations that control behavior of Druid's query types, applicable to Broker, Historical, and MiddleManager processes.
### Query vectorization config
The following configurations set the cluster-wide default behavior for query vectorization. These defaults can be overridden on a per-query basis via the query context.
|Property|Description|Default|
|--------|-----------|-------|
|`druid.query.vectorize`|See [Vectorizable queries](../querying/query-context.html#vectorizable-queries) for details. This value can be overridden by `vectorize` in the query contexts.|`false`|
|`druid.query.vectorSize`|See [Vectorizable queries](../querying/query-context.html#vectorizable-queries) for details. This value can be overridden by `vectorSize` in the query contexts.|`512`|
### TopN query config
|Property|Description|Default|

View File

@ -85,5 +85,5 @@ from the community as we work to battle-test it.
|property|default| description|
|--------|-------|------------|
|vectorize|`false`|Enables or disables vectorized query execution. Possible values are `false` (disabled), `true` (enabled if possible, disabled otherwise, on a per-segment basis), and `force` (enabled, and groupBy or timeseries queries that cannot be vectorized will fail). The `"force"` setting is meant to aid in testing, and is not generally useful in production (since real-time segments can never be processed with vectorized execution, any queries on real-time data will fail).|
|vectorSize|`512`|Sets the row batching size for a particular query.|
|vectorize|`false`|Enables or disables vectorized query execution. Possible values are `false` (disabled), `true` (enabled if possible, disabled otherwise, on a per-segment basis), and `force` (enabled, and groupBy or timeseries queries that cannot be vectorized will fail). The `"force"` setting is meant to aid in testing, and is not generally useful in production (since real-time segments can never be processed with vectorized execution, any queries on real-time data will fail). When set in the query context, this overrides the cluster-wide `druid.query.vectorize` property.|
|vectorSize|`512`|Sets the row batching size for a particular query. When set in the query context, this overrides the cluster-wide `druid.query.vectorSize` property.|

View File

@ -28,6 +28,7 @@ import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerTestHelper;
@ -98,6 +99,7 @@ public class MapVirtualColumnGroupByTest
}
},
GroupByQueryConfig::new,
QueryConfig::new,
new StupidPool<>("map-virtual-column-groupby-test", () -> ByteBuffer.allocate(1024)),
new DefaultBlockingPool<>(() -> ByteBuffer.allocate(1024), 1),
new DefaultObjectMapper(),

View File

@ -0,0 +1,62 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.query;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.druid.query.QueryContexts.Vectorize;
import org.apache.druid.segment.QueryableIndexStorageAdapter;
/**
* A user configuration holder for all query types.
* Any query-specific configurations should go to their own configuration.
*
* @see org.apache.druid.query.groupby.GroupByQueryConfig
* @see org.apache.druid.query.search.SearchQueryConfig
* @see org.apache.druid.query.topn.TopNQueryConfig
* @see org.apache.druid.query.metadata.SegmentMetadataQueryConfig
* @see org.apache.druid.query.select.SelectQueryConfig
* @see org.apache.druid.query.scan.ScanQueryConfig
*/
/**
 * Holder for user-supplied configuration that applies across all query types.
 *
 * Settings that pertain to a single query type belong in that type's own
 * config class rather than here.
 *
 * @see org.apache.druid.query.groupby.GroupByQueryConfig
 * @see org.apache.druid.query.search.SearchQueryConfig
 * @see org.apache.druid.query.topn.TopNQueryConfig
 * @see org.apache.druid.query.metadata.SegmentMetadataQueryConfig
 * @see org.apache.druid.query.select.SelectQueryConfig
 * @see org.apache.druid.query.scan.ScanQueryConfig
 */
public class QueryConfig
{
  // Cluster-wide default vectorization mode; a query context may override it.
  @JsonProperty
  private Vectorize vectorize = QueryContexts.DEFAULT_VECTORIZE;

  // Cluster-wide default row batch size for vectorized execution; a query
  // context may override it.
  @JsonProperty
  private int vectorSize = QueryableIndexStorageAdapter.DEFAULT_VECTOR_SIZE;

  public Vectorize getVectorize()
  {
    return vectorize;
  }

  public int getVectorSize()
  {
    return vectorSize;
  }

  /**
   * Returns a new config whose values reflect any overrides present in the
   * given query's context, falling back to this config's values otherwise.
   */
  public QueryConfig withOverrides(final Query<?> query)
  {
    final QueryConfig overridden = new QueryConfig();
    overridden.vectorize = QueryContexts.getVectorize(query, vectorize);
    overridden.vectorSize = QueryContexts.getVectorSize(query, vectorSize);
    return overridden;
  }
}

View File

@ -19,6 +19,8 @@
package org.apache.druid.query;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonValue;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import org.apache.druid.guice.annotations.PublicApi;
@ -26,7 +28,6 @@ import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.Numbers;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.segment.QueryableIndexStorageAdapter;
import java.util.concurrent.TimeUnit;
@ -40,6 +41,8 @@ public class QueryContexts
public static final String DEFAULT_TIMEOUT_KEY = "defaultTimeout";
@Deprecated
public static final String CHUNK_PERIOD_KEY = "chunkPeriod";
public static final String VECTORIZE_KEY = "vectorize";
public static final String VECTOR_SIZE_KEY = "vectorSize";
public static final boolean DEFAULT_BY_SEGMENT = false;
public static final boolean DEFAULT_POPULATE_CACHE = true;
@ -82,6 +85,19 @@ public class QueryContexts
};
public abstract boolean shouldVectorize(boolean canVectorize);
@JsonCreator
public static Vectorize fromString(String str)
{
return Vectorize.valueOf(StringUtils.toUpperCase(str));
}
@Override
@JsonValue
public String toString()
{
return StringUtils.toLowerCase(name()).replace('_', '-');
}
}
public static <T> boolean isBySegment(Query<T> query)
@ -149,14 +165,14 @@ public class QueryContexts
return parseBoolean(query, "serializeDateTimeAsLongInner", defaultValue);
}
public static <T> Vectorize getVectorize(Query<T> query)
public static <T> Vectorize getVectorize(Query<T> query, Vectorize defaultValue)
{
return parseEnum(query, "vectorize", Vectorize.class, DEFAULT_VECTORIZE);
return parseEnum(query, VECTORIZE_KEY, Vectorize.class, defaultValue);
}
public static <T> int getVectorSize(Query<T> query)
public static <T> int getVectorSize(Query<T> query, int defaultSize)
{
return parseInt(query, "vectorSize", QueryableIndexStorageAdapter.DEFAULT_VECTOR_SIZE);
return parseInt(query, VECTOR_SIZE_KEY, defaultSize);
}
public static <T> int getUncoveredIntervalsLimit(Query<T> query)

View File

@ -33,7 +33,7 @@ import org.apache.druid.java.util.common.guava.BaseSequence;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.query.ColumnSelectorPlus;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.aggregation.AggregatorAdapters;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.dimension.ColumnSelectorStrategyFactory;
@ -112,7 +112,8 @@ public class GroupByQueryEngineV2
final GroupByQuery query,
@Nullable final StorageAdapter storageAdapter,
final NonBlockingPool<ByteBuffer> intermediateResultsBufferPool,
final GroupByQueryConfig querySpecificConfig
final GroupByQueryConfig querySpecificConfig,
final QueryConfig queryConfig
)
{
if (storageAdapter == null) {
@ -139,7 +140,7 @@ public class GroupByQueryEngineV2
final Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getFilter()));
final Interval interval = Iterables.getOnlyElement(query.getIntervals());
final boolean doVectorize = QueryContexts.getVectorize(query).shouldVectorize(
final boolean doVectorize = queryConfig.getVectorize().shouldVectorize(
VectorGroupByEngine.canVectorize(query, storageAdapter, filter)
);
@ -153,7 +154,8 @@ public class GroupByQueryEngineV2
fudgeTimestamp,
filter,
interval,
querySpecificConfig
querySpecificConfig,
queryConfig
);
} else {
result = processNonVectorized(

View File

@ -24,7 +24,7 @@ import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.guava.BaseSequence;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.parsers.CloseableIterator;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.aggregation.AggregatorAdapters;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.dimension.DimensionSpec;
@ -96,7 +96,8 @@ public class VectorGroupByEngine
@Nullable final DateTime fudgeTimestamp,
@Nullable final Filter filter,
final Interval interval,
final GroupByQueryConfig config
final GroupByQueryConfig config,
final QueryConfig queryConfig
)
{
if (!canVectorize(query, storageAdapter, filter)) {
@ -114,7 +115,7 @@ public class VectorGroupByEngine
interval,
query.getVirtualColumns(),
false,
QueryContexts.getVectorSize(query),
queryConfig.getVectorSize(),
null
);

View File

@ -43,6 +43,7 @@ import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.InsufficientResourcesException;
import org.apache.druid.query.IntervalChunkingQueryRunnerDecorator;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryDataSource;
import org.apache.druid.query.QueryPlus;
@ -89,6 +90,7 @@ public class GroupByStrategyV2 implements GroupByStrategy
private final DruidProcessingConfig processingConfig;
private final Supplier<GroupByQueryConfig> configSupplier;
private final Supplier<QueryConfig> queryConfigSupplier;
private final NonBlockingPool<ByteBuffer> bufferPool;
private final BlockingPool<ByteBuffer> mergeBufferPool;
private final ObjectMapper spillMapper;
@ -98,6 +100,7 @@ public class GroupByStrategyV2 implements GroupByStrategy
public GroupByStrategyV2(
DruidProcessingConfig processingConfig,
Supplier<GroupByQueryConfig> configSupplier,
Supplier<QueryConfig> queryConfigSupplier,
@Global NonBlockingPool<ByteBuffer> bufferPool,
@Merging BlockingPool<ByteBuffer> mergeBufferPool,
@Smile ObjectMapper spillMapper,
@ -106,6 +109,7 @@ public class GroupByStrategyV2 implements GroupByStrategy
{
this.processingConfig = processingConfig;
this.configSupplier = configSupplier;
this.queryConfigSupplier = queryConfigSupplier;
this.bufferPool = bufferPool;
this.mergeBufferPool = mergeBufferPool;
this.spillMapper = spillMapper;
@ -582,7 +586,13 @@ public class GroupByStrategyV2 implements GroupByStrategy
@Override
public Sequence<ResultRow> process(GroupByQuery query, StorageAdapter storageAdapter)
{
return GroupByQueryEngineV2.process(query, storageAdapter, bufferPool, configSupplier.get().withOverrides(query));
return GroupByQueryEngineV2.process(
query,
storageAdapter,
bufferPool,
configSupplier.get().withOverrides(query),
queryConfigSupplier.get().withOverrides(query)
);
}
@Override

View File

@ -20,6 +20,8 @@
package org.apache.druid.query.timeseries;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.Iterables;
import com.google.inject.Inject;
import org.apache.druid.collections.NonBlockingPool;
@ -31,7 +33,7 @@ import org.apache.druid.java.util.common.granularity.Granularity;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.java.util.common.io.Closer;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryRunnerHelper;
import org.apache.druid.query.Result;
import org.apache.druid.query.aggregation.Aggregator;
@ -57,6 +59,7 @@ import java.util.Objects;
*/
public class TimeseriesQueryEngine
{
private final Supplier<QueryConfig> queryConfigSupplier;
private final NonBlockingPool<ByteBuffer> bufferPool;
/**
@ -65,12 +68,17 @@ public class TimeseriesQueryEngine
@VisibleForTesting
public TimeseriesQueryEngine()
{
this.queryConfigSupplier = Suppliers.ofInstance(new QueryConfig());
this.bufferPool = new StupidPool<>("dummy", () -> ByteBuffer.allocate(1000000));
}
@Inject
public TimeseriesQueryEngine(final @Global NonBlockingPool<ByteBuffer> bufferPool)
public TimeseriesQueryEngine(
final Supplier<QueryConfig> queryConfigSupplier,
final @Global NonBlockingPool<ByteBuffer> bufferPool
)
{
this.queryConfigSupplier = queryConfigSupplier;
this.bufferPool = bufferPool;
}
@ -86,12 +94,13 @@ public class TimeseriesQueryEngine
);
}
final QueryConfig queryConfigToUse = queryConfigSupplier.get().withOverrides(query);
final Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getFilter()));
final Interval interval = Iterables.getOnlyElement(query.getIntervals());
final Granularity gran = query.getGranularity();
final boolean descending = query.isDescending();
final boolean doVectorize = QueryContexts.getVectorize(query).shouldVectorize(
final boolean doVectorize = queryConfigToUse.getVectorize().shouldVectorize(
adapter.canVectorize(filter, query.getVirtualColumns(), descending)
&& query.getAggregatorSpecs().stream().allMatch(AggregatorFactory::canVectorize)
);
@ -99,7 +108,7 @@ public class TimeseriesQueryEngine
final Sequence<Result<TimeseriesResultValue>> result;
if (doVectorize) {
result = processVectorized(query, adapter, filter, interval, gran, descending);
result = processVectorized(query, queryConfigToUse, adapter, filter, interval, gran, descending);
} else {
result = processNonVectorized(query, adapter, filter, interval, gran, descending);
}
@ -114,6 +123,7 @@ public class TimeseriesQueryEngine
private Sequence<Result<TimeseriesResultValue>> processVectorized(
final TimeseriesQuery query,
final QueryConfig queryConfig,
final StorageAdapter adapter,
@Nullable final Filter filter,
final Interval queryInterval,
@ -129,7 +139,7 @@ public class TimeseriesQueryEngine
queryInterval,
query.getVirtualColumns(),
descending,
QueryContexts.getVectorSize(query),
queryConfig.getVectorSize(),
null
);

View File

@ -24,7 +24,7 @@ import org.apache.druid.query.spec.QuerySegmentSpec;
import java.util.Map;
class TestQuery extends BaseQuery
public class TestQuery extends BaseQuery
{
public TestQuery(DataSource dataSource, QuerySegmentSpec querySegmentSpec, boolean descending, Map context)

View File

@ -50,6 +50,7 @@ import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.IntervalChunkingQueryRunnerDecorator;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@ -352,8 +353,10 @@ public class GroupByLimitPushDownInsufficientBufferTest
}
};
final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
final Supplier<QueryConfig> queryConfigSupplier = Suppliers.ofInstance(
new QueryConfig()
);
final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
configSupplier,
new GroupByStrategyV1(
@ -365,6 +368,7 @@ public class GroupByLimitPushDownInsufficientBufferTest
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
queryConfigSupplier,
bufferPool,
mergePool,
new ObjectMapper(new SmileFactory()),
@ -383,6 +387,7 @@ public class GroupByLimitPushDownInsufficientBufferTest
new GroupByStrategyV2(
tooSmallDruidProcessingConfig,
configSupplier,
queryConfigSupplier,
bufferPool,
tooSmallMergePool,
new ObjectMapper(new SmileFactory()),

View File

@ -51,6 +51,7 @@ import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.IntervalChunkingQueryRunnerDecorator;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@ -386,6 +387,9 @@ public class GroupByLimitPushDownMultiNodeMergeTest
};
final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
final Supplier<QueryConfig> queryConfigSupplier = Suppliers.ofInstance(
new QueryConfig()
);
final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
configSupplier,
new GroupByStrategyV1(
@ -397,6 +401,7 @@ public class GroupByLimitPushDownMultiNodeMergeTest
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
queryConfigSupplier,
bufferPool,
mergePool,
new ObjectMapper(new SmileFactory()),
@ -415,6 +420,7 @@ public class GroupByLimitPushDownMultiNodeMergeTest
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
queryConfigSupplier,
bufferPool,
mergePool2,
new ObjectMapper(new SmileFactory()),

View File

@ -47,6 +47,7 @@ import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.IntervalChunkingQueryRunnerDecorator;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@ -276,6 +277,7 @@ public class GroupByMultiSegmentTest
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
Suppliers.ofInstance(new QueryConfig()),
bufferPool,
mergePool,
new ObjectMapper(new SmileFactory()),

View File

@ -31,6 +31,7 @@ import org.apache.druid.collections.ReferenceCountingResourceHolder;
import org.apache.druid.java.util.common.concurrent.Execs;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryDataSource;
import org.apache.druid.query.QueryRunner;
@ -147,6 +148,7 @@ public class GroupByQueryMergeBufferTest
new GroupByStrategyV2(
PROCESSING_CONFIG,
configSupplier,
Suppliers.ofInstance(new QueryConfig()),
BUFFER_POOL,
MERGE_BUFFER_POOL,
mapper,

View File

@ -31,6 +31,7 @@ import org.apache.druid.java.util.common.concurrent.Execs;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.InsufficientResourcesException;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryDataSource;
import org.apache.druid.query.QueryInterruptedException;
@ -110,6 +111,7 @@ public class GroupByQueryRunnerFailureTest
new GroupByStrategyV2(
DEFAULT_PROCESSING_CONFIG,
configSupplier,
Suppliers.ofInstance(new QueryConfig()),
BUFFER_POOL,
MERGE_BUFFER_POOL,
mapper,

View File

@ -53,6 +53,7 @@ import org.apache.druid.query.BySegmentResultValueClass;
import org.apache.druid.query.ChainedExecutionQueryRunner;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryDataSource;
import org.apache.druid.query.QueryPlus;
@ -393,6 +394,7 @@ public class GroupByQueryRunnerTest
new GroupByStrategyV2(
processingConfig,
configSupplier,
Suppliers.ofInstance(new QueryConfig()),
bufferPool,
mergeBufferPool,
mapper,

View File

@ -52,6 +52,7 @@ import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.IntervalChunkingQueryRunnerDecorator;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
@ -314,6 +315,9 @@ public class NestedQueryPushDownTest
};
final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
final Supplier<QueryConfig> queryConfigSupplier = Suppliers.ofInstance(
new QueryConfig()
);
final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
configSupplier,
new GroupByStrategyV1(
@ -325,6 +329,7 @@ public class NestedQueryPushDownTest
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
queryConfigSupplier,
bufferPool,
mergePool,
new ObjectMapper(new SmileFactory()),
@ -343,6 +348,7 @@ public class NestedQueryPushDownTest
new GroupByStrategyV2(
druidProcessingConfig,
configSupplier,
queryConfigSupplier,
bufferPool,
mergePool2,
new ObjectMapper(new SmileFactory()),

View File

@ -0,0 +1,80 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.query.search;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryContexts.Vectorize;
import org.apache.druid.query.TableDataSource;
import org.apache.druid.query.TestQuery;
import org.apache.druid.query.spec.MultipleIntervalSegmentSpec;
import org.apache.druid.segment.QueryableIndexStorageAdapter;
import org.apache.druid.segment.TestHelper;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
public class QueryConfigTest
{
  @Test
  public void testSerde() throws IOException
  {
    // Round-trip from JSON: both properties should be honored.
    final ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
    final String json = "{\"vectorize\" : \"force\",\"vectorSize\" : 1}";
    final QueryConfig deserialized = jsonMapper.readValue(json, QueryConfig.class);
    Assert.assertEquals(Vectorize.FORCE, deserialized.getVectorize());
    Assert.assertEquals(1, deserialized.getVectorSize());
  }

  @Test
  public void testDefault()
  {
    // A freshly constructed config must carry the documented defaults.
    final QueryConfig defaults = new QueryConfig();
    Assert.assertEquals(QueryContexts.DEFAULT_VECTORIZE, defaults.getVectorize());
    Assert.assertEquals(QueryableIndexStorageAdapter.DEFAULT_VECTOR_SIZE, defaults.getVectorSize());
  }

  @Test
  public void testOverrides()
  {
    // Query-context values should take precedence over the config defaults.
    final int doubledVectorSize = QueryableIndexStorageAdapter.DEFAULT_VECTOR_SIZE * 2;
    final Query<?> queryWithContext = new TestQuery(
        new TableDataSource("datasource"),
        new MultipleIntervalSegmentSpec(ImmutableList.of()),
        false,
        ImmutableMap.of(
            QueryContexts.VECTORIZE_KEY,
            "true",
            QueryContexts.VECTOR_SIZE_KEY,
            doubledVectorSize
        )
    );
    final QueryConfig overridden = new QueryConfig().withOverrides(queryWithContext);
    Assert.assertEquals(Vectorize.TRUE, overridden.getVectorize());
    Assert.assertEquals(doubledVectorSize, overridden.getVectorSize());
  }
}

View File

@ -28,6 +28,7 @@ import org.apache.druid.query.DefaultGenericQueryMetricsFactory;
import org.apache.druid.query.GenericQueryMetricsFactory;
import org.apache.druid.query.MapQueryToolChestWarehouse;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryConfig;
import org.apache.druid.query.QueryToolChest;
import org.apache.druid.query.QueryToolChestWarehouse;
import org.apache.druid.query.datasourcemetadata.DataSourceMetadataQuery;
@ -103,6 +104,7 @@ public class QueryToolChestModule implements Module
binder.bind(QueryToolChestWarehouse.class).to(MapQueryToolChestWarehouse.class);
JsonConfigProvider.bind(binder, "druid.query", QueryConfig.class);
JsonConfigProvider.bind(binder, "druid.query.groupBy", GroupByQueryConfig.class);
JsonConfigProvider.bind(binder, "druid.query.search", SearchQueryConfig.class);
JsonConfigProvider.bind(binder, "druid.query.topN", TopNQueryConfig.class);