mirror of https://github.com/apache/druid.git
Merge pull request #2616 from gianm/getContextBoolean
Make specifying query context booleans more consistent.
commit 0f3a7c94c6
@@ -193,6 +193,12 @@ public abstract class BaseQuery<T extends Comparable<T>> implements Query<T>
     return retVal == null ? defaultValue : retVal;
   }
 
+  @Override
+  public boolean getContextBoolean(String key, boolean defaultValue)
+  {
+    return parseBoolean(this, key, defaultValue);
+  }
+
   protected Map<String, Object> computeOverridenContext(Map<String, Object> overrides)
   {
     Map<String, Object> overridden = Maps.newTreeMap();
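The parseBoolean helper called above is defined elsewhere in BaseQuery and its body is not part of this hunk. As a rough sketch only, assuming the helper accepts context values stored either as a Boolean or as a String (the two forms exercised by the test changes further down), it could look something like this:

// Sketch under the assumptions above; not the implementation shown in this diff.
public static <T> boolean parseBoolean(Query<T> query, String key, boolean defaultValue)
{
  final Object val = query.getContextValue(key);
  if (val == null) {
    return defaultValue;
  } else if (val instanceof String) {
    return Boolean.parseBoolean((String) val);
  } else if (val instanceof Boolean) {
    return (Boolean) val;
  } else {
    throw new IllegalArgumentException(
        String.format("Unknown type [%s] for context key [%s]", val.getClass(), key)
    );
  }
}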
@@ -81,6 +81,8 @@ public interface Query<T>
 
   <ContextType> ContextType getContextValue(String key, ContextType defaultValue);
 
+  boolean getContextBoolean(String key, boolean defaultValue);
+
   boolean isDescending();
 
   Ordering<T> getResultOrdering();
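The interface change above is what lets the call-site hunks below drop their ad-hoc Boolean.valueOf / Boolean.parseBoolean handling. A standalone, hypothetical illustration of why one context key can carry two different runtime types (the class and map values below are examples, not part of the patch):

import java.util.Collections;
import java.util.Map;

// Hypothetical example: the same flag may be stored as a Boolean when the
// context is built in Java, but may arrive as a String (as the test change
// below exercises with "populateCache" set to "true"), so each getContextValue
// call site previously had to handle both shapes itself.
public class ContextBooleanExample
{
  public static void main(String[] args)
  {
    Map<String, Object> builtAsBoolean = Collections.<String, Object>singletonMap("useCache", true);
    Map<String, Object> builtAsString = Collections.<String, Object>singletonMap("useCache", "true");

    System.out.println(builtAsBoolean.get("useCache").getClass()); // class java.lang.Boolean
    System.out.println(builtAsString.get("useCache").getClass());  // class java.lang.String
  }
}

getContextBoolean(key, defaultValue) is meant to normalize both forms in one place, which is what the remaining hunks switch over to.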
@@ -81,7 +81,7 @@ public class GroupByQueryHelper
     );
     final IncrementalIndex index;
 
-    if (query.getContextValue("useOffheap", false)) {
+    if (query.getContextBoolean("useOffheap", false)) {
       index = new OffheapIncrementalIndex(
           // use granularity truncated min timestamp
           // since incoming truncated timestamps may precede timeStart
@@ -130,7 +130,7 @@ public class GroupByQueryQueryToolChest extends QueryToolChest<Row, GroupByQuery
          return runner.run(query, responseContext);
        }
 
-        if (Boolean.valueOf(query.getContextValue(GROUP_BY_MERGE_KEY, "true"))) {
+        if (query.getContextBoolean(GROUP_BY_MERGE_KEY, true)) {
          return mergeGroupByResults(
              (GroupByQuery) query,
              runner,
@@ -106,7 +106,7 @@ public class TimeseriesQuery extends BaseQuery<Result<TimeseriesResultValue>>
 
   public boolean isSkipEmptyBuckets()
   {
-    return Boolean.parseBoolean(getContextValue("skipEmptyBuckets", "false"));
+    return getContextBoolean("skipEmptyBuckets", false);
   }
 
   public TimeseriesQuery withQuerySegmentSpec(QuerySegmentSpec querySegmentSpec)
@@ -119,7 +119,7 @@ public class TopNQueryEngine
       topNAlgorithm = new DimExtractionTopNAlgorithm(capabilities, query);
     } else if (selector.isAggregateAllMetrics()) {
       topNAlgorithm = new PooledTopNAlgorithm(capabilities, query, bufferPool);
-    } else if (selector.isAggregateTopNMetricFirst() || query.getContextValue("doAggregateTopNMetricFirst", false)) {
+    } else if (selector.isAggregateTopNMetricFirst() || query.getContextBoolean("doAggregateTopNMetricFirst", false)) {
       topNAlgorithm = new AggregateTopNMetricFirstAlgorithm(capabilities, query, bufferPool);
     } else {
       topNAlgorithm = new PooledTopNAlgorithm(capabilities, query, bufferPool);
@@ -81,7 +81,7 @@ public class DataSourceMetadataQueryTest
                 "useCache",
                 true,
                 "populateCache",
-                true,
+                "true",
                 "finalize",
                 true
             )
@@ -101,8 +101,11 @@ public class DataSourceMetadataQueryTest
 
     Assert.assertEquals(1, serdeQuery.getContextValue("priority"));
     Assert.assertEquals(true, serdeQuery.getContextValue("useCache"));
-    Assert.assertEquals(true, serdeQuery.getContextValue("populateCache"));
+    Assert.assertEquals("true", serdeQuery.getContextValue("populateCache"));
     Assert.assertEquals(true, serdeQuery.getContextValue("finalize"));
+    Assert.assertEquals(true, serdeQuery.getContextBoolean("useCache", false));
+    Assert.assertEquals(true, serdeQuery.getContextBoolean("populateCache", false));
+    Assert.assertEquals(true, serdeQuery.getContextBoolean("finalize", false));
   }
 
   @Test
@@ -266,7 +266,7 @@ public class RealtimePlumber implements Plumber
   @Override
   public <T> QueryRunner<T> getQueryRunner(final Query<T> query)
   {
-    final boolean skipIncrementalSegment = query.getContextValue(SKIP_INCREMENTAL_SEGMENT, false);
+    final boolean skipIncrementalSegment = query.getContextBoolean(SKIP_INCREMENTAL_SEGMENT, false);
     final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
     final QueryToolChest<T, Query<T>> toolchest = factory.getToolchest();
 