Make specifying query context booleans more consistent.

Before, some needed to be strings and some needed to be real booleans. Now
they can all be either one.
This commit is contained in:
Gian Merlino 2016-03-08 19:37:12 -08:00
parent 163e536415
commit 708bc674fa
8 changed files with 18 additions and 7 deletions

View File

@@ -193,6 +193,12 @@ public abstract class BaseQuery<T extends Comparable<T>> implements Query<T>
    return retVal == null ? defaultValue : retVal;
  }
/**
 * Returns the boolean value of the context entry for {@code key}, or
 * {@code defaultValue} when the key is absent.
 *
 * Delegates to the shared {@code parseBoolean} helper; per this commit's
 * intent, the stored context value may presumably be either a String
 * ("true"/"false") or a real Boolean — confirm against parseBoolean's
 * implementation.
 */
@Override
public boolean getContextBoolean(String key, boolean defaultValue)
{
return parseBoolean(this, key, defaultValue);
}
  protected Map<String, Object> computeOverridenContext(Map<String, Object> overrides)
  {
    Map<String, Object> overridden = Maps.newTreeMap();

View File

@@ -81,6 +81,8 @@ public interface Query<T>
  <ContextType> ContextType getContextValue(String key, ContextType defaultValue);

+ boolean getContextBoolean(String key, boolean defaultValue);

  boolean isDescending();

  Ordering<T> getResultOrdering();

View File

@@ -81,7 +81,7 @@ public class GroupByQueryHelper
    );
    final IncrementalIndex index;
-   if (query.getContextValue("useOffheap", false)) {
+   if (query.getContextBoolean("useOffheap", false)) {
      index = new OffheapIncrementalIndex(
        // use granularity truncated min timestamp
        // since incoming truncated timestamps may precede timeStart

View File

@@ -130,7 +130,7 @@ public class GroupByQueryQueryToolChest extends QueryToolChest<Row, GroupByQuery
      return runner.run(query, responseContext);
    }
-   if (Boolean.valueOf(query.getContextValue(GROUP_BY_MERGE_KEY, "true"))) {
+   if (query.getContextBoolean(GROUP_BY_MERGE_KEY, true)) {
      return mergeGroupByResults(
        (GroupByQuery) query,
        runner,

View File

@@ -106,7 +106,7 @@ public class TimeseriesQuery extends BaseQuery<Result<TimeseriesResultValue>>
  public boolean isSkipEmptyBuckets()
  {
-   return Boolean.parseBoolean(getContextValue("skipEmptyBuckets", "false"));
+   return getContextBoolean("skipEmptyBuckets", false);
  }

  public TimeseriesQuery withQuerySegmentSpec(QuerySegmentSpec querySegmentSpec)

View File

@@ -119,7 +119,7 @@ public class TopNQueryEngine
      topNAlgorithm = new DimExtractionTopNAlgorithm(capabilities, query);
    } else if (selector.isAggregateAllMetrics()) {
      topNAlgorithm = new PooledTopNAlgorithm(capabilities, query, bufferPool);
-   } else if (selector.isAggregateTopNMetricFirst() || query.getContextValue("doAggregateTopNMetricFirst", false)) {
+   } else if (selector.isAggregateTopNMetricFirst() || query.getContextBoolean("doAggregateTopNMetricFirst", false)) {
      topNAlgorithm = new AggregateTopNMetricFirstAlgorithm(capabilities, query, bufferPool);
    } else {
      topNAlgorithm = new PooledTopNAlgorithm(capabilities, query, bufferPool);

View File

@@ -81,7 +81,7 @@ public class DataSourceMetadataQueryTest
        "useCache",
        true,
        "populateCache",
-       true,
+       "true",
        "finalize",
        true
    )
@@ -101,8 +101,11 @@ public class DataSourceMetadataQueryTest
    Assert.assertEquals(1, serdeQuery.getContextValue("priority"));
    Assert.assertEquals(true, serdeQuery.getContextValue("useCache"));
-   Assert.assertEquals(true, serdeQuery.getContextValue("populateCache"));
+   Assert.assertEquals("true", serdeQuery.getContextValue("populateCache"));
    Assert.assertEquals(true, serdeQuery.getContextValue("finalize"));
+   Assert.assertEquals(true, serdeQuery.getContextBoolean("useCache", false));
+   Assert.assertEquals(true, serdeQuery.getContextBoolean("populateCache", false));
+   Assert.assertEquals(true, serdeQuery.getContextBoolean("finalize", false));
  }

  @Test
@Test @Test

View File

@@ -266,7 +266,7 @@ public class RealtimePlumber implements Plumber
  @Override
  public <T> QueryRunner<T> getQueryRunner(final Query<T> query)
  {
-   final boolean skipIncrementalSegment = query.getContextValue(SKIP_INCREMENTAL_SEGMENT, false);
+   final boolean skipIncrementalSegment = query.getContextBoolean(SKIP_INCREMENTAL_SEGMENT, false);
    final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
    final QueryToolChest<T, Query<T>> toolchest = factory.getToolchest();