Fix some query cache key collisions. (#4094)

The query caches generally store dimensions and aggregators positionally, so
appendCacheablesIgnoringOrder could lead to incorrect results being pulled
from the cache.
This commit is contained in:
Gian Merlino 2017-03-22 11:08:48 -07:00 committed by Fangjin Yang
parent 77b6213222
commit 1f48198607
4 changed files with 52 additions and 9 deletions

View File

@@ -388,8 +388,8 @@ public class GroupByQueryQueryToolChest extends QueryToolChest<Row, GroupByQuery
.appendByte(CACHE_STRATEGY_VERSION) .appendByte(CACHE_STRATEGY_VERSION)
.appendCacheable(query.getGranularity()) .appendCacheable(query.getGranularity())
.appendCacheable(query.getDimFilter()) .appendCacheable(query.getDimFilter())
.appendCacheablesIgnoringOrder(query.getAggregatorSpecs()) .appendCacheables(query.getAggregatorSpecs())
.appendCacheablesIgnoringOrder(query.getDimensions()) .appendCacheables(query.getDimensions())
.appendCacheable(query.getVirtualColumns()) .appendCacheable(query.getVirtualColumns())
.build(); .build();
} }

View File

@@ -141,7 +141,7 @@ public class TimeseriesQueryQueryToolChest extends QueryToolChest<Result<Timeser
.appendBoolean(query.isSkipEmptyBuckets()) .appendBoolean(query.isSkipEmptyBuckets())
.appendCacheable(query.getGranularity()) .appendCacheable(query.getGranularity())
.appendCacheable(query.getDimensionsFilter()) .appendCacheable(query.getDimensionsFilter())
.appendCacheablesIgnoringOrder(query.getAggregatorSpecs()) .appendCacheables(query.getAggregatorSpecs())
.appendCacheable(query.getVirtualColumns()) .appendCacheable(query.getVirtualColumns())
.build(); .build();
} }

View File

@@ -317,7 +317,7 @@ public class TopNQueryQueryToolChest extends QueryToolChest<Result<TopNResultVal
.appendInt(query.getThreshold()) .appendInt(query.getThreshold())
.appendCacheable(query.getGranularity()) .appendCacheable(query.getGranularity())
.appendCacheable(query.getDimensionsFilter()) .appendCacheable(query.getDimensionsFilter())
.appendCacheablesIgnoringOrder(query.getAggregatorSpecs()) .appendCacheables(query.getAggregatorSpecs())
.appendCacheable(query.getVirtualColumns()); .appendCacheable(query.getVirtualColumns());
final List<PostAggregator> postAggregators = prunePostAggregators(query); final List<PostAggregator> postAggregators = prunePostAggregators(query);

View File

@@ -25,11 +25,12 @@ import com.google.common.collect.ImmutableMap;
import io.druid.jackson.DefaultObjectMapper; import io.druid.jackson.DefaultObjectMapper;
import io.druid.java.util.common.granularity.Granularities; import io.druid.java.util.common.granularity.Granularities;
import io.druid.query.CacheStrategy; import io.druid.query.CacheStrategy;
import io.druid.query.Druids;
import io.druid.query.QueryRunnerTestHelper; import io.druid.query.QueryRunnerTestHelper;
import io.druid.query.Result; import io.druid.query.Result;
import io.druid.query.TableDataSource; import io.druid.query.TableDataSource;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.CountAggregatorFactory; import io.druid.query.aggregation.CountAggregatorFactory;
import io.druid.query.aggregation.LongSumAggregatorFactory;
import io.druid.query.spec.MultipleIntervalSegmentSpec; import io.druid.query.spec.MultipleIntervalSegmentSpec;
import io.druid.segment.VirtualColumns; import io.druid.segment.VirtualColumns;
import org.joda.time.DateTime; import org.joda.time.DateTime;
@@ -45,6 +46,8 @@ import java.util.Arrays;
@RunWith(Parameterized.class) @RunWith(Parameterized.class)
public class TimeseriesQueryQueryToolChestTest public class TimeseriesQueryQueryToolChestTest
{ {
private static final TimeseriesQueryQueryToolChest TOOL_CHEST = new TimeseriesQueryQueryToolChest(null);
@Parameterized.Parameters(name = "descending={0}") @Parameterized.Parameters(name = "descending={0}")
public static Iterable<Object[]> constructorFeeder() throws IOException public static Iterable<Object[]> constructorFeeder() throws IOException
{ {
@@ -61,9 +64,8 @@ public class TimeseriesQueryQueryToolChestTest
@Test @Test
public void testCacheStrategy() throws Exception public void testCacheStrategy() throws Exception
{ {
CacheStrategy<Result<TimeseriesResultValue>, Object, TimeseriesQuery> strategy = CacheStrategy<Result<TimeseriesResultValue>, Object, TimeseriesQuery> strategy =
new TimeseriesQueryQueryToolChest(null).getCacheStrategy( TOOL_CHEST.getCacheStrategy(
new TimeseriesQuery( new TimeseriesQuery(
new TableDataSource("dummy"), new TableDataSource("dummy"),
new MultipleIntervalSegmentSpec( new MultipleIntervalSegmentSpec(
@@ -77,7 +79,10 @@ public class TimeseriesQueryQueryToolChestTest
VirtualColumns.EMPTY, VirtualColumns.EMPTY,
null, null,
Granularities.ALL, Granularities.ALL,
ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")), ImmutableList.of(
new CountAggregatorFactory("metric1"),
new LongSumAggregatorFactory("metric0", "metric0")
),
null, null,
null null
) )
@@ -87,7 +92,7 @@ public class TimeseriesQueryQueryToolChestTest
// test timestamps that result in integer size millis // test timestamps that result in integer size millis
new DateTime(123L), new DateTime(123L),
new TimeseriesResultValue( new TimeseriesResultValue(
ImmutableMap.<String, Object>of("metric1", 2) ImmutableMap.of("metric1", 2, "metric0", 3)
) )
); );
@@ -103,4 +108,42 @@ public class TimeseriesQueryQueryToolChestTest
Assert.assertEquals(result, fromCacheResult); Assert.assertEquals(result, fromCacheResult);
} }
@Test
public void testCacheKey() throws Exception
{
final TimeseriesQuery query1 = Druids.newTimeseriesQueryBuilder()
.dataSource("dummy")
.intervals("2015-01-01/2015-01-02")
.descending(descending)
.granularity(Granularities.ALL)
.aggregators(
ImmutableList.of(
new CountAggregatorFactory("metric1"),
new LongSumAggregatorFactory("metric0", "metric0")
)
)
.build();
final TimeseriesQuery query2 = Druids.newTimeseriesQueryBuilder()
.dataSource("dummy")
.intervals("2015-01-01/2015-01-02")
.descending(descending)
.granularity(Granularities.ALL)
.aggregators(
ImmutableList.of(
new LongSumAggregatorFactory("metric0", "metric0"),
new CountAggregatorFactory("metric1")
)
)
.build();
// Test for https://github.com/druid-io/druid/issues/4093.
Assert.assertFalse(
Arrays.equals(
TOOL_CHEST.getCacheStrategy(query1).computeCacheKey(query1),
TOOL_CHEST.getCacheStrategy(query2).computeCacheKey(query2)
)
);
}
} }