fix serde issue when pulling timestamps from cache

Xavier Léauté 2015-04-22 10:47:12 -07:00
parent 6b0ba7602b
commit a0a28de551
7 changed files with 324 additions and 5 deletions
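The root cause, for context: cached results are serialized as untyped JSON arrays, and Jackson's untyped deserialization maps a JSON integer to the smallest fitting Java type. A timestamp whose millis fit in 32 bits therefore comes back from the cache as an Integer rather than a Long, and Joda-Time's DateTime(Object) constructor, which has an instant converter registered for Long but not for Integer, rejects it. The changes below widen through Number.longValue() in each toolchest instead of assuming a concrete boxed type. A minimal standalone sketch of the round trip (hypothetical class name; plain Jackson ObjectMapper standing in for Druid's DefaultObjectMapper):

import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.Arrays;
import java.util.List;

public class TimestampCacheSerdeSketch
{
  public static void main(String[] args) throws Exception
  {
    ObjectMapper mapper = new ObjectMapper();

    // A cache entry is written as [timestamp, value], with the timestamp as a long.
    byte[] cached = mapper.writeValueAsBytes(Arrays.<Object>asList(123L, "payload"));

    // Untyped deserialization picks the smallest fitting numeric type:
    // 123 comes back as java.lang.Integer, not java.lang.Long.
    List<?> result = mapper.readValue(cached, List.class);
    System.out.println(result.get(0).getClass()); // class java.lang.Integer

    // Old code: new DateTime(result.get(0)) -- Joda-Time has an instant
    // converter for Long but none for Integer, so small timestamps that
    // round-tripped through the cache threw IllegalArgumentException.

    // New code: widen through Number, which handles Integer and Long alike.
    long millis = ((Number) result.get(0)).longValue();
    System.out.println(millis); // 123
  }
}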


@@ -214,8 +214,8 @@ public class SearchQueryQueryToolChest extends QueryToolChest<Result<SearchResul
       {
         List<Object> result = (List<Object>) input;
-        return new Result<SearchResultValue>(
-            new DateTime(result.get(0)),
+        return new Result<>(
+            new DateTime(((Number) result.get(0)).longValue()),
             new SearchResultValue(
                 Lists.transform(
                     (List) result.get(1),


@@ -187,7 +187,7 @@ public class TimeBoundaryQueryQueryToolChest
         List<Object> result = (List<Object>) input;
         return new Result<>(
-            new DateTime(result.get(0)),
+            new DateTime(((Number) result.get(0)).longValue()),
             new TimeBoundaryResultValue(result.get(1))
         );
       }


@@ -380,7 +380,7 @@ public class TopNQueryQueryToolChest extends QueryToolChest<Result<TopNResultVal
         List<Map<String, Object>> retVal = Lists.newArrayListWithCapacity(results.size());
         Iterator<Object> inputIter = results.iterator();
-        DateTime timestamp = granularity.toDateTime(new DateTime(inputIter.next()).getMillis());
+        DateTime timestamp = granularity.toDateTime(((Number) inputIter.next()).longValue());
         while (inputIter.hasNext()) {
           List<Object> result = (List<Object>) inputIter.next();


@@ -0,0 +1,87 @@
/*
 * Licensed to Metamarkets Group Inc. (Metamarkets) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Metamarkets licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package io.druid.query.search;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import io.druid.granularity.QueryGranularity;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.query.CacheStrategy;
import io.druid.query.Result;
import io.druid.query.TableDataSource;
import io.druid.query.search.search.FragmentSearchQuerySpec;
import io.druid.query.search.search.SearchHit;
import io.druid.query.search.search.SearchQuery;
import io.druid.query.spec.MultipleIntervalSegmentSpec;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.junit.Assert;
import org.junit.Test;

public class SearchQueryQueryToolChestTest
{
  @Test
  public void testCacheStrategy() throws Exception
  {
    CacheStrategy<Result<SearchResultValue>, Object, SearchQuery> strategy =
        new SearchQueryQueryToolChest(null, null).getCacheStrategy(
            new SearchQuery(
                new TableDataSource("dummy"),
                null,
                QueryGranularity.ALL,
                1,
                new MultipleIntervalSegmentSpec(
                    ImmutableList.of(
                        new Interval(
                            "2015-01-01/2015-01-02"
                        )
                    )
                ),
                ImmutableList.of("dim1"),
                new FragmentSearchQuerySpec(ImmutableList.of("a", "b")),
                null,
                null
            )
        );

    final Result<SearchResultValue> result = new Result<>(
        new DateTime(123L),
        new SearchResultValue(
            ImmutableList.of(
                new SearchHit("dim1", "a")
            )
        )
    );

    Object preparedValue = strategy.prepareForCache().apply(
        result
    );

    ObjectMapper objectMapper = new DefaultObjectMapper();
    Object fromCacheValue = objectMapper.readValue(
        objectMapper.writeValueAsBytes(preparedValue),
        strategy.getCacheObjectClazz()
    );

    Result<SearchResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);

    Assert.assertEquals(result, fromCacheResult);
  }
}


@@ -17,9 +17,18 @@
 package io.druid.query.timeboundary;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import io.druid.jackson.DefaultObjectMapper;
+import io.druid.query.CacheStrategy;
+import io.druid.query.Result;
+import io.druid.query.TableDataSource;
+import io.druid.query.spec.MultipleIntervalSegmentSpec;
 import io.druid.timeline.LogicalSegment;
-import junit.framework.Assert;
+import org.joda.time.DateTime;
 import org.joda.time.Interval;
+import org.junit.Assert;
 import org.junit.Test;
 
 import java.util.Arrays;
 

@@ -95,4 +104,47 @@ public class TimeBoundaryQueryQueryToolChestTest
       Assert.assertEquals(segments.get(i).getInterval(), expected.get(i).getInterval());
     }
   }
+
+  @Test
+  public void testCacheStrategy() throws Exception
+  {
+    CacheStrategy<Result<TimeBoundaryResultValue>, Object, TimeBoundaryQuery> strategy =
+        new TimeBoundaryQueryQueryToolChest().getCacheStrategy(
+            new TimeBoundaryQuery(
+                new TableDataSource("dummy"),
+                new MultipleIntervalSegmentSpec(
+                    ImmutableList.of(
+                        new Interval(
+                            "2015-01-01/2015-01-02"
+                        )
+                    )
+                ),
+                null,
+                null
+            )
+        );
+
+    final Result<TimeBoundaryResultValue> result = new Result<>(
+        new DateTime(123L),
+        new TimeBoundaryResultValue(
+            ImmutableMap.of(
+                TimeBoundaryQuery.MIN_TIME, new DateTime(0L).toString(),
+                TimeBoundaryQuery.MAX_TIME, new DateTime("2015-01-01").toString()
+            )
+        )
+    );
+
+    Object preparedValue = strategy.prepareForCache().apply(
+        result
+    );
+
+    ObjectMapper objectMapper = new DefaultObjectMapper();
+    Object fromCacheValue = objectMapper.readValue(
+        objectMapper.writeValueAsBytes(preparedValue),
+        strategy.getCacheObjectClazz()
+    );
+
+    Result<TimeBoundaryResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);
+
+    Assert.assertEquals(result, fromCacheResult);
+  }
 }


@@ -0,0 +1,84 @@
/*
 * Licensed to Metamarkets Group Inc. (Metamarkets) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Metamarkets licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package io.druid.query.timeseries;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.druid.granularity.QueryGranularity;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.query.CacheStrategy;
import io.druid.query.Result;
import io.druid.query.TableDataSource;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.CountAggregatorFactory;
import io.druid.query.spec.MultipleIntervalSegmentSpec;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.junit.Assert;
import org.junit.Test;

public class TimeseriesQueryQueryToolChestTest
{
  @Test
  public void testCacheStrategy() throws Exception
  {
    CacheStrategy<Result<TimeseriesResultValue>, Object, TimeseriesQuery> strategy =
        new TimeseriesQueryQueryToolChest(null).getCacheStrategy(
            new TimeseriesQuery(
                new TableDataSource("dummy"),
                new MultipleIntervalSegmentSpec(
                    ImmutableList.of(
                        new Interval(
                            "2015-01-01/2015-01-02"
                        )
                    )
                ),
                null,
                QueryGranularity.ALL,
                ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")),
                null,
                null
            )
        );

    final Result<TimeseriesResultValue> result = new Result<>(
        // test timestamps that result in integer size millis
        new DateTime(123L),
        new TimeseriesResultValue(
            ImmutableMap.<String, Object>of("metric1", 2)
        )
    );

    Object preparedValue = strategy.prepareForCache().apply(result);

    ObjectMapper objectMapper = new DefaultObjectMapper();
    Object fromCacheValue = objectMapper.readValue(
        objectMapper.writeValueAsBytes(preparedValue),
        strategy.getCacheObjectClazz()
    );

    Result<TimeseriesResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);

    Assert.assertEquals(result, fromCacheResult);
  }
}


@@ -0,0 +1,96 @@
/*
 * Licensed to Metamarkets Group Inc. (Metamarkets) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Metamarkets licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package io.druid.query.topn;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.druid.granularity.QueryGranularity;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.query.CacheStrategy;
import io.druid.query.Result;
import io.druid.query.TableDataSource;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.CountAggregatorFactory;
import io.druid.query.dimension.DefaultDimensionSpec;
import io.druid.query.spec.MultipleIntervalSegmentSpec;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.junit.Assert;
import org.junit.Test;

import java.util.Arrays;

public class TopNQueryQueryToolChestTest
{
  @Test
  public void testCacheStrategy() throws Exception
  {
    CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> strategy =
        new TopNQueryQueryToolChest(null, null).getCacheStrategy(
            new TopNQuery(
                new TableDataSource("dummy"),
                new DefaultDimensionSpec("test", "test"),
                new NumericTopNMetricSpec("metric1"),
                3,
                new MultipleIntervalSegmentSpec(
                    ImmutableList.of(
                        new Interval(
                            "2015-01-01/2015-01-02"
                        )
                    )
                ),
                null,
                QueryGranularity.ALL,
                ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")),
                null,
                null
            )
        );

    final Result<TopNResultValue> result = new Result<>(
        // test timestamps that result in integer size millis
        new DateTime(123L),
        new TopNResultValue(
            Arrays.asList(
                ImmutableMap.<String, Object>of(
                    "test", "val1",
                    "metric1", 2
                )
            )
        )
    );

    Object preparedValue = strategy.prepareForCache().apply(
        result
    );

    ObjectMapper objectMapper = new DefaultObjectMapper();
    Object fromCacheValue = objectMapper.readValue(
        objectMapper.writeValueAsBytes(preparedValue),
        strategy.getCacheObjectClazz()
    );

    Result<TopNResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);

    Assert.assertEquals(result, fromCacheResult);
  }
}