mirror of https://github.com/apache/druid.git
Fix NPE in compactionTask (#5613)
* Fix NPE in compactionTask
* more annotations for metadata
* better error message for empty input
* fix build
* revert some null checks
* address comments
parent 124c89e435
commit f349e03091
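In short: CompactionTask builds an IndexTask from an ingestion spec derived from the segments in the target interval; when the interval contains no segments the spec is null, and the task previously dereferenced it. Below is a minimal, self-contained sketch of the guard pattern this commit introduces. The class and method names in the sketch are illustrative stand-ins only; the real change is in the CompactionTask diff that follows.

    // Illustrative only: mirrors the null-guard added in CompactionTask#run, with stand-in types.
    class CompactionGuardSketch
    {
      static String runCompaction(Object ingestionSpec, String taskId)
      {
        Object indexTaskSpec = null;
        if (ingestionSpec != null) {
          // stands in for: indexTaskSpec = new IndexTask(..., ingestionSpec, ...)
          indexTaskSpec = ingestionSpec;
        }

        if (indexTaskSpec == null) {
          // empty input: fail the task with a clear message instead of hitting an NPE
          return "FAILED " + taskId;
        } else {
          // stands in for: indexTaskSpec.run(toolbox)
          return "SUCCESS " + taskId;
        }
      }

      public static void main(String[] args)
      {
        System.out.println(runCompaction(null, "compact_demo"));        // FAILED compact_demo
        System.out.println(runCompaction(new Object(), "compact_demo")); // SUCCESS compact_demo
      }
    }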
CompactionTask.java

@@ -51,6 +51,7 @@ import io.druid.indexing.firehose.IngestSegmentFirehoseFactory;
import io.druid.java.util.common.ISE;
import io.druid.java.util.common.JodaUtils;
import io.druid.java.util.common.Pair;
import io.druid.java.util.common.RE;
import io.druid.java.util.common.granularity.NoneGranularity;
import io.druid.java.util.common.guava.Comparators;
import io.druid.java.util.common.jackson.JacksonUtils;

@@ -79,6 +80,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@@ -195,28 +197,37 @@ public class CompactionTask extends AbstractTask
          jsonMapper
      );

-     indexTaskSpec = new IndexTask(
-         getId(),
-         getGroupId(),
-         getTaskResource(),
-         getDataSource(),
-         ingestionSpec,
-         getContext(),
-         authorizerMapper,
-         null
-     );
+     if (ingestionSpec != null) {
+       indexTaskSpec = new IndexTask(
+           getId(),
+           getGroupId(),
+           getTaskResource(),
+           getDataSource(),
+           ingestionSpec,
+           getContext(),
+           authorizerMapper,
+           null
+       );
+     }
    }

-   if (indexTaskSpec.getIngestionSchema() == null) {
-     log.info("Cannot find segments for interval");
+   if (indexTaskSpec == null) {
+     log.warn("Failed to generate compaction spec");
      return TaskStatus.failure(getId());
-   }
-
-   final String json = jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(indexTaskSpec);
-   log.info("Generated compaction task details: " + json);
-
-   return indexTaskSpec.run(toolbox);
+   } else {
+     final String json = jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(indexTaskSpec);
+     log.info("Generated compaction task details: " + json);
+
+     return indexTaskSpec.run(toolbox);
+   }
  }

+ /**
+  * Generate {@link IndexIngestionSpec} from input segments.
+  *
+  * @return null if input segments don't exist. Otherwise, a generated ingestionSpec.
+  */
+ @Nullable
  @VisibleForTesting
  static IndexIngestionSpec createIngestionSchema(
      TaskToolbox toolbox,

@@ -289,12 +300,22 @@ public class CompactionTask extends AbstractTask
      throws IOException
  {
    // find metadata for interval
-   final List<QueryableIndex> queryableIndices = loadSegments(timelineSegments, segmentFileMap, indexIO);
+   final List<Pair<QueryableIndex, DataSegment>> queryableIndexAndSegments = loadSegments(
+       timelineSegments,
+       segmentFileMap,
+       indexIO
+   );

    // find merged aggregators
-   final List<AggregatorFactory[]> aggregatorFactories = queryableIndices
+   for (Pair<QueryableIndex, DataSegment> pair : queryableIndexAndSegments) {
+     final QueryableIndex index = pair.lhs;
+     if (index.getMetadata() == null) {
+       throw new RE("Index metadata doesn't exist for segment[%s]", pair.rhs.getIdentifier());
+     }
+   }
+   final List<AggregatorFactory[]> aggregatorFactories = queryableIndexAndSegments
        .stream()
-       .map(index -> index.getMetadata().getAggregators())
+       .map(pair -> pair.lhs.getMetadata().getAggregators()) // We have already done null check on index.getMetadata()
        .collect(Collectors.toList());
    final AggregatorFactory[] mergedAggregators = AggregatorFactory.mergeAggregators(aggregatorFactories);

@@ -304,7 +325,11 @@ public class CompactionTask extends AbstractTask

    // find granularity spec
    // set rollup only if rollup is set for all segments
-   final boolean rollup = queryableIndices.stream().allMatch(index -> index.getMetadata().isRollup());
+   final boolean rollup = queryableIndexAndSegments.stream().allMatch(pair -> {
+     // We have already checked getMetadata() doesn't return null
+     final Boolean isRollup = pair.lhs.getMetadata().isRollup();
+     return isRollup != null && isRollup;
+   });
    final GranularitySpec granularitySpec = new ArbitraryGranularitySpec(
        new NoneGranularity(),
        rollup,

@@ -313,7 +338,7 @@ public class CompactionTask extends AbstractTask

    // find unique dimensions
    final DimensionsSpec finalDimensionsSpec = dimensionsSpec == null ?
-       createDimensionsSpec(queryableIndices) :
+       createDimensionsSpec(queryableIndexAndSegments) :
        dimensionsSpec;
    final InputRowParser parser = new NoopInputRowParser(new TimeAndDimsParseSpec(null, finalDimensionsSpec));

@@ -327,7 +352,7 @@ public class CompactionTask extends AbstractTask
    );
  }

- private static DimensionsSpec createDimensionsSpec(List<QueryableIndex> queryableIndices)
+ private static DimensionsSpec createDimensionsSpec(List<Pair<QueryableIndex, DataSegment>> queryableIndices)
  {
    final BiMap<String, Integer> uniqueDims = HashBiMap.create();
    final Map<String, DimensionSchema> dimensionSchemaMap = new HashMap<>();

@@ -337,9 +362,24 @@ public class CompactionTask extends AbstractTask
    // Dimensions are extracted from the recent segments to olders because recent segments are likely to be queried more
    // frequently, and thus the performance should be optimized for recent ones rather than old ones.

-   // timelineSegments are sorted in order of interval
+   // timelineSegments are sorted in order of interval, but we do a sanity check here.
+   final Comparator<Interval> intervalComparator = Comparators.intervalsByStartThenEnd();
+   for (int i = 0; i < queryableIndices.size() - 1; i++) {
+     final Interval shouldBeSmaller = queryableIndices.get(i).lhs.getDataInterval();
+     final Interval shouldBeLarger = queryableIndices.get(i + 1).lhs.getDataInterval();
+     Preconditions.checkState(
+         intervalComparator.compare(shouldBeSmaller, shouldBeLarger) <= 0,
+         "QueryableIndexes are not sorted! Interval[%s] of segment[%s] is laster than interval[%s] of segment[%s]",
+         shouldBeSmaller,
+         queryableIndices.get(i).rhs.getIdentifier(),
+         shouldBeLarger,
+         queryableIndices.get(i + 1).rhs.getIdentifier()
+     );
+   }
+
    int index = 0;
-   for (QueryableIndex queryableIndex : Lists.reverse(queryableIndices)) {
+   for (Pair<QueryableIndex, DataSegment> pair : Lists.reverse(queryableIndices)) {
+     final QueryableIndex queryableIndex = pair.lhs;
      final Map<String, DimensionHandler> dimensionHandlerMap = queryableIndex.getDimensionHandlers();

      for (String dimension : queryableIndex.getAvailableDimensions()) {

@@ -385,23 +425,22 @@ public class CompactionTask extends AbstractTask
    return new DimensionsSpec(dimensionSchemas, null, null);
  }

- private static List<QueryableIndex> loadSegments(
+ private static List<Pair<QueryableIndex, DataSegment>> loadSegments(
      List<TimelineObjectHolder<String, DataSegment>> timelineSegments,
      Map<DataSegment, File> segmentFileMap,
      IndexIO indexIO
  ) throws IOException
  {
-   final List<QueryableIndex> segments = new ArrayList<>();
+   final List<Pair<QueryableIndex, DataSegment>> segments = new ArrayList<>();

    for (TimelineObjectHolder<String, DataSegment> timelineSegment : timelineSegments) {
      final PartitionHolder<DataSegment> partitionHolder = timelineSegment.getObject();
      for (PartitionChunk<DataSegment> chunk : partitionHolder) {
        final DataSegment segment = chunk.getObject();
-       segments.add(
-           indexIO.loadIndex(
-               Preconditions.checkNotNull(segmentFileMap.get(segment), "File for segment %s", segment.getIdentifier())
-           )
-       );
+       final QueryableIndex queryableIndex = indexIO.loadIndex(
+           Preconditions.checkNotNull(segmentFileMap.get(segment), "File for segment %s", segment.getIdentifier())
+       );
+       segments.add(Pair.of(queryableIndex, segment));
      }
    }
CompactionTaskTest.java

@@ -26,6 +26,7 @@ import com.fasterxml.jackson.databind.module.SimpleModule;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
+ import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.inject.Binder;
import com.google.inject.Module;

@@ -88,6 +89,7 @@ import io.druid.timeline.partition.NumberedShardSpec;
import org.hamcrest.CoreMatchers;
import org.joda.time.Interval;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;

@@ -129,10 +131,12 @@ public class CompactionTaskTest
  private static Map<String, AggregatorFactory> AGGREGATORS;
  private static List<DataSegment> SEGMENTS;
  private static ObjectMapper objectMapper = setupInjectablesInObjectMapper(new DefaultObjectMapper());
- private static TaskToolbox toolbox;
+ private static Map<DataSegment, File> segmentMap;

+ private TaskToolbox toolbox;
+
  @BeforeClass
- public static void setup()
+ public static void setupClass()
  {
    DIMENSIONS = new HashMap<>();
    AGGREGATORS = new HashMap<>();

@@ -166,7 +170,7 @@ public class CompactionTaskTest
    AGGREGATORS.put("agg_3", new FloatFirstAggregatorFactory("agg_3", "float_dim_3"));
    AGGREGATORS.put("agg_4", new DoubleLastAggregatorFactory("agg_4", "double_dim_4"));

-   final Map<DataSegment, File> segmentMap = new HashMap<>(5);
+   segmentMap = new HashMap<>(5);
    for (int i = 0; i < 5; i++) {
      final Interval segmentInterval = Intervals.of(StringUtils.format("2017-0%d-01/2017-0%d-01", (i + 1), (i + 2)));
      segmentMap.put(

@@ -185,12 +189,6 @@ public class CompactionTaskTest
      );
    }
    SEGMENTS = new ArrayList<>(segmentMap.keySet());
-
-   toolbox = new TestTaskToolbox(
-       new TestTaskActionClient(new ArrayList<>(segmentMap.keySet())),
-       new TestIndexIO(objectMapper, segmentMap),
-       segmentMap
-   );
  }

  private static ObjectMapper setupInjectablesInObjectMapper(ObjectMapper objectMapper)

@@ -272,6 +270,16 @@ public class CompactionTaskTest
  @Rule
  public ExpectedException expectedException = ExpectedException.none();

+ @Before
+ public void setup()
+ {
+   toolbox = new TestTaskToolbox(
+       new TestTaskActionClient(new ArrayList<>(segmentMap.keySet())),
+       new TestIndexIO(objectMapper, segmentMap),
+       segmentMap
+   );
+ }
+
  @Test
  public void testSerdeWithInterval() throws IOException
  {

@@ -415,6 +423,24 @@ public class CompactionTaskTest
    );
  }

+ @Test
+ public void testMissingMetadata() throws IOException, SegmentLoadingException
+ {
+   expectedException.expect(RuntimeException.class);
+   expectedException.expectMessage(CoreMatchers.startsWith("Index metadata doesn't exist for segment"));
+
+   final TestIndexIO indexIO = (TestIndexIO) toolbox.getIndexIO();
+   indexIO.removeMetadata(Iterables.getFirst(indexIO.getQueryableIndexMap().keySet(), null));
+   final List<DataSegment> segments = new ArrayList<>(SEGMENTS);
+   CompactionTask.createIngestionSchema(
+       toolbox,
+       new SegmentProvider(segments),
+       null,
+       TUNING_CONFIG,
+       objectMapper
+   );
+ }
+
  private static DimensionsSpec getExpectedDimensionsSpecForAutoGeneration()
  {
    return new DimensionsSpec(

@@ -599,9 +625,13 @@ public class CompactionTaskTest
      }
    }

-   final Metadata metadata = new Metadata();
-   metadata.setAggregators(aggregatorFactories.toArray(new AggregatorFactory[aggregatorFactories.size()]));
-   metadata.setRollup(false);
+   final Metadata metadata = new Metadata(
+       null,
+       aggregatorFactories.toArray(new AggregatorFactory[0]),
+       null,
+       null,
+       null
+   );

    queryableIndexMap.put(
        entry.getValue(),

@@ -622,6 +652,31 @@ public class CompactionTaskTest
    {
      return queryableIndexMap.get(file);
    }

+   void removeMetadata(File file)
+   {
+     final SimpleQueryableIndex index = (SimpleQueryableIndex) queryableIndexMap.get(file);
+     if (index != null) {
+       queryableIndexMap.put(
+           file,
+           new SimpleQueryableIndex(
+               index.getDataInterval(),
+               index.getColumnNames(),
+               index.getAvailableDimensions(),
+               index.getBitmapFactoryForDimensions(),
+               index.getColumns(),
+               index.getFileMapper(),
+               null,
+               index.getDimensionHandlers()
+           )
+       );
+     }
+   }
+
+   Map<File, QueryableIndex> getQueryableIndexMap()
+   {
+     return queryableIndexMap;
+   }
  }

  private static Column createColumn(DimensionSchema dimensionSchema)
Metadata.java

@@ -25,11 +25,13 @@ import io.druid.guice.annotations.PublicApi;
import io.druid.java.util.common.granularity.Granularity;
import io.druid.query.aggregation.AggregatorFactory;

+ import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+ import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;

@@ -39,71 +41,66 @@ public class Metadata
{
  // container is used for arbitrary key-value pairs in segment metadata e.g.
  // kafka firehose uses it to store commit offset
  @JsonProperty
  private final Map<String, Object> container;
+ @Nullable
+ private final AggregatorFactory[] aggregators;
+ @Nullable
+ private final TimestampSpec timestampSpec;
+ @Nullable
+ private final Granularity queryGranularity;
+ @Nullable
+ private final Boolean rollup;

- @JsonProperty
- private AggregatorFactory[] aggregators;
-
- @JsonProperty
- private TimestampSpec timestampSpec;
-
- @JsonProperty
- private Granularity queryGranularity;
-
- @JsonProperty
- private Boolean rollup;
-
- public Metadata()
+ public Metadata(
+     @JsonProperty("container") @Nullable Map<String, Object> container,
+     @JsonProperty("aggregators") @Nullable AggregatorFactory[] aggregators,
+     @JsonProperty("timestampSpec") @Nullable TimestampSpec timestampSpec,
+     @JsonProperty("queryGranularity") @Nullable Granularity queryGranularity,
+     @JsonProperty("rollup") @Nullable Boolean rollup
+ )
  {
-   container = new ConcurrentHashMap<>();
+   this.container = container == null ? new ConcurrentHashMap<>() : container;
+   this.aggregators = aggregators;
+   this.timestampSpec = timestampSpec;
+   this.queryGranularity = queryGranularity;
+   this.rollup = rollup;
  }

  @JsonProperty
  public Map<String, Object> getContainer()
  {
    return container;
  }

  @JsonProperty
+ @Nullable
  public AggregatorFactory[] getAggregators()
  {
    return aggregators;
  }

- public Metadata setAggregators(AggregatorFactory[] aggregators)
- {
-   this.aggregators = aggregators;
-   return this;
- }
-
  @JsonProperty
+ @Nullable
  public TimestampSpec getTimestampSpec()
  {
    return timestampSpec;
  }

- public Metadata setTimestampSpec(TimestampSpec timestampSpec)
- {
-   this.timestampSpec = timestampSpec;
-   return this;
- }
-
  @JsonProperty
+ @Nullable
  public Granularity getQueryGranularity()
  {
    return queryGranularity;
  }

- public Metadata setQueryGranularity(Granularity queryGranularity)
- {
-   this.queryGranularity = queryGranularity;
-   return this;
- }
-
  @JsonProperty
+ @Nullable
  public Boolean isRollup()
  {
    return rollup;
  }

- public Metadata setRollup(Boolean rollup)
- {
-   this.rollup = rollup;
-   return this;
- }
-
- public Metadata putAll(Map<String, Object> other)
+ public Metadata putAll(@Nullable Map<String, Object> other)
  {
    if (other != null) {
      container.putAll(other);

@@ -116,7 +113,7 @@ public class Metadata
    return container.get(key);
  }

- public Metadata put(String key, Object value)
+ public Metadata put(String key, @Nullable Object value)
  {
    if (value != null) {
      container.put(key, value);

@@ -127,9 +124,10 @@ public class Metadata
  // arbitrary key-value pairs from the metadata just follow the semantics of last one wins if same
  // key exists in multiple input Metadata containers
  // for others e.g. Aggregators, appropriate merging is done
+ @Nullable
  public static Metadata merge(
-     List<Metadata> toBeMerged,
-     AggregatorFactory[] overrideMergedAggregators
+     @Nullable List<Metadata> toBeMerged,
+     @Nullable AggregatorFactory[] overrideMergedAggregators
  )
  {
    if (toBeMerged == null || toBeMerged.size() == 0) {

@@ -139,7 +137,7 @@ public class Metadata
    boolean foundSomeMetadata = false;
    Map<String, Object> mergedContainer = new HashMap<>();
    List<AggregatorFactory[]> aggregatorsToMerge = overrideMergedAggregators == null
-                                                  ? new ArrayList<AggregatorFactory[]>()
+                                                  ? new ArrayList<>()
                                                   : null;

    List<TimestampSpec> timestampSpecsToMerge = new ArrayList<>();

@@ -179,20 +177,17 @@ public class Metadata
      return null;
    }

-   Metadata result = new Metadata();
-   if (aggregatorsToMerge != null) {
-     result.setAggregators(AggregatorFactory.mergeAggregators(aggregatorsToMerge));
-   } else {
-     result.setAggregators(overrideMergedAggregators);
-   }
+   final AggregatorFactory[] mergedAggregators = aggregatorsToMerge == null ?
+                                                 overrideMergedAggregators :
+                                                 AggregatorFactory.mergeAggregators(aggregatorsToMerge);

-   if (timestampSpecsToMerge != null) {
-     result.setTimestampSpec(TimestampSpec.mergeTimestampSpec(timestampSpecsToMerge));
-   }
+   final TimestampSpec mergedTimestampSpec = timestampSpecsToMerge == null ?
+                                             null :
+                                             TimestampSpec.mergeTimestampSpec(timestampSpecsToMerge);

-   if (gransToMerge != null) {
-     result.setQueryGranularity(Granularity.mergeGranularities(gransToMerge));
-   }
+   final Granularity mergedGranularity = gransToMerge == null ?
+                                         null :
+                                         Granularity.mergeGranularities(gransToMerge);

    Boolean rollup = null;
    if (rollupToMerge != null && !rollupToMerge.isEmpty()) {

@@ -210,10 +205,13 @@ public class Metadata
      }
    }

-   result.setRollup(rollup);
-   result.container.putAll(mergedContainer);
-   return result;
+   return new Metadata(
+       mergedContainer,
+       mergedAggregators,
+       mergedTimestampSpec,
+       mergedGranularity,
+       rollup
+   );
  }

  @Override

@@ -225,37 +223,18 @@ public class Metadata
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
-
-   Metadata metadata = (Metadata) o;
-
-   if (!container.equals(metadata.container)) {
-     return false;
-   }
-   // Probably incorrect - comparing Object[] arrays with Arrays.equals
-   if (!Arrays.equals(aggregators, metadata.aggregators)) {
-     return false;
-   }
-   if (timestampSpec != null ? !timestampSpec.equals(metadata.timestampSpec) : metadata.timestampSpec != null) {
-     return false;
-   }
-   if (rollup != null ? !rollup.equals(metadata.rollup) : metadata.rollup != null) {
-     return false;
-   }
-   return queryGranularity != null
-          ? queryGranularity.equals(metadata.queryGranularity)
-          : metadata.queryGranularity == null;
+   final Metadata metadata = (Metadata) o;
+   return Objects.equals(container, metadata.container) &&
+          Arrays.equals(aggregators, metadata.aggregators) &&
+          Objects.equals(timestampSpec, metadata.timestampSpec) &&
+          Objects.equals(queryGranularity, metadata.queryGranularity) &&
+          Objects.equals(rollup, metadata.rollup);
  }

  @Override
  public int hashCode()
  {
-   int result = container.hashCode();
-   result = 31 * result + Arrays.hashCode(aggregators);
-   result = 31 * result + (timestampSpec != null ? timestampSpec.hashCode() : 0);
-   result = 31 * result + (queryGranularity != null ? queryGranularity.hashCode() : 0);
-   result = 31 * result + (rollup != null ? rollup.hashCode() : 0);
-   return result;
+   return Objects.hash(container, Arrays.hashCode(aggregators), timestampSpec, queryGranularity, rollup);
  }

  @Override
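Usage note on the Metadata change above: the class is now effectively immutable, so callers build a fully populated instance up front instead of chaining setters. A small sketch of the new call pattern, using values that appear elsewhere in this diff (imports omitted; per the constructor shown above, any argument may be null, and a null container falls back to an empty ConcurrentHashMap):

    // Old style (removed): new Metadata().setAggregators(aggs).setQueryGranularity(...).setRollup(...)
    // New style (this commit): pass everything through the five-argument constructor.
    Metadata metadata = new Metadata(
        Collections.singletonMap("k", "v"),                                 // container
        new AggregatorFactory[]{new LongSumAggregatorFactory("out", "in")}, // aggregators
        new TimestampSpec("ds", "auto", null),                              // timestampSpec
        Granularities.ALL,                                                  // queryGranularity
        Boolean.FALSE                                                       // rollup
    );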
QueryableIndex.java

@@ -23,6 +23,7 @@ import io.druid.collections.bitmap.BitmapFactory;
import io.druid.segment.data.Indexed;
import org.joda.time.Interval;

+ import javax.annotation.Nullable;
import java.io.Closeable;
import java.io.IOException;
import java.util.Map;

@@ -40,7 +41,7 @@ public interface QueryableIndex extends ColumnSelector, Closeable
  int getNumRows();
  Indexed<String> getAvailableDimensions();
  BitmapFactory getBitmapFactoryForDimensions();
- Metadata getMetadata();
+ @Nullable Metadata getMetadata();
  Map<String, DimensionHandler> getDimensionHandlers();

  /**
SimpleQueryableIndex.java

@@ -19,6 +19,7 @@

package io.druid.segment;

+ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;

@@ -43,6 +44,7 @@ public class SimpleQueryableIndex extends AbstractIndex implements QueryableIndex
  private final BitmapFactory bitmapFactory;
  private final Map<String, Column> columns;
  private final SmooshedFileMapper fileMapper;
+ @Nullable
  private final Metadata metadata;
  private final Map<String, DimensionHandler> dimensionHandlers;

@@ -51,11 +53,11 @@ public class SimpleQueryableIndex extends AbstractIndex implements QueryableIndex
      BitmapFactory bitmapFactory,
      Map<String, Column> columns,
      SmooshedFileMapper fileMapper,
-     Metadata metadata
+     @Nullable Metadata metadata
  )
  {
    Preconditions.checkNotNull(columns.get(Column.TIME_COLUMN_NAME));
-   this.dataInterval = dataInterval;
+   this.dataInterval = Preconditions.checkNotNull(dataInterval, "dataInterval");
    ImmutableList.Builder<String> columnNamesBuilder = ImmutableList.builder();
    for (String column : columns.keySet()) {
      if (!Column.TIME_COLUMN_NAME.equals(column)) {

@@ -72,6 +74,28 @@ public class SimpleQueryableIndex extends AbstractIndex implements QueryableIndex
    initDimensionHandlers();
  }

+ @VisibleForTesting
+ public SimpleQueryableIndex(
+     Interval interval,
+     List<String> columnNames,
+     Indexed<String> availableDimensions,
+     BitmapFactory bitmapFactory,
+     Map<String, Column> columns,
+     SmooshedFileMapper fileMapper,
+     @Nullable Metadata metadata,
+     Map<String, DimensionHandler> dimensionHandlers
+ )
+ {
+   this.dataInterval = interval;
+   this.columnNames = columnNames;
+   this.availableDimensions = availableDimensions;
+   this.bitmapFactory = bitmapFactory;
+   this.columns = columns;
+   this.fileMapper = fileMapper;
+   this.metadata = metadata;
+   this.dimensionHandlers = dimensionHandlers;
+ }
+
  @Override
  public Interval getDataInterval()
  {

@@ -115,6 +139,18 @@ public class SimpleQueryableIndex extends AbstractIndex implements QueryableIndex
    return columns.get(columnName);
  }

+ @VisibleForTesting
+ public Map<String, Column> getColumns()
+ {
+   return columns;
+ }
+
+ @VisibleForTesting
+ public SmooshedFileMapper getFileMapper()
+ {
+   return fileMapper;
+ }
+
  @Override
  public void close()
  {
IncrementalIndex.java

@@ -276,11 +276,13 @@ public abstract class IncrementalIndex<AggregatorType> extends AbstractIndex
    this.reportParseExceptions = reportParseExceptions;

    this.columnCapabilities = Maps.newHashMap();
-   this.metadata = new Metadata()
-       .setAggregators(getCombiningAggregators(metrics))
-       .setTimestampSpec(incrementalIndexSchema.getTimestampSpec())
-       .setQueryGranularity(this.gran)
-       .setRollup(this.rollup);
+   this.metadata = new Metadata(
+       null,
+       getCombiningAggregators(metrics),
+       incrementalIndexSchema.getTimestampSpec(),
+       this.gran,
+       this.rollup
+   );

    this.aggs = initAggs(metrics, rowSupplier, deserializeComplexMetrics, concurrentEventAdd);
IndexMergerTestBase.java

@@ -274,13 +274,13 @@ public class IndexMergerTestBase
    assertDimCompression(index, indexSpec.getDimensionCompression());

    Assert.assertEquals(
-       new Metadata()
-           .setAggregators(
-               IncrementalIndexTest.getDefaultCombiningAggregatorFactories()
-           )
-           .setQueryGranularity(Granularities.NONE)
-           .setRollup(Boolean.TRUE)
-           .putAll(metadataElems),
+       new Metadata(
+           metadataElems,
+           IncrementalIndexTest.getDefaultCombiningAggregatorFactories(),
+           null,
+           Granularities.NONE,
+           Boolean.TRUE
+       ),
        index.getMetadata()
    );
  }
MetadataTest.java

@@ -31,6 +31,7 @@ import org.junit.Assert;
import org.junit.Test;

import java.util.ArrayList;
+ import java.util.Collections;
import java.util.List;

/**

@@ -42,15 +43,17 @@ public class MetadataTest
  {
    ObjectMapper jsonMapper = TestHelper.makeJsonMapper();

-   Metadata metadata = new Metadata();
-   metadata.put("k", "v");
-
    AggregatorFactory[] aggregators = new AggregatorFactory[] {
        new LongSumAggregatorFactory("out", "in")
    };
-   metadata.setAggregators(aggregators);
-   metadata.setQueryGranularity(Granularities.ALL);
-   metadata.setRollup(Boolean.FALSE);
+
+   Metadata metadata = new Metadata(
+       Collections.singletonMap("k", "v"),
+       aggregators,
+       null,
+       Granularities.ALL,
+       Boolean.FALSE
+   );

    Metadata other = jsonMapper.readValue(
        jsonMapper.writeValueAsString(metadata),

@@ -75,30 +78,39 @@ public class MetadataTest
    AggregatorFactory[] aggs = new AggregatorFactory[] {
        new LongMaxAggregatorFactory("n", "f")
    };
-   Metadata m1 = new Metadata();
-   m1.put("k", "v");
-   m1.setAggregators(aggs);
-   m1.setTimestampSpec(new TimestampSpec("ds", "auto", null));
-   m1.setQueryGranularity(Granularities.ALL);
-   m1.setRollup(Boolean.FALSE);
+   final Metadata m1 = new Metadata(
+       Collections.singletonMap("k", "v"),
+       aggs,
+       new TimestampSpec("ds", "auto", null),
+       Granularities.ALL,
+       Boolean.FALSE
+   );

-   Metadata m2 = new Metadata();
-   m2.put("k", "v");
-   m2.setAggregators(aggs);
-   m2.setTimestampSpec(new TimestampSpec("ds", "auto", null));
-   m2.setQueryGranularity(Granularities.ALL);
-   m2.setRollup(Boolean.FALSE);
+   final Metadata m2 = new Metadata(
+       Collections.singletonMap("k", "v"),
+       aggs,
+       new TimestampSpec("ds", "auto", null),
+       Granularities.ALL,
+       Boolean.FALSE
+   );

-   Metadata merged = new Metadata();
-   merged.put("k", "v");
-   merged.setAggregators(
+   final Metadata m3 = new Metadata(
+       Collections.singletonMap("k", "v"),
+       aggs,
+       new TimestampSpec("ds", "auto", null),
+       Granularities.ALL,
+       Boolean.TRUE
+   );
+
+   final Metadata merged = new Metadata(
+       Collections.singletonMap("k", "v"),
        new AggregatorFactory[]{
            new LongMaxAggregatorFactory("n", "n")
-       }
-   );
-   merged.setTimestampSpec(new TimestampSpec("ds", "auto", null));
-   merged.setRollup(Boolean.FALSE);
-   merged.setQueryGranularity(Granularities.ALL);
+       },
+       new TimestampSpec("ds", "auto", null),
+       Granularities.ALL,
+       Boolean.FALSE
+   );
    Assert.assertEquals(merged, Metadata.merge(ImmutableList.of(m1, m2), null));

    //merge check with one metadata being null

@@ -107,29 +119,32 @@ public class MetadataTest
    metadataToBeMerged.add(m2);
    metadataToBeMerged.add(null);

-   merged.setAggregators(null);
-   merged.setTimestampSpec(null);
-   merged.setQueryGranularity(null);
-   merged.setRollup(null);
-   Assert.assertEquals(merged, Metadata.merge(metadataToBeMerged, null));
+   final Metadata merged2 = new Metadata(Collections.singletonMap("k", "v"), null, null, null, null);
+
+   Assert.assertEquals(merged2, Metadata.merge(metadataToBeMerged, null));

    //merge check with client explicitly providing merged aggregators
    AggregatorFactory[] explicitAggs = new AggregatorFactory[] {
        new DoubleMaxAggregatorFactory("x", "y")
    };
-   merged.setAggregators(explicitAggs);
+
+   final Metadata merged3 = new Metadata(Collections.singletonMap("k", "v"), explicitAggs, null, null, null);
+
    Assert.assertEquals(
-       merged,
+       merged3,
        Metadata.merge(metadataToBeMerged, explicitAggs)
    );

-   merged.setTimestampSpec(new TimestampSpec("ds", "auto", null));
-   merged.setQueryGranularity(Granularities.ALL);
-   m1.setRollup(Boolean.TRUE);
+   final Metadata merged4 = new Metadata(
+       Collections.singletonMap("k", "v"),
+       explicitAggs,
+       new TimestampSpec("ds", "auto", null),
+       Granularities.ALL,
+       null
+   );
    Assert.assertEquals(
-       merged,
-       Metadata.merge(ImmutableList.of(m1, m2), explicitAggs)
+       merged4,
+       Metadata.merge(ImmutableList.of(m3, m2), explicitAggs)
    );
  }
}