Move RowSignature from druid-sql to druid-processing and make use of it. (#9508)

* Move RowSignature from druid-sql to druid-processing and make use of it.

1) Moved (most of) RowSignature from sql to processing, leaving the SQL-specific
   pieces behind in a RowSignatures utility class. RowSignature also picked up some
   new convenience methods along the way.
2) There were a lot of places in the code where Map<String, ValueType> was used to
   associate columns with type info. These are now all replaced with RowSignature.
3) QueryToolChest's resultArrayFields method is replaced with resultArraySignature,
   and it now provides type info.

* Fix up extensions.

* Various fixes
Gian Merlino 2020-03-12 11:06:44 -07:00 committed by GitHub
parent 3082b9289a
commit ff59d2e78b
168 changed files with 883 additions and 681 deletions
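For a sense of what point 2 means in practice, here is a minimal sketch of the new API as it appears in the hunks below (the column names are illustrative; the builder and accessor calls are the ones visible in this diff):

// Before: Map<String, ValueType> signature = ImmutableMap.of("country", ValueType.STRING);
// After: an ordered signature with typed, Optional-returning accessors.
RowSignature signature = RowSignature.builder()
                                     .addTimeColumn()                  // registers __time
                                     .add("country", ValueType.STRING)
                                     .add("added", ValueType.LONG)
                                     .build();
signature.getColumnName(0);          // "__time"
signature.indexOf("country");        // 1 (and -1 for columns that are absent)
signature.getColumnType("added");    // Optional.of(ValueType.LONG)
signature.getColumnType("nope");     // Optional.empty()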

View File

@ -24,6 +24,7 @@ import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.BufferAggregator;
import org.apache.druid.query.movingaverage.averagers.AveragerFactory;
import org.apache.druid.segment.ColumnSelectorFactory;
import org.apache.druid.segment.column.ValueType;
import javax.annotation.Nullable;
import java.util.Comparator;
@ -161,12 +162,14 @@ public class AveragerFactoryWrapper<T, R> extends AggregatorFactory
}
/**
* Not implemented. Throws UnsupportedOperationException.
* This method must be implemented since it is called by
* {@link org.apache.druid.query.groupby.GroupByQuery#computeResultRowSignature}. Returning "COMPLEX" causes the
* return type to be treated as unknown.
*/
@Override
public String getTypeName()
{
throw new UnsupportedOperationException("Invalid operation for AveragerFactoryWrapper.");
return ValueType.COMPLEX.name();
}
/**

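The "treated as unknown" behavior follows from the type-resolution recipe documented later in this commit (see the new AggregatorFactory#getTypeName javadoc): type names that do not match a simple ValueType fall back to COMPLEX. A minimal sketch of that recipe, assuming GuavaUtils.getEnumIfPresent behaves as that javadoc describes (type names illustrative):

// "COMPLEX" matches the enum directly; an unrecognized name resolves to null and
// falls back to COMPLEX. Either way the signature treats the column's type as unknown.
ValueType a = Optional.ofNullable(GuavaUtils.getEnumIfPresent(ValueType.class, "COMPLEX"))
                      .orElse(ValueType.COMPLEX);   // ValueType.COMPLEX
ValueType b = Optional.ofNullable(GuavaUtils.getEnumIfPresent(ValueType.class, "movingAverage"))
                      .orElse(ValueType.COMPLEX);   // ValueType.COMPLEX (fallback)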
View File

@ -36,13 +36,13 @@ import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.tdigestsketch.TDigestSketchAggregatorFactory;
import org.apache.druid.query.aggregation.tdigestsketch.TDigestSketchUtils;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.SqlAggregator;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;

View File

@ -39,13 +39,13 @@ import org.apache.druid.query.aggregation.tdigestsketch.TDigestSketchAggregatorFactory
import org.apache.druid.query.aggregation.tdigestsketch.TDigestSketchToQuantilePostAggregator;
import org.apache.druid.query.aggregation.tdigestsketch.TDigestSketchUtils;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.SqlAggregator;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;

View File

@ -185,8 +185,8 @@ public class TimestampGroupByAggregationTest
groupBy
);
int groupByFieldNumber = ((GroupByQuery) helper.readQuery(groupBy)).getResultRowPositionLookup()
.getInt(groupByField);
int groupByFieldNumber = ((GroupByQuery) helper.readQuery(groupBy)).getResultRowSignature()
.indexOf(groupByField);
List<ResultRow> results = seq.toList();
Assert.assertEquals(36, results.size());

View File

@ -34,6 +34,7 @@ import org.apache.druid.query.aggregation.datasketches.hll.HllSketchMergeAggrega
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.SqlAggregator;
@ -42,7 +43,6 @@ import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;
@ -120,7 +120,7 @@ public abstract class HllSketchBaseSqlAggregator implements SqlAggregator
final String aggregatorName = finalizeAggregations ? Calcites.makePrefixedName(name, "a") : name;
if (columnArg.isDirectColumnAccess()
&& rowSignature.getColumnType(columnArg.getDirectColumn()) == ValueType.COMPLEX) {
&& rowSignature.getColumnType(columnArg.getDirectColumn()).orElse(null) == ValueType.COMPLEX) {
aggregatorFactory = new HllSketchMergeAggregatorFactory(
aggregatorName,
columnArg.getDirectColumn(),

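The two-line change above shows the recurring pattern for the sketch aggregators in this commit: getColumnType now returns Optional<ValueType> instead of a nullable value, so direct comparisons go through orElse(null). A sketch of the idiom (the column name is illustrative; rowSignature is the variable in scope in the hunk):

// An absent type compares as null, so columns of unknown type are not mistaken for COMPLEX.
boolean isComplex =
    rowSignature.getColumnType("hllSketchColumn").orElse(null) == ValueType.COMPLEX;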
View File

@ -31,12 +31,12 @@ import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.aggregation.datasketches.hll.HllSketchAggregatorFactory;
import org.apache.druid.query.aggregation.datasketches.hll.HllSketchToEstimatePostAggregator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DirectOperatorConversion;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.PostAggregatorVisitor;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;

View File

@ -30,12 +30,12 @@ import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.aggregation.datasketches.hll.HllSketchToEstimateWithBoundsPostAggregator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DirectOperatorConversion;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.PostAggregatorVisitor;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;

View File

@ -32,13 +32,13 @@ import org.apache.calcite.sql.type.SqlOperandCountRanges;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.aggregation.datasketches.hll.HllSketchUnionPostAggregator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.PostAggregatorVisitor;
import org.apache.druid.sql.calcite.expression.SqlOperatorConversion;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;

View File

@ -28,12 +28,12 @@ import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.aggregation.datasketches.hll.HllSketchToStringPostAggregator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DirectOperatorConversion;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.PostAggregatorVisitor;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;

View File

@ -38,6 +38,7 @@ import org.apache.druid.query.aggregation.datasketches.quantiles.DoublesSketchAg
import org.apache.druid.query.aggregation.datasketches.quantiles.DoublesSketchToQuantilePostAggregator;
import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.virtual.ExpressionVirtualColumn;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.SqlAggregator;
@ -45,7 +46,6 @@ import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;

View File

@ -31,13 +31,13 @@ import org.apache.calcite.sql.type.ReturnTypes;
import org.apache.calcite.sql.type.SqlOperandCountRanges;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.PostAggregatorVisitor;
import org.apache.druid.sql.calcite.expression.SqlOperatorConversion;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;

View File

@ -36,13 +36,13 @@ import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.datasketches.quantiles.DoublesSketchAggregatorFactory;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.SqlAggregator;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;

View File

@ -25,12 +25,12 @@ import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlOperator;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DirectOperatorConversion;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.PostAggregatorVisitor;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;

View File

@ -28,12 +28,12 @@ import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.aggregation.datasketches.quantiles.DoublesSketchToStringPostAggregator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DirectOperatorConversion;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.PostAggregatorVisitor;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;

View File

@ -33,6 +33,7 @@ import org.apache.druid.query.aggregation.datasketches.theta.SketchMergeAggregat
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.SqlAggregator;
@ -41,7 +42,6 @@ import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;
@ -98,7 +98,8 @@ public abstract class ThetaSketchBaseSqlAggregator implements SqlAggregator
final AggregatorFactory aggregatorFactory;
final String aggregatorName = finalizeAggregations ? Calcites.makePrefixedName(name, "a") : name;
if (columnArg.isDirectColumnAccess() && rowSignature.getColumnType(columnArg.getDirectColumn()) == ValueType.COMPLEX) {
if (columnArg.isDirectColumnAccess()
&& rowSignature.getColumnType(columnArg.getDirectColumn()).orElse(null) == ValueType.COMPLEX) {
aggregatorFactory = new SketchMergeAggregatorFactory(
aggregatorName,
columnArg.getDirectColumn(),

View File

@ -28,12 +28,12 @@ import org.apache.calcite.sql.type.SqlTypeFamily;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.aggregation.datasketches.theta.SketchEstimatePostAggregator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DirectOperatorConversion;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.PostAggregatorVisitor;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;

View File

@ -30,12 +30,12 @@ import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.aggregation.datasketches.theta.SketchEstimatePostAggregator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DirectOperatorConversion;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.PostAggregatorVisitor;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;

View File

@ -33,13 +33,13 @@ import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.aggregation.datasketches.theta.SketchSetPostAggregator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.PostAggregatorVisitor;
import org.apache.druid.sql.calcite.expression.SqlOperatorConversion;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;

View File

@ -38,6 +38,7 @@ import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.query.dimension.ExtractionDimensionSpec;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.virtual.ExpressionVirtualColumn;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
@ -47,7 +48,6 @@ import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;

View File

@ -34,13 +34,13 @@ import org.apache.druid.query.filter.BloomKFilter;
import org.apache.druid.query.filter.BloomKFilterHolder;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DirectOperatorConversion;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.io.IOException;

View File

@ -38,6 +38,7 @@ import org.apache.druid.query.aggregation.histogram.FixedBucketsHistogram;
import org.apache.druid.query.aggregation.histogram.FixedBucketsHistogramAggregatorFactory;
import org.apache.druid.query.aggregation.histogram.QuantilePostAggregator;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.virtual.ExpressionVirtualColumn;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.SqlAggregator;
@ -45,7 +46,6 @@ import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;

View File

@ -39,6 +39,7 @@ import org.apache.druid.query.aggregation.histogram.ApproximateHistogramAggregat
import org.apache.druid.query.aggregation.histogram.ApproximateHistogramFoldingAggregatorFactory;
import org.apache.druid.query.aggregation.histogram.QuantilePostAggregator;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.virtual.ExpressionVirtualColumn;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
@ -47,7 +48,6 @@ import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;
@ -175,7 +175,7 @@ public class QuantileSqlAggregator implements SqlAggregator
final List<VirtualColumn> virtualColumns = new ArrayList<>();
if (input.isDirectColumnAccess()) {
if (rowSignature.getColumnType(input.getDirectColumn()) == ValueType.COMPLEX) {
if (rowSignature.getColumnType(input.getDirectColumn()).orElse(null) == ValueType.COMPLEX) {
aggregatorFactory = new ApproximateHistogramFoldingAggregatorFactory(
histogramName,
input.getDirectColumn(),

View File

@ -36,6 +36,7 @@ import org.apache.druid.query.aggregation.variance.VarianceAggregatorFactory;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.SqlAggregator;
@ -44,7 +45,6 @@ import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;

View File

@ -23,16 +23,15 @@ import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.segment.RowAdapter;
import org.apache.druid.segment.column.ColumnHolder;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function;
@ -167,18 +166,15 @@ public class InlineDataSource implements DataSource
return true;
}
public Map<String, ValueType> getRowSignature()
public RowSignature getRowSignature()
{
final ImmutableMap.Builder<String, ValueType> retVal = ImmutableMap.builder();
final RowSignature.Builder builder = RowSignature.builder();
for (int i = 0; i < columnNames.size(); i++) {
final ValueType columnType = columnTypes.get(i);
if (columnType != null) {
retVal.put(columnNames.get(i), columnType);
}
builder.add(columnNames.get(i), columnTypes.get(i));
}
return retVal.build();
return builder.build();
}
public RowAdapter<Object[]> rowAdapter()

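One behavioral nuance in the hunk above: the old Map-based version silently dropped columns whose type was null, while the RowSignature version keeps every column and simply reports its type as unknown. A sketch of the difference (column names are illustrative):

// Old: the ImmutableMap builder skipped null types, so "unknownCol" vanished entirely.
// New: the column survives in the signature, with getColumnType reporting no type.
RowSignature signature = RowSignature.builder()
                                     .add("stringCol", ValueType.STRING)
                                     .add("unknownCol", null)
                                     .build();
signature.getColumnNames();            // ["stringCol", "unknownCol"]
signature.getColumnType("unknownCol"); // Optional.empty()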
View File

@ -28,6 +28,7 @@ import org.apache.druid.guice.annotations.ExtensionPoint;
import org.apache.druid.java.util.common.UOE;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.query.aggregation.MetricManipulationFn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.timeline.LogicalSegment;
import javax.annotation.Nullable;
@ -286,23 +287,23 @@ public abstract class QueryToolChest<ResultType, QueryType extends Query<ResultT
}
/**
* Returns a list of field names in the order that {@link #resultsAsArrays} would return them. The returned list will
* Returns a {@link RowSignature} for the arrays returned by {@link #resultsAsArrays}. The returned signature will
* be the same length as each array returned by {@link #resultsAsArrays}.
*
* @param query same query passed to {@link #resultsAsArrays}
*
* @return list of field names
* @return row signature
*
* @throws UnsupportedOperationException if this query type does not support returning results as arrays
*/
public List<String> resultArrayFields(QueryType query)
public RowSignature resultArraySignature(QueryType query)
{
throw new UOE("Query type '%s' does not support returning results as arrays", query.getType());
}
/**
* Converts a sequence of this query's ResultType into arrays. The array schema is given by
* {@link #resultArrayFields}. This functionality is useful because it allows higher-level processors to operate on
* Converts a sequence of this query's ResultType into arrays. The array signature is given by
* {@link #resultArraySignature}. This functionality is useful because it allows higher-level processors to operate on
* the results of any query in a consistent way. This is useful for the SQL layer and for any algorithm that might
* operate on the results of an inner query.
*

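A hedged sketch of how the two methods are meant to be used together, per the contract above (toolChest, query, and results are assumed to be in scope):

// The signature describes the layout of every Object[] produced by resultsAsArrays.
RowSignature signature = toolChest.resultArraySignature(query);
Sequence<Object[]> arrays = toolChest.resultsAsArrays(query, results);
// Each array has signature.size() elements, and signature.getColumnName(i) names
// (and, when known, signature.getColumnType types) the value at position i.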
View File

@ -126,11 +126,11 @@ public abstract class AggregatorFactory implements Cacheable
* For simple aggregators, the combining factory may be computed by simply creating a new factory that is the same as
* the current, except with its input column renamed to the same as the output column. For example, this aggregator:
*
* {"type": "longSum", "fieldName": "foo", "name": "bar"}
* {"type": "longSum", "fieldName": "foo", "name": "bar"}
*
* Would become:
*
* {"type": "longSum", "fieldName": "bar", "name": "bar"}
* {"type": "longSum", "fieldName": "bar", "name": "bar"}
*
* Sometimes, the type or other parameters of the combining aggregator will be different from the original aggregator.
* For example, the {@link CountAggregatorFactory} getCombiningFactory method will return a
@ -209,6 +209,22 @@ public abstract class AggregatorFactory implements Cacheable
*/
public abstract List<String> requiredFields();
/**
* Get the type name of the intermediate type for this aggregator. This is the same as the type returned by
* {@link #deserialize} and the type accepted by {@link #combine}. However, it is *not* necessarily the same type
* returned by {@link #finalizeComputation}.
*
* If the type is complex (i.e. not a simple, numeric {@link org.apache.druid.segment.column.ValueType}) then there
* must be a corresponding {@link org.apache.druid.segment.serde.ComplexMetricSerde} which was registered with
* {@link org.apache.druid.segment.serde.ComplexMetrics#registerSerde} using this type name.
*
* If you need a ValueType enum corresponding to this aggregator, a good way to do that is:
*
* <pre>
* Optional.ofNullable(GuavaUtils.getEnumIfPresent(ValueType.class, aggregator.getTypeName()))
* .orElse(ValueType.COMPLEX);
* </pre>
*/
public abstract String getTypeName();
/**

View File

@ -32,8 +32,6 @@ import com.google.common.collect.Ordering;
import com.google.common.primitives.Longs;
import it.unimi.dsi.fastutil.ints.IntArrayList;
import it.unimi.dsi.fastutil.ints.IntList;
import it.unimi.dsi.fastutil.objects.Object2IntMap;
import it.unimi.dsi.fastutil.objects.Object2IntOpenHashMap;
import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
@ -66,6 +64,7 @@ import org.apache.druid.segment.DimensionHandlerUtils;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.VirtualColumns;
import org.apache.druid.segment.column.ColumnHolder;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.joda.time.DateTime;
import org.joda.time.Interval;
@ -111,8 +110,7 @@ public class GroupByQuery extends BaseQuery<ResultRow>
private final boolean applyLimitPushDown;
private final Function<Sequence<ResultRow>, Sequence<ResultRow>> postProcessingFn;
private final List<String> resultRowOrder;
private final Object2IntMap<String> resultRowPositionLookup;
private final RowSignature resultRowSignature;
/**
* This is set when we know that all rows will have the same timestamp, and allows us to not actually store
@ -205,18 +203,16 @@ public class GroupByQuery extends BaseQuery<ResultRow>
postAggregatorSpecs == null ? ImmutableList.of() : postAggregatorSpecs
);
// Verify no duplicate names between dimensions, aggregators, and postAggregators.
// They will all end up in the same namespace in the returned Rows and we can't have them clobbering each other.
verifyOutputNames(this.dimensions, this.aggregatorSpecs, this.postAggregatorSpecs);
this.universalTimestamp = computeUniversalTimestamp();
this.resultRowOrder = computeResultRowOrder();
this.resultRowPositionLookup = computeResultRowOrderLookup();
this.resultRowSignature = computeResultRowSignature();
this.havingSpec = havingSpec;
this.limitSpec = LimitSpec.nullToNoopLimitSpec(limitSpec);
this.subtotalsSpec = verifySubtotalsSpec(subtotalsSpec, this.dimensions);
// Verify no duplicate names between dimensions, aggregators, and postAggregators.
// They will all end up in the same namespace in the returned Rows and we can't have them clobbering each other.
// We're not counting __time, even though that name is problematic. See: https://github.com/apache/druid/pull/3684
verifyOutputNames(this.dimensions, this.aggregatorSpecs, this.postAggregatorSpecs);
this.postProcessingFn = postProcessingFn != null ? postProcessingFn : makePostProcessingFn();
// Check if limit push down configuration is valid and check if limit push down will be applied
@ -310,9 +306,9 @@ public class GroupByQuery extends BaseQuery<ResultRow>
*
* @see ResultRow for documentation about the order that fields will be in
*/
public List<String> getResultRowOrder()
public RowSignature getResultRowSignature()
{
return resultRowOrder;
return resultRowSignature;
}
/**
@ -328,16 +324,7 @@ public class GroupByQuery extends BaseQuery<ResultRow>
*/
public int getResultRowSizeWithPostAggregators()
{
return resultRowOrder.size();
}
/**
* Returns a map that can be used to look up the position within ResultRows of certain field names. The map's
* {@link Object2IntMap#getInt(Object)} method will return -1 if the field is not found.
*/
public Object2IntMap<String> getResultRowPositionLookup()
{
return resultRowPositionLookup;
return resultRowSignature.size();
}
/**
@ -470,32 +457,18 @@ public class GroupByQuery extends BaseQuery<ResultRow>
return forcePushDown;
}
private Object2IntMap<String> computeResultRowOrderLookup()
private RowSignature computeResultRowSignature()
{
final Object2IntMap<String> indexes = new Object2IntOpenHashMap<>();
indexes.defaultReturnValue(-1);
int index = 0;
for (String columnName : resultRowOrder) {
indexes.put(columnName, index++);
}
return indexes;
}
private List<String> computeResultRowOrder()
{
final List<String> retVal = new ArrayList<>();
final RowSignature.Builder builder = RowSignature.builder();
if (universalTimestamp == null) {
retVal.add(ColumnHolder.TIME_COLUMN_NAME);
builder.addTimeColumn();
}
dimensions.stream().map(DimensionSpec::getOutputName).forEach(retVal::add);
aggregatorSpecs.stream().map(AggregatorFactory::getName).forEach(retVal::add);
postAggregatorSpecs.stream().map(PostAggregator::getName).forEach(retVal::add);
return retVal;
return builder.addDimensions(dimensions)
.addAggregators(aggregatorSpecs)
.addPostAggregators(postAggregatorSpecs)
.build();
}
private boolean determineApplyLimitPushDown()
@ -562,7 +535,7 @@ public class GroupByQuery extends BaseQuery<ResultRow>
int dimIndex = OrderByColumnSpec.getDimIndexForOrderBy(orderSpec, dimensions);
if (dimIndex >= 0) {
DimensionSpec dim = dimensions.get(dimIndex);
orderedFieldNumbers.add(resultRowPositionLookup.getInt(dim.getOutputName()));
orderedFieldNumbers.add(resultRowSignature.indexOf(dim.getOutputName()));
dimsInOrderBy.add(dimIndex);
needsReverseList.add(needsReverse);
final ValueType type = dimensions.get(dimIndex).getOutputType();
@ -573,7 +546,7 @@ public class GroupByQuery extends BaseQuery<ResultRow>
for (int i = 0; i < dimensions.size(); i++) {
if (!dimsInOrderBy.contains(i)) {
orderedFieldNumbers.add(resultRowPositionLookup.getInt(dimensions.get(i).getOutputName()));
orderedFieldNumbers.add(resultRowSignature.indexOf(dimensions.get(i).getOutputName()));
needsReverseList.add(false);
final ValueType type = dimensions.get(i).getOutputType();
dimensionTypes.add(type);

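Note that the signature also absorbs the old Object2IntMap position lookup: RowSignature.indexOf returns -1 for unknown columns (its backing map sets defaultReturnValue(-1), per the new class later in this diff), so call sites like the two above keep their "< 0" and ">= 0" checks unchanged. A sketch (the column name is illustrative):

int columnIndex = query.getResultRowSignature().indexOf("someOutputName");
if (columnIndex < 0) {
  // Not part of the result rows; same handling as the old getInt() == -1 case.
}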
View File

@ -20,10 +20,8 @@
package org.apache.druid.query.groupby;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import org.apache.druid.collections.NonBlockingPool;
import org.apache.druid.common.guava.GuavaUtils;
import org.apache.druid.data.input.MapBasedInputRow;
import org.apache.druid.data.input.MapBasedRow;
import org.apache.druid.data.input.Row;
@ -32,7 +30,6 @@ import org.apache.druid.data.input.impl.DimensionsSpec;
import org.apache.druid.data.input.impl.StringDimensionSchema;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.Pair;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.apache.druid.java.util.common.guava.Accumulator;
@ -42,8 +39,6 @@ import org.apache.druid.query.ResourceLimitExceededException;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.segment.RowBasedColumnSelectorFactory;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.incremental.IncrementalIndex;
import org.apache.druid.segment.incremental.IncrementalIndexSchema;
import org.apache.druid.segment.incremental.IndexSizeExceededException;
@ -53,7 +48,6 @@ import javax.annotation.Nullable;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
@ -227,39 +221,6 @@ public class GroupByQueryHelper
);
}
/**
* Returns types for fields that will appear in the Rows output from "query". Useful for feeding them into
* {@link RowBasedColumnSelectorFactory}.
*
* @param query groupBy query
*
* @return row types
*/
public static Map<String, ValueType> rowSignatureFor(final GroupByQuery query)
{
final ImmutableMap.Builder<String, ValueType> types = ImmutableMap.builder();
for (DimensionSpec dimensionSpec : query.getDimensions()) {
types.put(dimensionSpec.getOutputName(), dimensionSpec.getOutputType());
}
for (AggregatorFactory aggregatorFactory : query.getAggregatorSpecs()) {
final String typeName = aggregatorFactory.getTypeName();
final ValueType valueType;
if (typeName != null) {
valueType = GuavaUtils.getEnumIfPresent(ValueType.class, StringUtils.toUpperCase(typeName));
} else {
valueType = null;
}
if (valueType != null) {
types.put(aggregatorFactory.getName(), valueType);
}
}
// Don't include post-aggregators since we don't know what types they are.
return types.build();
}
public static ResultRow toResultRow(final GroupByQuery query, final Row row)
{
final ResultRow resultRow = ResultRow.create(query.getResultRowSizeWithPostAggregators());

View File

@ -62,6 +62,7 @@ import org.apache.druid.query.groupby.resource.GroupByQueryResource;
import org.apache.druid.query.groupby.strategy.GroupByStrategy;
import org.apache.druid.query.groupby.strategy.GroupByStrategySelector;
import org.apache.druid.segment.DimensionHandlerUtils;
import org.apache.druid.segment.column.RowSignature;
import org.joda.time.DateTime;
import java.io.IOException;
@ -684,9 +685,9 @@ public class GroupByQueryQueryToolChest extends QueryToolChest<ResultRow, GroupB
}
@Override
public List<String> resultArrayFields(final GroupByQuery query)
public RowSignature resultArraySignature(GroupByQuery query)
{
return query.getResultRowOrder();
return query.getResultRowSignature();
}
@Override

View File

@ -26,12 +26,12 @@ import org.apache.druid.data.input.Row;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.segment.column.RowSignature;
import org.joda.time.DateTime;
import javax.annotation.Nullable;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
@ -45,7 +45,7 @@ import java.util.Map;
* ResultRows may sometimes be created without space reserved for post-aggregators, in contexts where it is known
* that post-aggregators will not be computed.
*
* @see GroupByQuery#getResultRowOrder()
* @see GroupByQuery#getResultRowSignature()
* @see GroupByQuery#getResultRowHasTimestamp()
* @see GroupByQuery#getUniversalTimestamp()
* @see GroupByQuery#getResultRowDimensionStart()
@ -158,11 +158,11 @@ public final class ResultRow
*/
public Map<String, Object> toMap(final GroupByQuery query)
{
final List<String> resultRowOrder = query.getResultRowOrder();
final RowSignature signature = query.getResultRowSignature();
final Map<String, Object> map = new HashMap<>();
for (int i = query.getResultRowDimensionStart(); i < row.length; i++) {
final String columnName = resultRowOrder.get(i);
final String columnName = signature.getColumnName(i);
map.put(columnName, row[i]);
}

View File

@ -51,7 +51,6 @@ import org.apache.druid.query.filter.Filter;
import org.apache.druid.query.filter.ValueMatcher;
import org.apache.druid.query.groupby.GroupByQuery;
import org.apache.druid.query.groupby.GroupByQueryConfig;
import org.apache.druid.query.groupby.GroupByQueryHelper;
import org.apache.druid.query.groupby.ResultRow;
import org.apache.druid.query.groupby.epinephelinae.Grouper.BufferComparator;
import org.apache.druid.query.groupby.orderby.DefaultLimitSpec;
@ -365,7 +364,7 @@ public class RowBasedGrouperHelper
@Override
public Function<ResultRow, Object> columnFunction(final String columnName)
{
final int columnIndex = query.getResultRowPositionLookup().getInt(columnName);
final int columnIndex = query.getResultRowSignature().indexOf(columnName);
if (columnIndex < 0) {
return row -> null;
} else {
@ -377,7 +376,7 @@ public class RowBasedGrouperHelper
return RowBasedColumnSelectorFactory.create(
adapter,
supplier::get,
GroupByQueryHelper.rowSignatureFor(query),
query.getResultRowSignature(),
false
);
}
@ -545,7 +544,7 @@ public class RowBasedGrouperHelper
if (dimsToInclude != null) {
for (String dimension : dimsToInclude) {
final int dimIndex = query.getResultRowPositionLookup().getInt(dimension);
final int dimIndex = query.getResultRowSignature().indexOf(dimension);
if (dimIndex >= 0) {
dimsToIncludeBitSet.set(dimIndex - resultRowDimensionStart);
}

View File

@ -76,7 +76,7 @@ public class DimFilterHavingSpec implements HavingSpec
this.finalizers = new Int2ObjectArrayMap<>(query.getAggregatorSpecs().size());
for (AggregatorFactory factory : query.getAggregatorSpecs()) {
final int i = query.getResultRowPositionLookup().getInt(factory.getName());
final int i = query.getResultRowSignature().indexOf(factory.getName());
this.finalizers.put(i, factory::finalizeComputation);
}

View File

@ -74,7 +74,7 @@ public class DimensionSelectorHavingSpec implements HavingSpec
@Override
public void setQuery(GroupByQuery query)
{
columnNumber = query.getResultRowPositionLookup().getInt(dimension);
columnNumber = query.getResultRowSignature().indexOf(dimension);
}
@Override

View File

@ -66,7 +66,7 @@ public class EqualToHavingSpec implements HavingSpec
@Override
public void setQuery(GroupByQuery query)
{
columnNumber = query.getResultRowPositionLookup().getInt(aggregationName);
columnNumber = query.getResultRowSignature().indexOf(aggregationName);
aggregators = HavingSpecUtil.computeAggregatorsMap(query.getAggregatorSpecs());
}

View File

@ -66,7 +66,7 @@ public class GreaterThanHavingSpec implements HavingSpec
@Override
public void setQuery(GroupByQuery query)
{
columnNumber = query.getResultRowPositionLookup().getInt(aggregationName);
columnNumber = query.getResultRowSignature().indexOf(aggregationName);
aggregators = HavingSpecUtil.computeAggregatorsMap(query.getAggregatorSpecs());
}

View File

@ -64,7 +64,7 @@ public class LessThanHavingSpec implements HavingSpec
@Override
public void setQuery(GroupByQuery query)
{
columnNumber = query.getResultRowPositionLookup().getInt(aggregationName);
columnNumber = query.getResultRowSignature().indexOf(aggregationName);
aggregators = HavingSpecUtil.computeAggregatorsMap(query.getAggregatorSpecs());
}

View File

@ -29,7 +29,6 @@ import com.google.common.collect.Iterables;
import com.google.common.collect.Ordering;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import it.unimi.dsi.fastutil.objects.Object2IntMap;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.data.input.Rows;
import org.apache.druid.java.util.common.ISE;
@ -43,6 +42,7 @@ import org.apache.druid.query.groupby.GroupByQuery;
import org.apache.druid.query.groupby.ResultRow;
import org.apache.druid.query.ordering.StringComparator;
import org.apache.druid.query.ordering.StringComparators;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import javax.annotation.Nullable;
@ -180,7 +180,7 @@ public class DefaultLimitSpec implements LimitSpec
// Materialize the Comparator first for fast-fail error checking.
final Ordering<ResultRow> ordering = makeComparator(
query.getResultRowPositionLookup(),
query.getResultRowSignature(),
query.getResultRowHasTimestamp(),
query.getDimensions(),
query.getAggregatorSpecs(),
@ -222,7 +222,7 @@ public class DefaultLimitSpec implements LimitSpec
}
private Ordering<ResultRow> makeComparator(
Object2IntMap<String> rowOrderLookup,
RowSignature rowSignature,
boolean hasTimestamp,
List<DimensionSpec> dimensions,
List<AggregatorFactory> aggs,
@ -265,7 +265,7 @@ public class DefaultLimitSpec implements LimitSpec
String columnName = columnSpec.getDimension();
Ordering<ResultRow> nextOrdering = null;
final int columnIndex = rowOrderLookup.applyAsInt(columnName);
final int columnIndex = rowSignature.indexOf(columnName);
if (columnIndex >= 0) {
if (postAggregatorsMap.containsKey(columnName)) {

View File

@ -421,7 +421,7 @@ public class GroupByStrategyV2 implements GroupByStrategy
// Insert dummy dimension so all subtotals queries have ResultRows with the same shape.
// Use a field name that does not appear in the main query result, to ensure the result will be null.
String dimName = "_" + i;
while (query.getResultRowPositionLookup().getInt(dimName) >= 0) {
while (query.getResultRowSignature().indexOf(dimName) >= 0) {
dimName = "_" + dimName;
}
newDimensions.add(DefaultDimensionSpec.of(dimName));

View File

@ -19,10 +19,10 @@
package org.apache.druid.query.lookup;
import com.google.common.collect.ImmutableMap;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.segment.RowAdapter;
import org.apache.druid.segment.RowBasedSegment;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.join.lookup.LookupColumnSelectorFactory;
import org.apache.druid.timeline.SegmentId;
@ -37,10 +37,11 @@ import java.util.function.ToLongFunction;
*/
public class LookupSegment extends RowBasedSegment<Map.Entry<String, String>>
{
private static final Map<String, ValueType> ROW_SIGNATURE = ImmutableMap.of(
LookupColumnSelectorFactory.KEY_COLUMN, ValueType.STRING,
LookupColumnSelectorFactory.VALUE_COLUMN, ValueType.STRING
);
private static final RowSignature ROW_SIGNATURE =
RowSignature.builder()
.add(LookupColumnSelectorFactory.KEY_COLUMN, ValueType.STRING)
.add(LookupColumnSelectorFactory.VALUE_COLUMN, ValueType.STRING)
.build();
public LookupSegment(final String lookupName, final LookupExtractorFactory lookupExtractorFactory)
{

View File

@ -37,9 +37,10 @@ import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryToolChest;
import org.apache.druid.query.aggregation.MetricManipulationFn;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
@ -127,28 +128,44 @@ public class ScanQueryQueryToolChest extends QueryToolChest<ScanResultValue, Sca
}
@Override
public List<String> resultArrayFields(final ScanQuery query)
public RowSignature resultArraySignature(final ScanQuery query)
{
if (query.getColumns() == null || query.getColumns().isEmpty()) {
// Note: if no specific list of columns is provided, then since we can't predict what columns will come back, we
// unfortunately can't do array-based results. In this case, there is a major difference between standard and
// array-based results: the standard results will detect and return _all_ columns, whereas the array-based results
// will include none of them.
return Collections.emptyList();
} else if (query.withNonNullLegacy(scanQueryConfig).isLegacy()) {
final List<String> retVal = new ArrayList<>();
retVal.add(ScanQueryEngine.LEGACY_TIMESTAMP_KEY);
retVal.addAll(query.getColumns());
return retVal;
return RowSignature.empty();
} else {
return query.getColumns();
final RowSignature.Builder builder = RowSignature.builder();
if (query.withNonNullLegacy(scanQueryConfig).isLegacy()) {
builder.add(ScanQueryEngine.LEGACY_TIMESTAMP_KEY, null);
}
for (String columnName : query.getColumns()) {
// With the Scan query we only know the columnType for virtual columns. Let's report those, at least.
final ValueType columnType;
final VirtualColumn virtualColumn = query.getVirtualColumns().getVirtualColumn(columnName);
if (virtualColumn != null) {
columnType = virtualColumn.capabilities(columnName).getType();
} else {
// Unknown type. In the future, it would be nice to have a way to fill these in.
columnType = null;
}
builder.add(columnName, columnType);
}
return builder.build();
}
}
@Override
public Sequence<Object[]> resultsAsArrays(final ScanQuery query, final Sequence<ScanResultValue> resultSequence)
{
final List<String> fields = resultArrayFields(query);
final List<String> fields = resultArraySignature(query).getColumnNames();
final Function<?, Object[]> mapper;
switch (query.getResultFormat()) {

View File

@ -50,10 +50,9 @@ import org.apache.druid.query.cache.CacheKeyBuilder;
import org.apache.druid.query.context.ResponseContext;
import org.apache.druid.segment.RowAdapters;
import org.apache.druid.segment.RowBasedColumnSelectorFactory;
import org.apache.druid.segment.column.ColumnHolder;
import org.apache.druid.segment.column.RowSignature;
import org.joda.time.DateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
@ -219,7 +218,7 @@ public class TimeseriesQueryQueryToolChest extends QueryToolChest<Result<Timeser
RowBasedColumnSelectorFactory.create(
RowAdapters.standardRow(),
() -> new MapBasedRow(null, null),
null,
RowSignature.empty(),
false
)
);
@ -403,17 +402,13 @@ public class TimeseriesQueryQueryToolChest extends QueryToolChest<Result<Timeser
}
@Override
public List<String> resultArrayFields(TimeseriesQuery query)
public RowSignature resultArraySignature(TimeseriesQuery query)
{
final List<String> fields = new ArrayList<>(
1 + query.getAggregatorSpecs().size() + query.getPostAggregatorSpecs().size()
);
fields.add(ColumnHolder.TIME_COLUMN_NAME);
query.getAggregatorSpecs().stream().map(AggregatorFactory::getName).forEach(fields::add);
query.getPostAggregatorSpecs().stream().map(PostAggregator::getName).forEach(fields::add);
return fields;
return RowSignature.builder()
.addTimeColumn()
.addAggregators(query.getAggregatorSpecs())
.addPostAggregators(query.getPostAggregatorSpecs())
.build();
}
@Override
@ -422,7 +417,7 @@ public class TimeseriesQueryQueryToolChest extends QueryToolChest<Result<Timeser
final Sequence<Result<TimeseriesResultValue>> resultSequence
)
{
final List<String> fields = resultArrayFields(query);
final List<String> fields = resultArraySignature(query).getColumnNames();
return Sequences.map(
resultSequence,

View File

@ -48,10 +48,10 @@ import org.apache.druid.query.context.ResponseContext;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.segment.DimensionHandlerUtils;
import org.apache.druid.segment.column.ColumnHolder;
import org.apache.druid.segment.column.RowSignature;
import org.joda.time.DateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
@ -510,24 +510,20 @@ public class TopNQueryQueryToolChest extends QueryToolChest<Result<TopNResultVal
}
@Override
public List<String> resultArrayFields(TopNQuery query)
public RowSignature resultArraySignature(TopNQuery query)
{
final List<String> fields = new ArrayList<>(
2 + query.getAggregatorSpecs().size() + query.getPostAggregatorSpecs().size()
);
fields.add(ColumnHolder.TIME_COLUMN_NAME);
fields.add(query.getDimensionSpec().getOutputName());
query.getAggregatorSpecs().stream().map(AggregatorFactory::getName).forEach(fields::add);
query.getPostAggregatorSpecs().stream().map(PostAggregator::getName).forEach(fields::add);
return fields;
return RowSignature.builder()
.addTimeColumn()
.addDimensions(Collections.singletonList(query.getDimensionSpec()))
.addAggregators(query.getAggregatorSpecs())
.addPostAggregators(query.getPostAggregatorSpecs())
.build();
}
@Override
public Sequence<Object[]> resultsAsArrays(TopNQuery query, Sequence<Result<TopNResultValue>> resultSequence)
{
final List<String> fields = resultArrayFields(query);
final List<String> fields = resultArraySignature(query).getColumnNames();
return resultSequence.flatMap(
result -> {

View File

@ -19,8 +19,8 @@
package org.apache.druid.segment;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableMap;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.data.input.Rows;
import org.apache.druid.query.dimension.DimensionSpec;
@ -30,13 +30,13 @@ import org.apache.druid.query.monomorphicprocessing.RuntimeShapeInspector;
import org.apache.druid.segment.column.ColumnCapabilities;
import org.apache.druid.segment.column.ColumnCapabilitiesImpl;
import org.apache.druid.segment.column.ColumnHolder;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.data.IndexedInts;
import org.apache.druid.segment.data.RangeIndexedInts;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import java.util.function.Supplier;
@ -49,19 +49,19 @@ public class RowBasedColumnSelectorFactory<T> implements ColumnSelectorFactory
{
private final Supplier<T> supplier;
private final RowAdapter<T> adapter;
private final Map<String, ValueType> rowSignature;
private final RowSignature rowSignature;
private final boolean throwParseExceptions;
private RowBasedColumnSelectorFactory(
final Supplier<T> supplier,
final RowAdapter<T> adapter,
@Nullable final Map<String, ValueType> rowSignature,
final RowSignature rowSignature,
final boolean throwParseExceptions
)
{
this.supplier = supplier;
this.adapter = adapter;
this.rowSignature = rowSignature != null ? rowSignature : ImmutableMap.of();
this.rowSignature = Preconditions.checkNotNull(rowSignature, "rowSignature must be nonnull");
this.throwParseExceptions = throwParseExceptions;
}
@ -71,15 +71,17 @@ public class RowBasedColumnSelectorFactory<T> implements ColumnSelectorFactory
* @param adapter adapter for these row objects
* @param supplier supplier of row objects
* @param signature will be used for reporting available columns and their capabilities. Note that this
* factory will still allow creation of selectors on any field in the rows, even if it
* doesn't appear in "rowSignature".
* factory will still allow creation of selectors on any named field in the rows, even if
* it doesn't appear in "rowSignature". (It only needs to be accessible via
* {@link RowAdapter#columnFunction}.) As a result, you can achieve an untyped mode by
* passing in {@link RowSignature#empty()}.
* @param throwParseExceptions whether numeric selectors should throw parse exceptions or use a default/null value
* when their inputs are not actually numeric
*/
public static <RowType> RowBasedColumnSelectorFactory<RowType> create(
final RowAdapter<RowType> adapter,
final Supplier<RowType> supplier,
@Nullable final Map<String, ValueType> signature,
final RowSignature signature,
final boolean throwParseExceptions
)
{
@ -88,7 +90,7 @@ public class RowBasedColumnSelectorFactory<T> implements ColumnSelectorFactory
@Nullable
static ColumnCapabilities getColumnCapabilities(
final Map<String, ValueType> rowSignature,
final RowSignature rowSignature,
final String columnName
)
{
@ -96,7 +98,7 @@ public class RowBasedColumnSelectorFactory<T> implements ColumnSelectorFactory
// TIME_COLUMN_NAME is handled specially; override the provided rowSignature.
return new ColumnCapabilitiesImpl().setType(ValueType.LONG).setIsComplete(true);
} else {
final ValueType valueType = rowSignature.get(columnName);
final ValueType valueType = rowSignature.getColumnType(columnName).orElse(null);
// Do _not_ set isDictionaryEncoded or hasBitmapIndexes, because Row-based columns do not have those things.
// Do set hasMultipleValues, because we might return multiple values.

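A hedged usage sketch of create() in the untyped mode the javadoc above describes (the adapter and row supplier are the ones used elsewhere in this commit; the combination here is illustrative):

ColumnSelectorFactory factory = RowBasedColumnSelectorFactory.create(
    RowAdapters.standardRow(),          // adapter for standard Row objects
    () -> new MapBasedRow(null, null),  // supplier of the "current" row
    RowSignature.empty(),               // untyped: capabilities unknown, selectors still work by name
    false                               // numeric selectors use defaults rather than throwing
);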
View File

@ -23,13 +23,12 @@ import org.apache.druid.java.util.common.granularity.Granularity;
import org.apache.druid.query.BaseQuery;
import org.apache.druid.query.filter.Filter;
import org.apache.druid.query.filter.ValueMatcher;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.filter.BooleanValueMatcher;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import javax.annotation.Nullable;
import java.util.Map;
import java.util.function.ToLongFunction;
/**
@ -55,7 +54,7 @@ public class RowBasedCursor<RowType> implements Cursor
final VirtualColumns virtualColumns,
final Granularity gran,
final boolean descending,
final Map<String, ValueType> rowSignature
final RowSignature rowSignature
)
{
this.rowWalker = rowWalker;

View File

@ -20,13 +20,12 @@
package org.apache.druid.segment;
import com.google.common.base.Preconditions;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.timeline.SegmentId;
import org.joda.time.Interval;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Map;
/**
* A {@link Segment} that is based on a stream of objects.
@ -57,7 +56,7 @@ public class RowBasedSegment<RowType> extends AbstractSegment
final SegmentId segmentId,
final Iterable<RowType> rowIterable,
final RowAdapter<RowType> rowAdapter,
final Map<String, ValueType> rowSignature
final RowSignature rowSignature
)
{
this.segmentId = Preconditions.checkNotNull(segmentId, "segmentId");

View File

@ -29,7 +29,7 @@ import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.query.QueryMetrics;
import org.apache.druid.query.filter.Filter;
import org.apache.druid.segment.column.ColumnCapabilities;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.data.Indexed;
import org.apache.druid.segment.data.ListIndexed;
import org.joda.time.DateTime;
@ -39,7 +39,6 @@ import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
* A {@link StorageAdapter} that is based on a stream of objects. Generally created by a {@link RowBasedSegment}.
@ -50,12 +49,12 @@ public class RowBasedStorageAdapter<RowType> implements StorageAdapter
{
private final Iterable<RowType> rowIterable;
private final RowAdapter<RowType> rowAdapter;
private final Map<String, ValueType> rowSignature;
private final RowSignature rowSignature;
RowBasedStorageAdapter(
final Iterable<RowType> rowIterable,
final RowAdapter<RowType> rowAdapter,
final Map<String, ValueType> rowSignature
final RowSignature rowSignature
)
{
this.rowIterable = Preconditions.checkNotNull(rowIterable, "rowIterable");
@ -72,7 +71,7 @@ public class RowBasedStorageAdapter<RowType> implements StorageAdapter
@Override
public Indexed<String> getAvailableDimensions()
{
return new ListIndexed<>(new ArrayList<>(rowSignature.keySet()));
return new ListIndexed<>(new ArrayList<>(rowSignature.getColumnNames()));
}
@Override

View File

@ -0,0 +1,283 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.segment.column;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import it.unimi.dsi.fastutil.objects.Object2IntMap;
import it.unimi.dsi.fastutil.objects.Object2IntOpenHashMap;
import org.apache.druid.common.guava.GuavaUtils;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.Pair;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.dimension.DimensionSpec;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
/**
* Type signature for a row in a Druid datasource or query result. Rows have an ordering and every
* column has a defined type. This is a little bit of a fiction in the Druid world (where rows do not _actually_ have
* well defined types) but we do impose types for the SQL layer.
*
* @see org.apache.druid.query.QueryToolChest#resultArraySignature which returns signatures for query results
* @see org.apache.druid.query.InlineDataSource#getRowSignature which returns signatures for inline datasources
*/
public class RowSignature
{
private static final RowSignature EMPTY = new RowSignature(Collections.emptyList());
private final Map<String, ValueType> columnTypes = new HashMap<>();
private final Object2IntMap<String> columnPositions = new Object2IntOpenHashMap<>();
private final List<String> columnNames;
private RowSignature(final List<Pair<String, ValueType>> columnTypeList)
{
this.columnPositions.defaultReturnValue(-1);
final ImmutableList.Builder<String> columnNamesBuilder = ImmutableList.builder();
for (int i = 0; i < columnTypeList.size(); i++) {
final Pair<String, ValueType> pair = columnTypeList.get(i);
final ValueType existingType = columnTypes.get(pair.lhs);
if (columnTypes.containsKey(pair.lhs) && existingType != pair.rhs) {
// It's ok to add the same column twice as long as the type is consistent.
// Note: we need the containsKey because the existingType might be present, but null.
throw new IAE("Column[%s] has conflicting types [%s] and [%s]", pair.lhs, existingType, pair.rhs);
}
columnTypes.put(pair.lhs, pair.rhs);
columnPositions.putIfAbsent(pair.lhs, i); // keep the first position for duplicate names, per indexOf's contract
columnNamesBuilder.add(pair.lhs);
}
this.columnNames = columnNamesBuilder.build();
}
public static Builder builder()
{
return new Builder();
}
public static RowSignature empty()
{
return EMPTY;
}
/**
* Returns the name of the column at position {@code columnNumber}.
*
* @throws IndexOutOfBoundsException if columnNumber is not within our row length
*/
public String getColumnName(final int columnNumber)
{
return columnNames.get(columnNumber);
}
/**
* Returns the type of the column named {@code columnName}, or empty if the type is unknown or the column does
* not exist.
*/
public Optional<ValueType> getColumnType(final String columnName)
{
return Optional.ofNullable(columnTypes.get(columnName));
}
/**
* Returns the type of the column at position {@code columnNumber}, or empty if the type is unknown.
*
* @throws IndexOutOfBoundsException if columnNumber is not within our row length
*/
public Optional<ValueType> getColumnType(final int columnNumber)
{
return Optional.ofNullable(columnTypes.get(getColumnName(columnNumber)));
}
/**
* Returns a list of column names in the order they appear in this signature.
*/
public List<String> getColumnNames()
{
return columnNames;
}
/**
* Returns the number of columns in this signature.
*/
public int size()
{
return columnNames.size();
}
/**
* Returns whether this signature contains a named column.
*/
public boolean contains(final String columnName)
{
return columnPositions.containsKey(columnName);
}
/**
* Returns the first position of {@code columnName} in this row signature, or -1 if it does not appear.
*
* Note: the same column name may appear more than once in a signature; if it does, this method will return the
* first appearance.
*/
public int indexOf(final String columnName)
{
return columnPositions.applyAsInt(columnName);
}
@Override
public boolean equals(Object o)
{
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
RowSignature that = (RowSignature) o;
return columnTypes.equals(that.columnTypes) &&
columnNames.equals(that.columnNames);
}
@Override
public int hashCode()
{
return Objects.hash(columnTypes, columnNames);
}
@Override
public String toString()
{
final StringBuilder s = new StringBuilder("{");
for (int i = 0; i < columnNames.size(); i++) {
if (i > 0) {
s.append(", ");
}
final String columnName = columnNames.get(i);
s.append(columnName).append(":").append(columnTypes.get(columnName));
}
return s.append("}").toString();
}
public static class Builder
{
private final List<Pair<String, ValueType>> columnTypeList;
private Builder()
{
this.columnTypeList = new ArrayList<>();
}
/**
* Add a column to this signature.
*
* @param columnName name, must be nonnull
* @param columnType type, may be null if unknown
*/
public Builder add(final String columnName, @Nullable final ValueType columnType)
{
// Name must be nonnull, but type can be null (if the type is unknown)
Preconditions.checkNotNull(columnName, "'columnName' must be nonnull");
columnTypeList.add(Pair.of(columnName, columnType));
return this;
}
public Builder addAll(final RowSignature other)
{
for (String columnName : other.getColumnNames()) {
add(columnName, other.getColumnType(columnName).orElse(null));
}
return this;
}
public Builder addTimeColumn()
{
return add(ColumnHolder.TIME_COLUMN_NAME, ValueType.LONG);
}
public Builder addDimensions(final List<DimensionSpec> dimensions)
{
for (final DimensionSpec dimension : dimensions) {
add(dimension.getOutputName(), dimension.getOutputType());
}
return this;
}
public Builder addAggregators(final List<AggregatorFactory> aggregators)
{
for (final AggregatorFactory aggregator : aggregators) {
final ValueType type = GuavaUtils.getEnumIfPresent(
ValueType.class,
StringUtils.toUpperCase(aggregator.getTypeName())
);
// Use null instead of COMPLEX for nonnumeric types, since in that case, the type depends on whether or not
// the aggregator is finalized, and we don't know (a) if it will be finalized, or even (b) what the type would
// be if it were finalized. So null (i.e. unknown) is the proper thing to do.
//
// Another note: technically, we don't know what the finalized type will be even if the type here is numeric,
// but we're assuming that it doesn't change upon finalization. All builtin aggregators work this way.
if (type != null && type.isNumeric()) {
add(aggregator.getName(), type);
} else {
add(aggregator.getName(), null);
}
}
return this;
}
public Builder addPostAggregators(final List<PostAggregator> postAggregators)
{
for (final PostAggregator postAggregator : postAggregators) {
// PostAggregator#getName is marked nullable, but we require column names for everything.
final String name = Preconditions.checkNotNull(
postAggregator.getName(),
"postAggregators must have nonnull names"
);
// PostAggregators don't have known types; use null for the type.
add(name, null);
}
return this;
}
public RowSignature build()
{
return new RowSignature(columnTypeList);
}
}
}
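
Since this class replaces the ad-hoc Map<String, ValueType> idiom across the codebase, a minimal usage sketch may help. It is not part of the commit, uses only the methods defined above, and assumes the usual imports (RowSignature, ValueType, CountAggregatorFactory, ImmutableList); the "dim" and "mystery" columns are illustrative.

final RowSignature signature =
    RowSignature.builder()
                .addTimeColumn()                      // __time, typed LONG
                .add("dim", ValueType.STRING)
                .add("mystery", null)                 // null type = unknown
                .addAggregators(ImmutableList.of(new CountAggregatorFactory("rows")))
                .build();                             // "rows" resolves to LONG, since count is numeric

signature.getColumnNames();          // [__time, dim, mystery, rows]
signature.getColumnType("dim");      // Optional.of(STRING)
signature.getColumnType("mystery");  // Optional.empty(), i.e. unknown type
signature.indexOf("dim");            // 1
signature.contains("nope");          // false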

View File

@ -121,7 +121,8 @@ public enum ValueType
}
@JsonCreator
public static ValueType fromString(String name)
@Nullable
public static ValueType fromString(@Nullable String name)
{
if (name == null) {
return null;
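
The added null check relaxes the deserialization contract; a two-line caller sketch, assuming the remainder of the method is unchanged:

ValueType known = ValueType.fromString("LONG"); // ValueType.LONG, as before
ValueType absent = ValueType.fromString(null);  // now simply returns null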

View File

@ -69,6 +69,7 @@ import org.apache.druid.segment.VirtualColumns;
import org.apache.druid.segment.column.ColumnCapabilities;
import org.apache.druid.segment.column.ColumnCapabilitiesImpl;
import org.apache.druid.segment.column.ColumnHolder;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.serde.ComplexMetricExtractor;
import org.apache.druid.segment.serde.ComplexMetricSerde;
@ -134,7 +135,7 @@ public abstract class IncrementalIndex<AggregatorType> extends AbstractIndex imp
final RowBasedColumnSelectorFactory<InputRow> baseSelectorFactory = RowBasedColumnSelectorFactory.create(
RowAdapters.standardRow(),
in::get,
null,
RowSignature.empty(),
true
);

View File

@ -20,11 +20,9 @@
package org.apache.druid.segment.join.table;
import it.unimi.dsi.fastutil.ints.IntList;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.column.RowSignature;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
@ -40,15 +38,10 @@ public interface IndexedTable
Set<String> keyColumns();
/**
* Returns all columns of this table, including the key and non-key columns.
* Returns the signature of this table, which includes all key columns (as well as other columns that can be
* selected, but are not keys).
*/
List<String> allColumns();
/**
* Returns the signature of this table: a map where each key is a column from {@link #allColumns()} and each value
* is a type code.
*/
Map<String, ValueType> rowSignature();
RowSignature rowSignature();
/**
* Returns the number of rows in this table. It must not change over time, since it is used for things like algorithm
@ -58,13 +51,13 @@ public interface IndexedTable
/**
* Returns the index for a particular column. The provided column number must be that column's position in
* {@link #allColumns()}.
* {@link #rowSignature()}.
*/
Index columnIndex(int column);
/**
* Returns a reader for a particular column. The provided column number must be that column's position in
* {@link #allColumns()}.
* {@link #rowSignature()}.
*/
Reader columnReader(int column);
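
A helper sketch of the new contract: resolve the column number through the signature, then hand it to columnIndex or columnReader. The helper name and its missing-column policy are illustrative, but the pattern mirrors the call sites updated below.

// Illustrative only; not part of the interface.
static IndexedTable.Reader readerFor(final IndexedTable table, final String columnName)
{
  final int columnNumber = table.rowSignature().indexOf(columnName);
  if (columnNumber < 0) {
    throw new IllegalArgumentException("No such column: " + columnName);
  }
  return table.columnReader(columnNumber);
}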

View File

@ -46,7 +46,7 @@ public class IndexedTableColumnSelectorFactory implements ColumnSelectorFactory
@Nullable
static ColumnCapabilities columnCapabilities(final IndexedTable table, final String columnName)
{
final ValueType valueType = table.rowSignature().get(columnName);
final ValueType valueType = table.rowSignature().getColumnType(columnName).orElse(null);
if (valueType != null) {
final ColumnCapabilitiesImpl capabilities = new ColumnCapabilitiesImpl().setType(valueType);
@ -66,7 +66,7 @@ public class IndexedTableColumnSelectorFactory implements ColumnSelectorFactory
@Override
public DimensionSelector makeDimensionSelector(final DimensionSpec dimensionSpec)
{
final int columnNumber = table.allColumns().indexOf(dimensionSpec.getDimension());
final int columnNumber = table.rowSignature().indexOf(dimensionSpec.getDimension());
if (columnNumber < 0) {
return dimensionSpec.decorate(DimensionSelector.constant(null, dimensionSpec.getExtractionFn()));
@ -86,7 +86,7 @@ public class IndexedTableColumnSelectorFactory implements ColumnSelectorFactory
@Override
public ColumnValueSelector makeColumnValueSelector(final String columnName)
{
final int columnNumber = table.allColumns().indexOf(columnName);
final int columnNumber = table.rowSignature().indexOf(columnName);
if (columnNumber < 0) {
return NilColumnValueSelector.instance();

View File

@ -30,6 +30,7 @@ import it.unimi.dsi.fastutil.ints.IntRBTreeSet;
import it.unimi.dsi.fastutil.ints.IntSet;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.segment.BaseDoubleColumnValueSelector;
import org.apache.druid.segment.BaseFloatColumnValueSelector;
import org.apache.druid.segment.BaseLongColumnValueSelector;
@ -122,8 +123,12 @@ public class IndexedTableJoinMatcher implements JoinMatcher
throw new IAE("Cannot build hash-join matcher on non-key-based condition: %s", condition);
}
final int keyColumnNumber = table.allColumns().indexOf(condition.getRightColumn());
final ValueType keyColumnType = table.rowSignature().get(condition.getRightColumn());
final int keyColumnNumber = table.rowSignature().indexOf(condition.getRightColumn());
final ValueType keyColumnType =
table.rowSignature().getColumnType(condition.getRightColumn())
.orElseThrow(() -> new ISE("Encountered null type for column[%s]", condition.getRightColumn()));
final IndexedTable.Index index = table.columnIndex(keyColumnNumber);
return ColumnProcessors.makeProcessor(

View File

@ -44,13 +44,13 @@ public class IndexedTableJoinable implements Joinable
@Override
public List<String> getAvailableColumns()
{
return table.allColumns();
return table.rowSignature().getColumnNames();
}
@Override
public int getCardinality(String columnName)
{
if (table.allColumns().contains(columnName)) {
if (table.rowSignature().contains(columnName)) {
return table.numRows();
} else {
// NullDimensionSelector has cardinality = 1 (one null, nothing else).
@ -87,8 +87,8 @@ public class IndexedTableJoinable implements Joinable
String retrievalColumnName
)
{
int filterColumnPosition = table.allColumns().indexOf(searchColumnName);
int correlatedColumnPosition = table.allColumns().indexOf(retrievalColumnName);
int filterColumnPosition = table.rowSignature().indexOf(searchColumnName);
int correlatedColumnPosition = table.rowSignature().indexOf(retrievalColumnName);
if (filterColumnPosition < 0 || correlatedColumnPosition < 0) {
return ImmutableSet.of();

View File

@ -19,6 +19,7 @@
package org.apache.druid.segment.join.table;
import com.google.common.collect.ImmutableSet;
import it.unimi.dsi.fastutil.ints.IntArrayList;
import it.unimi.dsi.fastutil.ints.IntList;
import it.unimi.dsi.fastutil.ints.IntLists;
@ -26,6 +27,7 @@ import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.segment.DimensionHandlerUtils;
import org.apache.druid.segment.RowAdapter;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import java.util.ArrayList;
@ -45,48 +47,46 @@ public class RowBasedIndexedTable<RowType> implements IndexedTable
{
private final List<RowType> table;
private final List<Map<Object, IntList>> index;
private final Map<String, ValueType> rowSignature;
private final List<String> columns;
private final List<ValueType> columnTypes;
private final RowSignature rowSignature;
private final List<Function<RowType, Object>> columnFunctions;
private final Set<String> keyColumns;
public RowBasedIndexedTable(
final List<RowType> table,
final RowAdapter<RowType> rowAdapter,
final Map<String, ValueType> rowSignature,
final RowSignature rowSignature,
final Set<String> keyColumns
)
{
this.table = table;
this.rowSignature = rowSignature;
this.columns = rowSignature.keySet().stream().sorted().collect(Collectors.toList());
this.columnTypes = new ArrayList<>(columns.size());
this.columnFunctions = columns.stream().map(rowAdapter::columnFunction).collect(Collectors.toList());
this.columnFunctions =
rowSignature.getColumnNames().stream().map(rowAdapter::columnFunction).collect(Collectors.toList());
this.keyColumns = keyColumns;
if (new HashSet<>(keyColumns).size() != keyColumns.size()) {
throw new ISE("keyColumns[%s] must not contain duplicates", keyColumns);
}
if (!rowSignature.keySet().containsAll(keyColumns)) {
if (!ImmutableSet.copyOf(rowSignature.getColumnNames()).containsAll(keyColumns)) {
throw new ISE(
"keyColumns[%s] must all be contained in rowSignature[%s]",
String.join(", ", keyColumns),
String.join(", ", rowSignature.keySet())
rowSignature
);
}
index = new ArrayList<>(columns.size());
index = new ArrayList<>(rowSignature.size());
for (int i = 0; i < columns.size(); i++) {
final String column = columns.get(i);
for (int i = 0; i < rowSignature.size(); i++) {
final String column = rowSignature.getColumnName(i);
final Map<Object, IntList> m;
final ValueType columnType = rowSignature.get(column);
columnTypes.add(columnType);
if (keyColumns.contains(column)) {
final ValueType columnType =
rowSignature.getColumnType(column)
.orElseThrow(() -> new ISE("Key column[%s] must have nonnull type", column));
final Function<RowType, Object> columnFunction = columnFunctions.get(i);
m = new HashMap<>();
@ -114,13 +114,7 @@ public class RowBasedIndexedTable<RowType> implements IndexedTable
}
@Override
public List<String> allColumns()
{
return columns;
}
@Override
public Map<String, ValueType> rowSignature()
public RowSignature rowSignature()
{
return rowSignature;
}
@ -134,7 +128,9 @@ public class RowBasedIndexedTable<RowType> implements IndexedTable
throw new IAE("Column[%d] is not a key column", column);
}
final ValueType columnType = columnTypes.get(column);
final ValueType columnType =
rowSignature.getColumnType(column)
.orElseThrow(() -> new ISE("Key column[%s] must have nonnull type", column));
return key -> {
final Object convertedKey = DimensionHandlerUtils.convertObjectToType(key, columnType, false);
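
One behavioral consequence of this rewrite, visible in the test updates further down: the table used to expose columns in sorted name order (the old rowSignature.keySet().stream().sorted() above), whereas it now preserves the declaration order of the signature. A sketch using the countries signature from JoinTestHelper:

final RowSignature countries =
    RowSignature.builder()
                .add("countryNumber", ValueType.LONG)
                .add("countryIsoCode", ValueType.STRING)
                .add("countryName", ValueType.STRING)
                .build();

countries.getColumnNames();
// old order (sorted):      [countryIsoCode, countryName, countryNumber]
// new order (declaration): [countryNumber, countryIsoCode, countryName]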

View File

@ -28,6 +28,7 @@ import org.apache.druid.query.filter.ValueMatcher;
import org.apache.druid.segment.RowAdapters;
import org.apache.druid.segment.RowBasedColumnSelectorFactory;
import org.apache.druid.segment.column.ColumnHolder;
import org.apache.druid.segment.column.RowSignature;
import org.joda.time.DateTime;
import javax.annotation.Nullable;
@ -58,7 +59,7 @@ public class Transformer
RowBasedColumnSelectorFactory.create(
RowAdapters.standardRow(),
rowSupplierForValueMatcher::get,
null,
RowSignature.empty(),
false
)
);

View File

@ -28,6 +28,7 @@ import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.segment.RowAdapter;
import org.apache.druid.segment.TestHelper;
import org.apache.druid.segment.column.ColumnHolder;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.junit.Assert;
import org.junit.Rule;
@ -114,12 +115,12 @@ public class InlineDataSourceTest
public void test_getRowSignature()
{
Assert.assertEquals(
ImmutableMap.of(
ColumnHolder.TIME_COLUMN_NAME, ValueType.LONG,
"str", ValueType.STRING,
"double", ValueType.DOUBLE,
"complex", ValueType.COMPLEX
),
RowSignature.builder()
.add(ColumnHolder.TIME_COLUMN_NAME, ValueType.LONG)
.add("str", ValueType.STRING)
.add("double", ValueType.DOUBLE)
.add("complex", ValueType.COMPLEX)
.build(),
listDataSource.getRowSignature()
);
}

View File

@ -64,6 +64,7 @@ import org.apache.druid.query.groupby.orderby.DefaultLimitSpec;
import org.apache.druid.query.groupby.orderby.OrderByColumnSpec;
import org.apache.druid.query.ordering.StringComparators;
import org.apache.druid.segment.TestHelper;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.virtual.ExpressionVirtualColumn;
import org.junit.Assert;
@ -666,7 +667,7 @@ public class GroupByQueryQueryToolChestTest
}
@Test
public void testResultArrayFieldsAllGran()
public void testResultArraySignatureAllGran()
{
final GroupByQuery query = new GroupByQuery.Builder()
.setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
@ -678,13 +679,19 @@ public class GroupByQueryQueryToolChestTest
.build();
Assert.assertEquals(
ImmutableList.of("dim", "rows", "index", "uniques", "const"),
new GroupByQueryQueryToolChest(null, null).resultArrayFields(query)
RowSignature.builder()
.add("dim", ValueType.STRING)
.add("rows", ValueType.LONG)
.add("index", ValueType.DOUBLE)
.add("uniques", null)
.add("const", null)
.build(),
new GroupByQueryQueryToolChest(null, null).resultArraySignature(query)
);
}
@Test
public void testResultArrayFieldsDayGran()
public void testResultArraySignatureDayGran()
{
final GroupByQuery query = new GroupByQuery.Builder()
.setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
@ -696,8 +703,15 @@ public class GroupByQueryQueryToolChestTest
.build();
Assert.assertEquals(
ImmutableList.of("__time", "dim", "rows", "index", "uniques", "const"),
new GroupByQueryQueryToolChest(null, null).resultArrayFields(query)
RowSignature.builder()
.addTimeColumn()
.add("dim", ValueType.STRING)
.add("rows", ValueType.LONG)
.add("index", ValueType.DOUBLE)
.add("uniques", null)
.add("const", null)
.build(),
new GroupByQueryQueryToolChest(null, null).resultArraySignature(query)
);
}

View File

@ -3300,8 +3300,8 @@ public class GroupByQueryRunnerTest extends InitializedNullHandlingTest
makeRow(baseQuery, "2011-04-01", "alias", "travel", "rows", 2L, "idx", 243L)
);
final int idxPosition = baseQuery.getResultRowPositionLookup().getInt("idx");
final int rowsPosition = baseQuery.getResultRowPositionLookup().getInt("rows");
final int idxPosition = baseQuery.getResultRowSignature().indexOf("idx");
final int rowsPosition = baseQuery.getResultRowSignature().indexOf("rows");
Comparator<ResultRow> idxComparator = Comparator.comparing(row -> ((Number) row.get(idxPosition)).floatValue());
Comparator<ResultRow> rowsComparator = Comparator.comparing(row -> ((Number) row.get(rowsPosition)).floatValue());
@ -5356,7 +5356,7 @@ public class GroupByQueryRunnerTest extends InitializedNullHandlingTest
public boolean eval(ResultRow row)
{
final String field = "idx_subpostagg";
final int p = query.getResultRowPositionLookup().getInt(field);
final int p = query.getResultRowSignature().indexOf(field);
return (Rows.objectToNumber(field, row.get(p), true).floatValue() < 3800);
}
}
@ -5650,7 +5650,7 @@ public class GroupByQueryRunnerTest extends InitializedNullHandlingTest
public boolean eval(ResultRow row)
{
final String field = "idx_subpostagg";
final int p = query.getResultRowPositionLookup().getInt(field);
final int p = query.getResultRowSignature().indexOf(field);
return (Rows.objectToNumber(field, row.get(p), true).floatValue() < 3800);
}
}

View File

@ -74,7 +74,7 @@ public class GroupByQueryRunnerTestHelper
}
for (int i = 0; i < vals.length; i += 2) {
final int position = query.getResultRowPositionLookup().getInt(vals[i].toString());
final int position = query.getResultRowSignature().indexOf(vals[i].toString());
row.set(position, vals[i + 1]);
}
@ -100,7 +100,7 @@ public class GroupByQueryRunnerTestHelper
ResultRow row = ResultRow.create(query.getResultRowSizeWithPostAggregators());
for (int i = 0; i < columnNames.length; i++) {
if (i != timeIndex) {
final int position = query.getResultRowPositionLookup().getInt(columnNames[i]);
final int position = query.getResultRowSignature().indexOf(columnNames[i]);
row.set(position, value[i]);
} else if (query.getResultRowHasTimestamp()) {
row.set(0, new DateTime(value[i], ISOChronology.getInstanceUTC()).getMillis());

View File

@ -136,8 +136,7 @@ public class GroupByQueryTest
.withIgnoredFields(
"applyLimitPushDown",
"postProcessingFn",
"resultRowOrder",
"resultRowPositionLookup",
"resultRowSignature",
"universalTimestamp"
)
.verify();

View File

@ -28,6 +28,7 @@ import org.apache.druid.query.DefaultGenericQueryMetricsFactory;
import org.apache.druid.query.Druids;
import org.apache.druid.query.QueryToolChestTestHelper;
import org.apache.druid.query.spec.MultipleIntervalSegmentSpec;
import org.apache.druid.segment.column.RowSignature;
import org.junit.Assert;
import org.junit.Test;
@ -51,7 +52,7 @@ public class ScanQueryQueryToolChestTest
);
@Test
public void test_resultArrayFields_columnsNotSpecified()
public void test_resultArraySignature_columnsNotSpecified()
{
final ScanQuery scanQuery =
Druids.newScanQueryBuilder()
@ -59,11 +60,11 @@ public class ScanQueryQueryToolChestTest
.intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("2000/3000"))))
.build();
Assert.assertEquals(ImmutableList.of(), toolChest.resultArrayFields(scanQuery));
Assert.assertEquals(RowSignature.empty(), toolChest.resultArraySignature(scanQuery));
}
@Test
public void test_resultArrayFields_columnsNotSpecifiedLegacyMode()
public void test_resultArraySignature_columnsNotSpecifiedLegacyMode()
{
final ScanQuery scanQuery =
Druids.newScanQueryBuilder()
@ -72,11 +73,11 @@ public class ScanQueryQueryToolChestTest
.legacy(true)
.build();
Assert.assertEquals(ImmutableList.of(), toolChest.resultArrayFields(scanQuery));
Assert.assertEquals(RowSignature.empty(), toolChest.resultArraySignature(scanQuery));
}
@Test
public void test_resultArrayFields_columnsSpecified()
public void test_resultArraySignature_columnsSpecified()
{
final ScanQuery scanQuery =
Druids.newScanQueryBuilder()
@ -85,11 +86,14 @@ public class ScanQueryQueryToolChestTest
.columns("foo", "bar")
.build();
Assert.assertEquals(ImmutableList.of("foo", "bar"), toolChest.resultArrayFields(scanQuery));
Assert.assertEquals(
RowSignature.builder().add("foo", null).add("bar", null).build(),
toolChest.resultArraySignature(scanQuery)
);
}
@Test
public void test_resultArrayFields_columnsSpecifiedLegacyMode()
public void test_resultArraySignature_columnsSpecifiedLegacyMode()
{
final ScanQuery scanQuery =
Druids.newScanQueryBuilder()
@ -99,7 +103,10 @@ public class ScanQueryQueryToolChestTest
.legacy(true)
.build();
Assert.assertEquals(ImmutableList.of("timestamp", "foo", "bar"), toolChest.resultArrayFields(scanQuery));
Assert.assertEquals(
RowSignature.builder().add("timestamp", null).add("foo", null).add("bar", null).build(),
toolChest.resultArraySignature(scanQuery)
);
}
@Test

View File

@ -22,6 +22,7 @@ package org.apache.druid.query.timeseries;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.granularity.Granularities;
@ -42,7 +43,10 @@ import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator;
import org.apache.druid.query.spec.MultipleIntervalSegmentSpec;
import org.apache.druid.segment.TestHelper;
import org.apache.druid.segment.VirtualColumns;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@ -54,6 +58,12 @@ public class TimeseriesQueryQueryToolChestTest
{
private static final TimeseriesQueryQueryToolChest TOOL_CHEST = new TimeseriesQueryQueryToolChest(null);
@BeforeClass
public static void setUpClass()
{
NullHandling.initializeForTests();
}
@Parameterized.Parameters(name = "descending={0}")
public static Iterable<Object[]> constructorFeeder()
{
@ -354,7 +364,7 @@ public class TimeseriesQueryQueryToolChestTest
}
@Test
public void testResultArrayFields()
public void testResultArraySignature()
{
final TimeseriesQuery query =
Druids.newTimeseriesQueryBuilder()
@ -367,8 +377,14 @@ public class TimeseriesQueryQueryToolChestTest
.build();
Assert.assertEquals(
ImmutableList.of("__time", "rows", "index", "uniques", "const"),
TOOL_CHEST.resultArrayFields(query)
RowSignature.builder()
.addTimeColumn()
.add("rows", ValueType.LONG)
.add("index", ValueType.DOUBLE)
.add("uniques", null)
.add("const", null)
.build(),
TOOL_CHEST.resultArraySignature(query)
);
}

View File

@ -61,6 +61,7 @@ import org.apache.druid.segment.IncrementalIndexSegment;
import org.apache.druid.segment.TestHelper;
import org.apache.druid.segment.TestIndex;
import org.apache.druid.segment.VirtualColumns;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.testing.InitializedNullHandlingTest;
import org.apache.druid.timeline.SegmentId;
@ -298,7 +299,7 @@ public class TopNQueryQueryToolChestTest extends InitializedNullHandlingTest
}
@Test
public void testResultArrayFields()
public void testResultArraySignature()
{
final TopNQuery query = new TopNQueryBuilder()
.dataSource(QueryRunnerTestHelper.DATA_SOURCE)
@ -312,8 +313,15 @@ public class TopNQueryQueryToolChestTest extends InitializedNullHandlingTest
.build();
Assert.assertEquals(
ImmutableList.of("__time", "dim", "rows", "index", "uniques", "const"),
new TopNQueryQueryToolChest(null, null).resultArrayFields(query)
RowSignature.builder()
.addTimeColumn()
.add("dim", ValueType.STRING)
.add("rows", ValueType.LONG)
.add("index", ValueType.DOUBLE)
.add("uniques", null)
.add("const", null)
.build(),
new TopNQueryQueryToolChest(null, null).resultArraySignature(query)
);
}

View File

@ -20,7 +20,6 @@
package org.apache.druid.segment;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.druid.data.input.InputRow;
@ -29,7 +28,7 @@ import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.segment.column.ColumnCapabilities;
import org.apache.druid.segment.column.ColumnConfig;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.incremental.IncrementalIndex;
import org.apache.druid.segment.incremental.IncrementalIndexSchema;
import org.apache.druid.segment.incremental.IndexSizeExceededException;
@ -42,10 +41,8 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
/**
* Helps tests make segments.
@ -198,7 +195,7 @@ public class IndexBuilder
SegmentId.dummy("IndexBuilder"),
rows,
RowAdapters.standardRow(),
ImmutableMap.of()
RowSignature.empty()
);
}
@ -206,22 +203,17 @@ public class IndexBuilder
{
// Determine row signature by building an mmapped index first.
try (final QueryableIndex index = buildMMappedIndex()) {
final Map<String, ValueType> rowSignature =
index.getColumnNames().stream().collect(
Collectors.toMap(
column -> column,
column -> {
final ColumnCapabilities capabilities = index.getColumnHolder(column).getCapabilities();
return capabilities.getType();
}
)
);
final RowSignature.Builder rowSignatureBuilder = RowSignature.builder();
for (final String columnName : index.getColumnNames()) {
final ColumnCapabilities capabilities = index.getColumnHolder(columnName).getCapabilities();
rowSignatureBuilder.add(columnName, capabilities.getType());
}
return new RowBasedSegment<>(
SegmentId.dummy("IndexBuilder"),
rows,
RowAdapters.standardRow(),
rowSignature
rowSignatureBuilder.build()
);
}
}

View File

@ -20,7 +20,6 @@
package org.apache.druid.segment;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.common.guava.GuavaUtils;
@ -34,6 +33,7 @@ import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.filter.SelectorDimFilter;
import org.apache.druid.segment.column.ColumnCapabilities;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.virtual.ExpressionVirtualColumn;
import org.joda.time.Duration;
@ -48,7 +48,6 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.function.ToLongFunction;
@ -56,14 +55,14 @@ import java.util.stream.Collectors;
public class RowBasedStorageAdapterTest
{
private static final Map<String, ValueType> ROW_SIGNATURE =
ImmutableMap.<String, ValueType>builder()
.put(ValueType.FLOAT.name(), ValueType.FLOAT)
.put(ValueType.DOUBLE.name(), ValueType.DOUBLE)
.put(ValueType.LONG.name(), ValueType.LONG)
.put(ValueType.STRING.name(), ValueType.STRING)
.put(ValueType.COMPLEX.name(), ValueType.COMPLEX)
.build();
private static final RowSignature ROW_SIGNATURE =
RowSignature.builder()
.add(ValueType.FLOAT.name(), ValueType.FLOAT)
.add(ValueType.DOUBLE.name(), ValueType.DOUBLE)
.add(ValueType.LONG.name(), ValueType.LONG)
.add(ValueType.STRING.name(), ValueType.STRING)
.add(ValueType.COMPLEX.name(), ValueType.COMPLEX)
.build();
private static final List<Function<Cursor, Supplier<Object>>> READ_STRING =
ImmutableList.of(
@ -101,7 +100,7 @@ public class RowBasedStorageAdapterTest
// Read all the types as all the other types.
for (final String valueTypeName : ROW_SIGNATURE.keySet()) {
for (final String valueTypeName : ROW_SIGNATURE.getColumnNames()) {
PROCESSORS.put(
StringUtils.format("%s-float", StringUtils.toLowerCase(valueTypeName)),
cursor -> {
@ -223,7 +222,7 @@ public class RowBasedStorageAdapterTest
// Sort them for comparison purposes.
Assert.assertEquals(
ROW_SIGNATURE.keySet().stream().sorted().collect(Collectors.toList()),
ROW_SIGNATURE.getColumnNames().stream().sorted().collect(Collectors.toList()),
Lists.newArrayList(adapter.getAvailableDimensions()).stream().sorted().collect(Collectors.toList())
);
}
@ -245,7 +244,7 @@ public class RowBasedStorageAdapterTest
final RowBasedStorageAdapter<Integer> adapter = createIntAdapter(0, 1, 2);
// Row based adapters don't know cardinality (they don't walk their Iterables until makeCursors is called).
for (String column : ROW_SIGNATURE.keySet()) {
for (String column : ROW_SIGNATURE.getColumnNames()) {
Assert.assertEquals(Integer.MAX_VALUE, adapter.getDimensionCardinality(column));
}
}
@ -286,7 +285,7 @@ public class RowBasedStorageAdapterTest
// Row based adapters don't know min/max values, so they always return null.
// Test both known and unknown columns.
final List<String> columns =
ImmutableList.<String>builder().addAll(ROW_SIGNATURE.keySet()).add("unknown", "__time").build();
ImmutableList.<String>builder().addAll(ROW_SIGNATURE.getColumnNames()).add("unknown", "__time").build();
for (String column : columns) {
Assert.assertNull(column, adapter.getMinValue(column));
@ -301,7 +300,7 @@ public class RowBasedStorageAdapterTest
// Row based adapters don't know min/max values, so they always return null.
// Test both known and unknown columns.
final List<String> columns =
ImmutableList.<String>builder().addAll(ROW_SIGNATURE.keySet()).add("unknown", "__time").build();
ImmutableList.<String>builder().addAll(ROW_SIGNATURE.getColumnNames()).add("unknown", "__time").build();
for (String column : columns) {
Assert.assertNull(column, adapter.getMaxValue(column));
@ -314,7 +313,7 @@ public class RowBasedStorageAdapterTest
final RowBasedStorageAdapter<Integer> adapter = createIntAdapter(0, 1, 2);
// Row based adapters don't know cardinality (they don't walk their Iterables until makeCursors is called).
for (String column : ROW_SIGNATURE.keySet()) {
for (String column : ROW_SIGNATURE.getColumnNames()) {
Assert.assertEquals(Integer.MAX_VALUE, adapter.getDimensionCardinality(column));
}
}
@ -393,7 +392,7 @@ public class RowBasedStorageAdapterTest
{
final RowBasedStorageAdapter<Integer> adapter = createIntAdapter(0, 1, 2);
for (String columnName : ROW_SIGNATURE.keySet()) {
for (String columnName : ROW_SIGNATURE.getColumnNames()) {
Assert.assertEquals(columnName, ValueType.valueOf(columnName).name(), adapter.getColumnTypeName(columnName));
}
}

View File

@ -70,6 +70,7 @@ import org.apache.druid.segment.RowBasedColumnSelectorFactory;
import org.apache.druid.segment.RowBasedStorageAdapter;
import org.apache.druid.segment.StorageAdapter;
import org.apache.druid.segment.VirtualColumns;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.data.BitmapSerdeFactory;
import org.apache.druid.segment.data.ConciseBitmapSerdeFactory;
@ -647,10 +648,10 @@ public abstract class BaseFilterTest extends InitializedNullHandlingTest
final String selectColumn
)
{
// Generate rowType
final Map<String, ValueType> rowSignature = new HashMap<>();
// Generate rowSignature
final RowSignature.Builder rowSignatureBuilder = RowSignature.builder();
for (String columnName : Iterables.concat(adapter.getAvailableDimensions(), adapter.getAvailableMetrics())) {
rowSignature.put(columnName, adapter.getColumnCapabilities(columnName).getType());
rowSignatureBuilder.add(columnName, adapter.getColumnCapabilities(columnName).getType());
}
// Perform test
@ -660,7 +661,7 @@ public abstract class BaseFilterTest extends InitializedNullHandlingTest
RowBasedColumnSelectorFactory.create(
RowAdapters.standardRow(),
rowSupplier::get,
rowSignature,
rowSignatureBuilder.build(),
false
)
)

View File

@ -68,9 +68,9 @@ public class HashJoinSegmentStorageAdapterTest extends BaseHashJoinSegmentStorag
"namespace",
"page",
"delta",
"c1.countryNumber",
"c1.countryIsoCode",
"c1.countryName",
"c1.countryNumber"
"c1.countryName"
),
Lists.newArrayList(makeFactToCountrySegment().getAvailableDimensions().iterator())
);

View File

@ -22,7 +22,6 @@ package org.apache.druid.segment.join;
import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import org.apache.druid.common.config.NullHandling;
@ -53,6 +52,7 @@ import org.apache.druid.segment.DimensionSelector;
import org.apache.druid.segment.IndexBuilder;
import org.apache.druid.segment.RowAdapter;
import org.apache.druid.segment.TestHelper;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.incremental.IncrementalIndexSchema;
import org.apache.druid.segment.join.table.RowBasedIndexedTable;
@ -87,18 +87,19 @@ public class JoinTestHelper
new StringDimensionSchema("page"),
new LongDimensionSchema("delta")
);
private static final Map<String, ValueType> COUNTRIES_SIGNATURE =
ImmutableMap.<String, ValueType>builder()
.put("countryNumber", ValueType.LONG)
.put("countryIsoCode", ValueType.STRING)
.put("countryName", ValueType.STRING)
.build();
private static final Map<String, ValueType> REGIONS_SIGNATURE =
ImmutableMap.<String, ValueType>builder()
.put("regionIsoCode", ValueType.STRING)
.put("countryIsoCode", ValueType.STRING)
.put("regionName", ValueType.STRING)
.build();
private static final RowSignature COUNTRIES_SIGNATURE =
RowSignature.builder()
.add("countryNumber", ValueType.LONG)
.add("countryIsoCode", ValueType.STRING)
.add("countryName", ValueType.STRING)
.build();
private static final RowSignature REGIONS_SIGNATURE =
RowSignature.builder()
.add("regionIsoCode", ValueType.STRING)
.add("countryIsoCode", ValueType.STRING)
.add("regionName", ValueType.STRING)
.build();
private static final ColumnProcessorFactory<Supplier<Object>> SIMPLE_READER =
new ColumnProcessorFactory<Supplier<Object>>()
@ -140,7 +141,7 @@ public class JoinTestHelper
}
};
private static RowAdapter<Map<String, Object>> createMapRowAdapter(final Map<String, ValueType> signature)
private static RowAdapter<Map<String, Object>> createMapRowAdapter(final RowSignature signature)
{
return new RowAdapter<Map<String, Object>>()
{
@ -153,7 +154,7 @@ public class JoinTestHelper
@Override
public Function<Map<String, Object>, Object> columnFunction(String columnName)
{
final ValueType columnType = signature.get(columnName);
final ValueType columnType = signature.getColumnType(columnName).orElse(null);
if (columnType == null) {
return row -> row.get(columnName);

View File

@ -75,7 +75,7 @@ public class JoinableClauseTest
public void test_getAvailableColumnsPrefixed()
{
Assert.assertEquals(
ImmutableList.of("j.countryIsoCode", "j.countryName", "j.countryNumber"),
ImmutableList.of("j.countryNumber", "j.countryIsoCode", "j.countryName"),
clause.getAvailableColumnsPrefixed()
);
}

View File

@ -86,7 +86,7 @@ public class IndexedTableJoinableTest
public void test_getAvailableColumns()
{
final IndexedTableJoinable joinable = new IndexedTableJoinable(indexedTable);
Assert.assertEquals(ImmutableList.of("long", "str"), joinable.getAvailableColumns());
Assert.assertEquals(ImmutableList.of("str", "long"), joinable.getAvailableColumns());
}
@Test

View File

@ -20,9 +20,9 @@
package org.apache.druid.segment.join.table;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.join.JoinTestHelper;
import org.junit.Assert;
@ -38,10 +38,10 @@ import java.util.Map;
public class RowBasedIndexedTableTest
{
// Indexes of fields within the "countries" and "regions" tables.
private static final int INDEX_COUNTRIES_COUNTRY_ISO_CODE = 0;
private static final int INDEX_COUNTRIES_COUNTRY_NAME = 1;
private static final int INDEX_COUNTRIES_COUNTRY_NUMBER = 2;
private static final int INDEX_REGIONS_REGION_ISO_CODE = 1;
private static final int INDEX_COUNTRIES_COUNTRY_NUMBER = 0;
private static final int INDEX_COUNTRIES_COUNTRY_ISO_CODE = 1;
private static final int INDEX_COUNTRIES_COUNTRY_NAME = 2;
private static final int INDEX_REGIONS_REGION_ISO_CODE = 0;
@Rule
public ExpectedException expectedException = ExpectedException.none();
@ -68,24 +68,15 @@ public class RowBasedIndexedTableTest
Assert.assertEquals(ImmutableSet.of("countryNumber", "countryIsoCode"), countriesTable.keyColumns());
}
@Test
public void test_allColumns_countries()
{
Assert.assertEquals(
ImmutableList.of("countryIsoCode", "countryName", "countryNumber"),
countriesTable.allColumns()
);
}
@Test
public void test_rowSignature_countries()
{
Assert.assertEquals(
ImmutableMap.<String, ValueType>builder()
.put("countryIsoCode", ValueType.STRING)
.put("countryName", ValueType.STRING)
.put("countryNumber", ValueType.LONG)
.build(),
RowSignature.builder()
.add("countryNumber", ValueType.LONG)
.add("countryIsoCode", ValueType.STRING)
.add("countryName", ValueType.STRING)
.build(),
countriesTable.rowSignature()
);
}
@ -125,7 +116,7 @@ public class RowBasedIndexedTableTest
@Test
public void test_columnIndex_countriesCountryName()
{
expectedException.expectMessage("Column[1] is not a key column");
expectedException.expectMessage("Column[2] is not a key column");
countriesTable.columnIndex(INDEX_COUNTRIES_COUNTRY_NAME);
}

View File

@ -48,6 +48,7 @@ import org.apache.druid.segment.RowAdapters;
import org.apache.druid.segment.RowBasedColumnSelectorFactory;
import org.apache.druid.segment.column.ColumnCapabilities;
import org.apache.druid.segment.column.ColumnCapabilitiesImpl;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.data.IndexedInts;
import org.apache.druid.testing.InitializedNullHandlingTest;
@ -201,7 +202,7 @@ public class ExpressionVirtualColumnTest extends InitializedNullHandlingTest
private static final ColumnSelectorFactory COLUMN_SELECTOR_FACTORY = RowBasedColumnSelectorFactory.create(
RowAdapters.standardRow(),
CURRENT_ROW::get,
null,
RowSignature.empty(),
false
);
@ -742,7 +743,7 @@ public class ExpressionVirtualColumnTest extends InitializedNullHandlingTest
RowBasedColumnSelectorFactory.create(
RowAdapters.standardRow(),
CURRENT_ROW::get,
ImmutableMap.of("x", ValueType.LONG),
RowSignature.builder().add("x", ValueType.LONG).build(),
false
),
Parser.parse(SCALE_LONG.getExpression(), TestExprMacroTable.INSTANCE)
@ -765,7 +766,7 @@ public class ExpressionVirtualColumnTest extends InitializedNullHandlingTest
RowBasedColumnSelectorFactory.create(
RowAdapters.standardRow(),
CURRENT_ROW::get,
ImmutableMap.of("x", ValueType.DOUBLE),
RowSignature.builder().add("x", ValueType.DOUBLE).build(),
false
),
Parser.parse(SCALE_FLOAT.getExpression(), TestExprMacroTable.INSTANCE)
@ -788,7 +789,7 @@ public class ExpressionVirtualColumnTest extends InitializedNullHandlingTest
RowBasedColumnSelectorFactory.create(
RowAdapters.standardRow(),
CURRENT_ROW::get,
ImmutableMap.of("x", ValueType.FLOAT),
RowSignature.builder().add("x", ValueType.FLOAT).build(),
false
),
Parser.parse(SCALE_FLOAT.getExpression(), TestExprMacroTable.INSTANCE)

View File

@ -2348,7 +2348,7 @@ public class CachingClusteredClientTest
}
for (Map.Entry<String, Object> entry : rowMap.entrySet()) {
final int position = query.getResultRowPositionLookup().getInt(entry.getKey());
final int position = query.getResultRowSignature().indexOf(entry.getKey());
row.set(position, entry.getValue());
}

View File

@ -75,7 +75,7 @@ public class InlineJoinableFactoryTest
final Joinable joinable = factory.build(inlineDataSource, makeCondition("x == \"j.long\"")).get();
Assert.assertThat(joinable, CoreMatchers.instanceOf(IndexedTableJoinable.class));
Assert.assertEquals(ImmutableList.of("long", "str"), joinable.getAvailableColumns());
Assert.assertEquals(ImmutableList.of("str", "long"), joinable.getAvailableColumns());
Assert.assertEquals(2, joinable.getCardinality("str"));
Assert.assertEquals(2, joinable.getCardinality("long"));
}

View File

@ -30,9 +30,9 @@ import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.filter.AndDimFilter;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.filtration.Filtration;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;

View File

@ -22,11 +22,11 @@ package org.apache.druid.sql.calcite.aggregation;
import org.apache.calcite.rel.core.AggregateCall;
import org.apache.calcite.rel.core.Project;
import org.apache.calcite.rex.RexNode;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;
@ -90,7 +90,7 @@ public class Aggregations
if (druidExpression.isSimpleExtraction() &&
(!druidExpression.isDirectColumnAccess()
|| rowSignature.getColumnType(druidExpression.getDirectColumn()) == ValueType.STRING)) {
|| rowSignature.getColumnType(druidExpression.getDirectColumn()).orElse(null) == ValueType.STRING)) {
// Aggregators are unable to implicitly cast strings to numbers.
// So remove the simple extraction, which forces the expression to be used instead of the direct column access.
return druidExpression.map(simpleExtraction -> null, Function.identity());

View File

@ -23,9 +23,9 @@ import org.apache.calcite.rel.core.AggregateCall;
import org.apache.calcite.rel.core.Project;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.sql.SqlAggFunction;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;

View File

@ -40,6 +40,7 @@ import org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFact
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.SqlAggregator;
@ -48,7 +49,6 @@ import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;
@ -97,7 +97,8 @@ public class ApproxCountDistinctSqlAggregator implements SqlAggregator
final AggregatorFactory aggregatorFactory;
final String aggregatorName = finalizeAggregations ? Calcites.makePrefixedName(name, "a") : name;
if (arg.isDirectColumnAccess() && rowSignature.getColumnType(arg.getDirectColumn()) == ValueType.COMPLEX) {
if (arg.isDirectColumnAccess()
&& rowSignature.getColumnType(arg.getDirectColumn()).orElse(null) == ValueType.COMPLEX) {
aggregatorFactory = new HyperUniquesAggregatorFactory(aggregatorName, arg.getDirectColumn(), false, true);
} else {
final SqlTypeName sqlTypeName = rexNode.getType().getSqlTypeName();

View File

@ -30,6 +30,7 @@ import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.Aggregations;
import org.apache.druid.sql.calcite.aggregation.SqlAggregator;
@ -37,7 +38,6 @@ import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;

View File

@ -48,6 +48,7 @@ import org.apache.druid.query.aggregation.last.LongLastAggregatorFactory;
import org.apache.druid.query.aggregation.last.StringLastAggregatorFactory;
import org.apache.druid.query.aggregation.post.FinalizingFieldAccessPostAggregator;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.SqlAggregator;
@ -56,7 +57,6 @@ import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.Collections;

View File

@ -27,6 +27,7 @@ import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.Aggregations;
@ -35,7 +36,6 @@ import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.ArrayList;

View File

@ -24,13 +24,13 @@ import org.apache.calcite.rel.core.AggregateCall;
import org.apache.calcite.rel.core.Project;
import org.apache.calcite.rex.RexBuilder;
import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.aggregation.Aggregation;
import org.apache.druid.sql.calcite.aggregation.Aggregations;
import org.apache.druid.sql.calcite.aggregation.SqlAggregator;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;

View File

@ -26,9 +26,9 @@ import org.apache.calcite.sql.SqlOperator;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;

View File

@ -24,8 +24,8 @@ import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.stream.Collectors;

View File

@ -21,8 +21,8 @@ package org.apache.druid.sql.calcite.expression;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;

View File

@ -53,6 +53,7 @@ import org.apache.druid.query.ordering.StringComparator;
import org.apache.druid.query.ordering.StringComparators;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.ColumnHolder;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.sql.calcite.filtration.BoundRefKey;
import org.apache.druid.sql.calcite.filtration.Bounds;
@ -60,7 +61,7 @@ import org.apache.druid.sql.calcite.filtration.Filtration;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import org.apache.druid.sql.calcite.table.RowSignatures;
import org.joda.time.Interval;
import javax.annotation.Nullable;
@ -94,7 +95,7 @@ public class Expressions
{
if (project == null) {
// I don't think the factory impl matters here.
return RexInputRef.of(fieldNumber, rowSignature.getRelDataType(new JavaTypeFactoryImpl()));
return RexInputRef.of(fieldNumber, RowSignatures.toRelDataType(rowSignature, new JavaTypeFactoryImpl()));
} else {
return project.getChildExps().get(fieldNumber);
}
@ -132,9 +133,9 @@ public class Expressions
/**
* Translate a list of Calcite {@code RexNode} to Druid expressions, with the possibility of having postagg operands.
*
* @param plannerContext SQL planner context
* @param rowSignature signature of the rows to be extracted from
* @param rexNodes list of Calcite expressions meant to be applied on top of the rows
* @param plannerContext SQL planner context
* @param rowSignature signature of the rows to be extracted from
* @param rexNodes list of Calcite expressions meant to be applied on top of the rows
* @param postAggregatorVisitor visitor that manages postagg names and tracks postaggs that were created
* by the translation
*
@ -218,7 +219,7 @@ public class Expressions
{
// Translate field references.
final RexInputRef ref = (RexInputRef) rexNode;
final String columnName = rowSignature.getRowOrder().get(ref.getIndex());
final String columnName = rowSignature.getColumnName(ref.getIndex());
if (columnName == null) {
throw new ISE("WTF?! Expression referred to nonexistent index[%d]", ref.getIndex());
}

View File

@ -50,10 +50,10 @@ import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.planner.Calcites;
import org.apache.druid.sql.calcite.planner.DruidTypeSystem;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.Arrays;
@ -171,12 +171,12 @@ public class OperatorConversions
/**
* Translate a Calcite {@code RexNode} to a Druid PostAggregator
*
* @param plannerContext        SQL planner context
* @param rowSignature          signature of the rows to be extracted from
* @param rexNode               expression meant to be applied on top of the rows
* @param postAggregatorVisitor visitor that manages postagg names and tracks postaggs that were created
*                              by the translation
*
* @return rexNode referring to fields in rowOrder, or null if not possible
*/
@Nullable
@ -191,7 +191,7 @@ public class OperatorConversions
if (kind == SqlKind.INPUT_REF) {
// Translate field references.
final RexInputRef ref = (RexInputRef) rexNode;
final String columnName = rowSignature.getRowOrder().get(ref.getIndex());
final String columnName = rowSignature.getColumnName(ref.getIndex());
if (columnName == null) {
throw new ISE("WTF?! PostAgg referred to nonexistent index[%d]", ref.getIndex());
}
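
OperatorConversions keeps the same null check after the positional lookup as Expressions does. If the relocated RowSignature backs getColumnName(int) with a plain list (an assumption; the hunks do not show its implementation), an out-of-range index would throw rather than return null, so an explicit range check is the more direct guard. A hypothetical helper sketching that variant; resolveColumn and the size() accessor are illustrative names, not code from this commit:

```java
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.segment.column.RowSignature;

public class FieldRefs
{
  /**
   * Hypothetical helper mirroring the INPUT_REF branches above: range-check
   * the Calcite field index before asking the signature for the column name,
   * instead of testing the returned name for null afterward.
   */
  public static String resolveColumn(final RowSignature signature, final int fieldIndex)
  {
    if (fieldIndex < 0 || fieldIndex >= signature.size()) {
      throw new ISE("Expression referred to nonexistent index[%d]", fieldIndex);
    }
    return signature.getColumnName(fieldIndex);
  }
}
```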

View File

@ -23,9 +23,9 @@ import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;

View File

@ -23,8 +23,8 @@ import com.google.common.collect.Iterables;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
public class UnaryFunctionOperatorConversion implements SqlOperatorConversion
{

View File

@ -23,8 +23,8 @@ import com.google.common.collect.Iterables;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
public class UnaryPrefixOperatorConversion implements SqlOperatorConversion
{

View File

@ -23,8 +23,8 @@ import com.google.common.collect.Iterables;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
public class UnarySuffixOperatorConversion implements SqlOperatorConversion
{

View File

@ -22,11 +22,11 @@ package org.apache.druid.sql.calcite.expression.builtin;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.SqlOperatorConversion;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
public class ArrayConstructorOperatorConversion implements SqlOperatorConversion
{

View File

@ -30,12 +30,12 @@ import org.apache.druid.math.expr.ExprEval;
import org.apache.druid.math.expr.Parser;
import org.apache.druid.query.filter.AndDimFilter;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.Arrays;

View File

@ -26,11 +26,11 @@ import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.SqlTypeFamily;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.SqlOperatorConversion;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
public class ArrayLengthOperatorConversion implements SqlOperatorConversion
{

View File

@ -26,11 +26,11 @@ import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.SqlTypeFamily;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.SqlOperatorConversion;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
public class ArrayOffsetOfOperatorConversion implements SqlOperatorConversion
{

View File

@ -26,11 +26,11 @@ import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.SqlTypeFamily;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.SqlOperatorConversion;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
public class ArrayOffsetOperatorConversion implements SqlOperatorConversion
{

View File

@ -26,11 +26,11 @@ import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.SqlTypeFamily;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.SqlOperatorConversion;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
public class ArrayOrdinalOfOperatorConversion implements SqlOperatorConversion
{

View File

@ -26,11 +26,11 @@ import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.SqlTypeFamily;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.SqlOperatorConversion;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
public class ArrayOrdinalOperatorConversion implements SqlOperatorConversion
{

View File

@ -31,12 +31,12 @@ import org.apache.druid.math.expr.ExprEval;
import org.apache.druid.math.expr.Parser;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.query.filter.InDimFilter;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import org.apache.druid.sql.calcite.table.RowSignature;
import javax.annotation.Nullable;
import java.util.List;

View File

@ -26,11 +26,11 @@ import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.SqlTypeFamily;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
import org.apache.druid.sql.calcite.expression.SqlOperatorConversion;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
public class ArrayToStringOperatorConversion implements SqlOperatorConversion
{

Some files were not shown because too many files have changed in this diff.