diff --git a/lucene/ivy-versions.properties b/lucene/ivy-versions.properties index 70e33a22492..172b95bb100 100644 --- a/lucene/ivy-versions.properties +++ b/lucene/ivy-versions.properties @@ -17,8 +17,6 @@ com.carrotsearch.randomizedtesting.version = 2.5.0 /com.cybozu.labs/langdetect = 1.1-20120112 /com.drewnoakes/metadata-extractor = 2.8.1 -/com.facebook.presto/presto-parser = 0.122 - com.fasterxml.jackson.core.version = 2.5.4 /com.fasterxml.jackson.core/jackson-annotations = ${com.fasterxml.jackson.core.version} /com.fasterxml.jackson.core/jackson-core = ${com.fasterxml.jackson.core.version} @@ -33,7 +31,7 @@ com.google.inject.guice.version = 3.0 /com.google.inject.extensions/guice-servlet = ${com.google.inject.guice.version} /com.google.inject/guice = ${com.google.inject.guice.version} -/com.google.protobuf/protobuf-java = 2.5.0 +/com.google.protobuf/protobuf-java = 3.1.0 /com.googlecode.juniversalchardet/juniversalchardet = 1.0.3 /com.googlecode.mp4parser/isoparser = 1.1.18 /com.healthmarketscience.jackcess/jackcess = 2.1.3 @@ -72,7 +70,6 @@ com.sun.jersey.version = 1.9 /dom4j/dom4j = 1.6.1 /hsqldb/hsqldb = 1.8.0.10 /info.ganglia.gmetric4j/gmetric4j = 1.0.7 -/io.airlift/slice = 0.10 io.dropwizard.metrics.version = 3.1.2 /io.dropwizard.metrics/metrics-core = ${io.dropwizard.metrics.version} @@ -97,6 +94,7 @@ io.netty.netty-all.version = 4.0.36.Final /mecab/mecab-naist-jdic = 0.6.3b-20111013 /net.arnx/jsonic = 1.2.7 /net.bytebuddy/byte-buddy = 1.6.2 +/net.hydromatic/eigenbase-properties = 1.1.5 /net.sf.ehcache/ehcache-core = 2.4.4 /net.sf.saxon/Saxon-HE = 9.6.0-2 /net.sourceforge.argparse4j/argparse4j = 0.4.3 @@ -106,6 +104,14 @@ io.netty.netty-all.version = 4.0.36.Final /org.apache.ant/ant = 1.8.2 /org.apache.avro/avro = 1.7.5 + +org.apache.calcite.avatica.version = 1.9.0 +/org.apache.calcite.avatica/avatica-core = ${org.apache.calcite.avatica.version} + +org.apache.calcite.version = 1.11.0 +/org.apache.calcite/calcite-core = ${org.apache.calcite.version} 
+/org.apache.calcite/calcite-linq4j = ${org.apache.calcite.version} + /org.apache.commons/commons-compress = 1.11 /org.apache.commons/commons-exec = 1.3 /org.apache.commons/commons-math3 = 3.4.1 @@ -243,6 +249,10 @@ org.codehaus.jackson.version = 1.9.13 /org.codehaus.jackson/jackson-jaxrs = ${org.codehaus.jackson.version} /org.codehaus.jackson/jackson-mapper-asl = ${org.codehaus.jackson.version} +org.codehaus.janino.version = 2.7.6 +/org.codehaus.janino/commons-compiler = ${org.codehaus.janino.version} +/org.codehaus.janino/janino = ${org.codehaus.janino.version} + /org.codehaus.woodstox/stax2-api = 3.1.4 /org.codehaus.woodstox/woodstox-core-asl = 4.4.1 /org.easymock/easymock = 3.0 diff --git a/solr/core/ivy.xml b/solr/core/ivy.xml index 074e35f201f..dc58ad5c17a 100644 --- a/solr/core/ivy.xml +++ b/solr/core/ivy.xml @@ -60,8 +60,8 @@ - - + + @@ -142,10 +142,15 @@ - - - + + + + + + + + diff --git a/solr/core/src/java/org/apache/solr/handler/SQLHandler.java b/solr/core/src/java/org/apache/solr/handler/SQLHandler.java index 3074d9bb59d..d65ea560ab5 100644 --- a/solr/core/src/java/org/apache/solr/handler/SQLHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/SQLHandler.java @@ -18,65 +18,35 @@ package org.apache.solr.handler; import java.io.IOException; import java.lang.invoke.MethodHandles; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; +import java.util.HashMap; import java.util.Iterator; -import java.util.Locale; -import java.util.Set; +import java.util.List; +import java.util.Map; +import java.util.Properties; -import com.facebook.presto.sql.tree.*; -import com.google.common.base.Strings; -import com.google.common.collect.Iterables; - -import org.apache.solr.client.solrj.impl.CloudSolrClient; +import org.apache.calcite.config.Lex; import org.apache.solr.client.solrj.io.Tuple; -import org.apache.solr.client.solrj.io.comp.ComparatorOrder; -import 
org.apache.solr.client.solrj.io.comp.FieldComparator; -import org.apache.solr.client.solrj.io.comp.MultipleFieldComparator; import org.apache.solr.client.solrj.io.comp.StreamComparator; -import org.apache.solr.client.solrj.io.eq.FieldEqualitor; -import org.apache.solr.client.solrj.io.eq.MultipleFieldEqualitor; -import org.apache.solr.client.solrj.io.eq.StreamEqualitor; -import org.apache.solr.client.solrj.io.stream.CloudSolrStream; -import org.apache.solr.client.solrj.io.stream.FacetStream; -import org.apache.solr.client.solrj.io.stream.ParallelStream; -import org.apache.solr.client.solrj.io.stream.RankStream; -import org.apache.solr.client.solrj.io.stream.RollupStream; -import org.apache.solr.client.solrj.io.stream.SelectStream; -import org.apache.solr.client.solrj.io.stream.StatsStream; -import org.apache.solr.client.solrj.io.stream.StreamContext; -import org.apache.solr.client.solrj.io.stream.TupleStream; import org.apache.solr.client.solrj.io.stream.ExceptionStream; -import org.apache.solr.client.solrj.io.stream.UniqueStream; -import org.apache.solr.client.solrj.io.stream.expr.Explanation; -import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation; -import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; -import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType; -import org.apache.solr.client.solrj.io.stream.metrics.*; +import org.apache.solr.client.solrj.io.stream.JDBCStream; +import org.apache.solr.client.solrj.io.stream.TupleStream; import org.apache.solr.common.SolrException; -import org.apache.solr.common.cloud.ZkStateReader; -import org.apache.solr.common.params.CommonParams; import org.apache.solr.common.params.ModifiableSolrParams; -import org.apache.solr.common.params.SolrParams; import org.apache.solr.core.CoreContainer; import org.apache.solr.core.SolrCore; +import org.apache.solr.handler.sql.CalciteSolrDriver; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; 
import org.apache.solr.security.AuthorizationContext; import org.apache.solr.security.PermissionNameProvider; import org.apache.solr.util.plugin.SolrCoreAware; - -import java.util.List; -import java.util.Map; -import java.util.HashMap; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.facebook.presto.sql.parser.SqlParser; - -public class SQLHandler extends RequestHandlerBase implements SolrCoreAware , PermissionNameProvider { +public class SQLHandler extends RequestHandlerBase implements SolrCoreAware, PermissionNameProvider { private static final Logger logger = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @@ -103,19 +73,15 @@ public class SQLHandler extends RequestHandlerBase implements SolrCoreAware , Pe } public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { - SolrParams params = req.getParams(); - params = adjustParams(params); - req.setParams(params); + ModifiableSolrParams params = new ModifiableSolrParams(req.getParams()); String sql = params.get("stmt"); - int numWorkers = params.getInt("numWorkers", 1); - String workerCollection = params.get("workerCollection", defaultWorkerCollection); - String workerZkhost = params.get("workerZkhost", defaultZkhost); - String mode = params.get("aggregationMode", "map_reduce"); - StreamContext context = new StreamContext(); - - // JDBC driver requires metadata from the SQLHandler. Default to false since this adds a new Metadata stream. 
- boolean includeMetadata = params.getBool("includeMetadata", false); + // Set defaults for parameters + params.set("numWorkers", params.getInt("numWorkers", 1)); + params.set("workerCollection", params.get("workerCollection", defaultWorkerCollection)); + params.set("workerZkhost", params.get("workerZkhost", defaultZkhost)); + params.set("aggregationMode", params.get("aggregationMode", "facet")); + TupleStream tupleStream = null; try { if(!isCloud) { @@ -126,30 +92,39 @@ public class SQLHandler extends RequestHandlerBase implements SolrCoreAware , Pe throw new Exception("stmt parameter cannot be null"); } - context.setSolrClientCache(StreamHandler.clientCache); + String url = CalciteSolrDriver.CONNECT_STRING_PREFIX; - TupleStream tupleStream = SQLTupleStreamParser.parse(sql, - numWorkers, - workerCollection, - workerZkhost, - AggregationMode.getMode(mode), - includeMetadata, - context); + Properties properties = new Properties(); + // Add all query parameters + Iterator parameterNamesIterator = params.getParameterNamesIterator(); + while(parameterNamesIterator.hasNext()) { + String param = parameterNamesIterator.next(); + properties.setProperty(param, params.get(param)); + } - rsp.add("result-set", new StreamHandler.TimerStream(new ExceptionStream(tupleStream))); + // Set these last to ensure that they are set properly + properties.setProperty("lex", Lex.MYSQL.toString()); + properties.setProperty("zk", defaultZkhost); + + String driverClass = CalciteSolrDriver.class.getCanonicalName(); + + // JDBC driver requires metadata from the SQLHandler. Default to false since this adds a new Metadata stream. + boolean includeMetadata = params.getBool("includeMetadata", false); + tupleStream = new SqlHandlerStream(url, sql, null, properties, driverClass, includeMetadata); + + tupleStream = new StreamHandler.TimerStream(new ExceptionStream(tupleStream)); + + rsp.add("result-set", tupleStream); } catch(Exception e) { //Catch the SQL parsing and query transformation exceptions. 
+ if(tupleStream != null) { + tupleStream.close(); + } SolrException.log(logger, e); rsp.add("result-set", new StreamHandler.DummyErrorStream(e)); } } - private SolrParams adjustParams(SolrParams params) { - ModifiableSolrParams adjustedParams = new ModifiableSolrParams(params); - adjustedParams.set(CommonParams.OMIT_HEADER, "true"); - return adjustedParams; - } - public String getDescription() { return "SQLHandler"; } @@ -158,1569 +133,51 @@ public class SQLHandler extends RequestHandlerBase implements SolrCoreAware , Pe return null; } - public static class SQLTupleStreamParser { - - public static TupleStream parse(String sql, - int numWorkers, - String workerCollection, - String workerZkhost, - AggregationMode aggregationMode, - boolean includeMetadata, - StreamContext context) throws IOException { - SqlParser parser = new SqlParser(); - Statement statement = parser.createStatement(sql); - - SQLVisitor sqlVistor = new SQLVisitor(new StringBuilder()); - - sqlVistor.process(statement, new Integer(0)); - sqlVistor.reverseAliases(); - - TupleStream sqlStream = null; - - if(sqlVistor.table.toUpperCase(Locale.ROOT).contains("_CATALOGS_")) { - sqlStream = new SelectStream(new CatalogsStream(defaultZkhost), sqlVistor.columnAliases); - } else if(sqlVistor.table.toUpperCase(Locale.ROOT).contains("_SCHEMAS_")) { - sqlStream = new SelectStream(new SchemasStream(defaultZkhost), sqlVistor.columnAliases); - } else if(sqlVistor.table.toUpperCase(Locale.ROOT).contains("_TABLES_")) { - sqlStream = new SelectStream(new TableStream(defaultZkhost), sqlVistor.columnAliases); - } else if(sqlVistor.groupByQuery) { - if(aggregationMode == AggregationMode.FACET) { - sqlStream = doGroupByWithAggregatesFacets(sqlVistor); - } else { - context.numWorkers = numWorkers; - sqlStream = doGroupByWithAggregates(sqlVistor, numWorkers, workerCollection, workerZkhost); - } - } else if(sqlVistor.isDistinct) { - if(aggregationMode == AggregationMode.FACET) { - sqlStream = 
doSelectDistinctFacets(sqlVistor); - } else { - context.numWorkers = numWorkers; - sqlStream = doSelectDistinct(sqlVistor, numWorkers, workerCollection, workerZkhost); - } - } else { - sqlStream = doSelect(sqlVistor); - } - - if(includeMetadata) { - sqlStream = new MetadataStream(sqlStream, sqlVistor); - } - - sqlStream.setStreamContext(context); - return sqlStream; - } - } - - private static TupleStream doGroupByWithAggregates(SQLVisitor sqlVisitor, - int numWorkers, - String workerCollection, - String workerZkHost) throws IOException { - - Set fieldSet = new HashSet(); - Bucket[] buckets = getBuckets(sqlVisitor.groupBy, fieldSet); - Metric[] metrics = getMetrics(sqlVisitor.fields, fieldSet); - if(metrics.length == 0) { - throw new IOException("Group by queries must include atleast one aggregate function."); - } - - String fl = fields(fieldSet); - String sortDirection = getSortDirection(sqlVisitor.sorts); - String sort = bucketSort(buckets, sortDirection); - - TableSpec tableSpec = new TableSpec(sqlVisitor.table, defaultZkhost); - - String zkHost = tableSpec.zkHost; - String collection = tableSpec.collection; - ModifiableSolrParams params = new ModifiableSolrParams(); - - params.set(CommonParams.FL, fl); - params.set(CommonParams.Q, sqlVisitor.query); - //Always use the /export handler for Group By Queries because it requires exporting full result sets. - params.set(CommonParams.QT, "/export"); - - if(numWorkers > 1) { - params.set("partitionKeys", getPartitionKeys(buckets)); - } - - params.set("sort", sort); - - TupleStream tupleStream = null; - - CloudSolrStream cstream = new CloudSolrStream(zkHost, collection, params); - tupleStream = new RollupStream(cstream, buckets, metrics); - - if(numWorkers > 1) { - // Do the rollups in parallel - // Maintain the sort of the Tuples coming from the workers. 
- StreamComparator comp = bucketSortComp(buckets, sortDirection); - ParallelStream parallelStream = new ParallelStream(workerZkHost, workerCollection, tupleStream, numWorkers, comp); - - StreamFactory factory = new StreamFactory() - .withFunctionName("search", CloudSolrStream.class) - .withFunctionName("parallel", ParallelStream.class) - .withFunctionName("rollup", RollupStream.class) - .withFunctionName("sum", SumMetric.class) - .withFunctionName("min", MinMetric.class) - .withFunctionName("max", MaxMetric.class) - .withFunctionName("avg", MeanMetric.class) - .withFunctionName("count", CountMetric.class); - - parallelStream.setStreamFactory(factory); - tupleStream = parallelStream; - } - - //TODO: This should be done on the workers, but it won't serialize because it relies on Presto classes. - // Once we make this a Expressionable the problem will be solved. - - if(sqlVisitor.havingExpression != null) { - tupleStream = new HavingStream(tupleStream, sqlVisitor.havingExpression, sqlVisitor.reverseColumnAliases ); - } - - if(sqlVisitor.sorts != null && sqlVisitor.sorts.size() > 0) { - if(!sortsEqual(buckets, sortDirection, sqlVisitor.sorts, sqlVisitor.reverseColumnAliases)) { - int limit = sqlVisitor.limit == -1 ? 100 : sqlVisitor.limit; - StreamComparator comp = getComp(sqlVisitor.sorts, sqlVisitor.reverseColumnAliases); - //Rank the Tuples - //If parallel stream is used ALL the Rolled up tuples from the workers will be ranked - //Providing a true Top or Bottom. 
- tupleStream = new RankStream(tupleStream, limit, comp); - } else { - // Sort is the same as the same as the underlying stream - // Only need to limit the result, not Rank the result - if(sqlVisitor.limit > -1) { - tupleStream = new LimitStream(tupleStream, sqlVisitor.limit); - } - } - } - - if(sqlVisitor.hasColumnAliases) { - tupleStream = new SelectStream(tupleStream, sqlVisitor.columnAliases); - } - - return tupleStream; - } - - private static TupleStream doSelectDistinct(SQLVisitor sqlVisitor, - int numWorkers, - String workerCollection, - String workerZkHost) throws IOException { - - Set fieldSet = new HashSet(); - Bucket[] buckets = getBuckets(sqlVisitor.fields, fieldSet); - Metric[] metrics = getMetrics(sqlVisitor.fields, fieldSet); - - if(metrics.length > 0) { - throw new IOException("Select Distinct queries cannot include aggregate functions."); - } - - String fl = fields(fieldSet); - - String sort = null; - StreamEqualitor ecomp = null; - StreamComparator comp = null; - - if(sqlVisitor.sorts != null && sqlVisitor.sorts.size() > 0) { - StreamComparator[] adjustedSorts = adjustSorts(sqlVisitor.sorts, buckets, sqlVisitor.reverseColumnAliases); - // Because of the way adjustSorts works we know that each FieldComparator has a single - // field name. 
For this reason we can just look at the leftFieldName - FieldEqualitor[] fieldEqualitors = new FieldEqualitor[adjustedSorts.length]; - StringBuilder buf = new StringBuilder(); - for(int i=0; i0) { - buf.append(","); - } - buf.append(fieldComparator.getLeftFieldName()).append(" ").append(fieldComparator.getOrder().toString()); - } - - sort = buf.toString(); - - if(adjustedSorts.length == 1) { - ecomp = fieldEqualitors[0]; - comp = adjustedSorts[0]; - } else { - ecomp = new MultipleFieldEqualitor(fieldEqualitors); - comp = new MultipleFieldComparator(adjustedSorts); - } - } else { - StringBuilder sortBuf = new StringBuilder(); - FieldEqualitor[] equalitors = new FieldEqualitor[buckets.length]; - StreamComparator[] streamComparators = new StreamComparator[buckets.length]; - for(int i=0; i0) { - sortBuf.append(','); - } - sortBuf.append(buckets[i].toString()).append(" asc"); - } - - sort = sortBuf.toString(); - - if(equalitors.length == 1) { - ecomp = equalitors[0]; - comp = streamComparators[0]; - } else { - ecomp = new MultipleFieldEqualitor(equalitors); - comp = new MultipleFieldComparator(streamComparators); - } - } - - TableSpec tableSpec = new TableSpec(sqlVisitor.table, defaultZkhost); - - String zkHost = tableSpec.zkHost; - String collection = tableSpec.collection; - ModifiableSolrParams params = new ModifiableSolrParams(); - - params.set(CommonParams.FL, fl); - params.set(CommonParams.Q, sqlVisitor.query); - //Always use the /export handler for Distinct Queries because it requires exporting full result sets. - params.set(CommonParams.QT, "/export"); - - if(numWorkers > 1) { - params.set("partitionKeys", getPartitionKeys(buckets)); - } - - params.set("sort", sort); - - TupleStream tupleStream = null; - - CloudSolrStream cstream = new CloudSolrStream(zkHost, collection, params); - tupleStream = new UniqueStream(cstream, ecomp); - - if(numWorkers > 1) { - // Do the unique in parallel - // Maintain the sort of the Tuples coming from the workers. 
- ParallelStream parallelStream = new ParallelStream(workerZkHost, workerCollection, tupleStream, numWorkers, comp); - - StreamFactory factory = new StreamFactory() - .withFunctionName("search", CloudSolrStream.class) - .withFunctionName("parallel", ParallelStream.class) - .withFunctionName("unique", UniqueStream.class); - - parallelStream.setStreamFactory(factory); - tupleStream = parallelStream; - } - - if(sqlVisitor.limit > 0) { - tupleStream = new LimitStream(tupleStream, sqlVisitor.limit); - } - - if(sqlVisitor.hasColumnAliases) { - tupleStream = new SelectStream(tupleStream, sqlVisitor.columnAliases); - } - - return tupleStream; - } - - private static StreamComparator[] adjustSorts(List sorts, Bucket[] buckets, Map reverseColumnAliases) throws IOException { - List adjustedSorts = new ArrayList(); - Set bucketFields = new HashSet(); - Set sortFields = new HashSet(); - - for(SortItem sortItem : sorts) { - - sortFields.add(getSortField(sortItem, reverseColumnAliases)); - adjustedSorts.add(new FieldComparator(getSortField(sortItem, reverseColumnAliases), - ascDescComp(sortItem.getOrdering().toString()))); - } - - for(Bucket bucket : buckets) { - bucketFields.add(bucket.toString()); - } - - for(SortItem sortItem : sorts) { - String sortField = getSortField(sortItem, reverseColumnAliases); - if(!bucketFields.contains(sortField)) { - throw new IOException("All sort fields must be in the field list."); - } - } - - //Add sort fields if needed - if(sorts.size() < buckets.length) { - for(Bucket bucket : buckets) { - String b = bucket.toString(); - if(!sortFields.contains(b)) { - adjustedSorts.add(new FieldComparator(bucket.toString(), ComparatorOrder.ASCENDING)); - } - } - } - - return adjustedSorts.toArray(new FieldComparator[adjustedSorts.size()]); - } - - private static TupleStream doSelectDistinctFacets(SQLVisitor sqlVisitor) throws IOException { - - Set fieldSet = new HashSet(); - Bucket[] buckets = getBuckets(sqlVisitor.fields, fieldSet); - Metric[] metrics = 
getMetrics(sqlVisitor.fields, fieldSet); - - if(metrics.length > 0) { - throw new IOException("Select Distinct queries cannot include aggregate functions."); - } - - TableSpec tableSpec = new TableSpec(sqlVisitor.table, defaultZkhost); - - String zkHost = tableSpec.zkHost; - String collection = tableSpec.collection; - ModifiableSolrParams params = new ModifiableSolrParams(); - - params.set(CommonParams.Q, sqlVisitor.query); - - int limit = sqlVisitor.limit > 0 ? sqlVisitor.limit : 100; - - FieldComparator[] sorts = null; - - if(sqlVisitor.sorts == null) { - sorts = new FieldComparator[buckets.length]; - for(int i=0; i 0) { - tupleStream = new LimitStream(tupleStream, sqlVisitor.limit); - } - - return new SelectStream(tupleStream, sqlVisitor.columnAliases); - } - - private static TupleStream doGroupByWithAggregatesFacets(SQLVisitor sqlVisitor) throws IOException { - - Set fieldSet = new HashSet(); - Bucket[] buckets = getBuckets(sqlVisitor.groupBy, fieldSet); - Metric[] metrics = getMetrics(sqlVisitor.fields, fieldSet); - if(metrics.length == 0) { - throw new IOException("Group by queries must include at least one aggregate function."); - } - - TableSpec tableSpec = new TableSpec(sqlVisitor.table, defaultZkhost); - - String zkHost = tableSpec.zkHost; - String collection = tableSpec.collection; - ModifiableSolrParams params = new ModifiableSolrParams(); - - params.set(CommonParams.Q, sqlVisitor.query); - - int limit = sqlVisitor.limit > 0 ? 
sqlVisitor.limit : 100; - - FieldComparator[] sorts = null; - - if(sqlVisitor.sorts == null) { - sorts = new FieldComparator[buckets.length]; - for(int i=0; i 0) - { - tupleStream = new LimitStream(tupleStream, sqlVisitor.limit); - } - - if(sqlVisitor.hasColumnAliases) { - tupleStream = new SelectStream(tupleStream, sqlVisitor.columnAliases); - } - - return tupleStream; - } - - private static TupleStream doSelect(SQLVisitor sqlVisitor) throws IOException { - List fields = sqlVisitor.fields; - Set fieldSet = new HashSet(); - Metric[] metrics = getMetrics(fields, fieldSet); - if(metrics.length > 0) { - return doAggregates(sqlVisitor, metrics); - } - - StringBuilder flbuf = new StringBuilder(); - boolean comma = false; - - if(fields.size() == 0) { - throw new IOException("Select columns must be specified."); - } - - TableSpec tableSpec = new TableSpec(sqlVisitor.table, defaultZkhost); - - String zkHost = tableSpec.zkHost; - String collection = tableSpec.collection; - - boolean score = false; - - for (String field : fields) { - - if(field.contains("(")) { - throw new IOException("Aggregate functions only supported with group by queries."); - } - - if(field.contains("*")) { - throw new IOException("* is not supported for column selection."); - } - - if(field.equals("score")) { - if(sqlVisitor.limit < 0) { - throw new IOException("score is not a valid field for unlimited select queries"); - } else { - score = true; - } - } - - if (comma) { - flbuf.append(","); - } - - comma = true; - flbuf.append(field); - } - - String fl = flbuf.toString(); - - List sorts = sqlVisitor.sorts; - - StringBuilder siBuf = new StringBuilder(); - - comma = false; - - if(sorts != null) { - for (SortItem sortItem : sorts) { - if (comma) { - siBuf.append(","); - } - siBuf.append(getSortField(sortItem, sqlVisitor.reverseColumnAliases) + " " + ascDesc(sortItem.getOrdering().toString())); - } - } else { - if(sqlVisitor.limit < 0) { - siBuf.append("_version_ desc"); - fl = fl+",_version_"; - } else { 
- siBuf.append("score desc"); - if(!score) { - fl = fl+",score"; - } - } - } - - ModifiableSolrParams params = new ModifiableSolrParams(); - params.set("fl", fl.toString()); - params.set("q", sqlVisitor.query); - - if(siBuf.length() > 0) { - params.set("sort", siBuf.toString()); - } - - TupleStream tupleStream; - - if(sqlVisitor.limit > -1) { - params.set("rows", Integer.toString(sqlVisitor.limit)); - tupleStream = new LimitStream(new CloudSolrStream(zkHost, collection, params), sqlVisitor.limit); - } else { - //Only use the export handler when no limit is specified. - params.set(CommonParams.QT, "/export"); - tupleStream = new CloudSolrStream(zkHost, collection, params); - } - - return new SelectStream(tupleStream, sqlVisitor.columnAliases); - } - - private static boolean sortsEqual(Bucket[] buckets, String direction, List sortItems, Map reverseColumnAliases) { - if(buckets.length != sortItems.size()) { - return false; - } - - for(int i=0; i< buckets.length; i++) { - Bucket bucket = buckets[i]; - SortItem sortItem = sortItems.get(i); - if(!bucket.toString().equals(getSortField(sortItem, reverseColumnAliases))) { - return false; - } - - - if(!sortItem.getOrdering().toString().toLowerCase(Locale.ROOT).contains(direction.toLowerCase(Locale.ROOT))) { - return false; - } - } - - return true; - } - - private static TupleStream doAggregates(SQLVisitor sqlVisitor, Metric[] metrics) throws IOException { - - if(metrics.length != sqlVisitor.fields.size()) { - throw new IOException("Only aggregate functions are allowed when group by is not specified."); - } - - TableSpec tableSpec = new TableSpec(sqlVisitor.table, defaultZkhost); - - String zkHost = tableSpec.zkHost; - String collection = tableSpec.collection; - ModifiableSolrParams params = new ModifiableSolrParams(); - - params.set(CommonParams.Q, sqlVisitor.query); - - TupleStream tupleStream = new StatsStream(zkHost, - collection, - params, - metrics); - - if(sqlVisitor.hasColumnAliases) { - tupleStream = new 
SelectStream(tupleStream, sqlVisitor.columnAliases); - } - - return tupleStream; - } - - private static String bucketSort(Bucket[] buckets, String dir) { - StringBuilder buf = new StringBuilder(); - boolean comma = false; - for(Bucket bucket : buckets) { - if(comma) { - buf.append(","); - } - buf.append(bucket.toString()).append(" ").append(dir); - comma = true; - } - - return buf.toString(); - } - - private static String getPartitionKeys(Bucket[] buckets) { - StringBuilder buf = new StringBuilder(); - boolean comma = false; - for(Bucket bucket : buckets) { - if(comma) { - buf.append(","); - } - buf.append(bucket.toString()); - comma = true; - } - return buf.toString(); - } - - private static String getSortDirection(List sorts) { - if(sorts != null && sorts.size() > 0) { - for(SortItem item : sorts) { - return ascDesc(stripSingleQuotes(stripQuotes(item.getOrdering().toString()))); - } - } - - return "asc"; - } - - private static StreamComparator bucketSortComp(Bucket[] buckets, String dir) { - FieldComparator[] comps = new FieldComparator[buckets.length]; - for(int i=0; i sortItems, Map reverseColumnAliases) { - FieldComparator[] comps = new FieldComparator[sortItems.size()]; - for(int i=0; i sortItems, Map reverseColumnAliases) { - FieldComparator[] comps = new FieldComparator[sortItems.size()]; - for(int i=0; i fieldSet) { - StringBuilder buf = new StringBuilder(); - boolean comma = false; - for(String field : fieldSet) { - if(comma) { - buf.append(","); - } - buf.append(field); - comma = true; - } - - return buf.toString(); - } - - private static Metric[] getMetrics(List fields, Set fieldSet) throws IOException { - List metrics = new ArrayList(); - for(String field : fields) { - if(field.contains("(")) { - - field = field.substring(0, field.length()-1); - String[] parts = field.split("\\("); - String function = parts[0]; - validateFunction(function); - String column = parts[1]; - if(function.equals("min")) { - metrics.add(new MinMetric(column)); - 
fieldSet.add(column); - } else if(function.equals("max")) { - metrics.add(new MaxMetric(column)); - fieldSet.add(column); - } else if(function.equals("sum")) { - metrics.add(new SumMetric(column)); - fieldSet.add(column); - } else if(function.equals("avg")) { - metrics.add(new MeanMetric(column)); - fieldSet.add(column); - } else if(function.equals("count")) { - metrics.add(new CountMetric()); - } - } - } - return metrics.toArray(new Metric[metrics.size()]); - } - - private static void validateFunction(String function) throws IOException { - if(function.equals("min") || function.equals("max") || function.equals("sum") || function.equals("avg") || function.equals("count")) { - return; - } else { - throw new IOException("Invalid function: "+function); - } - } - - private static Bucket[] getBuckets(List fields, Set fieldSet) { - List buckets = new ArrayList(); - for(String field : fields) { - String f = stripQuotes(field); - buckets.add(new Bucket(f)); - fieldSet.add(f); - } - - return buckets.toArray(new Bucket[buckets.size()]); - } - - private static String ascDesc(String s) { - if(s.toLowerCase(Locale.ROOT).contains("desc")) { - return "desc"; - } else { - return "asc"; - } - } - - private static ComparatorOrder ascDescComp(String s) { - if(s.toLowerCase(Locale.ROOT).contains("desc")) { - return ComparatorOrder.DESCENDING; - } else { - return ComparatorOrder.ASCENDING; - } - } - - private static String stripQuotes(String s) { - StringBuilder buf = new StringBuilder(); - for(int i=0; i { - - protected Void visitLogicalBinaryExpression(LogicalBinaryExpression node, StringBuilder buf) { - buf.append("("); - process(node.getLeft(), buf); - buf.append(" ").append(node.getType().toString()).append(" "); - process(node.getRight(), buf); - buf.append(")"); - return null; - } - - protected Void visitNotExpression(NotExpression node, StringBuilder buf) { - buf.append("-"); - process(node.getValue(), buf); - return null; - } - - protected Void 
visitComparisonExpression(ComparisonExpression node, StringBuilder buf) { - if (!(node.getLeft() instanceof StringLiteral || node.getLeft() instanceof QualifiedNameReference)) { - throw new RuntimeException("Left side of comparison must be a literal."); - } - - String field = getPredicateField(node.getLeft()); - String value = node.getRight().toString(); - value = stripSingleQuotes(value); - - if(!value.startsWith("(") && !value.startsWith("[")) { - //If no parens default to a phrase search. - value = '"'+value+'"'; - } - - String lowerBound; - String upperBound; - String lowerValue; - String upperValue; - - ComparisonExpression.Type t = node.getType(); - switch(t) { - case NOT_EQUAL: - buf.append('(').append('-').append(field).append(":").append(value).append(')'); - return null; - case EQUAL: - buf.append('(').append(field).append(":").append(value).append(')'); - return null; - case LESS_THAN: - lowerBound = "["; - upperBound = "}"; - lowerValue = "*"; - upperValue = value; - buf.append('(').append(field).append(":").append(lowerBound).append(lowerValue).append(" TO ").append(upperValue).append(upperBound).append(')'); - return null; - case LESS_THAN_OR_EQUAL: - lowerBound = "["; - upperBound = "]"; - lowerValue = "*"; - upperValue = value; - buf.append('(').append(field).append(":").append(lowerBound).append(lowerValue).append(" TO ").append(upperValue).append(upperBound).append(')'); - return null; - case GREATER_THAN: - lowerBound = "{"; - upperBound = "]"; - lowerValue = value; - upperValue = "*"; - buf.append('(').append(field).append(":").append(lowerBound).append(lowerValue).append(" TO ").append(upperValue).append(upperBound).append(')'); - return null; - case GREATER_THAN_OR_EQUAL: - lowerBound = "["; - upperBound = "]"; - lowerValue = value; - upperValue = "*"; - buf.append('(').append(field).append(":").append(lowerBound).append(lowerValue).append(" TO ").append(upperValue).append(upperBound).append(')'); - return null; - } - - return null; - } - } - 
- static class SQLVisitor extends AstVisitor { - private final StringBuilder builder; - public String table; - public List fields = new ArrayList(); - public List groupBy = new ArrayList(); - public List sorts; - public String query ="*:*"; //If no query is specified pull all the records - public int limit = -1; - public boolean groupByQuery; - public Expression havingExpression; - public boolean isDistinct; - public boolean hasColumnAliases; - public Map columnAliases = new HashMap(); - public Map reverseColumnAliases = new HashMap(); - - public SQLVisitor(StringBuilder builder) { - this.builder = builder; - } - - protected Void visitNode(Node node, Integer indent) { - throw new UnsupportedOperationException("not yet implemented: " + node); - } - - protected void reverseAliases() { - for(String key : columnAliases.keySet()) { - reverseColumnAliases.put(columnAliases.get(key), key); - } - - //Handle the group by. - List newGroups = new ArrayList(); - - for(String g : groupBy) { - if (reverseColumnAliases.containsKey(g)) { - newGroups.add(reverseColumnAliases.get(g)); - } else { - newGroups.add(g); - } - } - - groupBy = newGroups; - } - - - - - protected Void visitUnnest(Unnest node, Integer indent) { - return null; - } - - protected Void visitQuery(Query node, Integer indent) { - if(node.getWith().isPresent()) { - With confidence = (With)node.getWith().get(); - this.append(indent.intValue(), "WITH"); - if(confidence.isRecursive()) { - } - - Iterator queries = confidence.getQueries().iterator(); - - while(queries.hasNext()) { - WithQuery query = (WithQuery)queries.next(); - this.process(new TableSubquery(query.getQuery()), indent); - if(queries.hasNext()) { - } - } - } - - this.processRelation(node.getQueryBody(), indent); - if(!node.getOrderBy().isEmpty()) { - this.sorts = node.getOrderBy(); - } - - if(node.getLimit().isPresent()) { - } - - if(node.getApproximate().isPresent()) { - - } - - return null; - } - - protected Void 
visitQuerySpecification(QuerySpecification node, Integer indent) { - this.process(node.getSelect(), indent); - if(node.getFrom().isPresent()) { - this.process((Node)node.getFrom().get(), indent); - } - - if(node.getWhere().isPresent()) { - Expression ex = node.getWhere().get(); - ExpressionVisitor expressionVisitor = new ExpressionVisitor(); - StringBuilder buf = new StringBuilder(); - expressionVisitor.process(ex, buf); - this.query = buf.toString(); - } - - if(!node.getGroupBy().isEmpty()) { - this.groupByQuery = true; - List groups = node.getGroupBy(); - for(Expression group : groups) { - groupBy.add(getGroupField(group)); - } - } - - if(node.getHaving().isPresent()) { - this.havingExpression = node.getHaving().get(); - } - - if(!node.getOrderBy().isEmpty()) { - this.sorts = node.getOrderBy(); - } - - if(node.getLimit().isPresent()) { - this.limit = Integer.parseInt(stripQuotes(node.getLimit().get())); - } - - return null; - } - - protected Void visitComparisonExpression(ComparisonExpression node, Integer index) { - String field = node.getLeft().toString(); - String value = node.getRight().toString(); - query = stripSingleQuotes(stripQuotes(field))+":"+stripQuotes(value); - return null; - } - - protected Void visitSelect(Select node, Integer indent) { - this.append(indent.intValue(), "SELECT"); - if(node.isDistinct()) { - this.isDistinct = true; - } - - if(node.getSelectItems().size() > 1) { - boolean first = true; - - for(Iterator var4 = node.getSelectItems().iterator(); var4.hasNext(); first = false) { - SelectItem item = (SelectItem)var4.next(); - this.process(item, indent); - } - } else { - this.process((Node) Iterables.getOnlyElement(node.getSelectItems()), indent); - } - - return null; - } - - protected Void visitSingleColumn(SingleColumn node, Integer indent) { - - Expression ex = node.getExpression(); - String field = null; - - if(ex instanceof QualifiedNameReference) { - - QualifiedNameReference ref = (QualifiedNameReference)ex; - List parts = 
ref.getName().getOriginalParts(); - field = parts.get(0); - - } else if(ex instanceof FunctionCall) { - - FunctionCall functionCall = (FunctionCall)ex; - List parts = functionCall.getName().getOriginalParts(); - List args = functionCall.getArguments(); - String col = null; - - if(args.size() > 0 && args.get(0) instanceof QualifiedNameReference) { - QualifiedNameReference ref = (QualifiedNameReference) args.get(0); - col = ref.getName().getOriginalParts().get(0); - field = parts.get(0)+"("+stripSingleQuotes(col)+")"; - } else { - field = stripSingleQuotes(stripQuotes(functionCall.toString())); - } - - } else if(ex instanceof StringLiteral) { - StringLiteral stringLiteral = (StringLiteral)ex; - field = stripSingleQuotes(stringLiteral.toString()); - } - - fields.add(field); - - if(node.getAlias().isPresent()) { - String alias = node.getAlias().get(); - columnAliases.put(field, alias); - hasColumnAliases = true; - } else { - columnAliases.put(field, field); - } - - return null; - } - - - - - protected Void visitAllColumns(AllColumns node, Integer context) { - return null; - } - - protected Void visitTable(Table node, Integer indent) { - this.table = stripSingleQuotes(node.getName().toString()); - return null; - } - - protected Void visitAliasedRelation(AliasedRelation node, Integer indent) { - this.process(node.getRelation(), indent); - return null; - } - - protected Void visitValues(Values node, Integer indent) { - boolean first = true; - - for(Iterator var4 = node.getRows().iterator(); var4.hasNext(); first = false) { - Expression row = (Expression)var4.next(); - - } - - return null; - } - - private void processRelation(Relation relation, Integer indent) { - if(relation instanceof Table) { - } else { - this.process(relation, indent); - } - } - - private StringBuilder append(int indent, String value) { - return this.builder.append(indentString(indent)).append(value); - } - - private static String indentString(int indent) { - return Strings.repeat(" ", indent); - } - } 
- - private static String getSortField(SortItem sortItem, Map reverseColumnAliases) - { - String field; - Expression ex = sortItem.getSortKey(); - if(ex instanceof QualifiedNameReference) { - QualifiedNameReference ref = (QualifiedNameReference)ex; - List parts = ref.getName().getOriginalParts(); - field = parts.get(0); - } else if(ex instanceof FunctionCall) { - FunctionCall functionCall = (FunctionCall)ex; - List parts = functionCall.getName().getOriginalParts(); - List args = functionCall.getArguments(); - String col = null; - - if(args.size() > 0 && args.get(0) instanceof QualifiedNameReference) { - QualifiedNameReference ref = (QualifiedNameReference) args.get(0); - col = ref.getName().getOriginalParts().get(0); - field = parts.get(0)+"("+stripSingleQuotes(col)+")"; - } else { - field = stripSingleQuotes(stripQuotes(functionCall.toString())); - } - - } else { - StringLiteral stringLiteral = (StringLiteral)ex; - field = stripSingleQuotes(stringLiteral.toString()); - } - - if(reverseColumnAliases.containsKey(field)) { - field = reverseColumnAliases.get(field); - } - - return field; - } - - - private static String getHavingField(Expression ex) - { - String field; - if(ex instanceof QualifiedNameReference) { - QualifiedNameReference ref = (QualifiedNameReference)ex; - List parts = ref.getName().getOriginalParts(); - field = parts.get(0); - } else if(ex instanceof FunctionCall) { - FunctionCall functionCall = (FunctionCall)ex; - List parts = functionCall.getName().getOriginalParts(); - List args = functionCall.getArguments(); - String col = null; - - if(args.size() > 0 && args.get(0) instanceof QualifiedNameReference) { - QualifiedNameReference ref = (QualifiedNameReference) args.get(0); - col = ref.getName().getOriginalParts().get(0); - field = parts.get(0)+"("+stripSingleQuotes(col)+")"; - } else { - field = stripSingleQuotes(stripQuotes(functionCall.toString())); - } - - } else { - StringLiteral stringLiteral = (StringLiteral)ex; - field = 
stripSingleQuotes(stringLiteral.toString()); - } - - return field; - } - - - private static String getPredicateField(Expression ex) - { - String field; - if(ex instanceof QualifiedNameReference) { - QualifiedNameReference ref = (QualifiedNameReference)ex; - List parts = ref.getName().getOriginalParts(); - field = parts.get(0); - } else { - StringLiteral stringLiteral = (StringLiteral)ex; - field = stripSingleQuotes(stringLiteral.toString()); - } - - return field; - } - - private static String getGroupField(Expression ex) - { - String field; - if(ex instanceof QualifiedNameReference) { - QualifiedNameReference ref = (QualifiedNameReference)ex; - List parts = ref.getName().getOriginalParts(); - field = parts.get(0); - } else { - StringLiteral stringLiteral = (StringLiteral)ex; - field = stripSingleQuotes(stringLiteral.toString()); - } - - return field; - } - - - private static class LimitStream extends TupleStream { - - private TupleStream stream; - private int limit; - private int count; - - public LimitStream(TupleStream stream, int limit) { - this.stream = stream; - this.limit = limit; - } - - public void open() throws IOException { - this.stream.open(); - } - - public void close() throws IOException { - this.stream.close(); - } - - public List children() { - List children = new ArrayList(); - children.add(stream); - return children; - } - - public StreamComparator getStreamSort(){ - return stream.getStreamSort(); - } - - public void setStreamContext(StreamContext context) { - stream.setStreamContext(context); - } - - @Override - public Explanation toExplanation(StreamFactory factory) throws IOException { - - return new StreamExplanation(getStreamNodeId().toString()) - .withChildren(new Explanation[]{ - stream.toExplanation(factory) - }) - .withFunctionName("SQL LIMIT") - .withExpression("--non-expressible--") - .withImplementingClass(this.getClass().getName()) - .withExpressionType(ExpressionType.STREAM_DECORATOR); - } - - public Tuple read() throws IOException { 
- ++count; - if(count > limit) { - Map fields = new HashMap(); - fields.put("EOF", "true"); - return new Tuple(fields); - } - - Tuple tuple = stream.read(); - return tuple; - } - } - - public static enum AggregationMode { - - MAP_REDUCE, - FACET; - - public static AggregationMode getMode(String mode) throws IOException{ - if(mode.equalsIgnoreCase("facet")) { - return FACET; - } else if(mode.equalsIgnoreCase("map_reduce")) { - return MAP_REDUCE; - } else { - throw new IOException("Invalid aggregation mode:"+mode); - } - } - } - - private static class HavingStream extends TupleStream { - - private TupleStream stream; - private HavingVisitor havingVisitor; - private Expression havingExpression; - - public HavingStream(TupleStream stream, Expression havingExpression, Map reverseAliasMap) { - this.stream = stream; - this.havingVisitor = new HavingVisitor(reverseAliasMap); - this.havingExpression = havingExpression; - } - - public void open() throws IOException { - this.stream.open(); - } - - public void close() throws IOException { - this.stream.close(); - } - - public StreamComparator getStreamSort(){ - return stream.getStreamSort(); - } - - public List children() { - List children = new ArrayList(); - children.add(stream); - return children; - } - - @Override - public Explanation toExplanation(StreamFactory factory) throws IOException { - - return new StreamExplanation(getStreamNodeId().toString()) - .withChildren(new Explanation[]{ - stream.toExplanation(factory) - }) - .withFunctionName("SQL HAVING") - .withExpression("--non-expressible--") - .withImplementingClass(this.getClass().getName()) - .withExpressionType(ExpressionType.STREAM_DECORATOR); - } - - public void setStreamContext(StreamContext context) { - stream.setStreamContext(context); - } - - public Tuple read() throws IOException { - while (true) { - Tuple tuple = stream.read(); - if (tuple.EOF) { - return tuple; - } - - if (havingVisitor.process(havingExpression, tuple)) { - return tuple; - } - } - } - } - 
- private static class CatalogsStream extends TupleStream { - private final String zkHost; - private StreamContext context; - private int currentIndex = 0; - private List catalogs; - - CatalogsStream(String zkHost) { - this.zkHost = zkHost; - } - - public List children() { - return new ArrayList<>(); - } - - public void open() throws IOException { - this.catalogs = new ArrayList<>(); - this.catalogs.add(this.zkHost); - } - - @Override - public Explanation toExplanation(StreamFactory factory) throws IOException { - - return new StreamExplanation(getStreamNodeId().toString()) - .withFunctionName("SQL CATALOG") - .withExpression("--non-expressible--") - .withImplementingClass(this.getClass().getName()) - .withExpressionType(ExpressionType.STREAM_DECORATOR); - } - - public Tuple read() throws IOException { - Map fields = new HashMap<>(); - if (this.currentIndex < this.catalogs.size()) { - fields.put("TABLE_CAT", this.catalogs.get(this.currentIndex)); - this.currentIndex += 1; - } else { - fields.put("EOF", "true"); - } - return new Tuple(fields); - } - - public StreamComparator getStreamSort() { - return null; - } - - public void close() throws IOException { - - } - - public void setStreamContext(StreamContext context) { - this.context = context; - } - } - - private static class SchemasStream extends TupleStream { - private final String zkHost; - private StreamContext context; - - SchemasStream(String zkHost) { - this.zkHost = zkHost; - } - - public List children() { - return new ArrayList<>(); - } - - public void open() throws IOException { - - } - - @Override - public Explanation toExplanation(StreamFactory factory) throws IOException { - - return new StreamExplanation(getStreamNodeId().toString()) - .withFunctionName("SQL SCHEMA") - .withExpression("--non-expressible--") - .withImplementingClass(this.getClass().getName()) - .withExpressionType(ExpressionType.STREAM_DECORATOR); - } - - public Tuple read() throws IOException { - Map fields = new HashMap<>(); - 
fields.put("EOF", "true"); - return new Tuple(fields); - } - - public StreamComparator getStreamSort() { - return null; - } - - public void close() throws IOException { - - } - - public void setStreamContext(StreamContext context) { - this.context = context; - } - } - - private static class TableStream extends TupleStream { - private final String zkHost; - private StreamContext context; - private int currentIndex = 0; - private List tables; - - TableStream(String zkHost) { - this.zkHost = zkHost; - } - - public List children() { - return new ArrayList<>(); - } - - public void open() throws IOException { - this.tables = new ArrayList<>(); - - CloudSolrClient cloudSolrClient = this.context.getSolrClientCache().getCloudSolrClient(this.zkHost); - cloudSolrClient.connect(); - ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader(); - Set collections = zkStateReader.getClusterState().getCollectionStates().keySet(); - if (collections.size() != 0) { - this.tables.addAll(collections); - } - Collections.sort(this.tables); - } - - @Override - public Explanation toExplanation(StreamFactory factory) throws IOException { - - return new StreamExplanation(getStreamNodeId().toString()) - .withFunctionName("SQL TABLE") - .withExpression("--non-expressible--") - .withImplementingClass(this.getClass().getName()) - .withExpressionType(ExpressionType.STREAM_DECORATOR); - } - - public Tuple read() throws IOException { - Map fields = new HashMap<>(); - if (this.currentIndex < this.tables.size()) { - fields.put("TABLE_CAT", this.zkHost); - fields.put("TABLE_SCHEM", null); - fields.put("TABLE_NAME", this.tables.get(this.currentIndex)); - fields.put("TABLE_TYPE", "TABLE"); - fields.put("REMARKS", null); - this.currentIndex += 1; - } else { - fields.put("EOF", "true"); - } - return new Tuple(fields); - } - - public StreamComparator getStreamSort() { - return null; - } - - public void close() throws IOException { - - } - - public void setStreamContext(StreamContext context) { - 
this.context = context; - } - } - - private static class MetadataStream extends TupleStream { - - private final TupleStream stream; - private final SQLVisitor sqlVisitor; + /* + * Only necessary for SolrJ JDBC driver since metadata has to be passed back + */ + private class SqlHandlerStream extends JDBCStream { + private final boolean includeMetadata; private boolean firstTuple = true; - public MetadataStream(TupleStream stream, SQLVisitor sqlVistor) { - this.stream = stream; - this.sqlVisitor = sqlVistor; + SqlHandlerStream(String connectionUrl, String sqlQuery, StreamComparator definedSort, + Properties connectionProperties, String driverClassName, boolean includeMetadata) + throws IOException { + super(connectionUrl, sqlQuery, definedSort, connectionProperties, driverClassName); + + this.includeMetadata = includeMetadata; } - public List children() { - return this.stream.children(); - } - - public void open() throws IOException { - this.stream.open(); - } - @Override - public Explanation toExplanation(StreamFactory factory) throws IOException { - - return new StreamExplanation(getStreamNodeId().toString()) - .withChildren(new Explanation[]{ - stream.toExplanation(factory) - }) - .withFunctionName("SQL METADATA") - .withExpression("--non-expressible--") - .withImplementingClass(this.getClass().getName()) - .withExpressionType(ExpressionType.STREAM_DECORATOR); - } - - // Return a metadata tuple as the first tuple and then pass through to the underlying stream. public Tuple read() throws IOException { - if(firstTuple) { - firstTuple = false; + // Return a metadata tuple as the first tuple and then pass through to the JDBCStream. 
+ if(includeMetadata && firstTuple) { + try { + Map fields = new HashMap<>(); - Map fields = new HashMap<>(); - fields.put("isMetadata", true); - fields.put("fields", sqlVisitor.fields); - fields.put("aliases", sqlVisitor.columnAliases); - return new Tuple(fields); - } + firstTuple = false; - return this.stream.read(); - } + ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); - public StreamComparator getStreamSort() { - return this.stream.getStreamSort(); - } + List metadataFields = new ArrayList<>(); + Map metadataAliases = new HashMap<>(); + for(int i = 1; i <= resultSetMetaData.getColumnCount(); i++) { + String columnName = resultSetMetaData.getColumnName(i); + String columnLabel = resultSetMetaData.getColumnLabel(i); + metadataFields.add(columnName); + metadataAliases.put(columnName, columnLabel); + } - public void close() throws IOException { - this.stream.close(); - } - - public void setStreamContext(StreamContext context) { - this.stream.setStreamContext(context); - } - } - - private static class HavingVisitor extends AstVisitor { - - private Map reverseAliasMap; - - public HavingVisitor(Map reverseAliasMap) { - this.reverseAliasMap = reverseAliasMap; - } - - protected Boolean visitLogicalBinaryExpression(LogicalBinaryExpression node, Tuple tuple) { - - Boolean b = process(node.getLeft(), tuple); - if(node.getType() == LogicalBinaryExpression.Type.AND) { - if(!b) { - //Short circuit - return false; - } else { - return process(node.getRight(), tuple); + fields.put("isMetadata", true); + fields.put("fields", metadataFields); + fields.put("aliases", metadataAliases); + return new Tuple(fields); + } catch (SQLException e) { + throw new IOException(e); } } else { - if(b) { - //Short circuit - return true; - } else { - return process(node.getRight(), tuple); - } - } - } - - protected Boolean visitComparisonExpression(ComparisonExpression node, Tuple tuple) { - String field = getHavingField(node.getLeft()); - - if(reverseAliasMap.containsKey(field)) { - 
field = reverseAliasMap.get(field); - } - - double d = Double.parseDouble(node.getRight().toString()); - double td = tuple.getDouble(field); - ComparisonExpression.Type t = node.getType(); - - switch(t) { - case LESS_THAN: - return td < d; - case LESS_THAN_OR_EQUAL: - return td <= d; - case NOT_EQUAL: - return td != d; - case EQUAL: - return td == d; - case GREATER_THAN: - return td > d; - case GREATER_THAN_OR_EQUAL: - return td >= d; - default: - return false; + return super.read(); } } } - } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/CalciteSolrDriver.java b/solr/core/src/java/org/apache/solr/handler/sql/CalciteSolrDriver.java new file mode 100644 index 00000000000..3a7640de83e --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/CalciteSolrDriver.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.handler.sql; + +import org.apache.calcite.jdbc.CalciteConnection; +import org.apache.calcite.jdbc.Driver; +import org.apache.calcite.schema.SchemaPlus; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Properties; + +/** + * JDBC driver for Calcite Solr. + * + *

It accepts connect strings that start with "jdbc:calcitesolr:".

+ */ +public class CalciteSolrDriver extends Driver { + public final static String CONNECT_STRING_PREFIX = "jdbc:calcitesolr:"; + + private CalciteSolrDriver() { + super(); + } + + static { + new CalciteSolrDriver().register(); + } + + @Override + protected String getConnectStringPrefix() { + return CONNECT_STRING_PREFIX; + } + + @Override + public Connection connect(String url, Properties info) throws SQLException { + if(!this.acceptsURL(url)) { + return null; + } + + Connection connection = super.connect(url, info); + CalciteConnection calciteConnection = (CalciteConnection) connection; + final SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + String schemaName = info.getProperty("zk"); + if(schemaName == null) { + throw new SQLException("zk must be set"); + } + rootSchema.add(schemaName, new SolrSchema(info)); + + // Set the default schema + calciteConnection.setSchema(schemaName); + + return connection; + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/LimitStream.java b/solr/core/src/java/org/apache/solr/handler/sql/LimitStream.java new file mode 100644 index 00000000000..0d4bb72adf4 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/LimitStream.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.handler.sql; + +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.comp.StreamComparator; +import org.apache.solr.client.solrj.io.stream.StreamContext; +import org.apache.solr.client.solrj.io.stream.TupleStream; +import org.apache.solr.client.solrj.io.stream.expr.Explanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +class LimitStream extends TupleStream { + + private final TupleStream stream; + private final int limit; + private int count; + + LimitStream(TupleStream stream, int limit) { + this.stream = stream; + this.limit = limit; + } + + public void open() throws IOException { + this.stream.open(); + } + + public void close() throws IOException { + this.stream.close(); + } + + public List children() { + List children = new ArrayList<>(); + children.add(stream); + return children; + } + + public StreamComparator getStreamSort(){ + return stream.getStreamSort(); + } + + public void setStreamContext(StreamContext context) { + stream.setStreamContext(context); + } + + @Override + public Explanation toExplanation(StreamFactory factory) throws IOException { + + return new StreamExplanation(getStreamNodeId().toString()) + .withChildren(new Explanation[]{ + stream.toExplanation(factory) + }) + .withFunctionName("SQL LIMIT") + .withExpression("--non-expressible--") + .withImplementingClass(this.getClass().getName()) + .withExpressionType(Explanation.ExpressionType.STREAM_DECORATOR); + } + + public Tuple read() throws IOException { + ++count; + if(count > limit) { + Map fields = new HashMap<>(); + fields.put("EOF", "true"); + return new Tuple(fields); + } + + return 
stream.read(); + } +} \ No newline at end of file diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrAggregate.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrAggregate.java new file mode 100644 index 00000000000..983ab7691be --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrAggregate.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.handler.sql; + +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelTraitSet; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.Aggregate; +import org.apache.calcite.rel.core.AggregateCall; +import org.apache.calcite.sql.SqlAggFunction; +import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.calcite.util.ImmutableBitSet; +import org.apache.calcite.util.Pair; + +import java.util.*; + +/** + * Implementation of {@link org.apache.calcite.rel.core.Aggregate} relational expression in Solr. 
+ */ +class SolrAggregate extends Aggregate implements SolrRel { + private static final List SUPPORTED_AGGREGATIONS = Arrays.asList( + SqlStdOperatorTable.COUNT, + SqlStdOperatorTable.SUM, + SqlStdOperatorTable.SUM0, + SqlStdOperatorTable.MIN, + SqlStdOperatorTable.MAX, + SqlStdOperatorTable.AVG + ); + + SolrAggregate( + RelOptCluster cluster, + RelTraitSet traitSet, + RelNode child, + boolean indicator, + ImmutableBitSet groupSet, + List groupSets, + List aggCalls) { + super(cluster, traitSet, child, indicator, groupSet, groupSets, aggCalls); + assert getConvention() == SolrRel.CONVENTION; + assert getConvention() == child.getConvention(); + } + + @Override + public Aggregate copy(RelTraitSet traitSet, RelNode input, + boolean indicator, ImmutableBitSet groupSet, + List groupSets, List aggCalls) { + return new SolrAggregate(getCluster(), traitSet, input, indicator, groupSet, groupSets, aggCalls); + } + + public void implement(Implementor implementor) { + implementor.visitChild(0, getInput()); + + final List inNames = SolrRules.solrFieldNames(getInput().getRowType()); + + + for(Pair namedAggCall : getNamedAggCalls()) { + + + AggregateCall aggCall = namedAggCall.getKey(); + + Pair metric = toSolrMetric(implementor, aggCall, inNames); + implementor.addReverseAggMapping(namedAggCall.getValue(), metric.getKey().toLowerCase()+"("+metric.getValue()+")"); + implementor.addMetricPair(namedAggCall.getValue(), metric.getKey(), metric.getValue()); + if(aggCall.getName() == null) { + implementor.addFieldMapping(namedAggCall.getValue(), + aggCall.getAggregation().getName() + "(" + inNames.get(aggCall.getArgList().get(0)) + ")"); + } + } + + for(int group : getGroupSet()) { + String inName = inNames.get(group); + implementor.addBucket(inName); + } + } + + private Pair toSolrMetric(Implementor implementor, AggregateCall aggCall, List inNames) { + SqlAggFunction aggregation = aggCall.getAggregation(); + List args = aggCall.getArgList(); + switch (args.size()) { + case 0: + if 
(aggregation.equals(SqlStdOperatorTable.COUNT)) { + return new Pair<>(aggregation.getName(), "*"); + } + case 1: + String inName = inNames.get(args.get(0)); + String name = implementor.fieldMappings.getOrDefault(inName, inName); + if(SUPPORTED_AGGREGATIONS.contains(aggregation)) { + return new Pair<>(aggregation.getName(), name); + } + default: + throw new AssertionError("Invalid aggregation " + aggregation + " with args " + args + " with names" + inNames); + } + } +} + +// End SolrAggregate.java diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrEnumerator.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrEnumerator.java new file mode 100644 index 00000000000..6f9dddfbf2f --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrEnumerator.java @@ -0,0 +1,146 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.handler.sql; + +import org.apache.calcite.linq4j.Enumerator; +import org.apache.solr.client.solrj.io.Tuple; +import org.apache.solr.client.solrj.io.stream.TupleStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.lang.invoke.MethodHandles; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; + +/** Enumerator that reads from a Solr collection. */ +class SolrEnumerator implements Enumerator { + private static final Logger logger = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + private final TupleStream tupleStream; + private final List> fields; + private Tuple current; + private char sep = 31; + + /** Creates a SolrEnumerator. + * + * @param tupleStream Solr TupleStream + * @param fields Fields to get from each Tuple + */ + SolrEnumerator(TupleStream tupleStream, List> fields) { + this.tupleStream = tupleStream; + try { + this.tupleStream.open(); + } catch (IOException e) { + throw new RuntimeException(e); + } + this.fields = fields; + this.current = null; + } + + /** Produce the next row from the results + * + * @return A new row from the results + */ + public Object current() { + if (fields.size() == 1) { + return this.getter(current, fields.get(0)); + } else { + // Build an array with all fields in this row + Object[] row = new Object[fields.size()]; + for (int i = 0; i < fields.size(); i++) { + row[i] = this.getter(current, fields.get(i)); + } + + return row; + } + } + + private Object getter(Tuple tuple, Map.Entry field) { + Object val = tuple.get(field.getKey()); + + if(val == null) { + return null; + } + + Class clazz = field.getValue(); + if(clazz.equals(Long.class)) { + if(val instanceof Double) { + return this.getRealVal(val); + } + return val; + } + + if(val instanceof ArrayList) { + ArrayList arrayList = (ArrayList) val; + StringBuilder buf = new StringBuilder(); + + for(Object o : arrayList) { + buf.append(sep); + 
buf.append(o.toString()); + } + val = buf.toString(); + } + + return val; + } + + private Object getRealVal(Object val) { + // Check if Double is really a Long + if(val instanceof Double) { + Double doubleVal = (double) val; + //make sure that double has no decimals and fits within Long + if(doubleVal % 1 == 0 && doubleVal >= Long.MIN_VALUE && doubleVal <= Long.MAX_VALUE) { + return doubleVal.longValue(); + } + return doubleVal; + } + + // Wasn't a double so just return original Object + return val; + } + + public boolean moveNext() { + try { + Tuple tuple = this.tupleStream.read(); + if (tuple.EOF) { + return false; + } else { + current = tuple; + return true; + } + } catch (IOException e) { + logger.error("IOException", e); + return false; + } + } + + public void reset() { + throw new UnsupportedOperationException(); + } + + public void close() { + if(this.tupleStream != null) { + try { + this.tupleStream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrFilter.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrFilter.java new file mode 100644 index 00000000000..ce12aece907 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrFilter.java @@ -0,0 +1,382 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.handler.sql; + +import org.apache.calcite.plan.*; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.Filter; +import org.apache.calcite.rel.metadata.RelMetadataQuery; +import org.apache.calcite.rex.RexCall; +import org.apache.calcite.rex.RexInputRef; +import org.apache.calcite.rex.RexLiteral; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.util.Pair; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * Implementation of a {@link org.apache.calcite.rel.core.Filter} relational expression in Solr. + */ +class SolrFilter extends Filter implements SolrRel { + SolrFilter( + RelOptCluster cluster, + RelTraitSet traitSet, + RelNode child, + RexNode condition) { + super(cluster, traitSet, child, condition); + assert getConvention() == SolrRel.CONVENTION; + assert getConvention() == child.getConvention(); + } + + @Override public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { + return super.computeSelfCost(planner, mq).multiplyBy(0.1); + } + + public SolrFilter copy(RelTraitSet traitSet, RelNode input, RexNode condition) { + return new SolrFilter(getCluster(), traitSet, input, condition); + } + + public void implement(Implementor implementor) { + implementor.visitChild(0, getInput()); + if(getInput() instanceof SolrAggregate) { + HavingTranslator translator = new HavingTranslator(SolrRules.solrFieldNames(getRowType()), implementor.reverseAggMappings); + String havingPredicate = translator.translateMatch(condition); + implementor.setHavingPredicate(havingPredicate); + } else { + Translator translator = new Translator(SolrRules.solrFieldNames(getRowType())); + String query = translator.translateMatch(condition); + implementor.addQuery(query); + implementor.setNegativeQuery(translator.negativeQuery); 
+ } + } + + private static class Translator { + + private final List fieldNames; + public boolean negativeQuery = true; + + Translator(List fieldNames) { + this.fieldNames = fieldNames; + } + + private String translateMatch(RexNode condition) { + if (condition.getKind().belongsTo(SqlKind.COMPARISON)) { + return translateComparison(condition); + } else if (condition.isA(SqlKind.AND)) { + return "(" + translateAnd(condition) + ")"; + } else if (condition.isA(SqlKind.OR)) { + return "(" + translateOr(condition) + ")"; + } else { + return null; + } + } + + private String translateOr(RexNode condition) { + List ors = new ArrayList<>(); + for (RexNode node : RelOptUtil.disjunctions(condition)) { + ors.add(translateMatch(node)); + } + return String.join(" OR ", ors); + } + + private String translateAnd(RexNode node0) { + List andStrings = new ArrayList(); + List notStrings = new ArrayList(); + + List ands = new ArrayList(); + List nots = new ArrayList(); + RelOptUtil.decomposeConjunction(node0, ands, nots); + + + for (RexNode node : ands) { + andStrings.add(translateMatch(node)); + } + + String andString = String.join(" AND ", andStrings); + + if (nots.size() > 0) { + for (RexNode node : nots) { + notStrings.add(translateMatch(node)); + } + String notString = String.join(" NOT ", notStrings); + return "(" + andString + ") NOT (" + notString + ")"; + } else { + return andString; + } + } + + private String translateComparison(RexNode node) { + Pair binaryTranslated = null; + if (((RexCall) node).getOperands().size() == 2) { + binaryTranslated = translateBinary((RexCall) node); + } + + switch (node.getKind()) { + case NOT: + return "-" + translateComparison(((RexCall) node).getOperands().get(0)); + case EQUALS: + String terms = binaryTranslated.getValue().toString().trim(); + terms = terms.replace("'",""); + if (!terms.startsWith("(") && !terms.startsWith("[") && !terms.startsWith("{")) { + terms = "\"" + terms + "\""; + } + + String clause = binaryTranslated.getKey() + ":" 
+ terms; + this.negativeQuery = false; + return clause; + case NOT_EQUALS: + return "-(" + binaryTranslated.getKey() + ":" + binaryTranslated.getValue() + ")"; + case LESS_THAN: + this.negativeQuery = false; + return "(" + binaryTranslated.getKey() + ": [ * TO " + binaryTranslated.getValue() + " })"; + case LESS_THAN_OR_EQUAL: + this.negativeQuery = false; + return "(" + binaryTranslated.getKey() + ": [ * TO " + binaryTranslated.getValue() + " ])"; + case GREATER_THAN: + this.negativeQuery = false; + return "(" + binaryTranslated.getKey() + ": { " + binaryTranslated.getValue() + " TO * ])"; + case GREATER_THAN_OR_EQUAL: + this.negativeQuery = false; + return "(" + binaryTranslated.getKey() + ": [ " + binaryTranslated.getValue() + " TO * ])"; + default: + throw new AssertionError("cannot translate " + node); + } + } + + /** + * Translates a call to a binary operator, reversing arguments if necessary. + */ + private Pair translateBinary(RexCall call) { + List operands = call.getOperands(); + if (operands.size() != 2) { + throw new AssertionError("Invalid number of arguments - " + operands.size()); + } + final RexNode left = operands.get(0); + final RexNode right = operands.get(1); + final Pair a = translateBinary2(left, right); + if (a != null) { + return a; + } + final Pair b = translateBinary2(right, left); + if (b != null) { + return b; + } + throw new AssertionError("cannot translate call " + call); + } + + /** + * Translates a call to a binary operator. Returns whether successful. 
+ */ + private Pair translateBinary2(RexNode left, RexNode right) { + switch (right.getKind()) { + case LITERAL: + break; + default: + return null; + } + final RexLiteral rightLiteral = (RexLiteral) right; + switch (left.getKind()) { + case INPUT_REF: + final RexInputRef left1 = (RexInputRef) left; + String name = fieldNames.get(left1.getIndex()); + return new Pair<>(name, rightLiteral); + case CAST: + return translateBinary2(((RexCall) left).operands.get(0), right); +// case OTHER_FUNCTION: +// String itemName = SolrRules.isItem((RexCall) left); +// if (itemName != null) { +// return translateOp2(op, itemName, rightLiteral); +// } + default: + return null; + } + } + } + + private static class HavingTranslator { + + private final List fieldNames; + private Map reverseAggMappings; + + HavingTranslator(List fieldNames, Map reverseAggMappings) { + this.fieldNames = fieldNames; + this.reverseAggMappings = reverseAggMappings; + } + + private String translateMatch(RexNode condition) { + if (condition.getKind().belongsTo(SqlKind.COMPARISON)) { + return translateComparison(condition); + } else if (condition.isA(SqlKind.AND)) { + return translateAnd(condition); + } else if (condition.isA(SqlKind.OR)) { + return translateOr(condition); + } else { + return null; + } + } + + private String translateOr(RexNode condition) { + List ors = new ArrayList<>(); + for (RexNode node : RelOptUtil.disjunctions(condition)) { + ors.add(translateMatch(node)); + } + StringBuilder builder = new StringBuilder(); + + builder.append("or("); + int i = 0; + for (i = 0; i < ors.size(); i++) { + if (i > 0) { + builder.append(","); + } + + builder.append(ors.get(i)); + } + builder.append(")"); + return builder.toString(); + } + + private String translateAnd(RexNode node0) { + List andStrings = new ArrayList(); + List notStrings = new ArrayList(); + + List ands = new ArrayList(); + List nots = new ArrayList(); + + RelOptUtil.decomposeConjunction(node0, ands, nots); + + for (RexNode node : ands) { + 
andStrings.add(translateMatch(node)); + } + + StringBuilder builder = new StringBuilder(); + + builder.append("and("); + for (int i = 0; i < andStrings.size(); i++) { + if (i > 0) { + builder.append(","); + } + + builder.append(andStrings.get(i)); + } + builder.append(")"); + + + if (nots.size() > 0) { + for (RexNode node : nots) { + notStrings.add(translateMatch(node)); + } + + StringBuilder notBuilder = new StringBuilder(); + for(int i=0; i< notStrings.size(); i++) { + if(i > 0) { + notBuilder.append(","); + } + notBuilder.append("not("); + notBuilder.append(notStrings.get(i)); + notBuilder.append(")"); + } + + return "and(" + builder.toString() + ","+ notBuilder.toString()+")"; + } else { + return builder.toString(); + } + } + + private String translateComparison(RexNode node) { + Pair binaryTranslated = null; + if (((RexCall) node).getOperands().size() == 2) { + binaryTranslated = translateBinary((RexCall) node); + } + + switch (node.getKind()) { + case EQUALS: + String terms = binaryTranslated.getValue().toString().trim(); + String clause = "eq(" + binaryTranslated.getKey() + "," + terms + ")"; + return clause; + case NOT_EQUALS: + return "not(eq(" + binaryTranslated.getKey() + "," + binaryTranslated.getValue() + "))"; + case LESS_THAN: + return "lt(" + binaryTranslated.getKey() + "," + binaryTranslated.getValue() + ")"; + case LESS_THAN_OR_EQUAL: + return "lteq(" + binaryTranslated.getKey() + "," + binaryTranslated.getValue() + ")"; + case GREATER_THAN: + return "gt(" + binaryTranslated.getKey() + "," + binaryTranslated.getValue() + ")"; + case GREATER_THAN_OR_EQUAL: + return "gteq(" + binaryTranslated.getKey() + "," + binaryTranslated.getValue() + ")"; + default: + throw new AssertionError("cannot translate " + node); + } + } + + /** + * Translates a call to a binary operator, reversing arguments if necessary. 
+ */ + private Pair translateBinary(RexCall call) { + List operands = call.getOperands(); + if (operands.size() != 2) { + throw new AssertionError("Invalid number of arguments - " + operands.size()); + } + final RexNode left = operands.get(0); + final RexNode right = operands.get(1); + final Pair a = translateBinary2(left, right); + + if (a != null) { + if(reverseAggMappings.containsKey(a.getKey())) { + return new Pair(reverseAggMappings.get(a.getKey()),a.getValue()); + } + return a; + } + final Pair b = translateBinary2(right, left); + if (b != null) { + return b; + } + throw new AssertionError("cannot translate call " + call); + } + + /** + * Translates a call to a binary operator. Returns whether successful. + */ + private Pair translateBinary2(RexNode left, RexNode right) { + switch (right.getKind()) { + case LITERAL: + break; + default: + return null; + } + + final RexLiteral rightLiteral = (RexLiteral) right; + switch (left.getKind()) { + case INPUT_REF: + final RexInputRef left1 = (RexInputRef) left; + String name = fieldNames.get(left1.getIndex()); + return new Pair<>(name, rightLiteral); + case CAST: + return translateBinary2(((RexCall) left).operands.get(0), right); +// case OTHER_FUNCTION: +// String itemName = SolrRules.isItem((RexCall) left); +// if (itemName != null) { +// return translateOp2(op, itemName, rightLiteral); +// } + default: + return null; + } + } + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrMethod.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrMethod.java new file mode 100644 index 00000000000..b0bf80140b3 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrMethod.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.handler.sql; + +import org.apache.calcite.linq4j.tree.Types; + +import java.lang.reflect.Method; +import java.util.List; + +/** + * Builtin methods in the Solr adapter. + */ +enum SolrMethod { + SOLR_QUERYABLE_QUERY(SolrTable.SolrQueryable.class, + "query", + List.class, + String.class, + List.class, + List.class, + List.class, + String.class, + String.class, + String.class); + + public final Method method; + + SolrMethod(Class clazz, String methodName, Class... argumentTypes) { + this.method = Types.lookupMethod(clazz, methodName, argumentTypes); + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrProject.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrProject.java new file mode 100644 index 00000000000..c4217f27e38 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrProject.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.handler.sql; + +import org.apache.calcite.adapter.java.JavaTypeFactory; +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptCost; +import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.plan.RelTraitSet; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.Project; +import org.apache.calcite.rel.metadata.RelMetadataQuery; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.util.Pair; + +import java.util.List; + +/** + * Implementation of {@link org.apache.calcite.rel.core.Project} relational expression in Solr. 
+ */ +class SolrProject extends Project implements SolrRel { + SolrProject(RelOptCluster cluster, RelTraitSet traitSet, + RelNode input, List projects, RelDataType rowType) { + super(cluster, traitSet, input, projects, rowType); + assert getConvention() == SolrRel.CONVENTION; + assert getConvention() == input.getConvention(); + } + + @Override + public Project copy(RelTraitSet traitSet, RelNode input, List projects, RelDataType rowType) { + return new SolrProject(getCluster(), traitSet, input, projects, rowType); + } + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { + return super.computeSelfCost(planner, mq).multiplyBy(0.1); + } + + public void implement(Implementor implementor) { + implementor.visitChild(0, getInput()); + final SolrRules.RexToSolrTranslator translator = new SolrRules.RexToSolrTranslator( + (JavaTypeFactory) getCluster().getTypeFactory(), SolrRules.solrFieldNames(getInput().getRowType())); + for (Pair pair : getNamedProjects()) { + final String name = pair.right; + final String expr = pair.left.accept(translator); + implementor.addFieldMapping(name, expr); + } + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrRel.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrRel.java new file mode 100644 index 00000000000..557cfe063d6 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrRel.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.handler.sql; + +import org.apache.calcite.plan.Convention; +import org.apache.calcite.plan.RelOptTable; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.util.Pair; +import org.apache.solr.client.solrj.io.ops.BooleanOperation; + +import java.util.*; + +/** + * Relational expression that uses Solr calling convention. + */ +interface SolrRel extends RelNode { + void implement(Implementor implementor); + + /** Calling convention for relational operations that occur in Solr. */ + Convention CONVENTION = new Convention.Impl("Solr", SolrRel.class); + + /** Callback for the implementation process that converts a tree of {@link SolrRel} nodes into a Solr query. 
*/ + class Implementor { + final Map fieldMappings = new HashMap<>(); + final Map reverseAggMappings = new HashMap<>(); + String query = null; + String havingPredicate; + boolean negativeQuery; + String limitValue = null; + final List> orders = new ArrayList<>(); + final List buckets = new ArrayList<>(); + final List> metricPairs = new ArrayList<>(); + + RelOptTable table; + SolrTable solrTable; + + void addFieldMapping(String key, String val) { + if(key != null && !fieldMappings.containsKey(key)) { + this.fieldMappings.put(key, val); + } + } + + void addReverseAggMapping(String key, String val) { + if(key != null && !reverseAggMappings.containsKey(key)) { + this.reverseAggMappings.put(key, val); + } + } + + void addQuery(String query) { + this.query = query; + } + + void setNegativeQuery(boolean negativeQuery) { + this.negativeQuery = negativeQuery; + } + + void addOrder(String column, String direction) { + column = this.fieldMappings.getOrDefault(column, column); + this.orders.add(new Pair<>(column, direction)); + } + + void addBucket(String bucket) { + bucket = this.fieldMappings.getOrDefault(bucket, bucket); + this.buckets.add(bucket); + } + + void addMetricPair(String outName, String metric, String column) { + column = this.fieldMappings.getOrDefault(column, column); + this.metricPairs.add(new Pair<>(metric, column)); + + String metricIdentifier = metric.toLowerCase(Locale.ROOT) + "(" + column + ")"; + if(outName != null) { + this.addFieldMapping(outName, metricIdentifier); + } + } + + void setHavingPredicate(String havingPredicate) { + this.havingPredicate = havingPredicate; + } + + + void setLimit(String limit) { + limitValue = limit; + } + + void visitChild(int ordinal, RelNode input) { + assert ordinal == 0; + ((SolrRel) input).implement(this); + } + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrRules.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrRules.java new file mode 100644 index 00000000000..4cbadda5494 --- /dev/null 
+++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrRules.java @@ -0,0 +1,234 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.handler.sql; + +import org.apache.calcite.adapter.java.JavaTypeFactory; +import org.apache.calcite.plan.*; +import org.apache.calcite.rel.RelCollations; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.convert.ConverterRule; +import org.apache.calcite.rel.core.Sort; +import org.apache.calcite.rel.logical.LogicalAggregate; +import org.apache.calcite.rel.logical.LogicalFilter; +import org.apache.calcite.rel.logical.LogicalProject; +import org.apache.calcite.rel.logical.LogicalSort; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rex.RexCall; +import org.apache.calcite.rex.RexInputRef; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.rex.RexVisitorImpl; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.validate.SqlValidatorUtil; + +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Predicate; + +/** + * Rules and relational operators for + * {@link SolrRel#CONVENTION} + * calling convention. 
+ */ +class SolrRules { + static final RelOptRule[] RULES = { + SolrSortRule.SORT_RULE, + SolrFilterRule.FILTER_RULE, + SolrProjectRule.PROJECT_RULE, + SolrAggregateRule.AGGREGATE_RULE, + }; + + static List solrFieldNames(final RelDataType rowType) { + return SqlValidatorUtil.uniquify( + new AbstractList() { + @Override + public String get(int index) { + return rowType.getFieldList().get(index).getName(); + } + + @Override + public int size() { + return rowType.getFieldCount(); + } + }, true); + } + + /** Translator from {@link RexNode} to strings in Solr's expression language. */ + static class RexToSolrTranslator extends RexVisitorImpl { + private final JavaTypeFactory typeFactory; + private final List inFields; + + RexToSolrTranslator(JavaTypeFactory typeFactory, List inFields) { + super(true); + this.typeFactory = typeFactory; + this.inFields = inFields; + } + + @Override + public String visitInputRef(RexInputRef inputRef) { + return inFields.get(inputRef.getIndex()); + } + + @Override + public String visitCall(RexCall call) { + final List strings = visitList(call.operands); + if (call.getKind() == SqlKind.CAST) { + return strings.get(0); + } + + return super.visitCall(call); + } + + private List visitList(List list) { + final List strings = new ArrayList<>(); + for (RexNode node : list) { + strings.add(node.accept(this)); + } + return strings; + } + } + + /** Base class for planner rules that convert a relational expression to Solr calling convention. */ + abstract static class SolrConverterRule extends ConverterRule { + final Convention out = SolrRel.CONVENTION; + + SolrConverterRule(Class clazz, String description) { + this(clazz, relNode -> true, description); + } + + SolrConverterRule(Class clazz, Predicate predicate, String description) { + super(clazz, Convention.NONE, SolrRel.CONVENTION, description); + } + } + + /** + * Rule to convert a {@link LogicalFilter} to a {@link SolrFilter}. 
+ */ + private static class SolrFilterRule extends SolrConverterRule { + private static boolean isNotFilterByExpr(List rexNodes, List fieldNames) { + + // We dont have a way to filter by result of aggregator now + boolean result = true; + + for (RexNode rexNode : rexNodes) { + if (rexNode instanceof RexCall) { + result = result && isNotFilterByExpr(((RexCall) rexNode).getOperands(), fieldNames); + } else if (rexNode instanceof RexInputRef) { + result = result && !fieldNames.get(((RexInputRef) rexNode).getIndex()).startsWith("EXPR$"); + } + } + return result; + } + + private static final Predicate FILTER_PREDICATE = relNode -> { + List filterOperands = ((RexCall) ((LogicalFilter) relNode).getCondition()).getOperands(); + return isNotFilterByExpr(filterOperands, SolrRules.solrFieldNames(relNode.getRowType())); + }; + + private static final SolrFilterRule FILTER_RULE = new SolrFilterRule(); + + private SolrFilterRule() { + super(LogicalFilter.class, FILTER_PREDICATE, "SolrFilterRule"); + } + + public RelNode convert(RelNode rel) { + final LogicalFilter filter = (LogicalFilter) rel; + final RelTraitSet traitSet = filter.getTraitSet().replace(out); + return new SolrFilter( + rel.getCluster(), + traitSet, + convert(filter.getInput(), out), + filter.getCondition()); + } + } + + /** + * Rule to convert a {@link LogicalProject} to a {@link SolrProject}. 
+ */ + private static class SolrProjectRule extends SolrConverterRule { + private static final SolrProjectRule PROJECT_RULE = new SolrProjectRule(); + + private SolrProjectRule() { + super(LogicalProject.class, "SolrProjectRule"); + } + + public RelNode convert(RelNode rel) { + final LogicalProject project = (LogicalProject) rel; + final RelNode converted = convert(project.getInput(), out); + final RelTraitSet traitSet = project.getTraitSet().replace(out); + return new SolrProject( + rel.getCluster(), + traitSet, + converted, + project.getProjects(), + project.getRowType()); + } + } + + /** + * Rule to convert a {@link LogicalSort} to a {@link SolrSort}. + */ + private static class SolrSortRule extends SolrConverterRule { + static final SolrSortRule SORT_RULE = new SolrSortRule(LogicalSort.class, "SolrSortRule"); + + SolrSortRule(Class clazz, String description) { + super(clazz, description); + } + + @Override + public RelNode convert(RelNode rel) { + final Sort sort = (Sort) rel; + final RelTraitSet traitSet = sort.getTraitSet().replace(out).replace(sort.getCollation()); + return new SolrSort( + rel.getCluster(), + traitSet, + convert(sort.getInput(), traitSet.replace(RelCollations.EMPTY)), + sort.getCollation(), + sort.offset, + sort.fetch); + } + } + + /** + * Rule to convert an {@link LogicalAggregate} to an {@link SolrAggregate}. 
+ */ + private static class SolrAggregateRule extends SolrConverterRule { +// private static final Predicate AGGREGATE_PREDICTE = relNode -> +// Aggregate.IS_SIMPLE.apply(((LogicalAggregate)relNode));// && +// !((LogicalAggregate)relNode).containsDistinctCall(); + + private static final RelOptRule AGGREGATE_RULE = new SolrAggregateRule(); + + private SolrAggregateRule() { + super(LogicalAggregate.class, "SolrAggregateRule"); + } + + @Override + public RelNode convert(RelNode rel) { + final LogicalAggregate agg = (LogicalAggregate) rel; + final RelTraitSet traitSet = agg.getTraitSet().replace(out); + return new SolrAggregate( + rel.getCluster(), + traitSet, + convert(agg.getInput(), traitSet.simplify()), + agg.indicator, + agg.getGroupSet(), + agg.getGroupSets(), + agg.getAggCallList()); + } + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrSchema.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrSchema.java new file mode 100644 index 00000000000..83fa5379f41 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrSchema.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.handler.sql; + +import com.google.common.collect.ImmutableMap; +import org.apache.calcite.rel.type.*; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.impl.AbstractSchema; +import org.apache.calcite.sql.type.SqlTypeFactoryImpl; +import org.apache.solr.client.solrj.SolrServerException; +import org.apache.solr.client.solrj.impl.CloudSolrClient; +import org.apache.solr.client.solrj.request.LukeRequest; +import org.apache.solr.client.solrj.response.LukeResponse; +import org.apache.solr.common.cloud.Aliases; +import org.apache.solr.common.cloud.ClusterState; +import org.apache.solr.common.cloud.ZkStateReader; +import org.apache.solr.common.luke.FieldFlag; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Map; +import java.util.Properties; + +class SolrSchema extends AbstractSchema { + final Properties properties; + + SolrSchema(Properties properties) { + super(); + this.properties = properties; + } + + @Override + protected Map getTableMap() { + String zk = this.properties.getProperty("zk"); + try(CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder().withZkHost(zk).build()) { + cloudSolrClient.connect(); + ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader(); + ClusterState clusterState = zkStateReader.getClusterState(); + + final ImmutableMap.Builder builder = ImmutableMap.builder(); + + for (String collection : clusterState.getCollectionsMap().keySet()) { + builder.put(collection, new SolrTable(this, collection)); + } + + Aliases aliases = zkStateReader.getAliases(); + if(aliases.collectionAliasSize() > 0) { + for (Map.Entry alias : aliases.getCollectionAliasMap().entrySet()) { + builder.put(alias.getKey(), new SolrTable(this, alias.getValue())); + } + } + + return builder.build(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private Map getFieldInfo(String collection) { + String zk = this.properties.getProperty("zk"); + 
try(CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder().withZkHost(zk).build()) { + cloudSolrClient.connect(); + LukeRequest lukeRequest = new LukeRequest(); + lukeRequest.setNumTerms(0); + LukeResponse lukeResponse = lukeRequest.process(cloudSolrClient, collection); + return lukeResponse.getFieldInfo(); + } catch (SolrServerException | IOException e) { + throw new RuntimeException(e); + } + } + + RelProtoDataType getRelDataType(String collection) { + // Temporary type factory, just for the duration of this method. Allowable + // because we're creating a proto-type, not a type; before being used, the + // proto-type will be copied into a real type factory. + final RelDataTypeFactory typeFactory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT); + final RelDataTypeFactory.FieldInfoBuilder fieldInfo = typeFactory.builder(); + Map luceneFieldInfoMap = getFieldInfo(collection); + + for(Map.Entry entry : luceneFieldInfoMap.entrySet()) { + LukeResponse.FieldInfo luceneFieldInfo = entry.getValue(); + + RelDataType type; + switch (luceneFieldInfo.getType()) { + case "string": + type = typeFactory.createJavaType(String.class); + break; + case "int": + case "long": + type = typeFactory.createJavaType(Long.class); + break; + case "float": + case "double": + type = typeFactory.createJavaType(Double.class); + break; + default: + type = typeFactory.createJavaType(String.class); + } + + EnumSet flags = luceneFieldInfo.parseFlags(luceneFieldInfo.getSchema()); + /* + if(flags != null && flags.contains(FieldFlag.MULTI_VALUED)) { + type = typeFactory.createArrayType(type, -1); + } + */ + + fieldInfo.add(entry.getKey(), type).nullable(true); + } + fieldInfo.add("_query_",typeFactory.createJavaType(String.class)); + fieldInfo.add("score",typeFactory.createJavaType(Double.class)); + + return RelDataTypeImpl.proto(fieldInfo.build()); + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrSort.java 
b/solr/core/src/java/org/apache/solr/handler/sql/SolrSort.java new file mode 100644 index 00000000000..1c5274a2ca6 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrSort.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.handler.sql; + +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptCost; +import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.plan.RelTraitSet; +import org.apache.calcite.rel.RelCollation; +import org.apache.calcite.rel.RelFieldCollation; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.Sort; +import org.apache.calcite.rel.metadata.RelMetadataQuery; +import org.apache.calcite.rel.type.RelDataTypeField; +import org.apache.calcite.rex.RexLiteral; +import org.apache.calcite.rex.RexNode; + +import java.util.List; + +/** + * Implementation of {@link org.apache.calcite.rel.core.Sort} relational expression in Solr. 
+ */ +class SolrSort extends Sort implements SolrRel { + + SolrSort(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, RelCollation collation, RexNode offset, + RexNode fetch) { + super(cluster, traitSet, child, collation, offset, fetch); + + assert getConvention() == SolrRel.CONVENTION; + assert getConvention() == child.getConvention(); + } + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { + return planner.getCostFactory().makeZeroCost(); + } + + @Override + public Sort copy(RelTraitSet traitSet, RelNode input, RelCollation newCollation, RexNode offset, RexNode fetch) { + return new SolrSort(getCluster(), traitSet, input, collation, offset, fetch); + } + + public void implement(Implementor implementor) { + implementor.visitChild(0, getInput()); + + List sortCollations = collation.getFieldCollations(); + if (!sortCollations.isEmpty()) { + // Construct a series of order clauses from the desired collation + final List fields = getRowType().getFieldList(); + for (RelFieldCollation fieldCollation : sortCollations) { + final String name = fields.get(fieldCollation.getFieldIndex()).getName(); + String direction = "asc"; + if (fieldCollation.getDirection().equals(RelFieldCollation.Direction.DESCENDING)) { + direction = "desc"; + } + implementor.addOrder(name, direction); + } + } + + + if(fetch != null) { + implementor.setLimit(((RexLiteral) fetch).getValue().toString()); + } + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrTable.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrTable.java new file mode 100644 index 00000000000..6784323933b --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrTable.java @@ -0,0 +1,842 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.handler.sql; + +import org.apache.calcite.adapter.java.AbstractQueryableTable; +import org.apache.calcite.linq4j.*; +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptTable; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.rel.type.RelProtoDataType; +import org.apache.calcite.schema.SchemaPlus; +import org.apache.calcite.schema.TranslatableTable; +import org.apache.calcite.schema.impl.AbstractTableQueryable; +import org.apache.calcite.util.Pair; +import org.apache.solr.client.solrj.io.comp.ComparatorOrder; +import org.apache.solr.client.solrj.io.comp.FieldComparator; +import org.apache.solr.client.solrj.io.comp.MultipleFieldComparator; +import org.apache.solr.client.solrj.io.comp.StreamComparator; +import org.apache.solr.client.solrj.io.eq.FieldEqualitor; +import org.apache.solr.client.solrj.io.eq.MultipleFieldEqualitor; +import org.apache.solr.client.solrj.io.eq.StreamEqualitor; +import org.apache.solr.client.solrj.io.ops.AndOperation; +import org.apache.solr.client.solrj.io.ops.BooleanOperation; +import org.apache.solr.client.solrj.io.ops.EqualsOperation; +import org.apache.solr.client.solrj.io.ops.GreaterThanEqualToOperation; +import org.apache.solr.client.solrj.io.ops.GreaterThanOperation; +import 
org.apache.solr.client.solrj.io.ops.LessThanEqualToOperation; +import org.apache.solr.client.solrj.io.ops.LessThanOperation; +import org.apache.solr.client.solrj.io.ops.NotOperation; +import org.apache.solr.client.solrj.io.ops.OrOperation; +import org.apache.solr.client.solrj.io.stream.*; +import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParser; +import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; +import org.apache.solr.client.solrj.io.stream.metrics.*; +import org.apache.solr.common.params.CommonParams; +import org.apache.solr.common.params.ModifiableSolrParams; + +import java.io.IOException; +import java.util.*; +import java.util.stream.Collectors; + +/** + * Table based on a Solr collection + */ +class SolrTable extends AbstractQueryableTable implements TranslatableTable { + private static final String DEFAULT_QUERY = "*:*"; + private static final String DEFAULT_VERSION_FIELD = "_version_"; + + private final String collection; + private final SolrSchema schema; + private RelProtoDataType protoRowType; + + SolrTable(SolrSchema schema, String collection) { + super(Object[].class); + this.schema = schema; + this.collection = collection; + } + + public String toString() { + return "SolrTable {" + collection + "}"; + } + + public RelDataType getRowType(RelDataTypeFactory typeFactory) { + if (protoRowType == null) { + protoRowType = schema.getRelDataType(collection); + } + return protoRowType.apply(typeFactory); + } + + private Enumerable query(final Properties properties) { + return query(properties, Collections.emptyList(), null, Collections.emptyList(), Collections.emptyList(), + Collections.emptyList(), null, null, null); + } + + /** Executes a Solr query on the underlying table. 
+ * + * @param properties Connection properties + * @param fields List of fields to project + * @param query A string for the query + * @return Enumerable of results + */ + private Enumerable query(final Properties properties, + final List> fields, + final String query, + final List> orders, + final List buckets, + final List> metricPairs, + final String limit, + final String negativeQuery, + final String havingPredicate) { + // SolrParams should be a ModifiableParams instead of a map + boolean mapReduce = "map_reduce".equals(properties.getProperty("aggregationMode")); + boolean negative = Boolean.parseBoolean(negativeQuery); + + String q = null; + + if (query == null) { + q = DEFAULT_QUERY; + } else { + if(negative) { + q = DEFAULT_QUERY + " AND " + query; + } else { + q = query; + } + } + + TupleStream tupleStream; + String zk = properties.getProperty("zk"); + try { + if (metricPairs.isEmpty() && buckets.isEmpty()) { + tupleStream = handleSelect(zk, collection, q, fields, orders, limit); + } else { + if(buckets.isEmpty()) { + tupleStream = handleStats(zk, collection, q, metricPairs); + } else { + if(mapReduce) { + tupleStream = handleGroupByMapReduce(zk, + collection, + properties, + fields, + q, + orders, + buckets, + metricPairs, + limit, + havingPredicate); + } else { + tupleStream = handleGroupByFacet(zk, + collection, + fields, + q, + orders, + buckets, + metricPairs, + limit, + havingPredicate); + } + } + } + } catch (IOException e) { + throw new RuntimeException(e); + } + + final TupleStream finalStream = tupleStream; + + return new AbstractEnumerable() { + // Use original fields list to make sure only the fields specified are enumerated + public Enumerator enumerator() { + return new SolrEnumerator(finalStream, fields); + } + }; + } + + private static StreamComparator bucketSortComp(List buckets, Map dirs) { + FieldComparator[] comps = new FieldComparator[buckets.size()]; + for(int i=0; i order) { + String direction = order.getValue(); + return direction 
== null ? "asc" : direction; + } + + private StreamComparator getComp(List> orders) { + FieldComparator[] comps = new FieldComparator[orders.size()]; + for(int i = 0; i < orders.size(); i++) { + Map.Entry order = orders.get(i); + String direction = getSortDirection(order); + ComparatorOrder comparatorOrder = ComparatorOrder.fromString(direction); + String sortKey = order.getKey(); + comps[i] = new FieldComparator(sortKey, comparatorOrder); + } + + if(comps.length == 1) { + return comps[0]; + } else { + return new MultipleFieldComparator(comps); + } + } + + private List buildMetrics(List> metricPairs, boolean ifEmptyCount) { + List metrics = new ArrayList<>(metricPairs.size()); + metrics.addAll(metricPairs.stream().map(this::getMetric).collect(Collectors.toList())); + if(metrics.size() == 0 && ifEmptyCount) { + metrics.add(new CountMetric()); + } + return metrics; + } + + private Metric getMetric(Pair metricPair) { + switch (metricPair.getKey()) { + case "COUNT": + return new CountMetric(metricPair.getValue()); + case "SUM": + case "$SUM0": + return new SumMetric(metricPair.getValue()); + case "MIN": + return new MinMetric(metricPair.getValue()); + case "MAX": + return new MaxMetric(metricPair.getValue()); + case "AVG": + return new MeanMetric(metricPair.getValue()); + default: + throw new IllegalArgumentException(metricPair.getKey()); + } + } + + private TupleStream handleSelect(String zk, + String collection, + String query, + List> fields, + List> orders, + String limit) throws IOException { + + ModifiableSolrParams params = new ModifiableSolrParams(); + params.add(CommonParams.Q, query); + + //Validate the fields + for(Map.Entry entry : fields) { + String fname = entry.getKey(); + if(limit == null && "score".equals(fname)) { + throw new IOException("score is not a valid field for unlimited queries."); + } + + if(fname.contains("*")) { + throw new IOException("* is not supported for column selection."); + } + } + + String fl = getFields(fields); + + 
if(orders.size() > 0) { + params.add(CommonParams.SORT, getSort(orders)); + } else { + if(limit == null) { + params.add(CommonParams.SORT, "_version_ desc"); + fl = fl+",_version_"; + } else { + params.add(CommonParams.SORT, "score desc"); + if(fl.indexOf("score") == -1) { + fl = fl + ",score"; + } + } + } + + params.add(CommonParams.FL, fl); + + if (limit != null) { + params.add(CommonParams.ROWS, limit); + return new LimitStream(new CloudSolrStream(zk, collection, params), Integer.parseInt(limit)); + } else { + params.add(CommonParams.QT, "/export"); + return new CloudSolrStream(zk, collection, params); + } + } + + private String getSort(List> orders) { + StringBuilder buf = new StringBuilder(); + for(Pair pair : orders) { + if(buf.length() > 0) { + buf.append(","); + } + buf.append(pair.getKey()).append(" ").append(pair.getValue()); + } + + return buf.toString(); + } + + private String getSingleSort(Pair order) { + StringBuilder buf = new StringBuilder(); + buf.append(order.getKey()).append(" ").append(order.getValue()); + return buf.toString(); + } + + private String getFields(List> fields) { + StringBuilder buf = new StringBuilder(); + for(Map.Entry field : fields) { + + if(buf.length() > 0) { + buf.append(","); + } + + buf.append(field.getKey()); + } + + return buf.toString(); + } + + private String getFields(Set fieldSet) { + StringBuilder buf = new StringBuilder(); + boolean appendVersion = true; + for(String field : fieldSet) { + + if(buf.length() > 0) { + buf.append(","); + } + + if(field.equals("_version_")) { + appendVersion = false; + } + + buf.append(field); + } + + if(appendVersion){ + buf.append(",_version_"); + } + + return buf.toString(); + } + + + private Set getFieldSet(Metric[] metrics, List> fields) { + HashSet set = new HashSet(); + for(Metric metric : metrics) { + for(String column : metric.getColumns()) { + set.add(column); + } + } + + for(Map.Entry field : fields) { + if(field.getKey().indexOf('(') == -1) { + set.add(field.getKey()); + } + 
} + + return set; + } + + private static String getSortDirection(List> orders) { + if(orders != null && orders.size() > 0) { + for(Pair item : orders) { + return item.getValue(); + } + } + + return "asc"; + } + + private static String bucketSort(Bucket[] buckets, String dir) { + StringBuilder buf = new StringBuilder(); + boolean comma = false; + for(Bucket bucket : buckets) { + if(comma) { + buf.append(","); + } + buf.append(bucket.toString()).append(" ").append(dir); + comma = true; + } + + return buf.toString(); + } + + private static String getPartitionKeys(Bucket[] buckets) { + StringBuilder buf = new StringBuilder(); + boolean comma = false; + for(Bucket bucket : buckets) { + if(comma) { + buf.append(","); + } + buf.append(bucket.toString()); + comma = true; + } + return buf.toString(); + } + + private static boolean sortsEqual(Bucket[] buckets, String direction, List> orders) { + + if(buckets.length != orders.size()) { + return false; + } + + for(int i=0; i< buckets.length; i++) { + Bucket bucket = buckets[i]; + Pair order = orders.get(i); + if(!bucket.toString().equals(order.getKey())) { + return false; + } + + if(!order.getValue().toLowerCase(Locale.ROOT).contains(direction.toLowerCase(Locale.ROOT))) { + return false; + } + } + + return true; + } + + private TupleStream handleGroupByMapReduce(String zk, + String collection, + Properties properties, + final List> fields, + final String query, + final List> orders, + final List _buckets, + final List> metricPairs, + final String limit, + final String havingPredicate) throws IOException { + + int numWorkers = Integer.parseInt(properties.getProperty("numWorkers", "1")); + + Bucket[] buckets = buildBuckets(_buckets, fields); + Metric[] metrics = buildMetrics(metricPairs, false).toArray(new Metric[0]); + + if(metrics.length == 0) { + return handleSelectDistinctMapReduce(zk, collection, properties, fields, query, orders, buckets, limit); + } + + Set fieldSet = getFieldSet(metrics, fields); + + if(metrics.length == 
0) { + throw new IOException("Group by queries must include atleast one aggregate function."); + } + + String fl = getFields(fieldSet); + String sortDirection = getSortDirection(orders); + String sort = bucketSort(buckets, sortDirection); + + ModifiableSolrParams params = new ModifiableSolrParams(); + + params.set(CommonParams.FL, fl); + params.set(CommonParams.Q, query); + //Always use the /export handler for Group By Queries because it requires exporting full result sets. + params.set(CommonParams.QT, "/export"); + + if(numWorkers > 1) { + params.set("partitionKeys", getPartitionKeys(buckets)); + } + + params.set("sort", sort); + + TupleStream tupleStream = null; + + CloudSolrStream cstream = new CloudSolrStream(zk, collection, params); + tupleStream = new RollupStream(cstream, buckets, metrics); + + StreamFactory factory = new StreamFactory() + .withFunctionName("search", CloudSolrStream.class) + .withFunctionName("parallel", ParallelStream.class) + .withFunctionName("rollup", RollupStream.class) + .withFunctionName("sum", SumMetric.class) + .withFunctionName("min", MinMetric.class) + .withFunctionName("max", MaxMetric.class) + .withFunctionName("avg", MeanMetric.class) + .withFunctionName("count", CountMetric.class) + .withFunctionName("and", AndOperation.class) + .withFunctionName("or", OrOperation.class) + .withFunctionName("not", NotOperation.class) + .withFunctionName("eq", EqualsOperation.class) + .withFunctionName("gt", GreaterThanOperation.class) + .withFunctionName("lt", LessThanOperation.class) + .withFunctionName("lteq", LessThanEqualToOperation.class) + .withFunctionName("having", HavingStream.class) + .withFunctionName("gteq", GreaterThanEqualToOperation.class); + + if(havingPredicate != null) { + BooleanOperation booleanOperation = (BooleanOperation)factory.constructOperation(StreamExpressionParser.parse(havingPredicate)); + tupleStream = new HavingStream(tupleStream, booleanOperation); + } + + if(numWorkers > 1) { + // Do the rollups in parallel + 
// Maintain the sort of the Tuples coming from the workers. + StreamComparator comp = bucketSortComp(buckets, sortDirection); + ParallelStream parallelStream = new ParallelStream(zk, collection, tupleStream, numWorkers, comp); + + + parallelStream.setStreamFactory(factory); + tupleStream = parallelStream; + } + + //TODO: Currently we are not pushing down the having clause. + // We need to push down the having clause to ensure that LIMIT does not cut off records prior to the having filter. + + if(orders != null && orders.size() > 0) { + if(!sortsEqual(buckets, sortDirection, orders)) { + int lim = (limit == null) ? 100 : Integer.parseInt(limit); + StreamComparator comp = getComp(orders); + //Rank the Tuples + //If parallel stream is used ALL the Rolled up tuples from the workers will be ranked + //Providing a true Top or Bottom. + tupleStream = new RankStream(tupleStream, lim, comp); + } else { + // Sort is the same as the underlying stream + // Only need to limit the result, not Rank the result + if(limit != null) { + tupleStream = new LimitStream(tupleStream, Integer.parseInt(limit)); + } + } + } else { + //No order by, check for limit + if(limit != null) { + tupleStream = new LimitStream(tupleStream, Integer.parseInt(limit)); + } + } + + return tupleStream; + } + + private Bucket[] buildBuckets(List buckets, List> fields) { + Bucket[] bucketsArray = new Bucket[buckets.size()]; + + int i=0; + for(Map.Entry field : fields) { + String fieldName = field.getKey(); + if(buckets.contains(fieldName)) { + bucketsArray[i++] = new Bucket(fieldName); + } + } + + return bucketsArray; + } + + private TupleStream handleGroupByFacet(String zkHost, + String collection, + final List> fields, + final String query, + final List> orders, + final List bucketFields, + final List> metricPairs, + final String lim, + final String havingPredicate) throws IOException { + + ModifiableSolrParams solrParams = new ModifiableSolrParams(); + solrParams.add(CommonParams.Q, query); + + 
Bucket[] buckets = buildBuckets(bucketFields, fields); + Metric[] metrics = buildMetrics(metricPairs, true).toArray(new Metric[0]); + if(metrics.length == 0) { + metrics = new Metric[1]; + metrics[0] = new CountMetric(); + } + + int limit = lim != null ? Integer.parseInt(lim) : 1000; + + FieldComparator[] sorts = null; + + if(orders == null || orders.size() == 0) { + sorts = new FieldComparator[buckets.length]; + for(int i=0; i> fields, + final String query, + final List> orders, + final Bucket[] buckets, + final String limit) throws IOException{ + + int numWorkers = Integer.parseInt(properties.getProperty("numWorkers", "1")); + + String fl = getFields(fields); + + String sort = null; + StreamEqualitor ecomp = null; + StreamComparator comp = null; + + if(orders != null && orders.size() > 0) { + StreamComparator[] adjustedSorts = adjustSorts(orders, buckets); + // Because of the way adjustSorts works we know that each FieldComparator has a single + // field name. For this reason we can just look at the leftFieldName + FieldEqualitor[] fieldEqualitors = new FieldEqualitor[adjustedSorts.length]; + StringBuilder buf = new StringBuilder(); + for(int i=0; i0) { + buf.append(","); + } + buf.append(fieldComparator.getLeftFieldName()).append(" ").append(fieldComparator.getOrder().toString()); + } + + sort = buf.toString(); + + if(adjustedSorts.length == 1) { + ecomp = fieldEqualitors[0]; + comp = adjustedSorts[0]; + } else { + ecomp = new MultipleFieldEqualitor(fieldEqualitors); + comp = new MultipleFieldComparator(adjustedSorts); + } + } else { + StringBuilder sortBuf = new StringBuilder(); + FieldEqualitor[] equalitors = new FieldEqualitor[buckets.length]; + StreamComparator[] streamComparators = new StreamComparator[buckets.length]; + for(int i=0; i0) { + sortBuf.append(','); + } + sortBuf.append(buckets[i].toString()).append(" asc"); + } + + sort = sortBuf.toString(); + + if(equalitors.length == 1) { + ecomp = equalitors[0]; + comp = streamComparators[0]; + } else { + 
ecomp = new MultipleFieldEqualitor(equalitors); + comp = new MultipleFieldComparator(streamComparators); + } + } + + ModifiableSolrParams params = new ModifiableSolrParams(); + + params.set(CommonParams.FL, fl); + params.set(CommonParams.Q, query); + //Always use the /export handler for Distinct Queries because it requires exporting full result sets. + params.set(CommonParams.QT, "/export"); + + if(numWorkers > 1) { + params.set("partitionKeys", getPartitionKeys(buckets)); + } + + params.set("sort", sort); + + TupleStream tupleStream = null; + + CloudSolrStream cstream = new CloudSolrStream(zkHost, collection, params); + tupleStream = new UniqueStream(cstream, ecomp); + + if(numWorkers > 1) { + // Do the unique in parallel + // Maintain the sort of the Tuples coming from the workers. + ParallelStream parallelStream = new ParallelStream(zkHost, collection, tupleStream, numWorkers, comp); + + StreamFactory factory = new StreamFactory() + .withFunctionName("search", CloudSolrStream.class) + .withFunctionName("parallel", ParallelStream.class) + .withFunctionName("unique", UniqueStream.class); + + parallelStream.setStreamFactory(factory); + tupleStream = parallelStream; + } + + if(limit != null) { + tupleStream = new LimitStream(tupleStream, Integer.parseInt(limit)); + } + + return tupleStream; + } + + + private StreamComparator[] adjustSorts(List> orders, Bucket[] buckets) throws IOException { + List adjustedSorts = new ArrayList(); + Set bucketFields = new HashSet(); + Set sortFields = new HashSet(); + + ComparatorOrder comparatorOrder = ComparatorOrder.ASCENDING; + for(Pair order : orders) { + sortFields.add(order.getKey()); + adjustedSorts.add(new FieldComparator(order.getKey(), ascDescComp(order.getValue()))); + comparatorOrder = ascDescComp(order.getValue()); + } + + for(Bucket bucket : buckets) { + bucketFields.add(bucket.toString()); + } + + for(String sf : sortFields) { + if(!bucketFields.contains(sf)) { + throw new IOException("All sort fields must be in the 
field list."); + } + } + + //Add sort fields if needed + if(sortFields.size() < buckets.length) { + for(Bucket bucket : buckets) { + String b = bucket.toString(); + if(!sortFields.contains(b)) { + adjustedSorts.add(new FieldComparator(bucket.toString(), comparatorOrder)); + } + } + } + + return adjustedSorts.toArray(new FieldComparator[adjustedSorts.size()]); + } + + private TupleStream handleStats(String zk, + String collection, + String query, + List> metricPairs) { + + + ModifiableSolrParams solrParams = new ModifiableSolrParams(); + solrParams.add(CommonParams.Q, query); + Metric[] metrics = buildMetrics(metricPairs, false).toArray(new Metric[0]); + return new StatsStream(zk, collection, solrParams, metrics); + } + + public Queryable asQueryable(QueryProvider queryProvider, SchemaPlus schema, String tableName) { + return new SolrQueryable<>(queryProvider, schema, this, tableName); + } + + public RelNode toRel(RelOptTable.ToRelContext context, RelOptTable relOptTable) { + final RelOptCluster cluster = context.getCluster(); + return new SolrTableScan(cluster, cluster.traitSetOf(SolrRel.CONVENTION), relOptTable, this, null); + } + + @SuppressWarnings("WeakerAccess") + public static class SolrQueryable extends AbstractTableQueryable { + SolrQueryable(QueryProvider queryProvider, SchemaPlus schema, SolrTable table, String tableName) { + super(queryProvider, schema, table, tableName); + } + + public Enumerator enumerator() { + @SuppressWarnings("unchecked") + final Enumerable enumerable = (Enumerable) getTable().query(getProperties()); + return enumerable.enumerator(); + } + + private SolrTable getTable() { + return (SolrTable) table; + } + + private Properties getProperties() { + return schema.unwrap(SolrSchema.class).properties; + } + + /** Called via code-generation. 
+ * + * @see SolrMethod#SOLR_QUERYABLE_QUERY + */ + @SuppressWarnings("UnusedDeclaration") + public Enumerable query(List> fields, String query, List> order, + List buckets, List> metricPairs, String limit, String negativeQuery, String havingPredicate) { + return getTable().query(getProperties(), fields, query, order, buckets, metricPairs, limit, negativeQuery, havingPredicate); + } + } + + private static FieldComparator[] getComps(List> orders) { + FieldComparator[] comps = new FieldComparator[orders.size()]; + for(int i=0; i sortItem = orders.get(i); + String ordering = sortItem.getValue(); + ComparatorOrder comparatorOrder = ascDescComp(ordering); + String sortKey = sortItem.getKey(); + comps[i] = new FieldComparator(sortKey, comparatorOrder); + } + + return comps; + } + + private static ComparatorOrder ascDescComp(String s) { + if(s.toLowerCase(Locale.ROOT).contains("desc")) { + return ComparatorOrder.DESCENDING; + } else { + return ComparatorOrder.ASCENDING; + } + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrTableScan.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrTableScan.java new file mode 100644 index 00000000000..88c53ac0968 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrTableScan.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.handler.sql; + +import org.apache.calcite.plan.*; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.TableScan; +import org.apache.calcite.rel.metadata.RelMetadataQuery; +import org.apache.calcite.rel.type.RelDataType; + +import java.util.List; + +/** + * Relational expression representing a scan of a Solr collection. + */ +class SolrTableScan extends TableScan implements SolrRel { + private final SolrTable solrTable; + private final RelDataType projectRowType; + + /** + * Creates a SolrTableScan. + * + * @param cluster Cluster + * @param traitSet Traits + * @param table Table + * @param solrTable Solr table + * @param projectRowType Fields and types to project; null to project raw row + */ + SolrTableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptTable table, SolrTable solrTable, + RelDataType projectRowType) { + super(cluster, traitSet, table); + this.solrTable = solrTable; + this.projectRowType = projectRowType; + + assert solrTable != null; + assert getConvention() == SolrRel.CONVENTION; + } + + @Override public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { + final float f = projectRowType == null ? 1f : (float) projectRowType.getFieldCount() / 100f; + return super.computeSelfCost(planner, mq).multiplyBy(.1 * f); + } + + @Override + public RelNode copy(RelTraitSet traitSet, List inputs) { + assert inputs.isEmpty(); + return this; + } + + @Override + public RelDataType deriveRowType() { + return projectRowType != null ? 
projectRowType : super.deriveRowType(); + } + + @Override + public void register(RelOptPlanner planner) { + planner.addRule(SolrToEnumerableConverterRule.INSTANCE); + for (RelOptRule rule : SolrRules.RULES) { + planner.addRule(rule); + } + } + + public void implement(Implementor implementor) { + implementor.solrTable = solrTable; + implementor.table = table; + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrToEnumerableConverter.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrToEnumerableConverter.java new file mode 100644 index 00000000000..10d4d4c9688 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrToEnumerableConverter.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.handler.sql; + +import com.google.common.collect.Lists; +import org.apache.calcite.adapter.enumerable.*; +import org.apache.calcite.linq4j.tree.BlockBuilder; +import org.apache.calcite.linq4j.tree.Expression; +import org.apache.calcite.linq4j.tree.Expressions; +import org.apache.calcite.linq4j.tree.MethodCallExpression; +import org.apache.calcite.plan.*; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.convert.ConverterImpl; +import org.apache.calcite.rel.metadata.RelMetadataQuery; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.runtime.Hook; +import org.apache.calcite.util.BuiltInMethod; +import org.apache.calcite.util.Pair; + +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * Relational expression representing a scan of a table in Solr + */ +class SolrToEnumerableConverter extends ConverterImpl implements EnumerableRel { + SolrToEnumerableConverter(RelOptCluster cluster, RelTraitSet traits, RelNode input) { + super(cluster, ConventionTraitDef.INSTANCE, traits, input); + } + + @Override + public RelNode copy(RelTraitSet traitSet, List inputs) { + return new SolrToEnumerableConverter(getCluster(), traitSet, sole(inputs)); + } + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { + return super.computeSelfCost(planner, mq).multiplyBy(.1); + } + + public Result implement(EnumerableRelImplementor implementor, Prefer pref) { + // Generates a call to "query" with the appropriate fields + final BlockBuilder list = new BlockBuilder(); + final SolrRel.Implementor solrImplementor = new SolrRel.Implementor(); + solrImplementor.visitChild(0, getInput()); + final RelDataType rowType = getRowType(); + final PhysType physType = PhysTypeImpl.of(implementor.getTypeFactory(), rowType, pref.prefer(JavaRowFormat.ARRAY)); + final Expression table = list.append("table", 
solrImplementor.table.getExpression(SolrTable.SolrQueryable.class)); + final Expression fields = + list.append("fields", + constantArrayList( + Pair.zip(generateFields(SolrRules.solrFieldNames(rowType), solrImplementor.fieldMappings), + new AbstractList() { + @Override + public Class get(int index) { + return physType.fieldClass(index); + } + + @Override + public int size() { + return rowType.getFieldCount(); + } + }), + Pair.class)); + final Expression query = list.append("query", Expressions.constant(solrImplementor.query, String.class)); + final Expression orders = list.append("orders", constantArrayList(solrImplementor.orders, Pair.class)); + final Expression buckets = list.append("buckets", constantArrayList(solrImplementor.buckets, String.class)); + final Expression metricPairs = list.append("metricPairs", constantArrayList(solrImplementor.metricPairs, Pair.class)); + final Expression limit = list.append("limit", Expressions.constant(solrImplementor.limitValue)); + final Expression negativeQuery = list.append("negativeQuery", Expressions.constant(Boolean.toString(solrImplementor.negativeQuery), String.class)); + final Expression havingPredicate = list.append("havingTest", Expressions.constant(solrImplementor.havingPredicate, String.class)); + Expression enumerable = list.append("enumerable", Expressions.call(table, SolrMethod.SOLR_QUERYABLE_QUERY.method, + fields, query, orders, buckets, metricPairs, limit, negativeQuery, havingPredicate)); + Hook.QUERY_PLAN.run(query); + list.add(Expressions.return_(null, enumerable)); + return implementor.result(physType, list.toBlock()); + } + + private List generateFields(List queryFields, Map fieldMappings) { + if(fieldMappings.isEmpty()) { + return queryFields; + } else { + List fields = new ArrayList<>(); + for(String field : queryFields) { + fields.add(getField(fieldMappings, field)); + } + return fields; + } + } + + private String getField(Map fieldMappings, String field) { + String retField = field; + 
while(fieldMappings.containsKey(field)) { + field = fieldMappings.getOrDefault(field, retField); + if(retField.equals(field)) { + break; + } else { + retField = field; + } + } + return retField; + } + + /** + * E.g. {@code constantArrayList("x", "y")} returns + * "Arrays.asList('x', 'y')". + */ + private static MethodCallExpression constantArrayList(List values, Class clazz) { + return Expressions.call(BuiltInMethod.ARRAYS_AS_LIST.method, + Expressions.newArrayInit(clazz, constantList(values))); + } + + /** + * E.g. {@code constantList("x", "y")} returns "{ConstantExpression("x"), ConstantExpression("y")}". + */ + private static List constantList(List values) { + return Lists.transform(values, Expressions::constant); + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrToEnumerableConverterRule.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrToEnumerableConverterRule.java new file mode 100644 index 00000000000..80365caec1c --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrToEnumerableConverterRule.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.handler.sql; + +import org.apache.calcite.adapter.enumerable.EnumerableConvention; +import org.apache.calcite.plan.RelTraitSet; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.convert.ConverterRule; + +/** + * Rule to convert a relational expression from {@link SolrRel#CONVENTION} to {@link EnumerableConvention}. + */ +class SolrToEnumerableConverterRule extends ConverterRule { + static final ConverterRule INSTANCE = new SolrToEnumerableConverterRule(); + + private SolrToEnumerableConverterRule() { + super(RelNode.class, SolrRel.CONVENTION, EnumerableConvention.INSTANCE, "SolrToEnumerableConverterRule"); + } + + @Override + public RelNode convert(RelNode rel) { + RelTraitSet newTraitSet = rel.getTraitSet().replace(getOutConvention()); + return new SolrToEnumerableConverter(rel.getCluster(), newTraitSet, rel); + } +} diff --git a/solr/core/src/java/org/apache/solr/handler/sql/package-info.java b/solr/core/src/java/org/apache/solr/handler/sql/package-info.java new file mode 100644 index 00000000000..5aef90dace2 --- /dev/null +++ b/solr/core/src/java/org/apache/solr/handler/sql/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * Classes related to Apache Calcite implementation in {@link org.apache.solr.handler.SQLHandler} + */ +package org.apache.solr.handler.sql; \ No newline at end of file diff --git a/solr/core/src/test/org/apache/solr/handler/TestSQLHandler.java b/solr/core/src/test/org/apache/solr/handler/TestSQLHandler.java index 4ce27986899..35f7ad05b9e 100644 --- a/solr/core/src/test/org/apache/solr/handler/TestSQLHandler.java +++ b/solr/core/src/test/org/apache/solr/handler/TestSQLHandler.java @@ -18,11 +18,8 @@ package org.apache.solr.handler; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; -import com.facebook.presto.sql.parser.SqlParser; -import com.facebook.presto.sql.tree.Statement; import org.apache.solr.client.solrj.io.Tuple; import org.apache.solr.client.solrj.io.stream.ExceptionStream; import org.apache.solr.client.solrj.io.stream.SolrStream; @@ -34,13 +31,11 @@ import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.params.SolrParams; import org.junit.After; -import org.junit.AfterClass; import org.junit.Before; import org.junit.Test; public class TestSQLHandler extends AbstractFullDistribZkTestBase { - static { schemaString = "schema-sql.xml"; } @@ -49,11 +44,6 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { sliceCount = 2; } - @AfterClass - public static void afterSuperClass() { - - } - protected String getCloudSolrConfig() { return "solrconfig-sql.xml"; } @@ -80,7 +70,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { @Test public void doTest() throws Exception { waitForRecoveriesToFinish(false); - testPredicate(); + testBasicSelect(); testWhere(); testMixedCaseFields(); @@ -95,213 +85,10 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { testParallelBasicGrouping(); testParallelSelectDistinct(); testParallelTimeSeriesGrouping(); - testCatalogStream(); - testSchemasStream(); - 
testTablesStream(); - } - - private void testPredicate() throws Exception { - - SqlParser parser = new SqlParser(); - String sql = "select a from b where c = 'd'"; - Statement statement = parser.createStatement(sql); - SQLHandler.SQLVisitor sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, 0); - - assertEquals("(c:\"d\")", sqlVistor.query); - - parser = new SqlParser(); - sql = "select a from b where c = 5"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, 0); - - assertEquals("(c:\"5\")", sqlVistor.query); - - parser = new SqlParser(); - sql = "select a from b where c <> 'd'"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, 0); - - assertEquals("(-c:\"d\")", sqlVistor.query); - - parser = new SqlParser(); - sql = "select a from b where c <> 5"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, 0); - - assertEquals("(-c:\"5\")", sqlVistor.query); - - parser = new SqlParser(); - sql = "select a from b where c > 'd'"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, 0); - - assertEquals("(c:{\"d\" TO *])", sqlVistor.query); - - parser = new SqlParser(); - sql = "select a from b where c > 5"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, 0); - - assertEquals("(c:{\"5\" TO *])", sqlVistor.query); - - parser = new SqlParser(); - sql = "select a from b where c >= 'd'"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, 0); - - assertEquals("(c:[\"d\" TO *])", sqlVistor.query); - - parser = new SqlParser(); - sql = 
"select a from b where c >= 5"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, 0); - - assertEquals("(c:[\"5\" TO *])", sqlVistor.query); - - parser = new SqlParser(); - sql = "select a from b where c < 'd'"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, 0); - - assertEquals("(c:[* TO \"d\"})", sqlVistor.query); - - parser = new SqlParser(); - sql = "select a from b where c < 5"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, 0); - - assertEquals("(c:[* TO \"5\"})", sqlVistor.query); - - parser = new SqlParser(); - sql = "select a from b where c <= 'd'"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, 0); - - assertEquals("(c:[* TO \"d\"])", sqlVistor.query); - - parser = new SqlParser(); - sql = "select a from b where c <= 5"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, 0); - - assertEquals("(c:[* TO \"5\"])", sqlVistor.query); - - //Add parens - parser = new SqlParser(); - sql = "select a from b where (c = 'd')"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, new Integer(0)); - - assert(sqlVistor.query.equals("(c:\"d\")")); - - - //Upper case - parser = new SqlParser(); - sql = "select a from b where ('CcC' = 'D')"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, new Integer(0)); - assert(sqlVistor.query.equals("(CcC:\"D\")")); - - //Phrase - parser = new SqlParser(); - sql = "select a from b where (c = 'd d')"; - statement = 
parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, new Integer(0)); - - assert(sqlVistor.query.equals("(c:\"d d\")")); - - // AND - parser = new SqlParser(); - sql = "select a from b where ((c = 'd') AND (l = 'z'))"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, new Integer(0)); - - assert(sqlVistor.query.equals("((c:\"d\") AND (l:\"z\"))")); - - // OR - - parser = new SqlParser(); - sql = "select a from b where ((c = 'd') OR (l = 'z'))"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, new Integer(0)); - - assert(sqlVistor.query.equals("((c:\"d\") OR (l:\"z\"))")); - - // AND NOT - - parser = new SqlParser(); - sql = "select a from b where ((c = 'd') AND NOT (l = 'z'))"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, new Integer(0)); - - assert(sqlVistor.query.equals("((c:\"d\") AND -(l:\"z\"))")); - - // NESTED - parser = new SqlParser(); - sql = "select a from b where ((c = 'd') OR ((l = 'z') AND (m = 'j')))"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, new Integer(0)); - - assert(sqlVistor.query.equals("((c:\"d\") OR ((l:\"z\") AND (m:\"j\")))")); - - // NESTED NOT - parser = new SqlParser(); - sql = "select a from b where ((c = 'd') OR ((l = 'z') AND NOT (m = 'j')))"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, new Integer(0)); - - assert(sqlVistor.query.equals("((c:\"d\") OR ((l:\"z\") AND -(m:\"j\")))")); - - // RANGE - Will have to do until SQL BETWEEN is supported. 
- // NESTED - parser = new SqlParser(); - sql = "select a from b where ((c = '[0 TO 100]') OR ((l = '(z)') AND (m = 'j')))"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, new Integer(0)); - - assert(sqlVistor.query.equals("((c:[0 TO 100]) OR ((l:(z)) AND (m:\"j\")))")); - - // Wildcard - parser = new SqlParser(); - sql = "select a from b where ((c = '[0 TO 100]') OR ((l = '(z*)') AND (m = 'j')))"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, new Integer(0)); - assert(sqlVistor.query.equals("((c:[0 TO 100]) OR ((l:(z*)) AND (m:\"j\")))")); - - // Complex Lucene/Solr Query - parser = new SqlParser(); - sql = "select a from b where (('c' = '[0 TO 100]') OR ((l = '(z*)') AND ('M' = '(j OR (k NOT s))')))"; - statement = parser.createStatement(sql); - sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder()); - sqlVistor.process(statement, new Integer(0)); - assert(sqlVistor.query.equals("((c:[0 TO 100]) OR ((l:(z*)) AND (M:(j OR (k NOT s)))))")); } private void testBasicSelect() throws Exception { try { - CloudJettyRunner jetty = this.cloudJettys.get(0); del("*:*"); @@ -317,16 +104,18 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { indexDoc(sdoc("id", "7", "text", "XXXX XXXX", "str_s", "c", "field_i", "50")); indexDoc(sdoc("id", "8", "text", "XXXX XXXX", "str_s", "c", "field_i", "60")); commit(); - + + + System.out.println("############# testBasicSelect() ############"); + SolrParams sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select 'id', field_i, str_s from collection1 where 'text'='XXXX' order by field_i desc"); + "stmt", "select id, field_i, str_s from collection1 where (text='(XXXX)' OR text='XXXX') AND text='XXXX' order by field_i desc"); SolrStream solrStream = new SolrStream(jetty.url, sParams); List tuples = getTuples(solrStream); 
assert(tuples.size() == 8); - - Tuple tuple = null; + Tuple tuple; tuple = tuples.get(0); assert(tuple.getLong("id") == 8); @@ -369,7 +158,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { assert(tuple.get("str_s").equals("a")); //Test unlimited unsorted result. Should sort on _version_ desc - sParams = mapParams(CommonParams.QT, "/sql", "stmt", "select 'id', field_i, str_s from collection1 where 'text'='XXXX'"); + sParams = mapParams(CommonParams.QT, "/sql", "stmt", "select id, field_i, str_s from collection1 where text='XXXX'"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -475,7 +264,6 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { assert(tuple.getLong("myInt") == 7); assert(tuple.get("myString").equals("a")); - sParams = mapParams(CommonParams.QT, "/sql", "stmt", "select id as myId, field_i as myInt, str_s as myString from collection1 where text='XXXX' AND id='(1 2 3)' order by field_i desc"); @@ -588,29 +376,30 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(6); assertEquals(8L, tuple.get("id")); + // TODO requires different Calcite SQL conformance level // Not Equals != - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select id from collection1 where id != 1 order by id asc limit 10"); - - solrStream = new SolrStream(jetty.url, sParams); - tuples = getTuples(solrStream); - - assertEquals(7, tuples.size()); - - tuple = tuples.get(0); - assertEquals(2L, tuple.get("id")); - tuple = tuples.get(1); - assertEquals(3L, tuple.get("id")); - tuple = tuples.get(2); - assertEquals(4L, tuple.get("id")); - tuple = tuples.get(3); - assertEquals(5L, tuple.get("id")); - tuple = tuples.get(4); - assertEquals(6L, tuple.get("id")); - tuple = tuples.get(5); - assertEquals(7L, tuple.get("id")); - tuple = tuples.get(6); - assertEquals(8L, tuple.get("id")); +// sParams = mapParams(CommonParams.QT, "/sql", +// "stmt", "select id from collection1 
where id != 1 order by id asc limit 10"); +// +// solrStream = new SolrStream(jetty.url, sParams); +// tuples = getTuples(solrStream); +// +// assertEquals(7, tuples.size()); +// +// tuple = tuples.get(0); +// assertEquals(2L, tuple.get("id")); +// tuple = tuples.get(1); +// assertEquals(3L, tuple.get("id")); +// tuple = tuples.get(2); +// assertEquals(4L, tuple.get("id")); +// tuple = tuples.get(3); +// assertEquals(5L, tuple.get("id")); +// tuple = tuples.get(4); +// assertEquals(6L, tuple.get("id")); +// tuple = tuples.get(5); +// assertEquals(7L, tuple.get("id")); +// tuple = tuples.get(6); +// assertEquals(8L, tuple.get("id")); // Less than sParams = mapParams(CommonParams.QT, "/sql", @@ -669,7 +458,6 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { } } - private void testMixedCaseFields() throws Exception { try { @@ -688,15 +476,16 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { indexDoc(sdoc("id", "7", "Text_t", "XXXX XXXX", "Str_s", "c", "Field_i", "50")); indexDoc(sdoc("id", "8", "Text_t", "XXXX XXXX", "Str_s", "c", "Field_i", "60")); commit(); - SolrParams sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select id, Field_i, Str_s from Collection1 where Text_t='XXXX' order by Field_i desc"); + + SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select id, Field_i, Str_s from collection1 where Text_t='XXXX' order by Field_i desc"); SolrStream solrStream = new SolrStream(jetty.url, sParams); List tuples = getTuples(solrStream); assert(tuples.size() == 8); - Tuple tuple = null; + Tuple tuple; tuple = tuples.get(0); assert(tuple.getLong("id") == 8); @@ -738,24 +527,9 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { assert(tuple.getLong("Field_i") == 7); assert(tuple.get("Str_s").equals("a")); - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select Str_s, sum(Field_i) from Collection1 where 'id'='(1 8)' group by Str_s having 
(sum(Field_i) = 7 OR 'sum(Field_i)' = 60) order by 'sum(Field_i)' desc"); - - solrStream = new SolrStream(jetty.url, sParams); - tuples = getTuples(solrStream); - - assert(tuples.size() == 2); - - tuple = tuples.get(0); - assert(tuple.get("Str_s").equals("c")); - assert(tuple.getDouble("sum(Field_i)") == 60); - - tuple = tuples.get(1); - assert(tuple.get("Str_s").equals("a")); - assert(tuple.getDouble("sum(Field_i)") == 7); - + // TODO get sum(Field_i) as named one sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select Str_s, sum(Field_i) from Collection1 where 'id'='(1 8)' group by 'Str_s' having (sum(Field_i) = 7 OR 'sum(Field_i)' = 60) order by 'sum(Field_i)' desc"); + "stmt", "select Str_s, sum(Field_i) from collection1 where id='(1 8)' group by Str_s having (sum(Field_i) = 7 OR sum(Field_i) = 60) order by sum(Field_i) desc"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -764,13 +538,27 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); assert(tuple.get("Str_s").equals("c")); - assert(tuple.getDouble("sum(Field_i)") == 60); + assert(tuple.getDouble("EXPR$1") == 60); tuple = tuples.get(1); assert(tuple.get("Str_s").equals("a")); - assert(tuple.getDouble("sum(Field_i)") == 7); + assert(tuple.getDouble("EXPR$1") == 7); + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select Str_s, sum(Field_i) from collection1 where id='(1 8)' group by Str_s having (sum(Field_i) = 7 OR sum(Field_i) = 60) order by sum(Field_i) desc"); + solrStream = new SolrStream(jetty.url, sParams); + tuples = getTuples(solrStream); + + assert(tuples.size() == 2); + + tuple = tuples.get(0); + assert(tuple.get("Str_s").equals("c")); + assert(tuple.getDouble("EXPR$1") == 60); + + tuple = tuples.get(1); + assert(tuple.get("Str_s").equals("a")); + assert(tuple.getDouble("EXPR$1") == 7); } finally { delete(); } @@ -795,15 +583,14 @@ public class TestSQLHandler extends 
AbstractFullDistribZkTestBase { indexDoc(sdoc("id", "8", "text", "XXXX XXXX", "str_s", "c", "field_i", "60")); commit(); - SolrParams sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select id, field_i, str_s from collection1 where text='XXXX' order by field_iff desc"); + SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select id, str_s from collection1 where text='XXXX' order by field_iff desc"); SolrStream solrStream = new SolrStream(jetty.url, sParams); Tuple tuple = getTuple(new ExceptionStream(solrStream)); assert(tuple.EOF); assert(tuple.EXCEPTION); - //A parse exception detected before being sent to the search engine - assert(tuple.getException().contains("Fields in the sort spec must be included in the field list")); + assert(tuple.getException().contains("Column 'field_iff' not found in any table")); sParams = mapParams(CommonParams.QT, "/sql", "stmt", "select id, field_iff, str_s from collection1 where text='XXXX' order by field_iff desc"); @@ -812,38 +599,26 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = getTuple(new ExceptionStream(solrStream)); assert(tuple.EOF); assert(tuple.EXCEPTION); - //An exception not detected by the parser thrown from the /select handler - assert(tuple.getException().contains("sort param field can't be found:")); - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select str_s, count(*), sum(field_iff), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s having ((sum(field_iff) = 19) AND (min(field_i) = 8))"); + assert(tuple.getException().contains("Column 'field_iff' not found in any table")); + + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_iff), min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s having ((sum(field_iff) = 19) AND (min(field_i) = 8))"); 
solrStream = new SolrStream(jetty.url, sParams); tuple = getTuple(new ExceptionStream(solrStream)); assert(tuple.EOF); assert(tuple.EXCEPTION); - //An exception not detected by the parser thrown from the /export handler - assert(tuple.getException().contains("undefined field:")); + assert(tuple.getException().contains("Column 'field_iff' not found in any table")); - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select str_s, count(*), blah(field_iff), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s having ((sum(field_iff) = 19) AND (min(field_i) = 8))"); + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), blah(field_i), min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 8))"); solrStream = new SolrStream(jetty.url, sParams); tuple = getTuple(new ExceptionStream(solrStream)); assert(tuple.EOF); assert(tuple.EXCEPTION); - //An exception not detected by the parser thrown from the /export handler - assert(tuple.getException().contains("Invalid function: blah")); - - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select str_s from collection1 where text='XXXX' group by str_s"); - - solrStream = new SolrStream(jetty.url, sParams); - tuple = getTuple(new ExceptionStream(solrStream)); - assert(tuple.EOF); - assert(tuple.EXCEPTION); - assert(tuple.getException().contains("Group by queries must include atleast one aggregate function.")); - + assert(tuple.getException().contains("No match found for function signature blah")); } finally { delete(); } @@ -866,38 +641,37 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { indexr("id", "6", "text", "XXXX XXXX", "str_s", "c", "field_i", "40"); indexr("id", "7", "text", "XXXX XXXX", "str_s", "c", "field_i", "50"); indexr("id", "8", "text", "XXXX XXXX", "str_s", "c", "field_i", "60"); 
+ indexr("id", "9", "text", "XXXX XXXY", "str_s", "d", "field_i", "70"); commit(); - - SolrParams sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select str_s, 'count(*)', sum('field_i'), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by 'str_s' order by 'sum(field_i)' asc limit 2"); + + SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s order by sum(field_i) asc limit 2"); SolrStream solrStream = new SolrStream(jetty.url, sParams); List tuples = getTuples(solrStream); //Only two results because of the limit. assert(tuples.size() == 2); - - Tuple tuple = null; + Tuple tuple; tuple = tuples.get(0); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) tuple = tuples.get(1); assert(tuple.get("str_s").equals("a")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 27); - assert(tuple.getDouble("min(field_i)") == 7); - assert(tuple.getDouble("max(field_i)") == 20); - assert(tuple.getDouble("avg(field_i)") == 13.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 7); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 20); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 13.5D); 
//avg(field_i) - - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select str_s as myString, 'count(*)', sum('field_i') as sum, min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by 'str_s' order by sum asc limit 2"); + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select str_s as myString, count(*), sum(field_i) as mySum, min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s order by mySum asc limit 2"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -907,23 +681,24 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); assert(tuple.get("myString").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("mySum") == 19); + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) tuple = tuples.get(1); assert(tuple.get("myString").equals("a")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum") == 27); - assert(tuple.getDouble("min(field_i)") == 7); - assert(tuple.getDouble("max(field_i)") == 20); - assert(tuple.getDouble("avg(field_i)") == 13.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("mySum") == 27); + assert(tuple.getDouble("EXPR$3") == 7); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 20); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i) - - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) 
from collection1 where (text='XXXX' AND NOT text='XXXX XXX') group by str_s order by str_s desc"); + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where (text='XXXX' AND NOT ((text='XXXY') AND (text='XXXY' OR text='XXXY'))) " + + "group by str_s order by str_s desc"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -935,31 +710,33 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); assert(tuple.get("str_s").equals("c")); - assert(tuple.getDouble("count(*)") == 4); - assert(tuple.getDouble("sum(field_i)") == 180); - assert(tuple.getDouble("min(field_i)") == 30); - assert(tuple.getDouble("max(field_i)") == 60); - assert(tuple.getDouble("avg(field_i)") == 45); + assert(tuple.getDouble("EXPR$1") == 4); //count(*) + assert(tuple.getDouble("EXPR$2") == 180); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 30); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 60); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 45); //avg(field_i) tuple = tuples.get(1); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) tuple = tuples.get(2); assert(tuple.get("str_s").equals("a")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 27); - assert(tuple.getDouble("min(field_i)") == 7); - 
assert(tuple.getDouble("max(field_i)") == 20); - assert(tuple.getDouble("avg(field_i)") == 13.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 7); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 20); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i) - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select str_s as myString, count(*) as count, sum(field_i) as sum, min(field_i) as min, max(field_i) as max, avg(field_i) as avg from collection1 where (text='XXXX' AND NOT text='XXXX XXX') group by str_s order by str_s desc"); + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select str_s as myString, count(*) as myCount, sum(field_i) as mySum, min(field_i) as myMin, " + + "max(field_i) as myMax, cast(avg(1.0 * field_i) as float) as myAvg from collection1 " + + "where (text='XXXX' AND NOT (text='XXXY')) group by str_s order by str_s desc"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -971,32 +748,31 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); assert(tuple.get("myString").equals("c")); - assert(tuple.getDouble("count") == 4); - assert(tuple.getDouble("sum") == 180); - assert(tuple.getDouble("min") == 30); - assert(tuple.getDouble("max") == 60); - assert(tuple.getDouble("avg") == 45); + assert(tuple.getDouble("myCount") == 4); + assert(tuple.getDouble("mySum") == 180); + assert(tuple.getDouble("myMin") == 30); + assert(tuple.getDouble("myMax") == 60); + assert(tuple.getDouble("myAvg") == 45); tuple = tuples.get(1); assert(tuple.get("myString").equals("b")); - assert(tuple.getDouble("count") == 2); - assert(tuple.getDouble("sum") == 19); - assert(tuple.getDouble("min") == 8); - assert(tuple.getDouble("max") == 11); - assert(tuple.getDouble("avg") == 9.5D); + assert(tuple.getDouble("myCount") == 2); + 
assert(tuple.getDouble("mySum") == 19); + assert(tuple.getDouble("myMin") == 8); + assert(tuple.getDouble("myMax") == 11); + assert(tuple.getDouble("myAvg") == 9.5D); tuple = tuples.get(2); assert(tuple.get("myString").equals("a")); - assert(tuple.getDouble("count") == 2); - assert(tuple.getDouble("sum") == 27); - assert(tuple.getDouble("min") == 7); - assert(tuple.getDouble("max") == 20); - assert(tuple.getDouble("avg") == 13.5D); + assert(tuple.getDouble("myCount") == 2); + assert(tuple.getDouble("mySum") == 27); + assert(tuple.getDouble("myMin") == 7); + assert(tuple.getDouble("myMax") == 20); + assert(tuple.getDouble("myAvg") == 13.5D); - - - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s having sum(field_i) = 19"); + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) " + + "from collection1 where text='XXXX' group by str_s having sum(field_i) = 19"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -1005,65 +781,63 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select str_s, count(*), 
sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 8))"); + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) " + + "from collection1 where text='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 8))"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); - //Only two results because of the limit. assert(tuples.size() == 1); tuple = tuples.get(0); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select str_s as myString, count(*), sum(field_i) as sum, min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by myString having ((sum = 19) AND (min(field_i) = 8))"); + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_i) as mySum, min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " + + "having ((sum(field_i) = 19) AND (min(field_i) = 8))"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); - //Only two results because of the limit. 
assert(tuples.size() == 1); tuple = tuples.get(0); - assert(tuple.get("myString").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.get("str_s").equals("b")); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("mySum") == 19); + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) - - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 100))"); + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " + + "having ((sum(field_i) = 19) AND (min(field_i) = 100))"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); assert(tuples.size() == 0); - - } finally { delete(); } } - private void testSelectDistinctFacets() throws Exception { try { - CloudJettyRunner jetty = this.cloudJettys.get(0); del("*:*"); @@ -1079,15 +853,19 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { indexr("id", "7", "text", "XXXX XXXX", "str_s", "c", "field_i", "50"); indexr("id", "8", "text", "XXXX XXXX", "str_s", "c", "field_i", "60"); commit(); + SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet", - "stmt", "select distinct 'str_s', 'field_i' from collection1 order by 'str_s' asc, 'field_i' asc"); + "stmt", "select distinct str_s, field_i from collection1 order by str_s asc, field_i asc"); + + 
System.out.println("######## selectDistinctFacets #######"); SolrStream solrStream = new SolrStream(jetty.url, sParams); List tuples = getTuples(solrStream); + //assert(false); assert(tuples.size() == 6); - Tuple tuple = null; + Tuple tuple; tuple = tuples.get(0); assert(tuple.get("str_s").equals("a")); @@ -1204,7 +982,6 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { // Test without a sort. Sort should be asc by default. - sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet", "stmt", "select distinct str_s, field_i from collection1"); @@ -1218,30 +995,35 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { assert(tuple.getLong("field_i") == 1); tuple = tuples.get(1); + + assert(tuple.get("str_s").equals("a")); assert(tuple.getLong("field_i") == 20); tuple = tuples.get(2); + + assert(tuple.get("str_s").equals("b")); assert(tuple.getLong("field_i") == 2); tuple = tuples.get(3); + assert(tuple.get("str_s").equals("c")); assert(tuple.getLong("field_i") == 30); tuple = tuples.get(4); + assert(tuple.get("str_s").equals("c")); assert(tuple.getLong("field_i") == 50); tuple = tuples.get(5); + assert(tuple.get("str_s").equals("c")); assert(tuple.getLong("field_i") == 60); - // Test with a predicate. 
- sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet", - "stmt", "select distinct str_s, field_i from collection1 where str_s = 'a'"); + "stmt", "select distinct str_s, field_i from collection1 where str_s = 'a'"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -1255,14 +1037,11 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(1); assert(tuple.get("str_s").equals("a")); assert(tuple.getLong("field_i") == 20); - - } finally { delete(); } } - private void testSelectDistinct() throws Exception { try { @@ -1281,18 +1060,18 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { indexr("id", "7", "text", "XXXX XXXX", "str_s", "c", "field_i", "50"); indexr("id", "8", "text", "XXXX XXXX", "str_s", "c", "field_i", "60"); commit(); - - SolrParams sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select distinct 'str_s', 'field_i' from collection1 order by 'str_s' asc, 'field_i' asc"); - SolrStream solrStream = new SolrStream(jetty.url, sParams); + SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select distinct str_s, field_i from collection1 order by str_s asc, field_i asc"); + + System.out.println("##################### testSelectDistinct()"); + + TupleStream solrStream = new SolrStream(jetty.url, sParams); List tuples = getTuples(solrStream); + assert(tuples.size() == 6); - - Tuple tuple = null; - - tuple = tuples.get(0); + Tuple tuple = tuples.get(0); assert(tuple.get("str_s").equals("a")); assert(tuple.getLong("field_i") == 1); @@ -1318,7 +1097,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { //reverse the sort - sParams = mapParams(CommonParams.QT, "/sql", + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", "stmt", "select distinct str_s, field_i from collection1 order by str_s desc, field_i desc"); solrStream = new SolrStream(jetty.url, sParams); @@ 
-1353,7 +1132,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { assert(tuple.getLong("field_i") == 1); - sParams = mapParams(CommonParams.QT, "/sql", + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", "stmt", "select distinct str_s as myString, field_i from collection1 order by myString desc, field_i desc"); solrStream = new SolrStream(jetty.url, sParams); @@ -1389,7 +1168,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { //test with limit - sParams = mapParams(CommonParams.QT, "/sql", + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", "stmt", "select distinct str_s, field_i from collection1 order by str_s desc, field_i desc limit 2"); solrStream = new SolrStream(jetty.url, sParams); @@ -1407,9 +1186,8 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { // Test without a sort. Sort should be asc by default. - - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select distinct str_s, field_i from collection1"); + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select distinct str_s, field_i from collection1"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -1441,9 +1219,8 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { assert(tuple.getLong("field_i") == 60); // Test with a predicate. 
- - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select distinct str_s, field_i from collection1 where str_s = 'a'"); + sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce", + "stmt", "select distinct str_s, field_i from collection1 where str_s = 'a'"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -1457,8 +1234,6 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(1); assert(tuple.get("str_s").equals("a")); assert(tuple.getLong("field_i") == 20); - - } finally { delete(); } @@ -1466,7 +1241,6 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { private void testParallelSelectDistinct() throws Exception { try { - CloudJettyRunner jetty = this.cloudJettys.get(0); del("*:*"); @@ -1482,7 +1256,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { indexr("id", "7", "text", "XXXX XXXX", "str_s", "c", "field_i", "50"); indexr("id", "8", "text", "XXXX XXXX", "str_s", "c", "field_i", "60"); commit(); - SolrParams sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", + SolrParams sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", "stmt", "select distinct str_s, field_i from collection1 order by str_s asc, field_i asc"); SolrStream solrStream = new SolrStream(jetty.url, sParams); @@ -1490,7 +1264,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { assert(tuples.size() == 6); - Tuple tuple = null; + Tuple tuple; tuple = tuples.get(0); assert(tuple.get("str_s").equals("a")); @@ -1518,7 +1292,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { //reverse the sort - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", + sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", "stmt", "select distinct str_s, field_i from collection1 order by str_s desc, field_i desc"); solrStream = new 
SolrStream(jetty.url, sParams); @@ -1534,7 +1308,6 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { assert(tuple.get("str_s").equals("c")); assert(tuple.getLong("field_i") == 50); - tuple = tuples.get(2); assert(tuple.get("str_s").equals("c")); assert(tuple.getLong("field_i") == 30); @@ -1543,7 +1316,6 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { assert(tuple.get("str_s").equals("b")); assert(tuple.getLong("field_i") == 2); - tuple = tuples.get(4); assert(tuple.get("str_s").equals("a")); assert(tuple.getLong("field_i") == 20); @@ -1554,7 +1326,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { //reverse the sort - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", + sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", "stmt", "select distinct str_s as myString, field_i from collection1 order by myString desc, field_i desc"); solrStream = new SolrStream(jetty.url, sParams); @@ -1590,7 +1362,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { //test with limit - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", + sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", "stmt", "select distinct str_s, field_i from collection1 order by str_s desc, field_i desc limit 2"); solrStream = new SolrStream(jetty.url, sParams); @@ -1608,9 +1380,8 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { // Test without a sort. Sort should be asc by default. 
- - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select distinct str_s, field_i from collection1"); + sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", + "stmt", "select distinct str_s, field_i from collection1"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -1642,9 +1413,8 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { assert(tuple.getLong("field_i") == 60); // Test with a predicate. - - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select distinct str_s, field_i from collection1 where str_s = 'a'"); + sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", + "stmt", "select distinct str_s, field_i from collection1 where str_s = 'a'"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -1664,11 +1434,8 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { } } - - private void testBasicGroupingFacets() throws Exception { try { - CloudJettyRunner jetty = this.cloudJettys.get(0); del("*:*"); @@ -1683,9 +1450,13 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { indexr("id", "6", "text", "XXXX XXXX", "str_s", "c", "field_i", "40"); indexr("id", "7", "text", "XXXX XXXX", "str_s", "c", "field_i", "50"); indexr("id", "8", "text", "XXXX XXXX", "str_s", "c", "field_i", "60"); + indexr("id", "9", "text", "XXXX XXXY", "str_s", "d", "field_i", "70"); commit(); + SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet", - "stmt", "select 'str_s', 'count(*)', sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by 'str_s' order by 'sum(field_i)' asc limit 2"); + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " + + 
"order by sum(field_i) asc limit 2"); SolrStream solrStream = new SolrStream(jetty.url, sParams); List tuples = getTuples(solrStream); @@ -1693,26 +1464,28 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { //Only two results because of the limit. assert(tuples.size() == 2); - Tuple tuple = null; + Tuple tuple; tuple = tuples.get(0); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) tuple = tuples.get(1); assert(tuple.get("str_s").equals("a")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 27); - assert(tuple.getDouble("min(field_i)") == 7); - assert(tuple.getDouble("max(field_i)") == 20); - assert(tuple.getDouble("avg(field_i)") == 13.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 7); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 20); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i) sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet", - "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where (text='XXXX' AND NOT text='XXXX XXX') group by str_s order by str_s desc"); + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where (text='XXXX' AND NOT (text='XXXY')) " + + "group by str_s order by str_s desc"); 
solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -1724,30 +1497,32 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); assert(tuple.get("str_s").equals("c")); - assert(tuple.getDouble("count(*)") == 4); - assert(tuple.getDouble("sum(field_i)") == 180); - assert(tuple.getDouble("min(field_i)") == 30); - assert(tuple.getDouble("max(field_i)") == 60); - assert(tuple.getDouble("avg(field_i)") == 45); + assert(tuple.getDouble("EXPR$1") == 4); //count(*) + assert(tuple.getDouble("EXPR$2") == 180); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 30); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 60); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 45); //avg(field_i) tuple = tuples.get(1); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) tuple = tuples.get(2); assert(tuple.get("str_s").equals("a")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 27); - assert(tuple.getDouble("min(field_i)") == 7); - assert(tuple.getDouble("max(field_i)") == 20); - assert(tuple.getDouble("avg(field_i)") == 13.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 7); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 20); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i) sParams = mapParams(CommonParams.QT, "/sql", 
"aggregationMode", "facet", - "stmt", "select str_s as myString, count(*), sum(field_i) as sum, min(field_i), max(field_i), avg(field_i) from collection1 where (text='XXXX' AND NOT text='XXXX XXX') group by myString order by myString desc"); + "stmt", "select str_s as myString, count(*), sum(field_i) as mySum, min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where (text='XXXX' AND NOT (text='XXXY')) " + + "group by str_s order by myString desc"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -1759,33 +1534,31 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); assert(tuple.get("myString").equals("c")); - assert(tuple.getDouble("count(*)") == 4); - assert(tuple.getDouble("sum") == 180); - assert(tuple.getDouble("min(field_i)") == 30); - assert(tuple.getDouble("max(field_i)") == 60); - assert(tuple.getDouble("avg(field_i)") == 45); + assert(tuple.getDouble("EXPR$1") == 4); //count(*) + assert(tuple.getDouble("mySum") == 180); + assert(tuple.getDouble("EXPR$3") == 30); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 60); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 45); //avg(field_i) tuple = tuples.get(1); assert(tuple.get("myString").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("mySum") == 19); + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) tuple = tuples.get(2); assert(tuple.get("myString").equals("a")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum") == 27); - assert(tuple.getDouble("min(field_i)") == 7); - 
assert(tuple.getDouble("max(field_i)") == 20); - assert(tuple.getDouble("avg(field_i)") == 13.5D); - - - + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("mySum") == 27); + assert(tuple.getDouble("EXPR$3") == 7); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 20); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i) sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet", - "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s having 'sum(field_i)' = 19"); + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s having sum(field_i) = 19"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -1794,68 +1567,64 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet", - "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s having (('sum(field_i)' = 19) AND (min(field_i) = 8))"); + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where 
text='XXXX' group by str_s " + + "having ((sum(field_i) = 19) AND (min(field_i) = 8))"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); - //Only two results because of the limit. assert(tuples.size() == 1); tuple = tuples.get(0); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); - + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet", - "stmt", "select str_s myString, count(*), sum(field_i) as sum, min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by myString having ((sum = 19) AND (min(field_i) = 8))"); + "stmt", "select str_s, count(*), sum(field_i) as mySum, min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " + + "having ((sum(field_i) = 19) AND (min(field_i) = 8))"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); - //Only two results because of the limit. 
assert(tuples.size() == 1); tuple = tuples.get(0); - assert(tuple.get("myString").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.get("str_s").equals("b")); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("mySum") == 19); + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet", - "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 100))"); + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " + + "having ((sum(field_i) = 19) AND (min(field_i) = 100))"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); assert(tuples.size() == 0); - - } finally { delete(); } } - - - - private void testParallelBasicGrouping() throws Exception { try { - CloudJettyRunner jetty = this.cloudJettys.get(0); del("*:*"); @@ -1871,8 +1640,11 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { indexr("id", "7", "text", "XXXX XXXX", "str_s", "c", "field_i", "50"); indexr("id", "8", "text", "XXXX XXXX", "str_s", "c", "field_i", "60"); commit(); - SolrParams sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s order by sum(field_i) asc limit 2"); + + SolrParams sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", 
"aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " + + "order by sum(field_i) asc limit 2"); SolrStream solrStream = new SolrStream(jetty.url, sParams); List tuples = getTuples(solrStream); @@ -1880,26 +1652,28 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { //Only two results because of the limit. assert(tuples.size() == 2); - Tuple tuple = null; + Tuple tuple; tuple = tuples.get(0); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) tuple = tuples.get(1); assert(tuple.get("str_s").equals("a")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 27); - assert(tuple.getDouble("min(field_i)") == 7); - assert(tuple.getDouble("max(field_i)") == 20); - assert(tuple.getDouble("avg(field_i)") == 13.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 7); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 20); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i) - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select str_s, count(*), sum(field_i) as sum, min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s order by sum asc limit 2"); + + sParams = 
mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_i) as mySum, min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s order by mySum asc limit 2"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -1909,23 +1683,24 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("mySum") == 19); + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) tuple = tuples.get(1); assert(tuple.get("str_s").equals("a")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum") == 27); - assert(tuple.getDouble("min(field_i)") == 7); - assert(tuple.getDouble("max(field_i)") == 20); - assert(tuple.getDouble("avg(field_i)") == 13.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("mySum") == 27); + assert(tuple.getDouble("EXPR$3") == 7); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 20); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i) - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s order by str_s desc"); + sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_i), min(field_i), 
max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s order by str_s desc"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -1937,31 +1712,32 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); assert(tuple.get("str_s").equals("c")); - assert(tuple.getDouble("count(*)") == 4); - assert(tuple.getDouble("sum(field_i)") == 180); - assert(tuple.getDouble("min(field_i)") == 30); - assert(tuple.getDouble("max(field_i)") == 60); - assert(tuple.getDouble("avg(field_i)") == 45); + assert(tuple.getDouble("EXPR$1") == 4); //count(*) + assert(tuple.getDouble("EXPR$2") == 180); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 30); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 60); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 45); //avg(field_i) tuple = tuples.get(1); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) tuple = tuples.get(2); assert(tuple.get("str_s").equals("a")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 27); - assert(tuple.getDouble("min(field_i)") == 7); - assert(tuple.getDouble("max(field_i)") == 20); - assert(tuple.getDouble("avg(field_i)") == 13.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 7); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 20); 
//max(field_i) + assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i) - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select str_s as myString, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by myString order by myString desc"); + sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", + "stmt", "select str_s as myString, count(*), sum(field_i), min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s order by myString desc"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); @@ -1973,79 +1749,73 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); assert(tuple.get("myString").equals("c")); - assert(tuple.getDouble("count(*)") == 4); - assert(tuple.getDouble("sum(field_i)") == 180); - assert(tuple.getDouble("min(field_i)") == 30); - assert(tuple.getDouble("max(field_i)") == 60); - assert(tuple.getDouble("avg(field_i)") == 45); + assert(tuple.getDouble("EXPR$1") == 4); //count(*) + assert(tuple.getDouble("EXPR$2") == 180); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 30); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 60); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 45); //avg(field_i) tuple = tuples.get(1); assert(tuple.get("myString").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) tuple = 
tuples.get(2); assert(tuple.get("myString").equals("a")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 27); - assert(tuple.getDouble("min(field_i)") == 7); - assert(tuple.getDouble("max(field_i)") == 20); - assert(tuple.getDouble("avg(field_i)") == 13.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 7); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 20); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i) - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s having sum(field_i) = 19"); + sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s having sum(field_i) = 19"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); - //Only two results because of the limit. 
assert(tuples.size() == 1); tuple = tuples.get(0); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) - tuple = tuples.get(0); - assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); - - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 8))"); + sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " + + "having ((sum(field_i) = 19) AND (min(field_i) = 8))"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); - //Only two results because of the limit. 
assert(tuples.size() == 1); tuple = tuples.get(0); assert(tuple.get("str_s").equals("b")); - assert(tuple.getDouble("count(*)") == 2); - assert(tuple.getDouble("sum(field_i)") == 19); - assert(tuple.getDouble("min(field_i)") == 8); - assert(tuple.getDouble("max(field_i)") == 11); - assert(tuple.getDouble("avg(field_i)") == 9.5D); + assert(tuple.getDouble("EXPR$1") == 2); //count(*) + assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i) + assert(tuple.getDouble("EXPR$3") == 8); //min(field_i) + assert(tuple.getDouble("EXPR$4") == 11); //max(field_i) + assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i) - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 100))"); + sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", + "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " + + "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " + + "having ((sum(field_i) = 19) AND (min(field_i) = 100))"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); assert(tuples.size() == 0); - } finally { delete(); } @@ -2053,7 +1823,6 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { private void testAggregatesWithoutGrouping() throws Exception { - CloudJettyRunner jetty = this.cloudJettys.get(0); del("*:*"); @@ -2073,11 +1842,11 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { commit(); - SolrParams sParams = mapParams(CommonParams.QT, "/sql", "stmt", "select count(*), sum(a_i), min(a_i), max(a_i), avg(a_i), sum(a_f), min(a_f), max(a_f), avg(a_f) from collection1"); + SolrParams sParams = mapParams(CommonParams.QT, "/sql", "stmt", "select count(*), sum(a_i), min(a_i), max(a_i), cast(avg(1.0 * a_i) as float), 
sum(a_f), " + + "min(a_f), max(a_f), avg(a_f) from collection1"); SolrStream solrStream = new SolrStream(jetty.url, sParams); - List tuples = getTuples(solrStream); assert(tuples.size() == 1); @@ -2086,30 +1855,29 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { Tuple tuple = tuples.get(0); - Double sumi = tuple.getDouble("sum(a_i)"); - Double sumf = tuple.getDouble("sum(a_f)"); - Double mini = tuple.getDouble("min(a_i)"); - Double minf = tuple.getDouble("min(a_f)"); - Double maxi = tuple.getDouble("max(a_i)"); - Double maxf = tuple.getDouble("max(a_f)"); - Double avgi = tuple.getDouble("avg(a_i)"); - Double avgf = tuple.getDouble("avg(a_f)"); - Double count = tuple.getDouble("count(*)"); - - assertTrue(sumi.longValue() == 70); - assertTrue(sumf.doubleValue() == 55.0D); - assertTrue(mini.doubleValue() == 0.0D); - assertTrue(minf.doubleValue() == 1.0D); - assertTrue(maxi.doubleValue() == 14.0D); - assertTrue(maxf.doubleValue() == 10.0D); - assertTrue(avgi.doubleValue() == 7.0D); - assertTrue(avgf.doubleValue() == 5.5D); - assertTrue(count.doubleValue() == 10); - + Double count = tuple.getDouble("EXPR$0"); //count(*) + Double sumi = tuple.getDouble("EXPR$1"); //sum(a_i) + Double mini = tuple.getDouble("EXPR$2"); //min(a_i) + Double maxi = tuple.getDouble("EXPR$3"); //max(a_i) + Double avgi = tuple.getDouble("EXPR$4"); //avg(a_i) + Double sumf = tuple.getDouble("EXPR$5"); //sum(a_f) + Double minf = tuple.getDouble("EXPR$6"); //min(a_f) + Double maxf = tuple.getDouble("EXPR$7"); //max(a_f) + Double avgf = tuple.getDouble("EXPR$8"); //avg(a_f) + assertTrue(count == 10); + assertTrue(sumi == 70); + assertTrue(mini == 0.0D); + assertTrue(maxi == 14.0D); + assertTrue(avgi == 7.0D); + assertTrue(sumf == 55.0D); + assertTrue(minf == 1.0D); + assertTrue(maxf == 10.0D); + assertTrue(avgf == 5.5D); sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select count(*) as count, sum(a_i) as sum, min(a_i) as min, max(a_i) as max, avg(a_i) as avg, sum(a_f), 
min(a_f), max(a_f), avg(a_f) from collection1"); + "stmt", "select count(*) as myCount, sum(a_i) as mySum, min(a_i) as myMin, max(a_i) as myMax, " + + "cast(avg(1.0 * a_i) as float) as myAvg, sum(a_f), min(a_f), max(a_f), avg(a_f) from collection1"); solrStream = new SolrStream(jetty.url, sParams); @@ -2121,33 +1889,30 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); - sumi = tuple.getDouble("sum"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count"); - - assertTrue(sumi.longValue() == 70); - assertTrue(sumf.doubleValue() == 55.0D); - assertTrue(mini.doubleValue() == 0.0D); - assertTrue(minf.doubleValue() == 1.0D); - assertTrue(maxi.doubleValue() == 14.0D); - assertTrue(maxf.doubleValue() == 10.0D); - assertTrue(avgi.doubleValue() == 7.0D); - assertTrue(avgf.doubleValue() == 5.5D); - assertTrue(count.doubleValue() == 10); - - + count = tuple.getDouble("myCount"); + sumi = tuple.getDouble("mySum"); + mini = tuple.getDouble("myMin"); + maxi = tuple.getDouble("myMax"); + avgi = tuple.getDouble("myAvg"); + sumf = tuple.getDouble("EXPR$5"); //sum(a_f) + minf = tuple.getDouble("EXPR$6"); //min(a_f) + maxf = tuple.getDouble("EXPR$7"); //max(a_f) + avgf = tuple.getDouble("EXPR$8"); //avg(a_f) + assertTrue(count == 10); + assertTrue(mini == 0.0D); + assertTrue(maxi == 14.0D); + assertTrue(sumi == 70); + assertTrue(avgi == 7.0D); + assertTrue(sumf == 55.0D); + assertTrue(minf == 1.0D); + assertTrue(maxf == 10.0D); + assertTrue(avgf == 5.5D); // Test where clause hits - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select count(*), sum(a_i), min(a_i), max(a_i), avg(a_i), sum(a_f), min(a_f), max(a_f), avg(a_f) from collection1 where id = 2"); + "stmt", "select count(*), sum(a_i), min(a_i), 
max(a_i), cast(avg(1.0 * a_i) as float), sum(a_f), " + + "min(a_f), max(a_f), avg(a_f) from collection1 where id = 2"); solrStream = new SolrStream(jetty.url, sParams); @@ -2157,31 +1922,31 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { tuple = tuples.get(0); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); + count = tuple.getDouble("EXPR$0"); //count(*) + sumi = tuple.getDouble("EXPR$1"); //sum(a_i) + mini = tuple.getDouble("EXPR$2"); //min(a_i) + maxi = tuple.getDouble("EXPR$3"); //max(a_i) + avgi = tuple.getDouble("EXPR$4"); //avg(a_i) + sumf = tuple.getDouble("EXPR$5"); //sum(a_f) + minf = tuple.getDouble("EXPR$6"); //min(a_f) + maxf = tuple.getDouble("EXPR$7"); //max(a_f) + avgf = tuple.getDouble("EXPR$8"); //avg(a_f) - assertTrue(sumi.longValue() == 2); - assertTrue(sumf.doubleValue() == 2.0D); + assertTrue(count == 1); + assertTrue(sumi == 2); assertTrue(mini == 2); - assertTrue(minf == 2); assertTrue(maxi == 2); + assertTrue(avgi == 2.0D); + assertTrue(sumf == 2.0D); + assertTrue(minf == 2); assertTrue(maxf == 2); - assertTrue(avgi.doubleValue() == 2.0D); - assertTrue(avgf.doubleValue() == 2.0); - assertTrue(count.doubleValue() == 1); + assertTrue(avgf == 2.0); // Test zero hits - sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select count(*), sum(a_i), min(a_i), max(a_i), avg(a_i), sum(a_f), min(a_f), max(a_f), avg(a_f) from collection1 where a_s = 'blah'"); + "stmt", "select count(*), sum(a_i), min(a_i), max(a_i), cast(avg(1.0 * a_i) as float), sum(a_f), " + + "min(a_f), max(a_f), avg(a_f) from collection1 where a_s = 'blah'"); solrStream = new SolrStream(jetty.url, sParams); @@ -2191,36 +1956,32 @@ public class TestSQLHandler extends 
AbstractFullDistribZkTestBase { tuple = tuples.get(0); - sumi = tuple.getDouble("sum(a_i)"); - sumf = tuple.getDouble("sum(a_f)"); - mini = tuple.getDouble("min(a_i)"); - minf = tuple.getDouble("min(a_f)"); - maxi = tuple.getDouble("max(a_i)"); - maxf = tuple.getDouble("max(a_f)"); - avgi = tuple.getDouble("avg(a_i)"); - avgf = tuple.getDouble("avg(a_f)"); - count = tuple.getDouble("count(*)"); + count = tuple.getDouble("EXPR$0"); //count(*) + sumi = tuple.getDouble("EXPR$1"); //sum(a_i) + mini = tuple.getDouble("EXPR$2"); //min(a_i) + maxi = tuple.getDouble("EXPR$3"); //max(a_i) + avgi = tuple.getDouble("EXPR$4"); //avg(a_i) + sumf = tuple.getDouble("EXPR$5"); //sum(a_f) + minf = tuple.getDouble("EXPR$6"); //min(a_f) + maxf = tuple.getDouble("EXPR$7"); //max(a_f) + avgf = tuple.getDouble("EXPR$8"); //avg(a_f) - assertTrue(sumi.longValue() == 0); - assertTrue(sumf.doubleValue() == 0.0D); + assertTrue(count == 0); + assertTrue(sumi == null); assertTrue(mini == null); - assertTrue(minf == null); assertTrue(maxi == null); + assertTrue(avgi == null); + assertTrue(sumf == null); + assertTrue(minf == null); assertTrue(maxf == null); - assertTrue(Double.isNaN(avgi)); - assertTrue(Double.isNaN(avgf)); - assertTrue(count.doubleValue() == 0); + assertTrue(avgf == null); del("*:*"); commit(); } - - - private void testTimeSeriesGrouping() throws Exception { try { - CloudJettyRunner jetty = this.cloudJettys.get(0); del("*:*"); @@ -2237,97 +1998,93 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { indexr("id", "8", "year_i", "2014", "month_i", "4", "day_i", "2", "item_i", "1"); commit(); + SolrParams sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select year_i, sum(item_i) from collection1 group by year_i order by year_i desc"); + "stmt", "select year_i, sum(item_i) from collection1 group by year_i order by year_i desc"); SolrStream solrStream = new SolrStream(jetty.url, sParams); List tuples = getTuples(solrStream); - //Only two results because of 
the limit. assert(tuples.size() == 2); - Tuple tuple = null; + Tuple tuple; tuple = tuples.get(0); assert(tuple.getLong("year_i") == 2015); - assert(tuple.getDouble("sum(item_i)") == 66); + assert(tuple.getDouble("EXPR$1") == 66); //sum(item_i) tuple = tuples.get(1); assert(tuple.getLong("year_i") == 2014); - assert(tuple.getDouble("sum(item_i)") == 7); + assert(tuple.getDouble("EXPR$1") == 7); //sum(item_i) sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select year_i, month_i, sum(item_i) from collection1 group by year_i, month_i order by year_i desc, month_i desc"); + "stmt", "select year_i, month_i, sum(item_i) from collection1 group by year_i, month_i " + + "order by year_i desc, month_i desc"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); - //Only two results because of the limit. assert(tuples.size() == 3); - tuple = null; - tuple = tuples.get(0); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 11); - assert(tuple.getDouble("sum(item_i)") == 57); + assert(tuple.getDouble("EXPR$2") == 57); //sum(item_i) tuple = tuples.get(1); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 10); - assert(tuple.getDouble("sum(item_i)") == 9); + assert(tuple.getDouble("EXPR$2") == 9); //sum(item_i) tuple = tuples.get(2); assert(tuple.getLong("year_i") == 2014); assert(tuple.getLong("month_i") == 4); - assert(tuple.getDouble("sum(item_i)") == 7); + assert(tuple.getDouble("EXPR$2") == 7); //sum(item_i) sParams = mapParams(CommonParams.QT, "/sql", - "stmt", "select year_i, month_i, day_i, sum(item_i) from collection1 group by year_i, month_i, day_i order by year_i desc, month_i desc, day_i desc"); + "stmt", "select year_i, month_i, day_i, sum(item_i) from collection1 group by year_i, month_i, day_i " + + "order by year_i desc, month_i desc, day_i desc"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); - //Only two results because of the limit. 
assert(tuples.size() == 6); - tuple = null; - tuple = tuples.get(0); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 11); assert(tuple.getLong("day_i") == 8); - assert(tuple.getDouble("sum(item_i)") == 42); + assert(tuple.getDouble("EXPR$3") == 42); //sum(item_i) tuple = tuples.get(1); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 11); assert(tuple.getLong("day_i") == 7); - assert(tuple.getDouble("sum(item_i)") == 15); + assert(tuple.getDouble("EXPR$3") == 15); //sum(item_i) tuple = tuples.get(2); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 10); assert(tuple.getLong("day_i") == 3); - assert(tuple.getDouble("sum(item_i)") == 5); + assert(tuple.getDouble("EXPR$3") == 5); //sum(item_i) tuple = tuples.get(3); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 10); assert(tuple.getLong("day_i") == 1); - assert(tuple.getDouble("sum(item_i)") == 4); + assert(tuple.getDouble("EXPR$3") == 4); //sum(item_i) tuple = tuples.get(4); assert(tuple.getLong("year_i") == 2014); assert(tuple.getLong("month_i") == 4); assert(tuple.getLong("day_i") == 4); - assert(tuple.getDouble("sum(item_i)") == 6); + assert(tuple.getDouble("EXPR$3") == 6); //sum(item_i) tuple = tuples.get(5); assert(tuple.getLong("year_i") == 2014); assert(tuple.getLong("month_i") == 4); assert(tuple.getLong("day_i") == 2); - assert(tuple.getDouble("sum(item_i)") == 1); + assert(tuple.getDouble("EXPR$3") == 1); //sum(item_i) } finally { delete(); @@ -2360,89 +2117,88 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { SolrStream solrStream = new SolrStream(jetty.url, sParams); List tuples = getTuples(solrStream); - //Only two results because of the limit. 
assert(tuples.size() == 2); - Tuple tuple = null; + Tuple tuple; tuple = tuples.get(0); assert(tuple.getLong("year_i") == 2015); - assert(tuple.getDouble("sum(item_i)") == 66); + assert(tuple.getDouble("EXPR$1") == 66); //sum(item_i) tuple = tuples.get(1); assert(tuple.getLong("year_i") == 2014); - assert(tuple.getDouble("sum(item_i)") == 7); + assert(tuple.getDouble("EXPR$1") == 7); //sum(item_i) sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet", - "stmt", "select year_i, month_i, sum(item_i) from collection1 group by year_i, month_i order by year_i desc, month_i desc"); + "stmt", "select year_i, month_i, sum(item_i) from collection1 group by year_i, month_i " + + "order by year_i desc, month_i desc"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); - //Only two results because of the limit. assert(tuples.size() == 3); tuple = tuples.get(0); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 11); - assert(tuple.getDouble("sum(item_i)") == 57); + assert(tuple.getDouble("EXPR$2") == 57); //sum(item_i) tuple = tuples.get(1); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 10); - assert(tuple.getDouble("sum(item_i)") == 9); + assert(tuple.getDouble("EXPR$2") == 9); //sum(item_i) tuple = tuples.get(2); assert(tuple.getLong("year_i") == 2014); assert(tuple.getLong("month_i") == 4); - assert(tuple.getDouble("sum(item_i)") == 7); + assert(tuple.getDouble("EXPR$2") == 7); //sum(item_i) sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet", - "stmt", "select year_i, month_i, day_i, sum(item_i) from collection1 group by year_i, month_i, day_i order by year_i desc, month_i desc, day_i desc"); + "stmt", "select year_i, month_i, day_i, sum(item_i) from collection1 group by year_i, month_i, day_i " + + "order by year_i desc, month_i desc, day_i desc"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); - //Only two results 
because of the limit. assert(tuples.size() == 6); tuple = tuples.get(0); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 11); assert(tuple.getLong("day_i") == 8); - assert(tuple.getDouble("sum(item_i)") == 42); + assert(tuple.getDouble("EXPR$3") == 42); //sum(item_i) tuple = tuples.get(1); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 11); assert(tuple.getLong("day_i") == 7); - assert(tuple.getDouble("sum(item_i)") == 15); + assert(tuple.getDouble("EXPR$3") == 15); //sum(item_i) tuple = tuples.get(2); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 10); assert(tuple.getLong("day_i") == 3); - assert(tuple.getDouble("sum(item_i)") == 5); + assert(tuple.getDouble("EXPR$3") == 5); //sum(item_i) tuple = tuples.get(3); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 10); assert(tuple.getLong("day_i") == 1); - assert(tuple.getDouble("sum(item_i)") == 4); + assert(tuple.getDouble("EXPR$3") == 4); //sum(item_i) tuple = tuples.get(4); assert(tuple.getLong("year_i") == 2014); assert(tuple.getLong("month_i") == 4); assert(tuple.getLong("day_i") == 4); - assert(tuple.getDouble("sum(item_i)") == 6); + assert(tuple.getDouble("EXPR$3") == 6); //sum(item_i) tuple = tuples.get(5); assert(tuple.getLong("year_i") == 2014); assert(tuple.getLong("month_i") == 4); assert(tuple.getLong("day_i") == 2); - assert(tuple.getDouble("sum(item_i)") == 1); + assert(tuple.getDouble("EXPR$3") == 1); //sum(item_i) } finally { delete(); } @@ -2450,7 +2206,6 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { private void testParallelTimeSeriesGrouping() throws Exception { try { - CloudJettyRunner jetty = this.cloudJettys.get(0); del("*:*"); @@ -2467,172 +2222,105 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase { indexr("id", "8", "year_i", "2014", "month_i", "4", "day_i", "2", "item_i", "1"); commit(); - SolrParams sParams = 
mapParams(CommonParams.QT, "/sql", "numWorkers", "2", + SolrParams sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", "stmt", "select year_i, sum(item_i) from collection1 group by year_i order by year_i desc"); SolrStream solrStream = new SolrStream(jetty.url, sParams); List tuples = getTuples(solrStream); - //Only two results because of the limit. assert(tuples.size() == 2); - Tuple tuple = null; + Tuple tuple; tuple = tuples.get(0); assert(tuple.getLong("year_i") == 2015); assert(tuple.get("year_i") instanceof Long); // SOLR-8601, This tests that the bucket is actually a Long and not parsed from a String. - assert(tuple.getDouble("sum(item_i)") == 66); + assert(tuple.getDouble("EXPR$1") == 66); //sum(item_i) tuple = tuples.get(1); assert(tuple.getLong("year_i") == 2014); - assert(tuple.getDouble("sum(item_i)") == 7); + assert(tuple.getDouble("EXPR$1") == 7); //sum(item_i) - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select year_i, month_i, sum(item_i) from collection1 group by year_i, month_i order by year_i desc, month_i desc"); + sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", + "stmt", "select year_i, month_i, sum(item_i) from collection1 group by year_i, month_i " + + "order by year_i desc, month_i desc"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); - //Only two results because of the limit. 
assert(tuples.size() == 3); - tuple = null; - tuple = tuples.get(0); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 11); assert(tuple.get("year_i") instanceof Long); assert(tuple.get("month_i") instanceof Long); - assert(tuple.getDouble("sum(item_i)") == 57); + assert(tuple.getDouble("EXPR$2") == 57); //sum(item_i) tuple = tuples.get(1); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 10); - assert(tuple.getDouble("sum(item_i)") == 9); + assert(tuple.getDouble("EXPR$2") == 9); //sum(item_i) tuple = tuples.get(2); assert(tuple.getLong("year_i") == 2014); assert(tuple.getLong("month_i") == 4); - assert(tuple.getDouble("sum(item_i)") == 7); + assert(tuple.getDouble("EXPR$2") == 7); //sum(item_i) - sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select year_i, month_i, day_i, sum(item_i) from collection1 group by year_i, month_i, day_i order by year_i desc, month_i desc, day_i desc"); + sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce", + "stmt", "select year_i, month_i, day_i, sum(item_i) from collection1 group by year_i, month_i, day_i " + + "order by year_i desc, month_i desc, day_i desc"); solrStream = new SolrStream(jetty.url, sParams); tuples = getTuples(solrStream); - //Only two results because of the limit. 
assert(tuples.size() == 6); - tuple = null; - tuple = tuples.get(0); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 11); assert(tuple.getLong("day_i") == 8); - assert(tuple.getDouble("sum(item_i)") == 42); + assert(tuple.getDouble("EXPR$3") == 42); //sum(item_i) tuple = tuples.get(1); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 11); assert(tuple.getLong("day_i") == 7); - assert(tuple.getDouble("sum(item_i)") == 15); + assert(tuple.getDouble("EXPR$3") == 15); //sum(item_i) tuple = tuples.get(2); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 10); assert(tuple.getLong("day_i") == 3); - assert(tuple.getDouble("sum(item_i)") == 5); + assert(tuple.getDouble("EXPR$3") == 5); //sum(item_i) tuple = tuples.get(3); assert(tuple.getLong("year_i") == 2015); assert(tuple.getLong("month_i") == 10); assert(tuple.getLong("day_i") == 1); - assert(tuple.getDouble("sum(item_i)") == 4); + assert(tuple.getDouble("EXPR$3") == 4); //sum(item_i) tuple = tuples.get(4); assert(tuple.getLong("year_i") == 2014); assert(tuple.getLong("month_i") == 4); assert(tuple.getLong("day_i") == 4); - assert(tuple.getDouble("sum(item_i)") == 6); + assert(tuple.getDouble("EXPR$3") == 6); //sum(item_i) tuple = tuples.get(5); assert(tuple.getLong("year_i") == 2014); assert(tuple.getLong("month_i") == 4); assert(tuple.getLong("day_i") == 2); - assert(tuple.getDouble("sum(item_i)") == 1); + assert(tuple.getDouble("EXPR$3") == 1); //sum(item_i) } finally { delete(); } } - private void testCatalogStream() throws Exception { - CloudJettyRunner jetty = this.cloudJettys.get(0); - - SolrParams sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select TABLE_CAT from _CATALOGS_"); - - SolrStream solrStream = new SolrStream(jetty.url, sParams); - List tuples = getTuples(solrStream); - - assertEquals(tuples.size(), 1); - assertEquals(tuples.get(0).getString("TABLE_CAT"), zkServer.getZkAddress()); - } - - 
private void testSchemasStream() throws Exception { - CloudJettyRunner jetty = this.cloudJettys.get(0); - - SolrParams sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select TABLE_SCHEM, TABLE_CATALOG from _SCHEMAS_"); - - SolrStream solrStream = new SolrStream(jetty.url, sParams); - List tuples = getTuples(solrStream); - - assertEquals(tuples.size(), 0); - } - - private void testTablesStream() throws Exception { - CloudJettyRunner jetty = this.cloudJettys.get(0); - - SolrParams sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", - "stmt", "select TABLE_CAT, TABLE_SCHEM, TABLE_NAME, TABLE_TYPE, REMARKS from _TABLES_"); - - SolrStream solrStream = new SolrStream(jetty.url, sParams); - List tuples = getTuples(solrStream); - - assertEquals(2, tuples.size()); - - List collections = new ArrayList<>(); - collections.addAll(cloudClient.getZkStateReader().getClusterState().getCollectionsMap().keySet()); - Collections.sort(collections); - for (Tuple tuple : tuples) { - assertEquals(zkServer.getZkAddress(), tuple.getString("TABLE_CAT")); - assertNull(tuple.get("TABLE_SCHEM")); - assertTrue(collections.contains(tuple.getString("TABLE_NAME"))); - assertEquals("TABLE", tuple.getString("TABLE_TYPE")); - assertNull(tuple.get("REMARKS")); - } - - tuples = getTuples(solrStream); - assertEquals(2, tuples.size()); - - collections = new ArrayList<>(); - collections.addAll(cloudClient.getZkStateReader().getClusterState().getCollectionsMap().keySet()); - Collections.sort(collections); - for (Tuple tuple : tuples) { - assertEquals(zkServer.getZkAddress(), tuple.getString("TABLE_CAT")); - assertNull(tuple.get("TABLE_SCHEM")); - assertTrue(collections.contains(tuple.getString("TABLE_NAME"))); - assertEquals("TABLE", tuple.getString("TABLE_TYPE")); - assertNull(tuple.get("REMARKS")); - } - } - protected List getTuples(TupleStream tupleStream) throws IOException { tupleStream.open(); - List tuples = new ArrayList(); + List tuples = new ArrayList<>(); 
for(;;) { Tuple t = tupleStream.read(); if(t.EOF) { diff --git a/solr/licenses/avatica-core-1.9.0.jar.sha1 b/solr/licenses/avatica-core-1.9.0.jar.sha1 new file mode 100644 index 00000000000..b44a6158cf7 --- /dev/null +++ b/solr/licenses/avatica-core-1.9.0.jar.sha1 @@ -0,0 +1 @@ +c16b346eef02495f2f4b429fe04c33e526ec0229 diff --git a/solr/licenses/avatica-core-LICENSE-ASL.txt b/solr/licenses/avatica-core-LICENSE-ASL.txt new file mode 100644 index 00000000000..f7b9863d57e --- /dev/null +++ b/solr/licenses/avatica-core-LICENSE-ASL.txt @@ -0,0 +1,268 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + + + +----------------------------------------------------------------------- + +APACHE CALCITE SUBCOMPONENTS: + +The Apache Calcite project contains subcomponents with separate copyright +notices and license terms. Your use of the source code for the these +subcomponents is subject to the terms and conditions of the following +licenses. + +----------------------------------------------------------------------- + The MIT License +----------------------------------------------------------------------- + +The Apache Calcite project bundles the following files under the MIT License: + +- site + Parts of the web site generated by Jekyll (http://jekyllrb.com/) + Copyright (c) 2008-2015 Tom Preston-Werner +- site/_sass/_font-awesome.scss + Font-awesome css files v4.1.0 (http://fortawesome.github.io/Font-Awesome/) + Copyright (c) 2013 Dave Gandy +- site/_sass/_normalize.scss + normalize.css v3.0.2 | git.io/normalize + Copyright (c) Nicolas Gallagher and Jonathan Neal +- site/_sass/_gridism.scss + Gridism: A simple, responsive, and handy CSS grid by @cobyism + https://github.com/cobyism/gridism + Copyright (c) 2013 Coby Chapple +- site/js/html5shiv.min.js + HTML5 Shiv 3.7.2 | @afarkas @jdalton @jon_neal @rem +- site/js/respond.min.js + Respond.js v1.4.2: min/max-width media query polyfill + Copyright 2013 Scott Jehl + +Permission is hereby granted, free of charge, to any person obtaining a 
copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +----------------------------------------------------------------------- + The Open Font License +----------------------------------------------------------------------- + +The Apache Calcite project bundles the following fonts under the +SIL Open Font License (OFL) - http://scripts.sil.org/OFL/ + +- site/fonts/fontawesome-webfont.* + Font-awesome font files v4.0.3 (http://fortawesome.github.io/Font-Awesome/) diff --git a/solr/licenses/avatica-core-NOTICE.txt b/solr/licenses/avatica-core-NOTICE.txt new file mode 100644 index 00000000000..506738bcae5 --- /dev/null +++ b/solr/licenses/avatica-core-NOTICE.txt @@ -0,0 +1,5 @@ +Apache Calcite +Copyright 2012-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
diff --git a/solr/licenses/calcite-core-1.11.0.jar.sha1 b/solr/licenses/calcite-core-1.11.0.jar.sha1 new file mode 100644 index 00000000000..980c772c62d --- /dev/null +++ b/solr/licenses/calcite-core-1.11.0.jar.sha1 @@ -0,0 +1 @@ +d0f90841119f1ec02a64ef029ff525171a320cff diff --git a/solr/licenses/calcite-core-LICENSE-ASL.txt b/solr/licenses/calcite-core-LICENSE-ASL.txt new file mode 100644 index 00000000000..f7b9863d57e --- /dev/null +++ b/solr/licenses/calcite-core-LICENSE-ASL.txt @@ -0,0 +1,268 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + + + +----------------------------------------------------------------------- + +APACHE CALCITE SUBCOMPONENTS: + +The Apache Calcite project contains subcomponents with separate copyright +notices and license terms. Your use of the source code for the these +subcomponents is subject to the terms and conditions of the following +licenses. + +----------------------------------------------------------------------- + The MIT License +----------------------------------------------------------------------- + +The Apache Calcite project bundles the following files under the MIT License: + +- site + Parts of the web site generated by Jekyll (http://jekyllrb.com/) + Copyright (c) 2008-2015 Tom Preston-Werner +- site/_sass/_font-awesome.scss + Font-awesome css files v4.1.0 (http://fortawesome.github.io/Font-Awesome/) + Copyright (c) 2013 Dave Gandy +- site/_sass/_normalize.scss + normalize.css v3.0.2 | git.io/normalize + Copyright (c) Nicolas Gallagher and Jonathan Neal +- site/_sass/_gridism.scss + Gridism: A simple, responsive, and handy CSS grid by @cobyism + https://github.com/cobyism/gridism + Copyright (c) 2013 Coby Chapple +- site/js/html5shiv.min.js + HTML5 Shiv 3.7.2 | @afarkas @jdalton @jon_neal @rem +- site/js/respond.min.js + Respond.js v1.4.2: min/max-width media query polyfill + Copyright 2013 Scott Jehl + +Permission is hereby granted, free of charge, to any person obtaining a 
copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +----------------------------------------------------------------------- + The Open Font License +----------------------------------------------------------------------- + +The Apache Calcite project bundles the following fonts under the +SIL Open Font License (OFL) - http://scripts.sil.org/OFL/ + +- site/fonts/fontawesome-webfont.* + Font-awesome font files v4.0.3 (http://fortawesome.github.io/Font-Awesome/) diff --git a/solr/licenses/calcite-core-NOTICE.txt b/solr/licenses/calcite-core-NOTICE.txt new file mode 100644 index 00000000000..589ab43a3a3 --- /dev/null +++ b/solr/licenses/calcite-core-NOTICE.txt @@ -0,0 +1,12 @@ +Apache Calcite +Copyright 2012-2017 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This product is based on source code originally developed +by DynamoBI Corporation, LucidEra Inc., SQLstream Inc. and others +under the auspices of the Eigenbase Foundation +and released as the LucidDB project. 
+ +The web site includes files generated by Jekyll. diff --git a/solr/licenses/calcite-linq4j-1.11.0.jar.sha1 b/solr/licenses/calcite-linq4j-1.11.0.jar.sha1 new file mode 100644 index 00000000000..98396328447 --- /dev/null +++ b/solr/licenses/calcite-linq4j-1.11.0.jar.sha1 @@ -0,0 +1 @@ +b09e988f64c69c21cc61aa734e9955736a07e016 diff --git a/solr/licenses/calcite-linq4j-LICENSE-ASL.txt b/solr/licenses/calcite-linq4j-LICENSE-ASL.txt new file mode 100644 index 00000000000..f7b9863d57e --- /dev/null +++ b/solr/licenses/calcite-linq4j-LICENSE-ASL.txt @@ -0,0 +1,268 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + + + +----------------------------------------------------------------------- + +APACHE CALCITE SUBCOMPONENTS: + +The Apache Calcite project contains subcomponents with separate copyright +notices and license terms. Your use of the source code for the these +subcomponents is subject to the terms and conditions of the following +licenses. + +----------------------------------------------------------------------- + The MIT License +----------------------------------------------------------------------- + +The Apache Calcite project bundles the following files under the MIT License: + +- site + Parts of the web site generated by Jekyll (http://jekyllrb.com/) + Copyright (c) 2008-2015 Tom Preston-Werner +- site/_sass/_font-awesome.scss + Font-awesome css files v4.1.0 (http://fortawesome.github.io/Font-Awesome/) + Copyright (c) 2013 Dave Gandy +- site/_sass/_normalize.scss + normalize.css v3.0.2 | git.io/normalize + Copyright (c) Nicolas Gallagher and Jonathan Neal +- site/_sass/_gridism.scss + Gridism: A simple, responsive, and handy CSS grid by @cobyism + https://github.com/cobyism/gridism + Copyright (c) 2013 Coby Chapple +- site/js/html5shiv.min.js + HTML5 Shiv 3.7.2 | @afarkas @jdalton @jon_neal @rem +- site/js/respond.min.js + Respond.js v1.4.2: min/max-width media query polyfill + Copyright 2013 Scott Jehl + +Permission is hereby granted, free of charge, to any person obtaining a 
copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +----------------------------------------------------------------------- + The Open Font License +----------------------------------------------------------------------- + +The Apache Calcite project bundles the following fonts under the +SIL Open Font License (OFL) - http://scripts.sil.org/OFL/ + +- site/fonts/fontawesome-webfont.* + Font-awesome font files v4.0.3 (http://fortawesome.github.io/Font-Awesome/) diff --git a/solr/licenses/calcite-linq4j-NOTICE.txt b/solr/licenses/calcite-linq4j-NOTICE.txt new file mode 100644 index 00000000000..589ab43a3a3 --- /dev/null +++ b/solr/licenses/calcite-linq4j-NOTICE.txt @@ -0,0 +1,12 @@ +Apache Calcite +Copyright 2012-2017 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This product is based on source code originally developed +by DynamoBI Corporation, LucidEra Inc., SQLstream Inc. and others +under the auspices of the Eigenbase Foundation +and released as the LucidDB project. 
+ +The web site includes files generated by Jekyll. diff --git a/solr/licenses/commons-compiler-2.7.6.jar.sha1 b/solr/licenses/commons-compiler-2.7.6.jar.sha1 new file mode 100644 index 00000000000..46bbbb7475e --- /dev/null +++ b/solr/licenses/commons-compiler-2.7.6.jar.sha1 @@ -0,0 +1 @@ +b71e76d942b33dfa26e4e3047ff2a774d1f917b4 diff --git a/solr/licenses/commons-compiler-LICENSE-BSD.txt b/solr/licenses/commons-compiler-LICENSE-BSD.txt new file mode 100644 index 00000000000..ef871e24262 --- /dev/null +++ b/solr/licenses/commons-compiler-LICENSE-BSD.txt @@ -0,0 +1,31 @@ +Janino - An embedded Java[TM] compiler + +Copyright (c) 2001-2016, Arno Unkrig +Copyright (c) 2015-2016 TIBCO Software Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + 3. Neither the name of JANINO nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/solr/licenses/commons-compiler-NOTICE.txt b/solr/licenses/commons-compiler-NOTICE.txt new file mode 100644 index 00000000000..203e2f920e6 --- /dev/null +++ b/solr/licenses/commons-compiler-NOTICE.txt @@ -0,0 +1,5 @@ +Janino - An embedded Java[TM] compiler + +Copyright (c) 2001-2016, Arno Unkrig +Copyright (c) 2015-2016 TIBCO Software Inc. +All rights reserved. diff --git a/solr/licenses/eigenbase-properties-1.1.5.jar.sha1 b/solr/licenses/eigenbase-properties-1.1.5.jar.sha1 new file mode 100644 index 00000000000..2617c4debe0 --- /dev/null +++ b/solr/licenses/eigenbase-properties-1.1.5.jar.sha1 @@ -0,0 +1 @@ +a941956b3a4664d0cf728ece06ba25cc2110a3aa diff --git a/solr/licenses/eigenbase-properties-LICENSE-ASL.txt b/solr/licenses/eigenbase-properties-LICENSE-ASL.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/solr/licenses/eigenbase-properties-LICENSE-ASL.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/solr/licenses/eigenbase-properties-NOTICE.txt b/solr/licenses/eigenbase-properties-NOTICE.txt new file mode 100644 index 00000000000..95ee3fd750d --- /dev/null +++ b/solr/licenses/eigenbase-properties-NOTICE.txt @@ -0,0 +1,20 @@ +eigenbase-properties +Copyright (C) 2012-2015, Julian Hyde + +This product includes software from the Eigenbase project, licensed from +DynamoBI Corporation. + +Copyright (C) 2005 Dynamo BI Corporation + +=============================================================================== +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this software except in compliance with the License. 
+You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/solr/licenses/janino-2.7.6.jar.sha1 b/solr/licenses/janino-2.7.6.jar.sha1 new file mode 100644 index 00000000000..e64ea0d2fdc --- /dev/null +++ b/solr/licenses/janino-2.7.6.jar.sha1 @@ -0,0 +1 @@ +37fde5de7edd5d7ebe075f03f4c083df2ac73dd8 diff --git a/solr/licenses/janino-LICENSE-BSD.txt b/solr/licenses/janino-LICENSE-BSD.txt new file mode 100644 index 00000000000..ef871e24262 --- /dev/null +++ b/solr/licenses/janino-LICENSE-BSD.txt @@ -0,0 +1,31 @@ +Janino - An embedded Java[TM] compiler + +Copyright (c) 2001-2016, Arno Unkrig +Copyright (c) 2015-2016 TIBCO Software Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + 3. Neither the name of JANINO nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/solr/licenses/janino-NOTICE.txt b/solr/licenses/janino-NOTICE.txt new file mode 100644 index 00000000000..203e2f920e6 --- /dev/null +++ b/solr/licenses/janino-NOTICE.txt @@ -0,0 +1,5 @@ +Janino - An embedded Java[TM] compiler + +Copyright (c) 2001-2016, Arno Unkrig +Copyright (c) 2015-2016 TIBCO Software Inc. +All rights reserved. diff --git a/solr/licenses/presto-parser-0.122.jar.sha1 b/solr/licenses/presto-parser-0.122.jar.sha1 deleted file mode 100644 index 28a8ec24509..00000000000 --- a/solr/licenses/presto-parser-0.122.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4261cbabfd2f28b8d4d20d7e3a3d1be48bb890c diff --git a/solr/licenses/protobuf-java-2.5.0.jar.sha1 b/solr/licenses/protobuf-java-2.5.0.jar.sha1 deleted file mode 100644 index 31b5fec7abd..00000000000 --- a/solr/licenses/protobuf-java-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a10732c76bfacdbd633a7eb0f7968b1059a65dfa diff --git a/solr/licenses/protobuf-java-3.1.0.jar.sha1 b/solr/licenses/protobuf-java-3.1.0.jar.sha1 new file mode 100644 index 00000000000..c0bb9aa0143 --- /dev/null +++ b/solr/licenses/protobuf-java-3.1.0.jar.sha1 @@ -0,0 +1 @@ +e13484d9da178399d32d2d27ee21a77cfb4b7873 diff --git a/solr/licenses/slice-0.10.jar.sha1 b/solr/licenses/slice-0.10.jar.sha1 deleted file mode 100644 index 7b86d91c215..00000000000 --- a/solr/licenses/slice-0.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -159a81631ed2cc1bc865f3d8e51239c9e8a20bea diff --git 
a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/DatabaseMetaDataImpl.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/DatabaseMetaDataImpl.java index 53f5c8dd25e..c96ea7c00ed 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/DatabaseMetaDataImpl.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/DatabaseMetaDataImpl.java @@ -705,17 +705,29 @@ class DatabaseMetaDataImpl implements DatabaseMetaData { @Override public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { - return this.connectionStatement.executeQuery("select TABLE_CAT, TABLE_SCHEM, TABLE_NAME, TABLE_TYPE, REMARKS from _TABLES_"); + String tableCatCheck = ""; + if(catalog != null) { + tableCatCheck = "tableCat = '\" + catalog + \"' and"; + } + if(schemaPattern == null) { + schemaPattern = "%"; + } + if(tableNamePattern == null) { + tableNamePattern = "%"; + } + return this.connectionStatement.executeQuery("select tableCat, tableSchem, tableName, tableType, remarks from " + + "metadata.TABLES where " + tableCatCheck + " tableSchem like '" + schemaPattern + "' and tableName like '" + + tableNamePattern + "'"); } @Override public ResultSet getSchemas() throws SQLException { - return this.connectionStatement.executeQuery("select TABLE_SCHEM, TABLE_CATALOG from _SCHEMAS_"); + return this.connectionStatement.executeQuery("select distinct tableSchem, tableCat from metadata.TABLES"); } @Override public ResultSet getCatalogs() throws SQLException { - return this.connectionStatement.executeQuery("select TABLE_CAT from _CATALOGS_"); + return this.connectionStatement.executeQuery("select distinct tableCat from metadata.TABLES"); } @Override diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java index 4e239e62cef..94d937da566 100644 --- 
a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java @@ -234,6 +234,7 @@ public class FacetStream extends TupleStream implements Expressible { this.zkHost = zkHost; this.params = params; this.buckets = buckets; + System.out.println("####### Bucket count:"+buckets.length); this.metrics = metrics; this.bucketSizeLimit = bucketSizeLimit; this.collection = collection; diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/JDBCStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/JDBCStream.java index 403935826cd..0f951038b19 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/JDBCStream.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/JDBCStream.java @@ -17,6 +17,7 @@ package org.apache.solr.client.solrj.io.stream; import java.io.IOException; +import java.sql.Array; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; @@ -88,6 +89,7 @@ public class JDBCStream extends TupleStream implements Expressible { private ResultSetValueSelector[] valueSelectors; protected ResultSet resultSet; protected transient StreamContext streamContext; + protected String sep = Character.toString((char)31); public JDBCStream(String connectionUrl, String sqlQuery, StreamComparator definedSort) throws IOException { this(connectionUrl, sqlQuery, definedSort, null, null); @@ -209,8 +211,8 @@ public class JDBCStream extends TupleStream implements Expressible { try{ resultSet = statement.executeQuery(sqlQuery); } catch (SQLException e) { - throw new IOException(String.format(Locale.ROOT, "Failed to execute sqlQuery '%s' against JDBC connection '%s'", - sqlQuery, connectionUrl), e); + throw new IOException(String.format(Locale.ROOT, "Failed to execute sqlQuery '%s' against JDBC connection '%s'.\n" + + e.getMessage(), sqlQuery, connectionUrl), e); } try{ @@ -226,18 +228,25 @@ public 
class JDBCStream extends TupleStream implements Expressible { ResultSetValueSelector[] valueSelectors = new ResultSetValueSelector[metadata.getColumnCount()]; for(int columnIdx = 0; columnIdx < metadata.getColumnCount(); ++columnIdx){ - final int columnNumber = columnIdx + 1; // cause it starts at 1 // Use getColumnLabel instead of getColumnName to make sure fields renamed with AS as picked up properly final String columnName = metadata.getColumnLabel(columnNumber); String className = metadata.getColumnClassName(columnNumber); String typeName = metadata.getColumnTypeName(columnNumber); - + if(directSupportedTypes.contains(className)){ valueSelectors[columnIdx] = new ResultSetValueSelector() { public Object selectValue(ResultSet resultSet) throws SQLException { Object obj = resultSet.getObject(columnNumber); if(resultSet.wasNull()){ return null; } + if(obj instanceof String) { + String s = (String)obj; + if(s.indexOf(sep) > -1) { + s = s.substring(1); + return s.split(sep); + } + } + return obj; } public String getColumnName() { @@ -277,6 +286,22 @@ public class JDBCStream extends TupleStream implements Expressible { return columnName; } }; + } else if(Array.class.getName().equals(className)) { + valueSelectors[columnIdx] = new ResultSetValueSelector() { + public Object selectValue(ResultSet resultSet) throws SQLException { + Object o = resultSet.getObject(columnNumber); + if(resultSet.wasNull()){ return null; } + if(o instanceof Array) { + Array array = (Array)o; + return array.getArray(); + } else { + return o; + } + } + public String getColumnName() { + return columnName; + } + }; } else { throw new SQLException(String.format(Locale.ROOT, "Unable to determine the valueSelector for column '%s' (col #%d) of java class '%s' and type '%s'", diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RollupStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RollupStream.java index c43f847807e..dddf70117ed 100644 --- 
a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RollupStream.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RollupStream.java @@ -72,9 +72,7 @@ public class RollupStream extends TupleStream implements Expressible { if(1 != streamExpressions.size()){ throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting a single stream but found %d",expression, streamExpressions.size())); } - if(0 == metricExpressions.size()){ - throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting at least 1 metric but found %d",expression, metricExpressions.size())); - } + if(null == overExpression || !(overExpression.getParameter() instanceof StreamExpressionValue)){ throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting single 'over' parameter listing fields to rollup by but didn't find one",expression)); } @@ -247,12 +245,14 @@ public class RollupStream extends TupleStream implements Expressible { t = new Tuple(map); } - currentMetrics = new Metric[metrics.length]; currentKey = hashKey; - for(int i=0; i children() { - List l = new ArrayList(); - return l; + return new ArrayList<>(); } public void open() throws IOException { @@ -233,10 +231,9 @@ public class StatsStream extends TupleStream implements Expressible { done = true; return tuple; } else { - Map fields = new HashMap(); + Map fields = new HashMap<>(); fields.put("EOF", true); - Tuple tuple = new Tuple(fields); - return tuple; + return new Tuple(fields); } } @@ -245,7 +242,7 @@ public class StatsStream extends TupleStream implements Expressible { } private void addStats(ModifiableSolrParams params, Metric[] _metrics) { - Map> m = new HashMap(); + Map> m = new HashMap<>(); for(Metric metric : _metrics) { String metricId = metric.getIdentifier(); if(metricId.contains("(")) { @@ -255,8 +252,11 @@ public class StatsStream extends TupleStream implements Expressible { String column = parts[1]; List stats = m.get(column); - 
if(stats == null && !column.equals("*")) { - stats = new ArrayList(); + if(stats == null) { + stats = new ArrayList<>(); + } + + if(!column.equals("*")) { m.put(column, stats); } @@ -290,34 +290,36 @@ public class StatsStream extends TupleStream implements Expressible { private Tuple getTuple(NamedList response) { - Map map = new HashMap(); + Map map = new HashMap<>(); + SolrDocumentList solrDocumentList = (SolrDocumentList) response.get("response"); + + long count = solrDocumentList.getNumFound(); if(doCount) { - SolrDocumentList solrDocumentList = (SolrDocumentList) response.get("response"); - this.count = solrDocumentList.getNumFound(); - map.put("count(*)", this.count); + map.put("count(*)", count); } - NamedList stats = (NamedList)response.get("stats"); - NamedList statsFields = (NamedList)stats.get("stats_fields"); + if(count != 0) { + NamedList stats = (NamedList)response.get("stats"); + NamedList statsFields = (NamedList)stats.get("stats_fields"); - for(int i=0; i map, String field, String stat, Object val) { if(stat.equals("mean")) { map.put("avg("+field+")", val); } else { diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/CountMetric.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/CountMetric.java index 0e8cbb057b8..61b83398e30 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/CountMetric.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/CountMetric.java @@ -24,40 +24,50 @@ import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter; import org.apache.solr.client.solrj.io.stream.expr.StreamFactory; public class CountMetric extends Metric { + private String columnName; private long count; - - public CountMetric(){ - init("count"); + + public CountMetric() { + this("*"); + } + + public CountMetric(String columnName) { + init("count", columnName); } public CountMetric(StreamExpression expression, StreamFactory factory) throws 
IOException{ // grab all parameters out String functionName = expression.getFunctionName(); String columnName = factory.getValueOperand(expression, 0); - - // validate expression contains only what we want. - if(!"*".equals(columnName)){ - throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expected %s(*)", expression, functionName)); - } + if(1 != expression.getParameters().size()){ throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - unknown operands found", expression)); } - - init(functionName); - + + init(functionName, columnName); } public String[] getColumns() { - return new String[0]; + if(isAllColumns()) { + return new String[0]; + } + return new String[]{columnName}; } - - private void init(String functionName){ + + private void init(String functionName, String columnName){ + this.columnName = columnName; setFunctionName(functionName); - setIdentifier(functionName, "(*)"); + setIdentifier(functionName, "(", columnName, ")"); + } + + private boolean isAllColumns() { + return "*".equals(this.columnName); } public void update(Tuple tuple) { - ++count; + if(isAllColumns() || tuple.get(columnName) != null) { + ++count; + } } public Long getValue() { @@ -65,10 +75,11 @@ public class CountMetric extends Metric { } public Metric newInstance() { - return new CountMetric(); + return new CountMetric(columnName); } + @Override public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException { - return new StreamExpression(getFunctionName()).withParameter("*"); + return new StreamExpression(getFunctionName()).withParameter(columnName); } } \ No newline at end of file diff --git a/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema-sql.xml b/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema-sql.xml index 579fda3f0bd..56bf625b967 100644 --- a/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema-sql.xml +++ b/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema-sql.xml @@ 
-44,7 +44,7 @@ positionIncrementGap="0"/> - + diff --git a/solr/solrj/src/test-files/solrj/solr/configsets/streaming/conf/schema.xml b/solr/solrj/src/test-files/solrj/solr/configsets/streaming/conf/schema.xml index e7f277230f2..7d3173addcd 100644 --- a/solr/solrj/src/test-files/solrj/solr/configsets/streaming/conf/schema.xml +++ b/solr/solrj/src/test-files/solrj/solr/configsets/streaming/conf/schema.xml @@ -42,7 +42,7 @@ - + @@ -102,8 +102,8 @@ - - + +