diff --git a/lucene/ivy-versions.properties b/lucene/ivy-versions.properties
index 3b80ba86645..bd05786b493 100644
--- a/lucene/ivy-versions.properties
+++ b/lucene/ivy-versions.properties
@@ -20,6 +20,8 @@ com.codahale.metrics.version = 3.0.1
/com.cybozu.labs/langdetect = 1.1-20120112
/com.drewnoakes/metadata-extractor = 2.6.2
+/com.facebook.presto/presto-parser = 0.107
+
com.fasterxml.jackson.core.version = 2.3.1
/com.fasterxml.jackson.core/jackson-annotations = ${com.fasterxml.jackson.core.version}
/com.fasterxml.jackson.core/jackson-core = ${com.fasterxml.jackson.core.version}
@@ -68,6 +70,7 @@ com.sun.jersey.version = 1.9
/de.l3s.boilerpipe/boilerpipe = 1.1.0
/dom4j/dom4j = 1.6.1
/hsqldb/hsqldb = 1.8.0.10
+/io.airlift/slice = 0.10
/io.netty/netty = 3.7.0.Final
/it.unimi.dsi/fastutil = 6.5.11
/jakarta-regexp/jakarta-regexp = 1.4
@@ -89,6 +92,8 @@ com.sun.jersey.version = 1.9
/net.sourceforge.jmatio/jmatio = 1.0
/net.sourceforge.nekohtml/nekohtml = 1.9.17
/org.antlr/antlr-runtime = 3.5
+/org.antlr/antlr4-runtime = 4.5
+
/org.apache.ant/ant = 1.8.2
/org.apache.avro/avro = 1.7.5
/org.apache.commons/commons-compress = 1.8.1
diff --git a/solr/core/ivy.xml b/solr/core/ivy.xml
index b633c644005..79308d6810c 100644
--- a/solr/core/ivy.xml
+++ b/solr/core/ivy.xml
@@ -122,6 +122,11 @@
+    <dependency org="com.facebook.presto" name="presto-parser" rev="${/com.facebook.presto/presto-parser}" conf="compile"/>
+    <dependency org="io.airlift" name="slice" rev="${/io.airlift/slice}" conf="compile"/>
+    <dependency org="org.antlr" name="antlr4-runtime" rev="${/org.antlr/antlr4-runtime}" conf="compile"/>
diff --git a/solr/core/src/java/org/apache/solr/handler/SQLHandler.java b/solr/core/src/java/org/apache/solr/handler/SQLHandler.java
new file mode 100644
index 00000000000..8edd44e0253
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/handler/SQLHandler.java
@@ -0,0 +1,781 @@
+package org.apache.solr.handler;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Locale;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import com.facebook.presto.sql.ExpressionFormatter;
+import com.facebook.presto.sql.tree.*;
+import com.google.common.base.Strings;
+import com.google.common.collect.Iterables;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.comp.ComparatorOrder;
+import org.apache.solr.client.solrj.io.comp.FieldComparator;
+import org.apache.solr.client.solrj.io.comp.MultiComp;
+import org.apache.solr.client.solrj.io.stream.CloudSolrStream;
+import org.apache.solr.client.solrj.io.stream.ParallelStream;
+import org.apache.solr.client.solrj.io.stream.RankStream;
+import org.apache.solr.client.solrj.io.stream.RollupStream;
+import org.apache.solr.client.solrj.io.stream.StreamContext;
+import org.apache.solr.client.solrj.io.stream.TupleStream;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.util.plugin.SolrCoreAware;
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Optional;
+import org.apache.solr.client.solrj.io.stream.metrics.*;
+
+import com.facebook.presto.sql.parser.SqlParser;
+
+public class SQLHandler extends RequestHandlerBase implements SolrCoreAware {
+
+ private Map<String, TableSpec> tableMappings = new HashMap();
+ private String defaultZkhost = null;
+ private String defaultWorkerCollection = null;
+
+ public void inform(SolrCore core) {
+
+ CoreContainer coreContainer = core.getCoreDescriptor().getCoreContainer();
+
+ if(coreContainer.isZooKeeperAware()) {
+ defaultZkhost = core.getCoreDescriptor().getCoreContainer().getZkController().getZkServerAddress();
+ defaultWorkerCollection = core.getCoreDescriptor().getCollectionName();
+ }
+
+ NamedList<String> tableConf = (NamedList<String>)initArgs.get("tables");
+
+ for(Entry<String, String> entry : tableConf) {
+ String tableName = entry.getKey();
+ if(entry.getValue().indexOf("@") > -1) {
+ String[] parts = entry.getValue().split("@");
+ tableMappings.put(tableName, new TableSpec(parts[0], parts[1]));
+ } else {
+ String collection = entry.getValue();
+ tableMappings.put(tableName, new TableSpec(collection, defaultZkhost));
+ }
+ }
+ }
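+
+ /*
+ * Sketch (hypothetical names) of the "tables" init section this handler expects in solrconfig.xml.
+ * Each entry maps a SQL table name to a collection on the default zkHost, or to "collection@zkHost":
+ *
+ *   <lst name="tables">
+ *     <str name="mytable">collection1</str>
+ *     <str name="remotetable">collection2@otherZkHost:2181</str>
+ *   </lst>
+ */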
+
+ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
+ SolrParams params = req.getParams();
+ String sql = params.get("sql");
+ int numWorkers = params.getInt("numWorkers", 1);
+ String workerCollection = params.get("workerCollection", defaultWorkerCollection);
+ String workerZkhost = params.get("workerZkhost",defaultZkhost);
+ StreamContext context = new StreamContext();
+ TupleStream tupleStream = SQLTupleStreamParser.parse(sql, tableMappings, numWorkers, workerCollection, workerZkhost);
+ context.numWorkers = numWorkers;
+ context.setSolrClientCache(StreamHandler.clientCache);
+ tupleStream.setStreamContext(context);
+ rsp.add("tuples", tupleStream);
+ }
+
+ public String getDescription() {
+ return "SQLHandler";
+ }
+
+ public String getSource() {
+ return null;
+ }
+
+ public static class SQLTupleStreamParser {
+
+ public static TupleStream parse(String sql,
+ Map<String, TableSpec> tableMap,
+ int numWorkers,
+ String workerCollection,
+ String workerZkhost) throws IOException {
+ SqlParser parser = new SqlParser();
+ Statement statement = parser.createStatement(sql);
+
+ SQLVisitor sqlVistor = new SQLVisitor(new StringBuilder());
+
+ sqlVistor.process(statement, new Integer(0));
+
+ TupleStream sqlStream = null;
+
+ if(sqlVistor.groupByQuery) {
+ sqlStream = doGroupBy(sqlVistor, tableMap, numWorkers, workerCollection, workerZkhost);
+ } else {
+ sqlStream = doSelect(sqlVistor, tableMap, numWorkers, workerCollection, workerZkhost);
+ }
+
+ return sqlStream;
+ }
+ }
+
+ private static TupleStream doGroupBy(SQLVisitor sqlVisitor,
+ Map<String, TableSpec> tableMap,
+ int numWorkers,
+ String workerCollection,
+ String workerZkHost) throws IOException {
+
+ Set<String> fieldSet = new HashSet();
+ Bucket[] buckets = getBuckets(sqlVisitor.groupBy, fieldSet);
+ Metric[] metrics = getMetrics(sqlVisitor.fields, fieldSet);
+
+ String fl = fields(fieldSet);
+ String sortDirection = getSortDirection(sqlVisitor.sorts);
+ String sort = bucketSort(buckets, sortDirection);
+
+ TableSpec tableSpec = tableMap.get(sqlVisitor.table);
+ String zkHost = tableSpec.zkHost;
+ String collection = tableSpec.collection;
+ Map params = new HashMap();
+
+ params.put(CommonParams.FL, fl);
+ params.put(CommonParams.Q, sqlVisitor.query);
+ //Always use the /export handler for GROUP BY queries because they require exporting full result sets.
+ params.put(CommonParams.QT, "/export");
+
+ if(numWorkers > 1) {
+ params.put("partitionKeys", getPartitionKeys(buckets));
+ }
+
+ params.put("sort", sort);
+
+ TupleStream tupleStream = null;
+
+ CloudSolrStream cstream = new CloudSolrStream(zkHost, collection, params);
+ tupleStream = new RollupStream(cstream, buckets, metrics);
+
+ if(numWorkers > 1) {
+ // Do the rollups in parallel
+ // Maintain the sort of the Tuples coming from the workers.
+ Comparator comp = bucketSortComp(buckets, sortDirection);
+ tupleStream = new ParallelStream(workerZkHost, workerCollection, tupleStream, numWorkers, comp);
+ }
+
+ //TODO: This should be done on the workers, but it won't serialize because it relies on Presto classes.
+ // Once we make this an Expressionable the problem will be solved.
+
+ if(sqlVisitor.havingExpression != null) {
+ tupleStream = new HavingStream(tupleStream, sqlVisitor.havingExpression);
+ }
+
+ if(sqlVisitor.sorts != null && sqlVisitor.sorts.size() > 0) {
+ if(!sortsEqual(buckets, sortDirection, sqlVisitor.sorts)) {
+ int limit = sqlVisitor.limit == -1 ? 100 : sqlVisitor.limit;
+ Comparator comp = getComp(sqlVisitor.sorts);
+ //Rank the Tuples
+ //If parallel stream is used ALL the Rolled up tuples from the workers will be ranked
+ //Providing a true Top or Bottom.
+ tupleStream = new RankStream(tupleStream, limit, comp);
+ } else {
+ // Sort is the same as the underlying stream
+ // Only need to limit the result, not Rank the result
+ if(sqlVisitor.limit > -1) {
+ tupleStream = new LimitStream(tupleStream, sqlVisitor.limit);
+ }
+ }
+ }
+
+ return tupleStream;
+ }
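+
+ /*
+ * Example (hypothetical query) of the stream doGroupBy composes for
+ * "select str_s, sum(field_i) from mytable group by str_s order by sum(field_i) asc limit 2"
+ * with numWorkers=2:
+ *
+ *   RankStream(limit=2,
+ *     ParallelStream(workers=2,
+ *       RollupStream(buckets=[str_s], metrics=[SumMetric(field_i)],
+ *         CloudSolrStream(q=*:*, fl=str_s,field_i, sort="str_s asc", qt=/export, partitionKeys=str_s))))
+ *
+ * The RankStream is only added because the order by (sum(field_i)) differs from the bucket sort (str_s).
+ */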
+
+ private static TupleStream doSelect(SQLVisitor sqlVisitor,
+ Map<String, TableSpec> tableMap,
+ int numWorkers,
+ String workerCollection,
+ String workerZkHost) throws IOException {
+ List<String> fields = sqlVisitor.fields;
+ StringBuilder flbuf = new StringBuilder();
+ boolean comma = false;
+ for(String field : fields) {
+
+ if(comma) {
+ flbuf.append(",");
+ }
+
+ comma = true;
+ flbuf.append(field);
+ }
+
+ String fl = flbuf.toString();
+
+ List<SortItem> sorts = sqlVisitor.sorts;
+
+ StringBuilder siBuf = new StringBuilder();
+
+ comma = false;
+ for(SortItem sortItem : sorts) {
+ if(comma) {
+ siBuf.append(",");
+ }
+ siBuf.append(stripQuotes(sortItem.getSortKey().toString()) + " " + ascDesc(sortItem.getOrdering().toString()));
+ comma = true;
+ }
+
+ TableSpec tableSpec = tableMap.get(sqlVisitor.table);
+ String zkHost = tableSpec.zkHost;
+ String collection = tableSpec.collection;
+ Map params = new HashMap();
+
+ params.put("fl", fl.toString());
+ params.put("q", sqlVisitor.query);
+ params.put("sort", siBuf.toString());
+
+ if(sqlVisitor.limit > -1) {
+ params.put("rows", Integer.toString(sqlVisitor.limit));
+ return new LimitStream(new CloudSolrStream(zkHost, collection, params), sqlVisitor.limit);
+ } else {
+ //Only use the export handler when no limit is specified.
+ params.put(CommonParams.QT, "/export");
+ return new CloudSolrStream(zkHost, collection, params);
+ }
+ }
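+
+ /*
+ * Example (hypothetical query): "select id, field_i from mytable where text='XXXX' order by field_i desc limit 10"
+ * becomes a CloudSolrStream with q=(text:XXXX), fl=id,field_i, sort="field_i desc" and rows=10, wrapped in a
+ * LimitStream. Without a limit the same query is routed to the /export handler instead.
+ */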
+
+ private static boolean sortsEqual(Bucket[] buckets, String direction, List<SortItem> sortItems) {
+ if(buckets.length != sortItems.size()) {
+ return false;
+ }
+
+ for(int i=0; i< buckets.length; i++) {
+ Bucket bucket = buckets[i];
+ SortItem sortItem = sortItems.get(i);
+ if(!bucket.toString().equals(stripQuotes(sortItem.getSortKey().toString()))) {
+ return false;
+ }
+
+
+ if(!sortItem.getOrdering().toString().toLowerCase(Locale.getDefault()).contains(direction.toLowerCase(Locale.getDefault()))) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ private static String bucketSort(Bucket[] buckets, String dir) {
+ StringBuilder buf = new StringBuilder();
+ boolean comma = false;
+ for(Bucket bucket : buckets) {
+ if(comma) {
+ buf.append(",");
+ }
+ buf.append(bucket.toString()).append(" ").append(dir);
+ comma = true;
+ }
+
+ return buf.toString();
+ }
+
+ private static String getPartitionKeys(Bucket[] buckets) {
+ StringBuilder buf = new StringBuilder();
+ boolean comma = false;
+ for(Bucket bucket : buckets) {
+ if(comma) {
+ buf.append(",");
+ }
+ buf.append(bucket.toString());
+ comma = true;
+ }
+ return buf.toString();
+ }
+
+ public static String getSortDirection(List<SortItem> sorts) {
+ if(sorts != null && sorts.size() > 0) {
+ for(SortItem item : sorts) {
+ return ascDesc(stripQuotes(item.getOrdering().toString()));
+ }
+ }
+
+ return "asc";
+ }
+
+ private static Comparator bucketSortComp(Bucket[] buckets, String dir) {
+ Comparator[] comps = new Comparator[buckets.length];
+ for(int i=0; i<buckets.length; i++) {
+ ComparatorOrder comparatorOrder = ascDescComp(dir);
+ comps[i] = new FieldComparator(buckets[i].toString(), comparatorOrder);
+ }
+
+ if(comps.length == 1) {
+ return comps[0];
+ } else {
+ return new MultiComp(comps);
+ }
+ }
+
+ private static Comparator getComp(List<SortItem> sortItems) {
+ Comparator[] comps = new Comparator[sortItems.size()];
+ for(int i=0; i<sortItems.size(); i++) {
+ SortItem sortItem = sortItems.get(i);
+ ComparatorOrder comparatorOrder = ascDescComp(sortItem.getOrdering().toString());
+ comps[i] = new FieldComparator(stripQuotes(sortItem.getSortKey().toString()), comparatorOrder);
+ }
+
+ if(comps.length == 1) {
+ return comps[0];
+ } else {
+ return new MultiComp(comps);
+ }
+ }
+
+ private static String fields(Set<String> fieldSet) {
+ StringBuilder buf = new StringBuilder();
+ boolean comma = false;
+ for(String field : fieldSet) {
+ if(comma) {
+ buf.append(",");
+ }
+ buf.append(field);
+ comma = true;
+ }
+
+ return buf.toString();
+ }
+
+ private static Metric[] getMetrics(List<String> fields, Set<String> fieldSet) {
+ List<Metric> metrics = new ArrayList();
+ for(String field : fields) {
+
+ if(field.contains("(")) {
+
+ field = field.substring(0, field.length()-1);
+ String[] parts = field.split("\\(");
+ String function = parts[0];
+ String column = parts[1];
+ if(function.equals("min")) {
+ metrics.add(new MinMetric(column));
+ fieldSet.add(column);
+ } else if(function.equals("max")) {
+ metrics.add(new MaxMetric(column));
+ fieldSet.add(column);
+ } else if(function.equals("sum")) {
+ metrics.add(new SumMetric(column));
+ fieldSet.add(column);
+ } else if(function.equals("avg")) {
+ metrics.add(new MeanMetric(column));
+ fieldSet.add(column);
+ } else if(function.equals("count")) {
+ metrics.add(new CountMetric());
+ }
+ }
+ }
+ return metrics.toArray(new Metric[metrics.size()]);
+ }
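+
+ // For example, a (hypothetical) select list of [str_s, count(*), sum(field_i)] produces a CountMetric and a
+ // SumMetric over field_i, and adds "field_i" to the set of fields requested from Solr; count(*) adds no field.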
+
+ private static Bucket[] getBuckets(List<String> fields, Set<String> fieldSet) {
+ List<Bucket> buckets = new ArrayList();
+ for(String field : fields) {
+ String f = stripQuotes(field);
+ buckets.add(new Bucket(f));
+ fieldSet.add(f);
+ }
+
+ return buckets.toArray(new Bucket[buckets.size()]);
+ }
+
+ private static String ascDesc(String s) {
+ if(s.toLowerCase(Locale.getDefault()).contains("desc")) {
+ return "desc";
+ } else {
+ return "asc";
+ }
+ }
+
+ private static ComparatorOrder ascDescComp(String s) {
+ if(s.toLowerCase(Locale.getDefault()).contains("desc")) {
+ return ComparatorOrder.DESCENDING;
+ } else {
+ return ComparatorOrder.ASCENDING;
+ }
+ }
+
+ private static String stripQuotes(String s) {
+ StringBuilder buf = new StringBuilder();
+ for(int i=0; i<s.length(); i++) {
+ char c = s.charAt(i);
+ if(c != '"') {
+ buf.append(c);
+ }
+ }
+ return buf.toString();
+ }
+
+ private static String stripSingleQuotes(String s) {
+ StringBuilder buf = new StringBuilder();
+ for(int i=0; i<s.length(); i++) {
+ char c = s.charAt(i);
+ if(c != '\'') {
+ buf.append(c);
+ }
+ }
+ return buf.toString();
+ }
+
+ private class TableSpec {
+ private String collection;
+ private String zkHost;
+
+ public TableSpec(String collection, String zkHost) {
+ this.collection = collection;
+ this.zkHost = zkHost;
+ }
+ }
+
+ static class ExpressionVisitor extends AstVisitor<Void, StringBuilder> {
+
+ protected Void visitLogicalBinaryExpression(LogicalBinaryExpression node, StringBuilder buf) {
+ buf.append("(");
+ process(node.getLeft(), buf);
+ buf.append(" ").append(node.getType().toString()).append(" ");
+ process(node.getRight(), buf);
+ buf.append(")");
+ return null;
+ }
+
+ protected Void visitNotExpression(NotExpression node, StringBuilder buf) {
+ buf.append("-");
+ process(node.getValue(), buf);
+ return null;
+ }
+
+ protected Void visitComparisonExpression(ComparisonExpression node, StringBuilder buf) {
+ String field = node.getLeft().toString();
+ String value = node.getRight().toString();
+ buf.append('(').append(stripQuotes(field) + ":" + stripSingleQuotes(value)).append(')');
+ return null;
+ }
+ }
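+
+ /*
+ * The visitor above rewrites a SQL WHERE clause into a Solr query string, for example:
+ *
+ *   where ((c = 'd') AND NOT (l = 'z'))  =>  ((c:d) AND -(l:z))
+ *   where (c = '[0 TO 100]')             =>  (c:[0 TO 100])
+ */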
+
+ static class SQLVisitor extends AstVisitor<Void, Integer> {
+ private final StringBuilder builder;
+ public String table;
+ public List<String> fields = new ArrayList();
+ public List<String> groupBy = new ArrayList();
+ public List<SortItem> sorts;
+ public String query ="*:*"; //If no query is specified pull all the records
+ public int limit = -1;
+ public boolean groupByQuery;
+ public Expression havingExpression;
+
+ public SQLVisitor(StringBuilder builder) {
+ this.builder = builder;
+ }
+
+ protected Void visitNode(Node node, Integer indent) {
+ throw new UnsupportedOperationException("not yet implemented: " + node);
+ }
+
+ protected Void visitUnnest(Unnest node, Integer indent) {
+ return null;
+ }
+
+ protected Void visitQuery(Query node, Integer indent) {
+ if(node.getWith().isPresent()) {
+ With confidence = (With)node.getWith().get();
+ this.append(indent.intValue(), "WITH");
+ if(confidence.isRecursive()) {
+ }
+
+ Iterator queries = confidence.getQueries().iterator();
+
+ while(queries.hasNext()) {
+ WithQuery query = (WithQuery)queries.next();
+ this.process(new TableSubquery(query.getQuery()), indent);
+ if(queries.hasNext()) {
+ }
+ }
+ }
+
+ this.processRelation(node.getQueryBody(), indent);
+ if(!node.getOrderBy().isEmpty()) {
+ this.sorts = node.getOrderBy();
+ }
+
+ if(node.getLimit().isPresent()) {
+ }
+
+ if(node.getApproximate().isPresent()) {
+
+ }
+
+ return null;
+ }
+
+ protected Void visitQuerySpecification(QuerySpecification node, Integer indent) {
+ this.process(node.getSelect(), indent);
+ if(node.getFrom().isPresent()) {
+ this.process((Node)node.getFrom().get(), indent);
+ }
+
+ if(node.getWhere().isPresent()) {
+ Expression ex = node.getWhere().get();
+ ExpressionVisitor expressionVisitor = new ExpressionVisitor();
+ StringBuilder buf = new StringBuilder();
+ expressionVisitor.process(ex, buf);
+ this.query = buf.toString();
+ }
+
+ if(!node.getGroupBy().isEmpty()) {
+ this.groupByQuery = true;
+ List<Expression> groups = node.getGroupBy();
+ for(Expression group : groups) {
+ groupBy.add(stripQuotes(group.toString()));
+
+ }
+ }
+
+ if(node.getHaving().isPresent()) {
+ this.havingExpression = node.getHaving().get();
+ }
+
+ if(!node.getOrderBy().isEmpty()) {
+ this.sorts = node.getOrderBy();
+ }
+
+ if(node.getLimit().isPresent()) {
+ this.limit = Integer.parseInt(stripQuotes(node.getLimit().get()));
+ }
+
+ return null;
+ }
+
+
+
+ protected Void visitComparisonExpression(ComparisonExpression node, Integer index) {
+ String field = node.getLeft().toString();
+ String value = node.getRight().toString();
+ query = stripQuotes(field)+":"+stripQuotes(value);
+ return null;
+ }
+
+ protected Void visitSelect(Select node, Integer indent) {
+ this.append(indent.intValue(), "SELECT");
+ if(node.isDistinct()) {
+
+ }
+
+ if(node.getSelectItems().size() > 1) {
+ boolean first = true;
+
+ for(Iterator var4 = node.getSelectItems().iterator(); var4.hasNext(); first = false) {
+ SelectItem item = (SelectItem)var4.next();
+ this.process(item, indent);
+ }
+ } else {
+ this.process((Node) Iterables.getOnlyElement(node.getSelectItems()), indent);
+ }
+
+ return null;
+ }
+
+ protected Void visitSingleColumn(SingleColumn node, Integer indent) {
+ fields.add(stripQuotes(ExpressionFormatter.formatExpression(node.getExpression())));
+
+ if(node.getAlias().isPresent()) {
+ }
+
+ return null;
+ }
+
+ protected Void visitAllColumns(AllColumns node, Integer context) {
+ return null;
+ }
+
+ protected Void visitTable(Table node, Integer indent) {
+ this.table = node.getName().toString();
+ return null;
+ }
+
+ protected Void visitAliasedRelation(AliasedRelation node, Integer indent) {
+ this.process(node.getRelation(), indent);
+ return null;
+ }
+
+ protected Void visitValues(Values node, Integer indent) {
+ boolean first = true;
+
+ for(Iterator var4 = node.getRows().iterator(); var4.hasNext(); first = false) {
+ Expression row = (Expression)var4.next();
+
+ }
+
+ return null;
+ }
+
+ private void processRelation(Relation relation, Integer indent) {
+ if(relation instanceof Table) {
+ } else {
+ this.process(relation, indent);
+ }
+ }
+
+ private StringBuilder append(int indent, String value) {
+ return this.builder.append(indentString(indent)).append(value);
+ }
+
+ private static String indentString(int indent) {
+ return Strings.repeat(" ", indent);
+ }
+ }
+
+ private static class LimitStream extends TupleStream {
+
+ private TupleStream stream;
+ private int limit;
+ private int count;
+
+ public LimitStream(TupleStream stream, int limit) {
+ this.stream = stream;
+ this.limit = limit;
+ }
+
+ public void open() throws IOException {
+ this.stream.open();
+ }
+
+ public void close() throws IOException {
+ this.stream.close();
+ }
+
+ public List children() {
+ List children = new ArrayList();
+ children.add(stream);
+ return children;
+ }
+
+ public void setStreamContext(StreamContext context) {
+ stream.setStreamContext(context);
+ }
+
+ public Tuple read() throws IOException {
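+ // Once more than 'limit' tuples have been read, return an artificial EOF tuple so downstream
+ // operators see a normal end-of-stream marker; the underlying stream is not read any further.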
+ ++count;
+ if(count > limit) {
+ Map fields = new HashMap();
+ fields.put("EOF", "true");
+ return new Tuple(fields);
+ }
+
+ Tuple tuple = stream.read();
+ return tuple;
+ }
+ }
+
+ public static class HavingStream extends TupleStream {
+
+ private TupleStream stream;
+ private HavingVisitor havingVisitor;
+ private Expression havingExpression;
+
+ public HavingStream(TupleStream stream, Expression havingExpression) {
+ this.stream = stream;
+ this.havingVisitor = new HavingVisitor();
+ this.havingExpression = havingExpression;
+ }
+
+ public void open() throws IOException {
+ this.stream.open();
+ }
+
+ public void close() throws IOException {
+ this.stream.close();
+ }
+
+ public List children() {
+ List children = new ArrayList();
+ children.add(stream);
+ return children;
+ }
+
+ public void setStreamContext(StreamContext context) {
+ stream.setStreamContext(context);
+ }
+
+ public Tuple read() throws IOException {
+ while (true) {
+ Tuple tuple = stream.read();
+ if (tuple.EOF) {
+ return tuple;
+ }
+
+ if (havingVisitor.process(havingExpression, tuple)) {
+ return tuple;
+ }
+ }
+ }
+ }
+
+ private static class HavingVisitor extends AstVisitor<Boolean, Tuple> {
+
+ protected Boolean visitLogicalBinaryExpression(LogicalBinaryExpression node, Tuple tuple) {
+
+ Boolean b = process(node.getLeft(), tuple);
+ if(node.getType() == LogicalBinaryExpression.Type.AND) {
+ if(!b) {
+ //Short circuit
+ return false;
+ } else {
+ return process(node.getRight(), tuple);
+ }
+ } else {
+ if(b) {
+ //Short circuit
+ return true;
+ } else {
+ return process(node.getRight(), tuple);
+ }
+ }
+ }
+
+ protected Boolean visitComparisonExpression(ComparisonExpression node, Tuple tuple) {
+ String field = stripQuotes(node.getLeft().toString());
+ double d = Double.parseDouble(node.getRight().toString());
+ double td = tuple.getDouble(field);
+ ComparisonExpression.Type t = node.getType();
+
+ switch(t) {
+ case LESS_THAN:
+ return td < d;
+ case LESS_THAN_OR_EQUAL:
+ return td <= d;
+ case NOT_EQUAL:
+ return td != d;
+ case EQUAL:
+ return td == d;
+ case GREATER_THAN:
+ return td > d;
+ case GREATER_THAN_OR_EQUAL:
+ return td >= d;
+ default:
+ return false;
+ }
+ }
+ }
+ }
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
index 536fe160259..4195351e2d5 100644
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
@@ -45,7 +45,7 @@ import org.apache.solr.common.util.Base64;
public class StreamHandler extends RequestHandlerBase implements SolrCoreAware {
- private SolrClientCache clientCache = new SolrClientCache();
+ static SolrClientCache clientCache = new SolrClientCache();
private StreamFactory streamFactory = new StreamFactory();
public void inform(SolrCore core) {
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-sql.xml b/solr/core/src/test-files/solr/collection1/conf/schema-sql.xml
new file mode 100644
index 00000000000..216fa2ce85b
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-sql.xml
@@ -0,0 +1,599 @@
+<schema>
+  <defaultSearchField>text</defaultSearchField>
+  <uniqueKey>id</uniqueKey>
+</schema>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-sql.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-sql.xml
new file mode 100644
index 00000000000..4fc231e851f
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-sql.xml
@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<config>
+  <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
+
+  <dataDir>${solr.data.dir:}</dataDir>
+
+  <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.StandardDirectoryFactory}"/>
+
+  <updateHandler class="solr.DirectUpdateHandler2">
+    <updateLog>
+      <str name="dir">${solr.data.dir:}</str>
+    </updateLog>
+  </updateHandler>
+
+  <requestHandler name="/export" class="solr.SearchHandler">
+    <lst name="invariants">
+      <str name="rq">{!xport}</str>
+      <str name="wt">xsort</str>
+      <str name="distrib">false</str>
+    </lst>
+    <arr name="components">
+      <str>query</str>
+    </arr>
+  </requestHandler>
+
+  <requestHandler name="/stream" class="solr.StreamHandler">
+    <lst name="invariants">
+      <str name="wt">json</str>
+      <str name="distrib">false</str>
+    </lst>
+  </requestHandler>
+
+  <requestHandler name="/sql" class="solr.SQLHandler">
+    <lst name="invariants">
+      <str name="wt">json</str>
+      <str name="distrib">false</str>
+    </lst>
+    <lst name="tables">
+      <str name="mytable">collection1</str>
+    </lst>
+  </requestHandler>
+
+  <requestHandler name="/admin/ping" class="solr.PingRequestHandler">
+    <lst name="invariants">
+      <str name="q">*:*</str>
+    </lst>
+    <lst name="defaults">
+      <str name="echoParams">all</str>
+    </lst>
+    <str name="healthcheckFile">server-enabled.txt</str>
+  </requestHandler>
+
+  <admin>
+    <defaultQuery>solr</defaultQuery>
+  </admin>
+</config>
diff --git a/solr/core/src/test/org/apache/solr/handler/TestSQLHandler.java b/solr/core/src/test/org/apache/solr/handler/TestSQLHandler.java
new file mode 100644
index 00000000000..5470c2d7f19
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/handler/TestSQLHandler.java
@@ -0,0 +1,843 @@
+package org.apache.solr.handler;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+
+import com.facebook.presto.sql.parser.SqlParser;
+import com.facebook.presto.sql.tree.Statement;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.SolrStream;
+import org.apache.solr.client.solrj.io.stream.TupleStream;
+import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
+import org.apache.solr.common.params.CommonParams;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestSQLHandler extends AbstractFullDistribZkTestBase {
+
+
+ static {
+ schemaString = "schema-sql.xml";
+ }
+
+ public TestSQLHandler() {
+ sliceCount = 2;
+ }
+
+ //@BeforeClass
+ //public static void beforeSuperClass() {
+ //AbstractZkTestCase.SOLRHOME = new File(SOLR_HOME());
+ // }
+
+ @AfterClass
+ public static void afterSuperClass() {
+
+ }
+
+ protected String getCloudSolrConfig() {
+ return "solrconfig-sql.xml";
+ }
+
+ @Before
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ // we expect this type of exception as shards go up and down...
+ //ignoreException(".*");
+
+ System.setProperty("numShards", Integer.toString(sliceCount));
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ resetExceptionIgnores();
+ }
+
+ private void delete() throws Exception {
+ deleteCore();
+ }
+
+ @Test
+ public void doTest() throws Exception {
+ testPredicate();
+ testBasicSelect();
+ testBasicGrouping();
+ testTimeSeriesGrouping();
+ testParallelBasicGrouping();
+ testParallelTimeSeriesGrouping();
+ }
+
+ private void testPredicate() throws Exception {
+
+ SqlParser parser = new SqlParser();
+ String sql = "select a from b where c = 'd'";
+ Statement statement = parser.createStatement(sql);
+ SQLHandler.SQLVisitor sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder());
+ sqlVistor.process(statement, new Integer(0));
+
+ assert(sqlVistor.query.equals("(c:d)"));
+
+ //Add parens
+ parser = new SqlParser();
+ sql = "select a from b where (c = 'd')";
+ statement = parser.createStatement(sql);
+ sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder());
+ sqlVistor.process(statement, new Integer(0));
+
+ assert(sqlVistor.query.equals("(c:d)"));
+
+ //Phrase
+ parser = new SqlParser();
+ sql = "select a from b where (c = '\"d d\"')";
+ statement = parser.createStatement(sql);
+ sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder());
+ sqlVistor.process(statement, new Integer(0));
+
+ assert(sqlVistor.query.equals("(c:\"d d\")"));
+
+ // AND
+ parser = new SqlParser();
+ sql = "select a from b where ((c = 'd') AND (l = 'z'))";
+ statement = parser.createStatement(sql);
+ sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder());
+ sqlVistor.process(statement, new Integer(0));
+
+ assert(sqlVistor.query.equals("((c:d) AND (l:z))"));
+
+ // OR
+
+ parser = new SqlParser();
+ sql = "select a from b where ((c = 'd') OR (l = 'z'))";
+ statement = parser.createStatement(sql);
+ sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder());
+ sqlVistor.process(statement, new Integer(0));
+
+ assert(sqlVistor.query.equals("((c:d) OR (l:z))"));
+
+ // AND NOT
+
+ parser = new SqlParser();
+ sql = "select a from b where ((c = 'd') AND NOT (l = 'z'))";
+ statement = parser.createStatement(sql);
+ sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder());
+ sqlVistor.process(statement, new Integer(0));
+
+ assert(sqlVistor.query.equals("((c:d) AND -(l:z))"));
+
+ // NESTED
+ parser = new SqlParser();
+ sql = "select a from b where ((c = 'd') OR ((l = 'z') AND (m = 'j')))";
+ statement = parser.createStatement(sql);
+ sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder());
+ sqlVistor.process(statement, new Integer(0));
+
+ assert(sqlVistor.query.equals("((c:d) OR ((l:z) AND (m:j)))"));
+
+ // NESTED NOT
+ parser = new SqlParser();
+ sql = "select a from b where ((c = 'd') OR ((l = 'z') AND NOT (m = 'j')))";
+ statement = parser.createStatement(sql);
+ sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder());
+ sqlVistor.process(statement, new Integer(0));
+
+ assert(sqlVistor.query.equals("((c:d) OR ((l:z) AND -(m:j)))"));
+
+ // RANGE - Will have to do until SQL BETWEEN is supported.
+ // NESTED
+ parser = new SqlParser();
+ sql = "select a from b where ((c = '[0 TO 100]') OR ((l = 'z') AND (m = 'j')))";
+ statement = parser.createStatement(sql);
+ sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder());
+ sqlVistor.process(statement, new Integer(0));
+
+ assert(sqlVistor.query.equals("((c:[0 TO 100]) OR ((l:z) AND (m:j)))"));
+
+ // Wildcard
+ parser = new SqlParser();
+ sql = "select a from b where ((c = '[0 TO 100]') OR ((l = 'z*') AND (m = 'j')))";
+ statement = parser.createStatement(sql);
+ sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder());
+ sqlVistor.process(statement, new Integer(0));
+ assert(sqlVistor.query.equals("((c:[0 TO 100]) OR ((l:z*) AND (m:j)))"));
+
+ // Complex Lucene/Solr Query
+ parser = new SqlParser();
+ sql = "select a from b where ((c = '[0 TO 100]') OR ((l = 'z*') AND (m = '(j OR (k NOT s))')))";
+ statement = parser.createStatement(sql);
+ sqlVistor = new SQLHandler.SQLVisitor(new StringBuilder());
+ sqlVistor.process(statement, new Integer(0));
+ assert(sqlVistor.query.equals("((c:[0 TO 100]) OR ((l:z*) AND (m:(j OR (k NOT s)))))"));
+ }
+
+ private void testBasicSelect() throws Exception {
+ try {
+
+ CloudJettyRunner jetty = this.cloudJettys.get(0);
+
+ del("*:*");
+
+ commit();
+
+ indexr("id", "1", "text", "XXXX XXXX", "str_s", "a", "field_i", "7");
+ indexr("id", "2", "text", "XXXX XXXX", "str_s", "b", "field_i", "8");
+ indexr("id", "3", "text", "XXXX XXXX", "str_s", "a", "field_i", "20");
+ indexr("id", "4", "text", "XXXX XXXX", "str_s", "b", "field_i", "11");
+ indexr("id", "5", "text", "XXXX XXXX", "str_s", "c", "field_i", "30");
+ indexr("id", "6", "text", "XXXX XXXX", "str_s", "c", "field_i", "40");
+ indexr("id", "7", "text", "XXXX XXXX", "str_s", "c", "field_i", "50");
+ indexr("id", "8", "text", "XXXX XXXX", "str_s", "c", "field_i", "60");
+ commit();
+ Map params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("sql", "select id, field_i, str_s from mytable where text='XXXX' order by field_i desc");
+
+ SolrStream solrStream = new SolrStream(jetty.url, params);
+ List<Tuple> tuples = getTuples(solrStream);
+
+ assert(tuples.size() == 8);
+
+ Tuple tuple = null;
+
+ tuple = tuples.get(0);
+ assert(tuple.getLong("id") == 8);
+ assert(tuple.getLong("field_i") == 60);
+ assert(tuple.get("str_s").equals("c"));
+
+ tuple = tuples.get(1);
+ assert(tuple.getLong("id") == 7);
+ assert(tuple.getLong("field_i") == 50);
+ assert(tuple.get("str_s").equals("c"));
+
+ tuple = tuples.get(2);
+ assert(tuple.getLong("id") == 6);
+ assert(tuple.getLong("field_i") == 40);
+ assert(tuple.get("str_s").equals("c"));
+
+ tuple = tuples.get(3);
+ assert(tuple.getLong("id") == 5);
+ assert(tuple.getLong("field_i") == 30);
+ assert(tuple.get("str_s").equals("c"));
+
+ tuple = tuples.get(4);
+ assert(tuple.getLong("id") == 3);
+ assert(tuple.getLong("field_i") == 20);
+ assert(tuple.get("str_s").equals("a"));
+
+ tuple = tuples.get(5);
+ assert(tuple.getLong("id") == 4);
+ assert(tuple.getLong("field_i") == 11);
+ assert(tuple.get("str_s").equals("b"));
+
+ tuple = tuples.get(6);
+ assert(tuple.getLong("id") == 2);
+ assert(tuple.getLong("field_i") == 8);
+ assert(tuple.get("str_s").equals("b"));
+
+ tuple = tuples.get(7);
+ assert(tuple.getLong("id") == 1);
+ assert(tuple.getLong("field_i") == 7);
+ assert(tuple.get("str_s").equals("a"));
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("sql", "select id, field_i, str_s from mytable where text='XXXX' order by field_i desc limit 1");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ assert(tuples.size() == 1);
+
+ tuple = tuples.get(0);
+ assert(tuple.getLong("id") == 8);
+ assert(tuple.getLong("field_i") == 60);
+ assert(tuple.get("str_s").equals("c"));
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("sql", "select id, field_i, str_s from mytable where text='XXXX' AND id='(1 2 3)' order by field_i desc");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ assert(tuples.size() == 3);
+
+ tuple = tuples.get(0);
+ assert(tuple.getLong("id") == 3);
+ assert(tuple.getLong("field_i") == 20);
+ assert(tuple.get("str_s").equals("a"));
+
+ tuple = tuples.get(1);
+ assert(tuple.getLong("id") == 2);
+ assert(tuple.getLong("field_i") == 8);
+ assert(tuple.get("str_s").equals("b"));
+
+ tuple = tuples.get(2);
+ assert(tuple.getLong("id") == 1);
+ assert(tuple.getLong("field_i") == 7);
+ assert(tuple.get("str_s").equals("a"));
+
+ } finally {
+ delete();
+ }
+ }
+
+ private void testBasicGrouping() throws Exception {
+ try {
+
+ CloudJettyRunner jetty = this.cloudJettys.get(0);
+
+ del("*:*");
+
+ commit();
+
+ indexr("id", "1", "text", "XXXX XXXX", "str_s", "a", "field_i", "7");
+ indexr("id", "2", "text", "XXXX XXXX", "str_s", "b", "field_i", "8");
+ indexr("id", "3", "text", "XXXX XXXX", "str_s", "a", "field_i", "20");
+ indexr("id", "4", "text", "XXXX XXXX", "str_s", "b", "field_i", "11");
+ indexr("id", "5", "text", "XXXX XXXX", "str_s", "c", "field_i", "30");
+ indexr("id", "6", "text", "XXXX XXXX", "str_s", "c", "field_i", "40");
+ indexr("id", "7", "text", "XXXX XXXX", "str_s", "c", "field_i", "50");
+ indexr("id", "8", "text", "XXXX XXXX", "str_s", "c", "field_i", "60");
+ commit();
+ Map params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("sql", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from mytable where text='XXXX' group by str_s order by sum(field_i) asc limit 2");
+
+ SolrStream solrStream = new SolrStream(jetty.url, params);
+ List<Tuple> tuples = getTuples(solrStream);
+
+ //Only two results because of the limit.
+ assert(tuples.size() == 2);
+
+ Tuple tuple = null;
+
+ tuple = tuples.get(0);
+ assert(tuple.get("str_s").equals("b"));
+ assert(tuple.getDouble("count(*)") == 2);
+ assert(tuple.getDouble("sum(field_i)") == 19);
+ assert(tuple.getDouble("min(field_i)") == 8);
+ assert(tuple.getDouble("max(field_i)") == 11);
+ assert(tuple.getDouble("avg(field_i)") == 9.5D);
+
+ tuple = tuples.get(1);
+ assert(tuple.get("str_s").equals("a"));
+ assert(tuple.getDouble("count(*)") == 2);
+ assert(tuple.getDouble("sum(field_i)") == 27);
+ assert(tuple.getDouble("min(field_i)") == 7);
+ assert(tuple.getDouble("max(field_i)") == 20);
+ assert(tuple.getDouble("avg(field_i)") == 13.5D);
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("sql", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from mytable where (text='XXXX' AND NOT text='\"XXXX XXX\"') group by str_s order by str_s desc");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ //The group by and order by match and no limit is applied, so all of the tuples are returned in
+ //this scenario.
+
+ assert(tuples.size() == 3);
+
+ tuple = tuples.get(0);
+ assert(tuple.get("str_s").equals("c"));
+ assert(tuple.getDouble("count(*)") == 4);
+ assert(tuple.getDouble("sum(field_i)") == 180);
+ assert(tuple.getDouble("min(field_i)") == 30);
+ assert(tuple.getDouble("max(field_i)") == 60);
+ assert(tuple.getDouble("avg(field_i)") == 45);
+
+ tuple = tuples.get(1);
+ assert(tuple.get("str_s").equals("b"));
+ assert(tuple.getDouble("count(*)") == 2);
+ assert(tuple.getDouble("sum(field_i)") == 19);
+ assert(tuple.getDouble("min(field_i)") == 8);
+ assert(tuple.getDouble("max(field_i)") == 11);
+ assert(tuple.getDouble("avg(field_i)") == 9.5D);
+
+ tuple = tuples.get(2);
+ assert(tuple.get("str_s").equals("a"));
+ assert(tuple.getDouble("count(*)") == 2);
+ assert(tuple.getDouble("sum(field_i)") == 27);
+ assert(tuple.getDouble("min(field_i)") == 7);
+ assert(tuple.getDouble("max(field_i)") == 20);
+ assert(tuple.getDouble("avg(field_i)") == 13.5D);
+
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("sql", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from mytable where text='XXXX' group by str_s having sum(field_i) = 19");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ assert(tuples.size() == 1);
+
+ tuple = tuples.get(0);
+ assert(tuple.get("str_s").equals("b"));
+ assert(tuple.getDouble("count(*)") == 2);
+ assert(tuple.getDouble("sum(field_i)") == 19);
+ assert(tuple.getDouble("min(field_i)") == 8);
+ assert(tuple.getDouble("max(field_i)") == 11);
+ assert(tuple.getDouble("avg(field_i)") == 9.5D);
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("sql", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from mytable where text='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 8))");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ //Only one result, because the having clause matches a single group.
+ assert(tuples.size() == 1);
+
+ tuple = tuples.get(0);
+ assert(tuple.get("str_s").equals("b"));
+ assert(tuple.getDouble("count(*)") == 2);
+ assert(tuple.getDouble("sum(field_i)") == 19);
+ assert(tuple.getDouble("min(field_i)") == 8);
+ assert(tuple.getDouble("max(field_i)") == 11);
+ assert(tuple.getDouble("avg(field_i)") == 9.5D);
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("sql", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from mytable where text='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 100))");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ assert(tuples.size() == 0);
+
+
+ } finally {
+ delete();
+ }
+ }
+
+ private void testParallelBasicGrouping() throws Exception {
+ try {
+
+ CloudJettyRunner jetty = this.cloudJettys.get(0);
+
+ del("*:*");
+
+ commit();
+
+ indexr("id", "1", "text", "XXXX XXXX", "str_s", "a", "field_i", "7");
+ indexr("id", "2", "text", "XXXX XXXX", "str_s", "b", "field_i", "8");
+ indexr("id", "3", "text", "XXXX XXXX", "str_s", "a", "field_i", "20");
+ indexr("id", "4", "text", "XXXX XXXX", "str_s", "b", "field_i", "11");
+ indexr("id", "5", "text", "XXXX XXXX", "str_s", "c", "field_i", "30");
+ indexr("id", "6", "text", "XXXX XXXX", "str_s", "c", "field_i", "40");
+ indexr("id", "7", "text", "XXXX XXXX", "str_s", "c", "field_i", "50");
+ indexr("id", "8", "text", "XXXX XXXX", "str_s", "c", "field_i", "60");
+ commit();
+ Map params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("numWorkers", "2");
+ params.put("sql", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from mytable where text='XXXX' group by str_s order by sum(field_i) asc limit 2");
+
+ SolrStream solrStream = new SolrStream(jetty.url, params);
+ List<Tuple> tuples = getTuples(solrStream);
+
+ //Only two results because of the limit.
+ assert(tuples.size() == 2);
+
+ Tuple tuple = null;
+
+ tuple = tuples.get(0);
+ assert(tuple.get("str_s").equals("b"));
+ assert(tuple.getDouble("count(*)") == 2);
+ assert(tuple.getDouble("sum(field_i)") == 19);
+ assert(tuple.getDouble("min(field_i)") == 8);
+ assert(tuple.getDouble("max(field_i)") == 11);
+ assert(tuple.getDouble("avg(field_i)") == 9.5D);
+
+ tuple = tuples.get(1);
+ assert(tuple.get("str_s").equals("a"));
+ assert(tuple.getDouble("count(*)") == 2);
+ assert(tuple.getDouble("sum(field_i)") == 27);
+ assert(tuple.getDouble("min(field_i)") == 7);
+ assert(tuple.getDouble("max(field_i)") == 20);
+ assert(tuple.getDouble("avg(field_i)") == 13.5D);
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("numWorkers", "2");
+ params.put("sql", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from mytable where text='XXXX' group by str_s order by str_s desc");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ //The group by and order by match and no limit is applied, so all of the tuples are returned in
+ //this scenario.
+
+ assert(tuples.size() == 3);
+
+ tuple = tuples.get(0);
+ assert(tuple.get("str_s").equals("c"));
+ assert(tuple.getDouble("count(*)") == 4);
+ assert(tuple.getDouble("sum(field_i)") == 180);
+ assert(tuple.getDouble("min(field_i)") == 30);
+ assert(tuple.getDouble("max(field_i)") == 60);
+ assert(tuple.getDouble("avg(field_i)") == 45);
+
+ tuple = tuples.get(1);
+ assert(tuple.get("str_s").equals("b"));
+ assert(tuple.getDouble("count(*)") == 2);
+ assert(tuple.getDouble("sum(field_i)") == 19);
+ assert(tuple.getDouble("min(field_i)") == 8);
+ assert(tuple.getDouble("max(field_i)") == 11);
+ assert(tuple.getDouble("avg(field_i)") == 9.5D);
+
+ tuple = tuples.get(2);
+ assert(tuple.get("str_s").equals("a"));
+ assert(tuple.getDouble("count(*)") == 2);
+ assert(tuple.getDouble("sum(field_i)") == 27);
+ assert(tuple.getDouble("min(field_i)") == 7);
+ assert(tuple.getDouble("max(field_i)") == 20);
+ assert(tuple.getDouble("avg(field_i)") == 13.5D);
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("numWorkers", "2");
+ params.put("sql", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from mytable where text='XXXX' group by str_s having sum(field_i) = 19");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ //Only one result, because the having clause matches a single group.
+ assert(tuples.size() == 1);
+
+ tuple = tuples.get(0);
+ assert(tuple.get("str_s").equals("b"));
+ assert(tuple.getDouble("count(*)") == 2);
+ assert(tuple.getDouble("sum(field_i)") == 19);
+ assert(tuple.getDouble("min(field_i)") == 8);
+ assert(tuple.getDouble("max(field_i)") == 11);
+ assert(tuple.getDouble("avg(field_i)") == 9.5D);
+
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("numWorkers", "2");
+ params.put("sql", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from mytable where text='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 8))");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ //Only one result, because the having clause matches a single group.
+ assert(tuples.size() == 1);
+
+ tuple = tuples.get(0);
+ assert(tuple.get("str_s").equals("b"));
+ assert(tuple.getDouble("count(*)") == 2);
+ assert(tuple.getDouble("sum(field_i)") == 19);
+ assert(tuple.getDouble("min(field_i)") == 8);
+ assert(tuple.getDouble("max(field_i)") == 11);
+ assert(tuple.getDouble("avg(field_i)") == 9.5D);
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("numWorkers", "2");
+ params.put("sql", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from mytable where text='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 100))");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ assert(tuples.size() == 0);
+
+ } finally {
+ delete();
+ }
+ }
+
+ private void testTimeSeriesGrouping() throws Exception {
+ try {
+
+ CloudJettyRunner jetty = this.cloudJettys.get(0);
+
+ del("*:*");
+
+ commit();
+
+ indexr("id", "1", "year_i", "2015", "month_i", "11", "day_i", "7", "item_i", "5");
+ indexr("id", "2", "year_i", "2015", "month_i", "11", "day_i", "7", "item_i", "10");
+ indexr("id", "3", "year_i", "2015", "month_i", "11", "day_i", "8", "item_i", "30");
+ indexr("id", "4", "year_i", "2015", "month_i", "11", "day_i", "8", "item_i", "12");
+ indexr("id", "5", "year_i", "2015", "month_i", "10", "day_i", "1", "item_i", "4");
+ indexr("id", "6", "year_i", "2015", "month_i", "10", "day_i", "3", "item_i", "5");
+ indexr("id", "7", "year_i", "2014", "month_i", "4", "day_i", "4", "item_i", "6");
+ indexr("id", "8", "year_i", "2014", "month_i", "4", "day_i", "2", "item_i", "1");
+
+ commit();
+ Map params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("sql", "select year_i, sum(item_i) from mytable group by year_i order by year_i desc");
+
+ SolrStream solrStream = new SolrStream(jetty.url, params);
+ List<Tuple> tuples = getTuples(solrStream);
+
+ //Two results, one for each year.
+ assert(tuples.size() == 2);
+
+ Tuple tuple = null;
+
+ tuple = tuples.get(0);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getDouble("sum(item_i)") == 66);
+
+ tuple = tuples.get(1);
+ assert(tuple.getLong("year_i") == 2014);
+ assert(tuple.getDouble("sum(item_i)") == 7);
+
+ params.put("sql", "select year_i, month_i, sum(item_i) from mytable group by year_i, month_i order by year_i desc, month_i desc");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ //Three results, one for each year/month combination.
+ assert(tuples.size() == 3);
+
+ tuple = null;
+
+ tuple = tuples.get(0);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getLong("month_i") == 11);
+ assert(tuple.getDouble("sum(item_i)") == 57);
+
+ tuple = tuples.get(1);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getLong("month_i") == 10);
+ assert(tuple.getDouble("sum(item_i)") == 9);
+
+ tuple = tuples.get(2);
+ assert(tuple.getLong("year_i") == 2014);
+ assert(tuple.getLong("month_i") == 4);
+ assert(tuple.getDouble("sum(item_i)") == 7);
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("sql", "select year_i, month_i, day_i, sum(item_i) from mytable group by year_i, month_i, day_i order by year_i desc, month_i desc, day_i desc");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ //Six results, one for each year/month/day combination.
+ assert(tuples.size() == 6);
+
+ tuple = null;
+
+ tuple = tuples.get(0);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getLong("month_i") == 11);
+ assert(tuple.getLong("day_i") == 8);
+ assert(tuple.getDouble("sum(item_i)") == 42);
+
+ tuple = tuples.get(1);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getLong("month_i") == 11);
+ assert(tuple.getLong("day_i") == 7);
+ assert(tuple.getDouble("sum(item_i)") == 15);
+
+ tuple = tuples.get(2);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getLong("month_i") == 10);
+ assert(tuple.getLong("day_i") == 3);
+ assert(tuple.getDouble("sum(item_i)") == 5);
+
+ tuple = tuples.get(3);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getLong("month_i") == 10);
+ assert(tuple.getLong("day_i") == 1);
+ assert(tuple.getDouble("sum(item_i)") == 4);
+
+ tuple = tuples.get(4);
+ assert(tuple.getLong("year_i") == 2014);
+ assert(tuple.getLong("month_i") == 4);
+ assert(tuple.getLong("day_i") == 4);
+ assert(tuple.getDouble("sum(item_i)") == 6);
+
+ tuple = tuples.get(5);
+ assert(tuple.getLong("year_i") == 2014);
+ assert(tuple.getLong("month_i") == 4);
+ assert(tuple.getLong("day_i") == 2);
+ assert(tuple.getDouble("sum(item_i)") == 1);
+
+ } finally {
+ delete();
+ }
+ }
+
+ private void testParallelTimeSeriesGrouping() throws Exception {
+ try {
+
+ CloudJettyRunner jetty = this.cloudJettys.get(0);
+
+ del("*:*");
+
+ commit();
+
+ indexr("id", "1", "year_i", "2015", "month_i", "11", "day_i", "7", "item_i", "5");
+ indexr("id", "2", "year_i", "2015", "month_i", "11", "day_i", "7", "item_i", "10");
+ indexr("id", "3", "year_i", "2015", "month_i", "11", "day_i", "8", "item_i", "30");
+ indexr("id", "4", "year_i", "2015", "month_i", "11", "day_i", "8", "item_i", "12");
+ indexr("id", "5", "year_i", "2015", "month_i", "10", "day_i", "1", "item_i", "4");
+ indexr("id", "6", "year_i", "2015", "month_i", "10", "day_i", "3", "item_i", "5");
+ indexr("id", "7", "year_i", "2014", "month_i", "4", "day_i", "4", "item_i", "6");
+ indexr("id", "8", "year_i", "2014", "month_i", "4", "day_i", "2", "item_i", "1");
+
+ commit();
+ Map params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("numWorkers", 2);
+ params.put("sql", "select year_i, sum(item_i) from mytable group by year_i order by year_i desc");
+
+ SolrStream solrStream = new SolrStream(jetty.url, params);
+ List<Tuple> tuples = getTuples(solrStream);
+
+ //Two results, one for each year.
+ assert(tuples.size() == 2);
+
+ Tuple tuple = null;
+
+ tuple = tuples.get(0);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getDouble("sum(item_i)") == 66);
+
+ tuple = tuples.get(1);
+ assert(tuple.getLong("year_i") == 2014);
+ assert(tuple.getDouble("sum(item_i)") == 7);
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("numWorkers", 2);
+ params.put("sql", "select year_i, month_i, sum(item_i) from mytable group by year_i, month_i order by year_i desc, month_i desc");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ //Three results, one for each year/month combination.
+ assert(tuples.size() == 3);
+
+ tuple = null;
+
+ tuple = tuples.get(0);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getLong("month_i") == 11);
+ assert(tuple.getDouble("sum(item_i)") == 57);
+
+ tuple = tuples.get(1);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getLong("month_i") == 10);
+ assert(tuple.getDouble("sum(item_i)") == 9);
+
+ tuple = tuples.get(2);
+ assert(tuple.getLong("year_i") == 2014);
+ assert(tuple.getLong("month_i") == 4);
+ assert(tuple.getDouble("sum(item_i)") == 7);
+
+
+ params = new HashMap();
+ params.put(CommonParams.QT, "/sql");
+ params.put("numWorkers", 2);
+ params.put("sql", "select year_i, month_i, day_i, sum(item_i) from mytable group by year_i, month_i, day_i order by year_i desc, month_i desc, day_i desc");
+
+ solrStream = new SolrStream(jetty.url, params);
+ tuples = getTuples(solrStream);
+
+ //Six results, one for each year/month/day combination.
+ assert(tuples.size() == 6);
+
+ tuple = null;
+
+ tuple = tuples.get(0);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getLong("month_i") == 11);
+ assert(tuple.getLong("day_i") == 8);
+ assert(tuple.getDouble("sum(item_i)") == 42);
+
+ tuple = tuples.get(1);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getLong("month_i") == 11);
+ assert(tuple.getLong("day_i") == 7);
+ assert(tuple.getDouble("sum(item_i)") == 15);
+
+ tuple = tuples.get(2);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getLong("month_i") == 10);
+ assert(tuple.getLong("day_i") == 3);
+ assert(tuple.getDouble("sum(item_i)") == 5);
+
+ tuple = tuples.get(3);
+ assert(tuple.getLong("year_i") == 2015);
+ assert(tuple.getLong("month_i") == 10);
+ assert(tuple.getLong("day_i") == 1);
+ assert(tuple.getDouble("sum(item_i)") == 4);
+
+ tuple = tuples.get(4);
+ assert(tuple.getLong("year_i") == 2014);
+ assert(tuple.getLong("month_i") == 4);
+ assert(tuple.getLong("day_i") == 4);
+ assert(tuple.getDouble("sum(item_i)") == 6);
+
+ tuple = tuples.get(5);
+ assert(tuple.getLong("year_i") == 2014);
+ assert(tuple.getLong("month_i") == 4);
+ assert(tuple.getLong("day_i") == 2);
+ assert(tuple.getDouble("sum(item_i)") == 1);
+
+ } finally {
+ delete();
+ }
+ }
+
+ protected List<Tuple> getTuples(TupleStream tupleStream) throws IOException {
+ tupleStream.open();
+ List<Tuple> tuples = new ArrayList();
+ for(;;) {
+ Tuple t = tupleStream.read();
+ if(t.EOF) {
+ break;
+ } else {
+ tuples.add(t);
+ }
+ }
+ tupleStream.close();
+ return tuples;
+ }
+}
diff --git a/solr/licenses/antlr4-runtime-4.5.jar.sha1 b/solr/licenses/antlr4-runtime-4.5.jar.sha1
new file mode 100644
index 00000000000..5299c19c73b
--- /dev/null
+++ b/solr/licenses/antlr4-runtime-4.5.jar.sha1
@@ -0,0 +1 @@
+29e48af049f17dd89153b83a7ad5d01b3b4bcdda
diff --git a/solr/licenses/antlr4-runtime-LICENSE-BSD.txt b/solr/licenses/antlr4-runtime-LICENSE-BSD.txt
new file mode 100644
index 00000000000..95d0a2554f6
--- /dev/null
+++ b/solr/licenses/antlr4-runtime-LICENSE-BSD.txt
@@ -0,0 +1,26 @@
+[The "BSD license"]
+Copyright (c) 2015 Terence Parr, Sam Harwell
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/solr/licenses/antlr4-runtime-NOTICE.txt b/solr/licenses/antlr4-runtime-NOTICE.txt
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/solr/licenses/presto-parser-0.107.jar.sha1 b/solr/licenses/presto-parser-0.107.jar.sha1
new file mode 100644
index 00000000000..0d0d9c66ae6
--- /dev/null
+++ b/solr/licenses/presto-parser-0.107.jar.sha1
@@ -0,0 +1 @@
+f6f1363553855d1b70548721ce6cd5050b88a6bd
\ No newline at end of file
diff --git a/solr/licenses/presto-parser-LICENSE-ASL.txt b/solr/licenses/presto-parser-LICENSE-ASL.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/solr/licenses/presto-parser-LICENSE-ASL.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/solr/licenses/presto-parser-NOTICE.txt b/solr/licenses/presto-parser-NOTICE.txt
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/solr/licenses/slice-0.10.jar.sha1 b/solr/licenses/slice-0.10.jar.sha1
new file mode 100644
index 00000000000..7b86d91c215
--- /dev/null
+++ b/solr/licenses/slice-0.10.jar.sha1
@@ -0,0 +1 @@
+159a81631ed2cc1bc865f3d8e51239c9e8a20bea
diff --git a/solr/licenses/slice-LICENSE-ASL.txt b/solr/licenses/slice-LICENSE-ASL.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/solr/licenses/slice-LICENSE-ASL.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/solr/licenses/slice-NOTICE.txt b/solr/licenses/slice-NOTICE.txt
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/Tuple.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/Tuple.java
index 1cb7c2204c5..8baa8daa87a 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/Tuple.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/Tuple.java
@@ -58,15 +58,27 @@ public class Tuple implements Cloneable {
}
public String getString(Object key) {
- return (String)this.fields.get(key);
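+ // Use toString() so non-String field values are returned in their string form rather than failing a cast.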
+ return this.fields.get(key).toString();
}
public Long getLong(Object key) {
- return (Long)this.fields.get(key);
+ Object o = this.fields.get(key);
+ if(o instanceof Long) {
+ return (Long)o;
+ } else {
+ //Attempt to parse the long
+ return Long.parseLong(o.toString());
+ }
}
public Double getDouble(Object key) {
- return (Double)this.fields.get(key);
+ Object o = this.fields.get(key);
+ if(o instanceof Double) {
+ return (Double)o;
+ } else {
+ //Attempt to parse the double
+ return Double.parseDouble(o.toString());
+ }
}
public List getStrings(Object key) {
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/comp/HashKey.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/comp/HashKey.java
new file mode 100644
index 00000000000..200446ef1e0
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/comp/HashKey.java
@@ -0,0 +1,80 @@
+package org.apache.solr.client.solrj.io.comp;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Serializable;
+
+import org.apache.solr.client.solrj.io.Tuple;
+
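+/**
+ * HashKey builds a composite grouping key from one or more Tuple fields.
+ * The parts are joined with "::" in toString() and can be parsed back from
+ * that same form by the String constructor.
+ */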
+public class HashKey implements Serializable {
+
+ private static final long serialVersionUID = 1;
+
+ private Object[] parts;
+
+
+ public HashKey(String value) {
+ parts = (Object[])value.split("::");
+ }
+
+ public HashKey(Tuple t, String[] keys) {
+ this.parts = new Object[keys.length];
+ for(int i=0; i<keys.length; i++) {
+ parts[i] = t.get(keys[i]);
+ }
+ }
+
+ public String toString() {
+ StringBuilder buf = new StringBuilder();
+ for(int i=0; i<parts.length; i++) {
+ if(i > 0) {
+ buf.append("::");
+ }
+ buf.append(parts[i].toString());
+ }
+
+ return buf.toString();
+ }
+}
\ No newline at end of file
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RollupStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RollupStream.java
new file mode 100644
index 00000000000..a6755b2bcb9
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RollupStream.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.solr.client.solrj.io.stream;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.ArrayList;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.comp.HashKey;
+import org.apache.solr.client.solrj.io.stream.metrics.Bucket;
+import org.apache.solr.client.solrj.io.stream.metrics.Metric;
+
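+/**
+ * RollupStream wraps another TupleStream and emits one aggregated Tuple per
+ * bucket, applying the configured Metrics (sum, min, max, avg, count) to the
+ * rows that share the same bucket values.
+ */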
+public class RollupStream extends TupleStream {
+
+ private static final long serialVersionUID = 1;
+
+ private PushBackStream tupleStream;
+ private Bucket[] buckets;
+ private Metric[] metrics;
+ private HashKey currentKey = new HashKey("-");
+ private Metric[] currentMetrics;
+ private boolean finished = false;
+
+ public RollupStream(TupleStream tupleStream,
+ Bucket[] buckets,
+ Metric[] metrics) {
+ this.tupleStream = new PushBackStream(tupleStream);
+ this.buckets = buckets;
+ this.metrics = metrics;
+ }
+
+ public void setStreamContext(StreamContext context) {
+ this.tupleStream.setStreamContext(context);
+ }
+
+ public List children() {
+ List l = new ArrayList();
+ l.add(tupleStream);
+ return l;
+ }
+
+ public void open() throws IOException {
+ tupleStream.open();
+ }
+
+ public void close() throws IOException {
+ tupleStream.close();
+ }
+
+ public Tuple read() throws IOException {
+
+ while(true) {
+ Tuple tuple = tupleStream.read();
+ if(tuple.EOF) {
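+ // The underlying stream is exhausted: flush the metrics accumulated for the final bucket.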
+ if(!finished) {
+ Map map = new HashMap();
+ for(Metric metric : currentMetrics) {
+ map.put(metric.getName(), metric.getValue());
+ }
+
+ for(int i=0; i doubleMax) {
+ doubleMax = d;
+ }
+ } else {
+ long l = (long)o;
+ if(l > longMax) {
+ longMax = l;
+ }
+ }
+ }
+
+ public Metric newInstance() {
+ return new MaxMetric(column);
+ }
+}
\ No newline at end of file
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/MeanMetric.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/MeanMetric.java
new file mode 100644
index 00000000000..8132f6d5451
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/MeanMetric.java
@@ -0,0 +1,70 @@
+package org.apache.solr.client.solrj.io.stream.metrics;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Serializable;
+import java.util.Map;
+import java.util.HashMap;
+
+import org.apache.solr.client.solrj.io.Tuple;
+
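+/**
+ * MeanMetric keeps a running count plus separate long and double sums for a
+ * column, and getValue() reports their average.
+ */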
+public class MeanMetric implements Metric, Serializable {
+
+ private static final long serialVersionUID = 1;
+
+ private String column;
+ private double doubleSum;
+ private long longSum;
+ private long count;
+
+ public MeanMetric(String column) {
+ this.column = column;
+ }
+
+ public String getName() {
+ return "avg("+column+")";
+ }
+
+ public void update(Tuple tuple) {
+ ++count;
+ Object o = tuple.get(column);
+ if(o instanceof Double) {
+ Double d = (Double)o;
+ doubleSum += d.doubleValue();
+ } else {
+ Long l = (Long)o;
+ longSum += l.longValue();
+ }
+ }
+
+ public Metric newInstance() {
+ return new MeanMetric(column);
+ }
+
+ public double getValue() {
+ double dcount = (double)count;
+ if(longSum == 0) {
+ double ave = doubleSum/dcount;
+ return ave;
+
+ } else {
+ double ave = longSum/dcount;
+ return ave;
+ }
+ }
+}
\ No newline at end of file
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/Metric.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/Metric.java
new file mode 100644
index 00000000000..19f22dfde45
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/Metric.java
@@ -0,0 +1,28 @@
+package org.apache.solr.client.solrj.io.stream.metrics;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Serializable;
+import org.apache.solr.client.solrj.io.Tuple;
+
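+/**
+ * A Metric is a serializable aggregate accumulator: update(Tuple) folds in a
+ * row, getValue() reports the current result, and newInstance() produces a
+ * fresh, empty accumulator of the same type.
+ */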
+public interface Metric extends Serializable {
+ public String getName();
+ public double getValue();
+ public void update(Tuple tuple);
+ public Metric newInstance();
+}
\ No newline at end of file
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/MinMetric.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/MinMetric.java
new file mode 100644
index 00000000000..630634d4f76
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/MinMetric.java
@@ -0,0 +1,66 @@
+package org.apache.solr.client.solrj.io.stream.metrics;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Serializable;
+
+import org.apache.solr.client.solrj.io.Tuple;
+
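+/**
+ * MinMetric tracks the smallest long or double value observed for a column.
+ */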
+public class MinMetric implements Metric, Serializable {
+
+ private static final long serialVersionUID = 1;
+
+ private long longMin = Long.MAX_VALUE;
+ private double doubleMin = Double.MAX_VALUE;
+ private String column;
+
+ public MinMetric(String column) {
+ this.column = column;
+ }
+
+ public String getName() {
+ return "min("+column+")";
+ }
+
+ public double getValue() {
+ if(longMin == Long.MAX_VALUE) {
+ return doubleMin;
+ } else {
+ return longMin;
+ }
+ }
+
+ public void update(Tuple tuple) {
+ Object o = tuple.get(column);
+ if(o instanceof Double) {
+ double d = (double)o;
+ if(d < doubleMin) {
+ doubleMin = d;
+ }
+ } else {
+ long l = (long)o;
+ if(l < longMin) {
+ longMin = l;
+ }
+ }
+ }
+
+ public Metric newInstance() {
+ return new MinMetric(column);
+ }
+}
\ No newline at end of file
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/SumMetric.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/SumMetric.java
new file mode 100644
index 00000000000..916b971f333
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/SumMetric.java
@@ -0,0 +1,62 @@
+package org.apache.solr.client.solrj.io.stream.metrics;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Serializable;
+
+import org.apache.solr.client.solrj.io.Tuple;
+
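+/**
+ * SumMetric keeps separate running long and double totals for a column;
+ * getValue() returns whichever total was populated.
+ */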
+public class SumMetric implements Metric, Serializable {
+
+ private static final long serialVersionUID = 1;
+
+ private String column;
+ private double doubleSum;
+ private long longSum;
+
+ public SumMetric(String column) {
+ this.column = column;
+ }
+
+ public String getName() {
+ return "sum("+column+")";
+ }
+
+ public void update(Tuple tuple) {
+ Object o = tuple.get(column);
+ if(o instanceof Double) {
+ Double d = (Double)o;
+ doubleSum += d.doubleValue();
+ } else {
+ Long l = (Long)o;
+ longSum += l.longValue();
+ }
+ }
+
+ public Metric newInstance() {
+ return new SumMetric(column);
+ }
+
+ public double getValue() {
+ if(longSum == 0) {
+ return doubleSum;
+ } else {
+ return (double)longSum;
+ }
+ }
+}
\ No newline at end of file
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/package-info.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/package-info.java
new file mode 100644
index 00000000000..a25732e9a2f
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/package-info.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+/**
+ * Metrics package
+ **/
+package org.apache.solr.client.solrj.io.stream.metrics;
+
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
index 32ba7f96454..4a0408c0c9e 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
@@ -39,6 +39,13 @@ import org.apache.solr.client.solrj.io.stream.ReducerStream;
import org.apache.solr.client.solrj.io.stream.TupleStream;
import org.apache.solr.client.solrj.io.stream.UniqueStream;
import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.apache.solr.client.solrj.io.stream.metrics.Bucket;
+import org.apache.solr.client.solrj.io.stream.metrics.CountMetric;
+import org.apache.solr.client.solrj.io.stream.metrics.MaxMetric;
+import org.apache.solr.client.solrj.io.stream.metrics.MeanMetric;
+import org.apache.solr.client.solrj.io.stream.metrics.Metric;
+import org.apache.solr.client.solrj.io.stream.metrics.MinMetric;
+import org.apache.solr.client.solrj.io.stream.metrics.SumMetric;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
import org.apache.solr.cloud.AbstractZkTestCase;
import org.apache.solr.common.SolrInputDocument;
@@ -486,6 +493,241 @@ public class StreamingTest extends AbstractFullDistribZkTestBase {
commit();
}
+ private void testRollupStream() throws Exception {
+
+ indexr(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1");
+ indexr(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2");
+ indexr(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3");
+ indexr(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4");
+ indexr(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5");
+ indexr(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6");
+ indexr(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7");
+ indexr(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8");
+ indexr(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9");
+ indexr(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10");
+
+ commit();
+
+ String zkHost = zkServer.getZkAddress();
+ streamFactory.withCollectionZkHost("collection1", zkHost);
+
+ Map paramsA = mapParams("q","*:*","fl","a_s,a_i,a_f","sort", "a_s asc");
+ CloudSolrStream stream = new CloudSolrStream(zkHost, "collection1", paramsA);
+
+ Bucket[] buckets = {new Bucket("a_s")};
+
+ Metric[] metrics = {new SumMetric("a_i"),
+ new SumMetric("a_f"),
+ new MinMetric("a_i"),
+ new MinMetric("a_f"),
+ new MaxMetric("a_i"),
+ new MaxMetric("a_f"),
+ new MeanMetric("a_i"),
+ new MeanMetric("a_f"),
+ new CountMetric()};
+
+ RollupStream rollupStream = new RollupStream(stream, buckets, metrics);
+ List tuples = getTuples(rollupStream);
+
+ assert(tuples.size() == 3);
+
+ //Test Long and Double Sums
+
+ Tuple tuple = tuples.get(0);
+ String bucket = tuple.getString("a_s");
+ Double sumi = tuple.getDouble("sum(a_i)");
+ Double sumf = tuple.getDouble("sum(a_f)");
+ Double mini = tuple.getDouble("min(a_i)");
+ Double minf = tuple.getDouble("min(a_f)");
+ Double maxi = tuple.getDouble("max(a_i)");
+ Double maxf = tuple.getDouble("max(a_f)");
+ Double avgi = tuple.getDouble("avg(a_i)");
+ Double avgf = tuple.getDouble("avg(a_f)");
+ Double count = tuple.getDouble("count(*)");
+
+
+ assertTrue(bucket.equals("hello0"));
+ assertTrue(sumi.doubleValue() == 17.0D);
+ assertTrue(sumf.doubleValue() == 18.0D);
+ assertTrue(mini.doubleValue() == 0.0D);
+ assertTrue(minf.doubleValue() == 1.0D);
+ assertTrue(maxi.doubleValue() == 14.0D);
+ assertTrue(maxf.doubleValue() == 10.0D);
+ assertTrue(avgi.doubleValue() == 4.25D);
+ assertTrue(avgf.doubleValue() == 4.5D);
+ assertTrue(count.doubleValue() == 4);
+
+
+ tuple = tuples.get(1);
+ bucket = tuple.getString("a_s");
+ sumi = tuple.getDouble("sum(a_i)");
+ sumf = tuple.getDouble("sum(a_f)");
+ mini = tuple.getDouble("min(a_i)");
+ minf = tuple.getDouble("min(a_f)");
+ maxi = tuple.getDouble("max(a_i)");
+ maxf = tuple.getDouble("max(a_f)");
+ avgi = tuple.getDouble("avg(a_i)");
+ avgf = tuple.getDouble("avg(a_f)");
+ count = tuple.getDouble("count(*)");
+
+ assertTrue(bucket.equals("hello3"));
+ assertTrue(sumi.doubleValue() == 38.0D);
+ assertTrue(sumf.doubleValue() == 26.0D);
+ assertTrue(mini.doubleValue() == 3.0D);
+ assertTrue(minf.doubleValue() == 3.0D);
+ assertTrue(maxi.doubleValue() == 13.0D);
+ assertTrue(maxf.doubleValue() == 9.0D);
+ assertTrue(avgi.doubleValue() == 9.5D);
+ assertTrue(avgf.doubleValue() == 6.5D);
+ assertTrue(count.doubleValue() == 4);
+
+
+ tuple = tuples.get(2);
+ bucket = tuple.getString("a_s");
+ sumi = tuple.getDouble("sum(a_i)");
+ sumf = tuple.getDouble("sum(a_f)");
+ mini = tuple.getDouble("min(a_i)");
+ minf = tuple.getDouble("min(a_f)");
+ maxi = tuple.getDouble("max(a_i)");
+ maxf = tuple.getDouble("max(a_f)");
+ avgi = tuple.getDouble("avg(a_i)");
+ avgf = tuple.getDouble("avg(a_f)");
+ count = tuple.getDouble("count(*)");
+
+ assertTrue(bucket.equals("hello4"));
+ assertTrue(sumi.longValue() == 15);
+ assertTrue(sumf.doubleValue() == 11.0D);
+ assertTrue(mini.doubleValue() == 4.0D);
+ assertTrue(minf.doubleValue() == 4.0D);
+ assertTrue(maxi.doubleValue() == 11.0D);
+ assertTrue(maxf.doubleValue() == 7.0D);
+ assertTrue(avgi.doubleValue() == 7.5D);
+ assertTrue(avgf.doubleValue() == 5.5D);
+ assertTrue(count.doubleValue() == 2);
+
+
+
+ del("*:*");
+ commit();
+ }
+
+
+
+
+ private void testParallelRollupStream() throws Exception {
+
+ indexr(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1");
+ indexr(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2");
+ indexr(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3");
+ indexr(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4");
+ indexr(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5");
+ indexr(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6");
+ indexr(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7");
+ indexr(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8");
+ indexr(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9");
+ indexr(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10");
+
+ commit();
+
+ String zkHost = zkServer.getZkAddress();
+ streamFactory.withCollectionZkHost("collection1", zkHost);
+
+ Map paramsA = mapParams("q","*:*","fl","a_s,a_i,a_f","sort", "a_s asc", "partitionKeys", "a_s");
+ CloudSolrStream stream = new CloudSolrStream(zkHost, "collection1", paramsA);
+
+ Bucket[] buckets = {new Bucket("a_s")};
+
+ Metric[] metrics = {new SumMetric("a_i"),
+ new SumMetric("a_f"),
+ new MinMetric("a_i"),
+ new MinMetric("a_f"),
+ new MaxMetric("a_i"),
+ new MaxMetric("a_f"),
+ new MeanMetric("a_i"),
+ new MeanMetric("a_f"),
+ new CountMetric()};
+
+ RollupStream rollupStream = new RollupStream(stream, buckets, metrics);
+ ParallelStream parallelStream = new ParallelStream(zkHost, "collection1", rollupStream, 2, new FieldComparator("a_s", ComparatorOrder.ASCENDING));
+ List tuples = getTuples(parallelStream);
+
+ assert(tuples.size() == 3);
+
+ //Test Long and Double Sums
+
+ Tuple tuple = tuples.get(0);
+ String bucket = tuple.getString("a_s");
+ Double sumi = tuple.getDouble("sum(a_i)");
+ Double sumf = tuple.getDouble("sum(a_f)");
+ Double mini = tuple.getDouble("min(a_i)");
+ Double minf = tuple.getDouble("min(a_f)");
+ Double maxi = tuple.getDouble("max(a_i)");
+ Double maxf = tuple.getDouble("max(a_f)");
+ Double avgi = tuple.getDouble("avg(a_i)");
+ Double avgf = tuple.getDouble("avg(a_f)");
+ Double count = tuple.getDouble("count(*)");
+
+ assertTrue(bucket.equals("hello0"));
+ assertTrue(sumi.doubleValue() == 17.0D);
+ assertTrue(sumf.doubleValue() == 18.0D);
+ assertTrue(mini.doubleValue() == 0.0D);
+ assertTrue(minf.doubleValue() == 1.0D);
+ assertTrue(maxi.doubleValue() == 14.0D);
+ assertTrue(maxf.doubleValue() == 10.0D);
+ assertTrue(avgi.doubleValue() == 4.25D);
+ assertTrue(avgf.doubleValue() == 4.5D);
+ assertTrue(count.doubleValue() == 4);
+
+ tuple = tuples.get(1);
+ bucket = tuple.getString("a_s");
+ sumi = tuple.getDouble("sum(a_i)");
+ sumf = tuple.getDouble("sum(a_f)");
+ mini = tuple.getDouble("min(a_i)");
+ minf = tuple.getDouble("min(a_f)");
+ maxi = tuple.getDouble("max(a_i)");
+ maxf = tuple.getDouble("max(a_f)");
+ avgi = tuple.getDouble("avg(a_i)");
+ avgf = tuple.getDouble("avg(a_f)");
+ count = tuple.getDouble("count(*)");
+
+ assertTrue(bucket.equals("hello3"));
+ assertTrue(sumi.doubleValue() == 38.0D);
+ assertTrue(sumf.doubleValue() == 26.0D);
+ assertTrue(mini.doubleValue() == 3.0D);
+ assertTrue(minf.doubleValue() == 3.0D);
+ assertTrue(maxi.doubleValue() == 13.0D);
+ assertTrue(maxf.doubleValue() == 9.0D);
+ assertTrue(avgi.doubleValue() == 9.5D);
+ assertTrue(avgf.doubleValue() == 6.5D);
+ assertTrue(count.doubleValue() == 4);
+
+ tuple = tuples.get(2);
+ bucket = tuple.getString("a_s");
+ sumi = tuple.getDouble("sum(a_i)");
+ sumf = tuple.getDouble("sum(a_f)");
+ mini = tuple.getDouble("min(a_i)");
+ minf = tuple.getDouble("min(a_f)");
+ maxi = tuple.getDouble("max(a_i)");
+ maxf = tuple.getDouble("max(a_f)");
+ avgi = tuple.getDouble("avg(a_i)");
+ avgf = tuple.getDouble("avg(a_f)");
+ count = tuple.getDouble("count(*)");
+
+ assertTrue(bucket.equals("hello4"));
+ assertTrue(sumi.longValue() == 15);
+ assertTrue(sumf.doubleValue() == 11.0D);
+ assertTrue(mini.doubleValue() == 4.0D);
+ assertTrue(minf.doubleValue() == 4.0D);
+ assertTrue(maxi.doubleValue() == 11.0D);
+ assertTrue(maxf.doubleValue() == 7.0D);
+ assertTrue(avgi.doubleValue() == 7.5D);
+ assertTrue(avgf.doubleValue() == 5.5D);
+ assertTrue(count.doubleValue() == 2);
+
+ del("*:*");
+ commit();
+ }
+
private void testZeroParallelReducerStream() throws Exception {
indexr(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1");
@@ -782,8 +1024,8 @@ public class StreamingTest extends AbstractFullDistribZkTestBase {
stream = new CloudSolrStream(zkHost, "collection1", params);
tuples = getTuples(stream);
- assert(tuples.size() == 5);
- assertOrder(tuples, 0,2,1,3,4);
+ assert (tuples.size() == 5);
+ assertOrder(tuples, 0, 2, 1, 3, 4);
del("*:*");
commit();
@@ -796,11 +1038,13 @@ public class StreamingTest extends AbstractFullDistribZkTestBase {
testRankStream();
testMergeStream();
testReducerStream();
+ testRollupStream();
testZeroReducerStream();
testParallelEOF();
testParallelUniqueStream();
testParallelRankStream();
testParallelMergeStream();
+ testParallelRollupStream();
testParallelReducerStream();
testZeroParallelReducerStream();
}