diff --git a/build.sh b/build.sh index ec9ecad11bd..78e5fffe175 100755 --- a/build.sh +++ b/build.sh @@ -30,4 +30,4 @@ echo "For examples, see: " echo " " ls -1 examples/*/*sh echo " " -echo "See also http://druid.io/docs/0.6.51" +echo "See also http://druid.io/docs/0.6.52" diff --git a/cassandra-storage/pom.xml b/cassandra-storage/pom.xml index 21953a6e466..0d33b40c711 100644 --- a/cassandra-storage/pom.xml +++ b/cassandra-storage/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT diff --git a/common/pom.xml b/common/pom.xml index 0afd4f1c0df..b02af58de22 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT diff --git a/common/src/main/java/io/druid/concurrent/Execs.java b/common/src/main/java/io/druid/concurrent/Execs.java index ea27bf96e05..308208ef98d 100644 --- a/common/src/main/java/io/druid/concurrent/Execs.java +++ b/common/src/main/java/io/druid/concurrent/Execs.java @@ -21,10 +21,15 @@ package io.druid.concurrent; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.RejectedExecutionHandler; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; /** */ @@ -49,4 +54,31 @@ public class Execs { return new ThreadFactoryBuilder().setDaemon(true).setNameFormat(nameFormat).build(); } + + /** + * @param nameFormat nameformat for threadFactory + * @param capacity maximum capacity after which the executorService will block on accepting new tasks + * @return ExecutorService which blocks accepting new tasks when the capacity reached + */ + public static ExecutorService newBlockingSingleThreaded(String nameFormat, int capacity) + { + return new ThreadPoolExecutor( + 1, 1, + 0L, TimeUnit.MILLISECONDS, + new ArrayBlockingQueue(capacity), makeThreadFactory(nameFormat) + , new RejectedExecutionHandler() + { + @Override + public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) + { + try { + ((ArrayBlockingQueue) executor.getQueue()).put(r); + } + catch (InterruptedException e) { + throw new RejectedExecutionException("Got Interrupted while adding to the Queue"); + } + } + } + ); + } } diff --git a/common/src/test/java/io/druid/concurrent/ExecsTest.java b/common/src/test/java/io/druid/concurrent/ExecsTest.java new file mode 100644 index 00000000000..809ed5eac02 --- /dev/null +++ b/common/src/test/java/io/druid/concurrent/ExecsTest.java @@ -0,0 +1,92 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013, 2014 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.concurrent; + +import com.google.common.base.Throwables; +import org.junit.Assert; +import org.junit.Test; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicInteger; + +public class ExecsTest +{ + @Test + public void testBlockingExecutorService() throws Exception + { + final int capacity = 3; + final ExecutorService blockingExecutor = Execs.newBlockingSingleThreaded("test%d", capacity); + final CountDownLatch queueFullSignal = new CountDownLatch(capacity + 1); + final CountDownLatch taskCompletedSignal = new CountDownLatch(2 * capacity); + final CountDownLatch taskStartSignal = new CountDownLatch(1); + final AtomicInteger producedCount = new AtomicInteger(); + final AtomicInteger consumedCount = new AtomicInteger(); + ExecutorService producer = Executors.newSingleThreadExecutor(); + producer.submit( + new Runnable() + { + public void run() + { + for (int i = 0; i < 2 * capacity; i++) { + final int taskID = i; + System.out.println("Produced task" + taskID); + blockingExecutor.submit( + new Runnable() + { + @Override + public void run() + { + System.out.println("Starting task" + taskID); + try { + taskStartSignal.await(); + consumedCount.incrementAndGet(); + taskCompletedSignal.countDown(); + } + catch (Exception e) { + throw Throwables.propagate(e); + } + System.out.println("Completed task" + taskID); + } + } + ); + producedCount.incrementAndGet(); + queueFullSignal.countDown(); + } + } + } + ); + + queueFullSignal.await(); + // verify that the producer blocks + Assert.assertEquals(capacity + 1, producedCount.get()); + // let the tasks run + taskStartSignal.countDown(); + // wait until all tasks complete + taskCompletedSignal.await(); + // verify all tasks consumed + Assert.assertEquals(2 * capacity, consumedCount.get()); + // cleanup + blockingExecutor.shutdown(); + producer.shutdown(); + + } +} diff --git a/docs/content/Broker.md b/docs/content/Broker.md index b4da210a35f..0cd6893a3a4 100644 --- a/docs/content/Broker.md +++ b/docs/content/Broker.md @@ -37,7 +37,7 @@ The broker module uses several of the default modules in [Configuration](Configu |Property|Possible Values|Description|Default| |--------|---------------|-----------|-------| -|`druid.broker.cache.type`|`local`, `memcache`|The type of cache to use for queries.|`local`| +|`druid.broker.cache.type`|`local`, `memcached`|The type of cache to use for queries.|`local`| |`druid.broker.balancer.type`|`random`, `connectionCount`|Determines how the broker balances connections to compute nodes. 
`random` choose randomly, `connectionCount` picks the node with the fewest number of active connections to|`random`| #### Local Cache diff --git a/docs/content/Examples.md b/docs/content/Examples.md index 335feb3f233..2ea2b8567c8 100644 --- a/docs/content/Examples.md +++ b/docs/content/Examples.md @@ -19,13 +19,13 @@ Clone Druid and build it: git clone https://github.com/metamx/druid.git druid cd druid git fetch --tags -git checkout druid-0.6.51 +git checkout druid-0.6.52 ./build.sh ``` ### Downloading the DSK (Druid Standalone Kit) -[Download](http://static.druid.io/artifacts/releases/druid-services-0.6.51-bin.tar.gz) a stand-alone tarball and run it: +[Download](http://static.druid.io/artifacts/releases/druid-services-0.6.52-bin.tar.gz) a stand-alone tarball and run it: ``` bash tar -xzf druid-services-0.X.X-bin.tar.gz diff --git a/docs/content/Plumber.md b/docs/content/Plumber.md index dfbb3b6b3bf..bbf6b79cbc4 100644 --- a/docs/content/Plumber.md +++ b/docs/content/Plumber.md @@ -13,6 +13,8 @@ We provide a brief description of the example to exemplify the types of things t * `windowPeriod` is the amount of lag time to allow events. This is configured with a 10 minute window, meaning that any event more than 10 minutes ago will be thrown away and not included in the segment generated by the realtime server. * `basePersistDirectory` is the directory to put things that need persistence. The plumber is responsible for the actual intermediate persists and this tells it where to store those persists. +* `maxPendingPersists` is how many persists a plumber can do concurrently without starting to block. + Available Plumbers ------------------ diff --git a/docs/content/Realtime-ingestion.md b/docs/content/Realtime-ingestion.md index f183ad06804..8b895822cf5 100644 --- a/docs/content/Realtime-ingestion.md +++ b/docs/content/Realtime-ingestion.md @@ -97,7 +97,6 @@ This describes the data schema for the output Druid segment. More information ab |aggregators|Array of Objects|The list of aggregators to use to aggregate colliding rows together.|yes| |dataSource|String|The name of the dataSource that the segment belongs to.|yes| |indexGranularity|String|The granularity of the data inside the segment. E.g. a value of "minute" will mean that data is aggregated at minutely granularity. That is, if there are collisions in the tuple (minute(timestamp), dimensions), then it will aggregate values together using the aggregators instead of storing individual rows.|yes| -|segmentGranularity|String|The granularity of the segment as a whole. This is generally larger than the index granularity and describes the rate at which the realtime server will push segments out for historical servers to take over.|yes| |shardSpec|Object|This describes the shard that is represented by this server. 
This must be specified properly in order to have multiple realtime nodes indexing the same data stream in a sharded fashion.|no| ### Config diff --git a/docs/content/Realtime.md b/docs/content/Realtime.md index 8351ae3479e..0ee39550b09 100644 --- a/docs/content/Realtime.md +++ b/docs/content/Realtime.md @@ -27,7 +27,7 @@ druid.host=localhost druid.service=realtime druid.port=8083 -druid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.51"] +druid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.52"] druid.zk.service.host=localhost diff --git a/docs/content/Tutorial:-A-First-Look-at-Druid.md b/docs/content/Tutorial:-A-First-Look-at-Druid.md index 6b6557bd7e0..f0dd8c61bba 100644 --- a/docs/content/Tutorial:-A-First-Look-at-Druid.md +++ b/docs/content/Tutorial:-A-First-Look-at-Druid.md @@ -49,7 +49,7 @@ There are two ways to setup Druid: download a tarball, or [Build From Source](Bu ### Download a Tarball -We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.51-bin.tar.gz). Download this file to a directory of your choosing. +We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.52-bin.tar.gz). Download this file to a directory of your choosing. You can extract the awesomeness within by issuing: @@ -60,7 +60,7 @@ tar -zxvf druid-services-*-bin.tar.gz Not too lost so far right? That's great! If you cd into the directory: ``` -cd druid-services-0.6.51 +cd druid-services-0.6.52 ``` You should see a bunch of files: diff --git a/docs/content/Tutorial:-Loading-Your-Data-Part-1.md b/docs/content/Tutorial:-Loading-Your-Data-Part-1.md index 45ad2179cee..5a9d57b7ecb 100644 --- a/docs/content/Tutorial:-Loading-Your-Data-Part-1.md +++ b/docs/content/Tutorial:-Loading-Your-Data-Part-1.md @@ -219,9 +219,9 @@ Congratulations! The segment has completed building. Once a segment is built, a You should see the following logs on the coordinator: ```bash -2013-10-09 21:41:54,368 INFO [Coordinator-Exec--0] io.druid.server.coordinator.DruidCoordinatorLogger - [_default_tier] : Assigned 1 segments among 1 servers -2013-10-09 21:41:54,369 INFO [Coordinator-Exec--0] io.druid.server.coordinator.DruidCoordinatorLogger - Load Queues: -2013-10-09 21:41:54,369 INFO [Coordinator-Exec--0] io.druid.server.coordinator.DruidCoordinatorLogger - Server[localhost:8081, historical, _default_tier] has 1 left to load, 0 left to drop, 4,477 bytes queued, 4,477 bytes served. +2013-10-09 21:41:54,368 INFO [Coordinator-Exec--0] io.druid.server.coordinator.helper.DruidCoordinatorLogger - [_default_tier] : Assigned 1 segments among 1 servers +2013-10-09 21:41:54,369 INFO [Coordinator-Exec--0] io.druid.server.coordinator.helper.DruidCoordinatorLogger - Load Queues: +2013-10-09 21:41:54,369 INFO [Coordinator-Exec--0] io.druid.server.coordinator.helper.DruidCoordinatorLogger - Server[localhost:8081, historical, _default_tier] has 1 left to load, 0 left to drop, 4,477 bytes queued, 4,477 bytes served. ``` These logs indicate that the coordinator has assigned our new segment to the historical node to download and serve. 
If you look at the historical node logs, you should see: diff --git a/docs/content/Tutorial:-Loading-Your-Data-Part-2.md b/docs/content/Tutorial:-Loading-Your-Data-Part-2.md index 5efd2c05955..913548a1d9c 100644 --- a/docs/content/Tutorial:-Loading-Your-Data-Part-2.md +++ b/docs/content/Tutorial:-Loading-Your-Data-Part-2.md @@ -45,7 +45,7 @@ With real-world data, we recommend having a message bus such as [Apache Kafka](h #### Setting up Kafka -[KafkaFirehoseFactory](https://github.com/metamx/druid/blob/druid-0.6.51/realtime/src/main/java/com/metamx/druid/realtime/firehose/KafkaFirehoseFactory.java) is how druid communicates with Kafka. Using this [Firehose](Firehose.html) with the right configuration, we can import data into Druid in real-time without writing any code. To load data to a real-time node via Kafka, we'll first need to initialize Zookeeper and Kafka, and then configure and initialize a [Realtime](Realtime.html) node. +[KafkaFirehoseFactory](https://github.com/metamx/druid/blob/druid-0.6.52/realtime/src/main/java/com/metamx/druid/realtime/firehose/KafkaFirehoseFactory.java) is how druid communicates with Kafka. Using this [Firehose](Firehose.html) with the right configuration, we can import data into Druid in real-time without writing any code. To load data to a real-time node via Kafka, we'll first need to initialize Zookeeper and Kafka, and then configure and initialize a [Realtime](Realtime.html) node. Instructions for booting a Zookeeper and then Kafka cluster are available [here](http://kafka.apache.org/07/quickstart.html). diff --git a/docs/content/Tutorial:-The-Druid-Cluster.md b/docs/content/Tutorial:-The-Druid-Cluster.md index d76e01e097b..1ce36bca5a1 100644 --- a/docs/content/Tutorial:-The-Druid-Cluster.md +++ b/docs/content/Tutorial:-The-Druid-Cluster.md @@ -13,7 +13,9 @@ In this tutorial, we will set up other types of Druid nodes and external depende If you followed the first tutorial, you should already have Druid downloaded. If not, let's go back and do that first. -You can download the latest version of druid [here](http://static.druid.io/artifacts/releases/druid-services-0.6.51-bin.tar.gz) and untar the contents within by issuing: +You can download the latest version of druid [here](http://static.druid.io/artifacts/releases/druid-services-0.6.52-bin.tar.gz) + +and untar the contents within by issuing: ```bash tar -zxvf druid-services-*-bin.tar.gz @@ -147,7 +149,7 @@ druid.port=8081 druid.zk.service.host=localhost -druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.51"] +druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.52"] # Dummy read only AWS account (used to download example data) druid.s3.secretKey=QyyfVZ7llSiRg6Qcrql1eEUG7buFpAK6T6engr1b @@ -238,7 +240,7 @@ druid.port=8083 druid.zk.service.host=localhost -druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.51","io.druid.extensions:druid-kafka-seven:0.6.51"] +druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.52","io.druid.extensions:druid-kafka-seven:0.6.52"] # Change this config to db to hand off to the rest of the Druid cluster druid.publish.type=noop diff --git a/docs/content/Tutorial:-Webstream.md b/docs/content/Tutorial:-Webstream.md index a3f3e1e9046..176835fb923 100644 --- a/docs/content/Tutorial:-Webstream.md +++ b/docs/content/Tutorial:-Webstream.md @@ -37,7 +37,7 @@ There are two ways to setup Druid: download a tarball, or [Build From Source](Bu h3. 
Download a Tarball -We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.51-bin.tar.gz) +We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.52-bin.tar.gz) Download this file to a directory of your choosing. You can extract the awesomeness within by issuing: @@ -48,7 +48,7 @@ tar zxvf druid-services-*-bin.tar.gz Not too lost so far right? That's great! If you cd into the directory: ``` -cd druid-services-0.6.51 +cd druid-services-0.6.52 ``` You should see a bunch of files: diff --git a/docs/content/Twitter-Tutorial.textile b/docs/content/Twitter-Tutorial.textile index 3ca3c6571b9..c08774a57bc 100644 --- a/docs/content/Twitter-Tutorial.textile +++ b/docs/content/Twitter-Tutorial.textile @@ -9,7 +9,7 @@ There are two ways to setup Druid: download a tarball, or build it from source. h3. Download a Tarball -We've built a tarball that contains everything you'll need. You'll find it "here":http://static.druid.io/artifacts/releases/druid-services-0.6.51-bin.tar.gz. +We've built a tarball that contains everything you'll need. You'll find it "here":http://static.druid.io/artifacts/releases/druid-services-0.6.52-bin.tar.gz. Download this bad boy to a directory of your choosing. You can extract the awesomeness within by issuing: diff --git a/examples/config/historical/runtime.properties b/examples/config/historical/runtime.properties index 69ab0cd4dce..dec3ad5113b 100644 --- a/examples/config/historical/runtime.properties +++ b/examples/config/historical/runtime.properties @@ -4,7 +4,7 @@ druid.port=8081 druid.zk.service.host=localhost -druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.51"] +druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.52"] # Dummy read only AWS account (used to download example data) druid.s3.secretKey=QyyfVZ7llSiRg6Qcrql1eEUG7buFpAK6T6engr1b diff --git a/examples/config/realtime/runtime.properties b/examples/config/realtime/runtime.properties index 33d09cdf9e6..cbab843c0e4 100644 --- a/examples/config/realtime/runtime.properties +++ b/examples/config/realtime/runtime.properties @@ -4,7 +4,7 @@ druid.port=8083 druid.zk.service.host=localhost -druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.51","io.druid.extensions:druid-kafka-seven:0.6.51","io.druid.extensions:druid-rabbitmq:0.6.51"] +druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.52","io.druid.extensions:druid-kafka-seven:0.6.52","io.druid.extensions:druid-rabbitmq:0.6.52"] # Change this config to db to hand off to the rest of the Druid cluster druid.publish.type=noop diff --git a/examples/pom.xml b/examples/pom.xml index 840b4ff5b52..1f77718f582 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT diff --git a/examples/src/test/java/io/druid/examples/web/WebJsonSupplierTest.java b/examples/src/test/java/io/druid/examples/web/WebJsonSupplierTest.java index d80fd5479f9..ca181427c82 100644 --- a/examples/src/test/java/io/druid/examples/web/WebJsonSupplierTest.java +++ b/examples/src/test/java/io/druid/examples/web/WebJsonSupplierTest.java @@ -29,7 +29,7 @@ public class WebJsonSupplierTest public void checkInvalidUrl() throws Exception { - String invalidURL = "http://invalid.url"; + String invalidURL = "http://invalid.url."; WebJsonSupplier supplier = new WebJsonSupplier(invalidURL); 
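// The trailing dot in "http://invalid.url." makes the host a fully-qualified (rooted) name,
// so resolvers should not append DNS search suffixes; presumably the point of the change is
// to keep this lookup failing even on networks whose search domains or wildcard DNS would
// otherwise resolve "invalid.url".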
supplier.getInput(); } diff --git a/hdfs-storage/pom.xml b/hdfs-storage/pom.xml index 87980619bb2..8ec3b6dcf4b 100644 --- a/hdfs-storage/pom.xml +++ b/hdfs-storage/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT diff --git a/hll/pom.xml b/hll/pom.xml new file mode 100644 index 00000000000..58e17295c96 --- /dev/null +++ b/hll/pom.xml @@ -0,0 +1,82 @@ + + + + + 4.0.0 + io.druid.extensions + druid-hll + druid-hll + druid-hll + + + io.druid + druid + 0.6.53-SNAPSHOT + + + + + io.druid + druid-api + + + io.druid + druid-processing + ${project.parent.version} + + + com.metamx + emitter + + + net.sf.trove4j + trove4j + 3.0.3 + + + commons-codec + commons-codec + + + + + junit + junit + test + + + + + + + maven-jar-plugin + + + + true + true + + + + + + + diff --git a/hll/src/main/java/io/druid/query/aggregation/HyperloglogAggregator.java b/hll/src/main/java/io/druid/query/aggregation/HyperloglogAggregator.java new file mode 100755 index 00000000000..ddbe60dcf67 --- /dev/null +++ b/hll/src/main/java/io/druid/query/aggregation/HyperloglogAggregator.java @@ -0,0 +1,137 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.query.aggregation; + +import com.google.common.hash.Hashing; +import com.metamx.common.ISE; +import com.metamx.common.logger.Logger; +import gnu.trove.map.TIntByteMap; +import gnu.trove.map.hash.TIntByteHashMap; +import io.druid.segment.ObjectColumnSelector; + +import java.util.Comparator; + +public class HyperloglogAggregator implements Aggregator +{ + private static final Logger log = new Logger(HyperloglogAggregator.class); + + public static final int log2m = 12; + public static final int m = (int) Math.pow(2, log2m); + public static final double alphaMM = (0.7213 / (1 + 1.079 / m)) * m * m; + + private final String name; + private final ObjectColumnSelector selector; + + private TIntByteHashMap ibMap; + + static final Comparator COMPARATOR = new Comparator() + { + @Override + public int compare(Object o, Object o1) + { + return o.equals(o1) ? 
0 : 1; + } + }; + + public static Object combine(Object lhs, Object rhs) + { + final TIntByteMap newIbMap = new TIntByteHashMap((TIntByteMap) lhs); + final TIntByteMap rightIbMap = (TIntByteMap) rhs; + final int[] keys = rightIbMap.keys(); + + for (int key : keys) { + if (newIbMap.get(key) == newIbMap.getNoEntryValue() || rightIbMap.get(key) > newIbMap.get(key)) { + newIbMap.put(key, rightIbMap.get(key)); + } + } + + return newIbMap; + } + + public HyperloglogAggregator(String name, ObjectColumnSelector selector) + { + this.name = name; + this.selector = selector; + this.ibMap = new TIntByteHashMap(); + } + + @Override + public void aggregate() + { + final Object value = selector.get(); + + if (value == null) { + return; + } + + if (value instanceof TIntByteHashMap) { + final TIntByteHashMap newIbMap = (TIntByteHashMap) value; + final int[] indexes = newIbMap.keys(); + + for (int index : indexes) { + if (ibMap.get(index) == ibMap.getNoEntryValue() || newIbMap.get(index) > ibMap.get(index)) { + ibMap.put(index, newIbMap.get(index)); + } + } + } else if (value instanceof String) { + log.debug("value [%s]", selector.get()); + + final long id = Hashing.murmur3_128().hashString((String) (value)).asLong(); + final int bucket = (int) (id >>> (Long.SIZE - log2m)); + final int zerolength = Long.numberOfLeadingZeros((id << log2m) | (1 << (log2m - 1)) + 1) + 1; + + if (ibMap.get(bucket) == ibMap.getNoEntryValue() || ibMap.get(bucket) < (byte) zerolength) { + ibMap.put(bucket, (byte) zerolength); + } + } else { + throw new ISE("Aggregate does not support values of type[%s]", value.getClass().getName()); + } + } + + @Override + public void reset() + { + this.ibMap = new TIntByteHashMap(); + } + + @Override + public Object get() + { + return ibMap; + } + + @Override + public float getFloat() + { + throw new UnsupportedOperationException("HyperloglogAggregator does not support getFloat()"); + } + + @Override + public String getName() + { + return name; + } + + @Override + public void close() + { + // do nothing + } +} diff --git a/hll/src/main/java/io/druid/query/aggregation/HyperloglogAggregatorFactory.java b/hll/src/main/java/io/druid/query/aggregation/HyperloglogAggregatorFactory.java new file mode 100755 index 00000000000..3f7150b9221 --- /dev/null +++ b/hll/src/main/java/io/druid/query/aggregation/HyperloglogAggregatorFactory.java @@ -0,0 +1,209 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +package io.druid.query.aggregation; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.base.Preconditions; +import com.metamx.common.logger.Logger; +import gnu.trove.map.hash.TIntByteHashMap; +import io.druid.segment.ColumnSelectorFactory; +import org.apache.commons.codec.binary.Base64; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; + +public class HyperloglogAggregatorFactory implements AggregatorFactory +{ + private static final Logger log = new Logger(HyperloglogAggregatorFactory.class); + private static final byte[] CACHE_KEY = new byte[]{0x37}; + + private final String name; + private final String fieldName; + + @JsonCreator + public HyperloglogAggregatorFactory( + @JsonProperty("name") final String name, + @JsonProperty("fieldName") final String fieldName + ) + { + Preconditions.checkNotNull(name, "Must have a valid, non-null aggregator name"); + Preconditions.checkNotNull(fieldName, "Must have a valid, non-null fieldName"); + + this.name = name; + this.fieldName = fieldName; + } + + @Override + public Aggregator factorize(ColumnSelectorFactory metricFactory) + { + return new HyperloglogAggregator( + name, + metricFactory.makeObjectColumnSelector(fieldName) + ); + } + + @Override + public BufferAggregator factorizeBuffered( + ColumnSelectorFactory metricFactory + ) + { + return new HyperloglogBufferAggregator( + metricFactory.makeObjectColumnSelector(fieldName) + ); + } + + @Override + public Comparator getComparator() + { + return HyperloglogAggregator.COMPARATOR; + } + + @Override + public Object combine(Object lhs, Object rhs) + { + if (rhs == null) { + return lhs; + } + if (lhs == null) { + return rhs; + } + return HyperloglogAggregator.combine(lhs, rhs); + } + + @Override + public AggregatorFactory getCombiningFactory() + { + log.debug("factory name: %s", name); + return new HyperloglogAggregatorFactory(name, fieldName); + } + + @Override + public Object deserialize(Object object) + { + log.debug("class name: [%s]:value [%s]", object.getClass().getName(), object); + + final String k = (String) object; + final byte[] ibmapByte = Base64.decodeBase64(k); + + final ByteBuffer buffer = ByteBuffer.wrap(ibmapByte); + final int keylength = buffer.getInt(); + final int valuelength = buffer.getInt(); + + TIntByteHashMap newIbMap; + + if (keylength == 0) { + newIbMap = new TIntByteHashMap(); + } else { + final int[] keys = new int[keylength]; + final byte[] values = new byte[valuelength]; + + for (int i = 0; i < keylength; i++) { + keys[i] = buffer.getInt(); + } + buffer.get(values); + + newIbMap = new TIntByteHashMap(keys, values); + } + + return newIbMap; + } + + @Override + public Object finalizeComputation(Object object) + { + final TIntByteHashMap ibMap = (TIntByteHashMap) object; + final int[] keys = ibMap.keys(); + final int count = keys.length; + + double registerSum = 0; + double zeros = 0.0; + + for (int key : keys) { + int val = ibMap.get(key); + + registerSum += 1.0 / (1 << val); + + if (val == 0) { + zeros++; + } + } + + registerSum += (HyperloglogAggregator.m - count); + zeros += HyperloglogAggregator.m - count; + + double estimate = HyperloglogAggregator.alphaMM * (1.0 / registerSum); + + if (estimate <= (5.0 / 2.0) * (HyperloglogAggregator.m)) { + // Small Range Estimate + return Math.round(HyperloglogAggregator.m * Math.log(HyperloglogAggregator.m / zeros)); + } else { + return Math.round(estimate); + } + } + + 
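// A hedged summary of the estimator that finalizeComputation() above appears to implement
// (the standard HyperLogLog formulas of Flajolet et al., expressed with this class's constants):
//
//   E_raw = alphaMM / sum_j 2^(-M[j])          (buckets missing from the map count as M[j] = 0)
//   E     = m * ln(m / V)  if E_raw <= 2.5 * m (linear-counting correction for small cardinalities)
//
// where m = 2^log2m is the number of registers and V is the count of zero-valued registers.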
@JsonProperty + public String getFieldName() + { + return fieldName; + } + + @Override + @JsonProperty + public String getName() + { + return name; + } + + @Override + public List requiredFields() + { + return Arrays.asList(fieldName); + } + + @Override + public byte[] getCacheKey() + { + + byte[] fieldNameBytes = fieldName.getBytes(); + return ByteBuffer.allocate(1 + fieldNameBytes.length).put(CACHE_KEY) + .put(fieldNameBytes).array(); + } + + @Override + public String getTypeName() + { + return "hyperloglog"; + } + + @Override + public int getMaxIntermediateSize() + { + return HyperloglogAggregator.m; + } + + @Override + public Object getAggregatorStartValue() + { + return new TIntByteHashMap(); + } +} diff --git a/hll/src/main/java/io/druid/query/aggregation/HyperloglogBufferAggregator.java b/hll/src/main/java/io/druid/query/aggregation/HyperloglogBufferAggregator.java new file mode 100755 index 00000000000..3681fc8bd40 --- /dev/null +++ b/hll/src/main/java/io/druid/query/aggregation/HyperloglogBufferAggregator.java @@ -0,0 +1,94 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.query.aggregation; + +import gnu.trove.map.hash.TIntByteHashMap; +import gnu.trove.procedure.TIntByteProcedure; +import io.druid.segment.ObjectColumnSelector; + +import java.nio.ByteBuffer; + +public class HyperloglogBufferAggregator implements BufferAggregator +{ + private final ObjectColumnSelector selector; + + public HyperloglogBufferAggregator(ObjectColumnSelector selector) + { + this.selector = selector; + } + + /* + * byte 1 key length byte 2 value length byte 3...n key array byte n+1.... 
+ * value array + */ + @Override + public void init(ByteBuffer buf, int position) + { + for (int i = 0; i < HyperloglogAggregator.m; i++) { + buf.put(position + i, (byte) 0); + } + } + + @Override + public void aggregate(ByteBuffer buf, int position) + { + final ByteBuffer fb = buf; + final int fp = position; + final TIntByteHashMap newObj = (TIntByteHashMap) (selector.get()); + + newObj.forEachEntry( + new TIntByteProcedure() + { + public boolean execute(int a, byte b) + { + if (b > fb.get(fp + a)) { + fb.put(fp + a, b); + } + return true; + } + } + ); + } + + @Override + public Object get(ByteBuffer buf, int position) + { + final TIntByteHashMap ret = new TIntByteHashMap(); + + for (int i = 0; i < HyperloglogAggregator.m; i++) { + if (buf.get(position + i) != 0) { + ret.put(i, buf.get(position + i)); + } + } + return ret; + } + + @Override + public float getFloat(ByteBuffer buf, int position) + { + throw new UnsupportedOperationException("HyperloglogAggregator does not support getFloat()"); + } + + @Override + public void close() + { + // do nothing + } +} diff --git a/hll/src/main/java/io/druid/query/aggregation/HyperloglogComplexMetricSerde.java b/hll/src/main/java/io/druid/query/aggregation/HyperloglogComplexMetricSerde.java new file mode 100755 index 00000000000..8ba20b4a458 --- /dev/null +++ b/hll/src/main/java/io/druid/query/aggregation/HyperloglogComplexMetricSerde.java @@ -0,0 +1,137 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +package io.druid.query.aggregation; + +import gnu.trove.map.hash.TIntByteHashMap; +import io.druid.data.input.InputRow; +import io.druid.segment.column.ColumnBuilder; +import io.druid.segment.column.ValueType; +import io.druid.segment.data.GenericIndexed; +import io.druid.segment.data.ObjectStrategy; +import io.druid.segment.serde.ColumnPartSerde; +import io.druid.segment.serde.ComplexColumnPartSerde; +import io.druid.segment.serde.ComplexColumnPartSupplier; +import io.druid.segment.serde.ComplexMetricExtractor; +import io.druid.segment.serde.ComplexMetricSerde; + +import java.nio.ByteBuffer; +import java.util.List; + +public class HyperloglogComplexMetricSerde extends ComplexMetricSerde +{ + @Override + public String getTypeName() + { + return "hyperloglog"; + } + + @Override + public ComplexMetricExtractor getExtractor() + { + return new HyperloglogComplexMetricExtractor(); + } + + @Override + public ColumnPartSerde deserializeColumn(ByteBuffer buffer, ColumnBuilder builder) + { + GenericIndexed column = GenericIndexed.read(buffer, getObjectStrategy()); + builder.setType(ValueType.COMPLEX); + builder.setComplexColumn(new ComplexColumnPartSupplier("hyperloglog", column)); + return new ComplexColumnPartSerde(column, "hyperloglog"); + } + + @Override + public ObjectStrategy getObjectStrategy() + { + return new HyperloglogObjectStrategy(); + } + + public static class HyperloglogObjectStrategy implements ObjectStrategy + { + @Override + public Class getClazz() + { + return TIntByteHashMap.class; + } + + @Override + public TIntByteHashMap fromByteBuffer(ByteBuffer buffer, int numBytes) + { + int keylength = buffer.getInt(); + int valuelength = buffer.getInt(); + if (keylength == 0) { + return new TIntByteHashMap(); + } + int[] keys = new int[keylength]; + byte[] values = new byte[valuelength]; + + for (int i = 0; i < keylength; i++) { + keys[i] = buffer.getInt(); + } + + buffer.get(values); + + TIntByteHashMap tib = new TIntByteHashMap(keys, values); + return tib; + } + + @Override + public byte[] toBytes(TIntByteHashMap val) + { + TIntByteHashMap ibmap = val; + int[] indexesResult = ibmap.keys(); + byte[] valueResult = ibmap.values(); + ByteBuffer buffer = ByteBuffer.allocate(4 * indexesResult.length + valueResult.length + 8); + byte[] result = new byte[4 * indexesResult.length + valueResult.length + 8]; + buffer.putInt((int) indexesResult.length); + buffer.putInt((int) valueResult.length); + for (int i = 0; i < indexesResult.length; i++) { + buffer.putInt(indexesResult[i]); + } + + buffer.put(valueResult); + buffer.flip(); + buffer.get(result); + return result; + } + + @Override + public int compare(TIntByteHashMap o1, TIntByteHashMap o2) + { + return o1.equals(o2) ? 0 : 1; + } + } + + public static class HyperloglogComplexMetricExtractor implements ComplexMetricExtractor + { + @Override + public Class extractedClass() + { + return List.class; + } + + @Override + public Object extractValue(InputRow inputRow, String metricName) + { + return inputRow.getRaw(metricName); + } + } +} + diff --git a/hll/src/main/java/io/druid/query/aggregation/HyperloglogDruidModule.java b/hll/src/main/java/io/druid/query/aggregation/HyperloglogDruidModule.java new file mode 100644 index 00000000000..f7ef5cad748 --- /dev/null +++ b/hll/src/main/java/io/druid/query/aggregation/HyperloglogDruidModule.java @@ -0,0 +1,140 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012 Metamarkets Group Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.query.aggregation; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.Module; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.jsontype.NamedType; +import com.fasterxml.jackson.databind.module.SimpleModule; +import com.google.common.collect.ImmutableList; +import com.google.inject.Binder; +import gnu.trove.map.hash.TIntByteHashMap; +import io.druid.initialization.DruidModule; +import io.druid.segment.serde.ComplexMetrics; +import org.apache.commons.codec.binary.Base64; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; + +/** + */ +public class HyperloglogDruidModule implements DruidModule +{ + @Override + public List getJacksonModules() + { + return ImmutableList.of( + new HyperloglogJacksonSerdeModule().registerSubtypes( + new NamedType(HyperloglogAggregatorFactory.class, "hyperloglog") + ) + ); + } + + @Override + public void configure(Binder binder) + { + if (ComplexMetrics.getSerdeForType("hyperloglog") == null) { + ComplexMetrics.registerSerde("hyperloglog", new HyperloglogComplexMetricSerde()); + } + } + + public static class HyperloglogJacksonSerdeModule extends SimpleModule + { + public HyperloglogJacksonSerdeModule() + { + super("Hyperloglog deserializers"); + + addDeserializer( + TIntByteHashMap.class, + new JsonDeserializer() + { + @Override + public TIntByteHashMap deserialize( + JsonParser jp, + DeserializationContext ctxt + ) throws IOException + { + byte[] ibmapByte = Base64.decodeBase64(jp.getText()); + + ByteBuffer buffer = ByteBuffer.wrap(ibmapByte); + int keylength = buffer.getInt(); + int valuelength = buffer.getInt(); + if (keylength == 0) { + return (new TIntByteHashMap()); + } + int[] keys = new int[keylength]; + byte[] values = new byte[valuelength]; + + for (int i = 0; i < keylength; i++) { + keys[i] = buffer.getInt(); + } + buffer.get(values); + + return (new TIntByteHashMap(keys, values)); + } + } + ); + + addSerializer( + TIntByteHashMap.class, + new JsonSerializer() + { + @Override + public void serialize( + TIntByteHashMap ibmap, + JsonGenerator jsonGenerator, + SerializerProvider serializerProvider + ) + throws IOException, JsonProcessingException + { + int[] indexesResult = ibmap.keys(); + byte[] valueResult = ibmap.values(); + ByteBuffer buffer = ByteBuffer + .allocate( + 4 * indexesResult.length + + valueResult.length + 8 + ); + byte[] result = new byte[4 * indexesResult.length + + valueResult.length + 8]; + 
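// Byte layout written below (it mirrors the deserializer registered above): a 4-byte key count,
// a 4-byte value-array length, each key as a 4-byte int, then the raw register bytes; the whole
// buffer is then Base64-encoded into the JSON string.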
buffer.putInt((int) indexesResult.length); + buffer.putInt((int) valueResult.length); + for (int i = 0; i < indexesResult.length; i++) { + buffer.putInt(indexesResult[i]); + } + + buffer.put(valueResult); + buffer.flip(); + buffer.get(result); + String str = Base64.encodeBase64String(result); + jsonGenerator.writeString(str); + } + } + ); + + } + } +} diff --git a/hll/src/main/resources/META-INF/services/io.druid.initialization.DruidModule b/hll/src/main/resources/META-INF/services/io.druid.initialization.DruidModule new file mode 100644 index 00000000000..75977329c70 --- /dev/null +++ b/hll/src/main/resources/META-INF/services/io.druid.initialization.DruidModule @@ -0,0 +1 @@ +io.druid.query.aggregation.HyperloglogDruidModule \ No newline at end of file diff --git a/hll/src/test/java/io/druid/query/aggregation/HyperloglogAggregatorTest.java b/hll/src/test/java/io/druid/query/aggregation/HyperloglogAggregatorTest.java new file mode 100644 index 00000000000..7d0a8f3c0e6 --- /dev/null +++ b/hll/src/test/java/io/druid/query/aggregation/HyperloglogAggregatorTest.java @@ -0,0 +1,162 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +package io.druid.query.aggregation; + +import gnu.trove.map.hash.TIntByteHashMap; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Comparator; + +public class HyperloglogAggregatorTest +{ + @Test + public void testAggregate() + { + final TestHllComplexMetricSelector selector = new TestHllComplexMetricSelector(); + final HyperloglogAggregatorFactory aggFactory = new HyperloglogAggregatorFactory("billy", "billyG"); + final HyperloglogAggregator agg = new HyperloglogAggregator("billy", selector); + + Assert.assertEquals("billy", agg.getName()); + + Assert.assertEquals(0L, aggFactory.finalizeComputation(agg.get())); + Assert.assertEquals(0L, aggFactory.finalizeComputation(agg.get())); + Assert.assertEquals(0L, aggFactory.finalizeComputation(agg.get())); + + aggregate(selector, agg); + aggregate(selector, agg); + aggregate(selector, agg); + + Assert.assertEquals(3L, aggFactory.finalizeComputation(agg.get())); + Assert.assertEquals(3L, aggFactory.finalizeComputation(agg.get())); + Assert.assertEquals(3L, aggFactory.finalizeComputation(agg.get())); + + aggregate(selector, agg); + aggregate(selector, agg); + + Assert.assertEquals(5L, aggFactory.finalizeComputation(agg.get())); + Assert.assertEquals(5L, aggFactory.finalizeComputation(agg.get())); + } + + @Test + public void testComparator() + { + final TestHllComplexMetricSelector selector = new TestHllComplexMetricSelector(); + final Comparator comp = new HyperloglogAggregatorFactory("billy", "billyG").getComparator(); + final HyperloglogAggregator agg = new HyperloglogAggregator("billy", selector); + + Object first = new TIntByteHashMap((TIntByteHashMap) agg.get()); + agg.aggregate(); + + Assert.assertEquals(0, comp.compare(first, first)); + Assert.assertEquals(0, comp.compare(agg.get(), agg.get())); + Assert.assertEquals(1, comp.compare(agg.get(), first)); + } + + @Test + public void testHighCardinalityAggregate() + { + final TestHllComplexMetricSelector selector = new TestHllComplexMetricSelector(); + final HyperloglogAggregatorFactory aggFactory = new HyperloglogAggregatorFactory("billy", "billyG"); + final HyperloglogAggregator agg = new HyperloglogAggregator("billy", selector); + + final int card = 100000; + + for (int i = 0; i < card; i++) { + aggregate(selector, agg); + } + + Assert.assertEquals(99443L, aggFactory.finalizeComputation(agg.get())); + } + + // Provides a nice printout of error rates as a function of cardinality + //@Test + public void benchmarkAggregation() throws Exception + { + final TestHllComplexMetricSelector selector = new TestHllComplexMetricSelector(); + final HyperloglogAggregatorFactory aggFactory = new HyperloglogAggregatorFactory("billy", "billyG"); + + double error = 0.0d; + int count = 0; + + final int[] valsToCheck = { + 10, 20, 50, 100, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 1000000, 2000000, 10000000, Integer.MAX_VALUE + }; + + for (int numThings : valsToCheck) { + long startTime = System.currentTimeMillis(); + final HyperloglogAggregator agg = new HyperloglogAggregator("billy", selector); + + for (int i = 0; i < numThings; ++i) { + if (i != 0 && i % 100000000 == 0) { + ++count; + error = computeError(error, count, i, (Long) aggFactory.finalizeComputation(agg.get()), startTime); + } + aggregate(selector, agg); + } + + ++count; + error = computeError(error, count, numThings, (Long) aggFactory.finalizeComputation(agg.get()), startTime); + } + } + + //@Test + public void benchmarkCombine() throws Exception + { + int count; + long totalTime = 0; + + final 
TestHllComplexMetricSelector selector = new TestHllComplexMetricSelector(); + TIntByteHashMap combined = new TIntByteHashMap(); + + for (count = 0; count < 1000000; ++count) { + final HyperloglogAggregator agg = new HyperloglogAggregator("billy", selector); + aggregate(selector, agg); + + long start = System.nanoTime(); + combined = (TIntByteHashMap) HyperloglogAggregator.combine(agg.get(), combined); + totalTime += System.nanoTime() - start; + } + System.out.printf("benchmarkCombine took %d ms%n", totalTime / 1000000); + } + + private double computeError(double error, int count, long exactValue, long estimatedValue, long startTime) + { + final double errorThisTime = Math.abs((double) exactValue - estimatedValue) / exactValue; + + error += errorThisTime; + + System.out.printf( + "%,d ==? %,d in %,d millis. actual error[%,f%%], avg. error [%,f%%]%n", + exactValue, + estimatedValue, + System.currentTimeMillis() - startTime, + 100 * errorThisTime, + (error / count) * 100 + ); + return error; + } + + private void aggregate(TestHllComplexMetricSelector selector, HyperloglogAggregator agg) + { + agg.aggregate(); + selector.increment(); + } +} diff --git a/hll/src/test/java/io/druid/query/aggregation/TestHllComplexMetricSelector.java b/hll/src/test/java/io/druid/query/aggregation/TestHllComplexMetricSelector.java new file mode 100644 index 00000000000..2ba241e01ca --- /dev/null +++ b/hll/src/test/java/io/druid/query/aggregation/TestHllComplexMetricSelector.java @@ -0,0 +1,45 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +package io.druid.query.aggregation; + +import io.druid.segment.ObjectColumnSelector; + +public class TestHllComplexMetricSelector implements ObjectColumnSelector +{ + private int index = 0; + + + @Override + public Class classOfObject() + { + return String.class; + } + + @Override + public String get() + { + return String.valueOf(index); + } + + public void increment() + { + ++index; + } +} diff --git a/indexing-hadoop/pom.xml b/indexing-hadoop/pom.xml index daaea363a68..726a106e47c 100644 --- a/indexing-hadoop/pom.xml +++ b/indexing-hadoop/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/DbUpdaterJob.java b/indexing-hadoop/src/main/java/io/druid/indexer/DbUpdaterJob.java index 32e81d26c25..36b67e10c05 100644 --- a/indexing-hadoop/src/main/java/io/druid/indexer/DbUpdaterJob.java +++ b/indexing-hadoop/src/main/java/io/druid/indexer/DbUpdaterJob.java @@ -19,11 +19,9 @@ package io.druid.indexer; -import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableMap; import com.metamx.common.logger.Logger; import io.druid.db.DbConnector; -import io.druid.jackson.DefaultObjectMapper; import io.druid.timeline.DataSegment; import org.joda.time.DateTime; import org.skife.jdbi.v2.Handle; @@ -39,8 +37,6 @@ public class DbUpdaterJob implements Jobby { private static final Logger log = new Logger(DbUpdaterJob.class); - private static final ObjectMapper jsonMapper = new DefaultObjectMapper(); - private final HadoopDruidIndexerConfig config; private final IDBI dbi; @@ -82,7 +78,7 @@ public class DbUpdaterJob implements Jobby .put("partitioned", segment.getShardSpec().getPartitionNum()) .put("version", segment.getVersion()) .put("used", true) - .put("payload", jsonMapper.writeValueAsString(segment)) + .put("payload", HadoopDruidIndexerConfig.jsonMapper.writeValueAsString(segment)) .build() ); diff --git a/indexing-service/pom.xml b/indexing-service/pom.xml index a97c98861cd..1a7881a56ff 100644 --- a/indexing-service/pom.xml +++ b/indexing-service/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT diff --git a/indexing-service/src/main/java/io/druid/indexing/common/task/RealtimeIndexTask.java b/indexing-service/src/main/java/io/druid/indexing/common/task/RealtimeIndexTask.java index 01fa6e69149..7a40035c3e6 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/task/RealtimeIndexTask.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/task/RealtimeIndexTask.java @@ -86,6 +86,9 @@ public class RealtimeIndexTask extends AbstractTask @JsonIgnore private final Period windowPeriod; + @JsonIgnore + private final int maxPendingPersists; + @JsonIgnore private final IndexGranularity segmentGranularity; @@ -106,6 +109,7 @@ public class RealtimeIndexTask extends AbstractTask @JsonProperty("firehose") FirehoseFactory firehoseFactory, @JsonProperty("fireDepartmentConfig") FireDepartmentConfig fireDepartmentConfig, @JsonProperty("windowPeriod") Period windowPeriod, + @JsonProperty("maxPendingPersists") int maxPendingPersists, @JsonProperty("segmentGranularity") IndexGranularity segmentGranularity, @JsonProperty("rejectionPolicy") RejectionPolicyFactory rejectionPolicyFactory ) @@ -113,7 +117,7 @@ public class RealtimeIndexTask extends AbstractTask super( id == null ? 
makeTaskId(schema.getDataSource(), schema.getShardSpec().getPartitionNum(), new DateTime().toString()) - :id, + : id, String.format( "index_realtime_%s", @@ -135,6 +139,9 @@ public class RealtimeIndexTask extends AbstractTask this.firehoseFactory = firehoseFactory; this.fireDepartmentConfig = fireDepartmentConfig; this.windowPeriod = windowPeriod; + this.maxPendingPersists = (maxPendingPersists == 0) + ? RealtimePlumberSchool.DEFAULT_MAX_PENDING_PERSISTS + : maxPendingPersists; this.segmentGranularity = segmentGranularity; this.rejectionPolicyFactory = rejectionPolicyFactory; } @@ -196,6 +203,7 @@ public class RealtimeIndexTask extends AbstractTask new File(toolbox.getTaskWorkDir(), "persist"), segmentGranularity ); + realtimePlumberSchool.setDefaultMaxPendingPersists(maxPendingPersists); final SegmentPublisher segmentPublisher = new TaskActionSegmentPublisher(this, toolbox); diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/AutoScalingStrategy.java b/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/AutoScalingStrategy.java index 392919df039..0c9c7a06bee 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/AutoScalingStrategy.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/AutoScalingStrategy.java @@ -30,6 +30,8 @@ public interface AutoScalingStrategy public AutoScalingData terminate(List ips); + public AutoScalingData terminateWithIds(List ids); + /** * Provides a lookup of ip addresses to node ids * @param ips - nodes IPs diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/EC2AutoScalingStrategy.java b/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/EC2AutoScalingStrategy.java index b59f3d1e74e..9081e7323d1 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/EC2AutoScalingStrategy.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/EC2AutoScalingStrategy.java @@ -155,32 +155,15 @@ public class EC2AutoScalingStrategy implements AutoScalingStrategy } try { - log.info("Terminating instance[%s]", instances); - amazonEC2Client.terminateInstances( - new TerminateInstancesRequest( - Lists.transform( - instances, - new Function() - { - @Override - public String apply(Instance input) - { - return input.getInstanceId(); - } - } - ) - ) - ); - - return new AutoScalingData( + return terminateWithIds( Lists.transform( - ips, - new Function() + instances, + new Function() { @Override - public String apply(@Nullable String input) + public String apply(Instance input) { - return String.format("%s:%s", input, config.getWorkerPort()); + return input.getInstanceId(); } } ) @@ -193,6 +176,28 @@ public class EC2AutoScalingStrategy implements AutoScalingStrategy return null; } + @Override + public AutoScalingData terminateWithIds(List ids) + { + if (ids.isEmpty()) { + return new AutoScalingData(Lists.newArrayList()); + } + + try { + log.info("Terminating instances[%s]", ids); + amazonEC2Client.terminateInstances( + new TerminateInstancesRequest(ids) + ); + + return new AutoScalingData(ids); + } + catch (Exception e) { + log.error(e, "Unable to terminate any instances."); + } + + return null; + } + @Override public List ipToIdLookup(List ips) { diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/NoopAutoScalingStrategy.java b/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/NoopAutoScalingStrategy.java index 893f69ca9f4..8c3c14ca336 100644 --- 
a/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/NoopAutoScalingStrategy.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/NoopAutoScalingStrategy.java @@ -44,6 +44,13 @@ public class NoopAutoScalingStrategy implements AutoScalingStrategy return null; } + @Override + public AutoScalingData terminateWithIds(List ids) + { + log.info("If I were a real strategy I'd terminate %s now", ids); + return null; + } + @Override public List ipToIdLookup(List ips) { diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/SimpleResourceManagementStrategy.java b/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/SimpleResourceManagementStrategy.java index 10e084b3c9e..76b752a707a 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/SimpleResourceManagementStrategy.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/scaling/SimpleResourceManagementStrategy.java @@ -132,8 +132,7 @@ public class SimpleResourceManagementStrategy implements ResourceManagementStrat .addData("provisioningCount", currentlyProvisioning.size()) .emit(); - List nodeIps = autoScalingStrategy.idToIpLookup(Lists.newArrayList(currentlyProvisioning)); - autoScalingStrategy.terminate(nodeIps); + autoScalingStrategy.terminateWithIds(Lists.newArrayList(currentlyProvisioning)); currentlyProvisioning.clear(); } } diff --git a/indexing-service/src/test/java/io/druid/indexing/common/TestRealtimeTask.java b/indexing-service/src/test/java/io/druid/indexing/common/TestRealtimeTask.java index 178cae10513..ba3a7787289 100644 --- a/indexing-service/src/test/java/io/druid/indexing/common/TestRealtimeTask.java +++ b/indexing-service/src/test/java/io/druid/indexing/common/TestRealtimeTask.java @@ -51,6 +51,7 @@ public class TestRealtimeTask extends RealtimeIndexTask null, null, null, + 1, null, null ); diff --git a/indexing-service/src/test/java/io/druid/indexing/common/task/TaskSerdeTest.java b/indexing-service/src/test/java/io/druid/indexing/common/task/TaskSerdeTest.java index 77ae0af4b52..e9ace7ac18a 100644 --- a/indexing-service/src/test/java/io/druid/indexing/common/task/TaskSerdeTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/common/task/TaskSerdeTest.java @@ -198,6 +198,7 @@ public class TaskSerdeTest null, null, new Period("PT10M"), + 1, IndexGranularity.HOUR, null ); diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/scaling/EC2AutoScalingStrategyTest.java b/indexing-service/src/test/java/io/druid/indexing/overlord/scaling/EC2AutoScalingStrategyTest.java index e6cd52c80ac..1fd3510b45a 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/scaling/EC2AutoScalingStrategyTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/scaling/EC2AutoScalingStrategyTest.java @@ -131,6 +131,6 @@ public class EC2AutoScalingStrategyTest AutoScalingData deleted = strategy.terminate(Arrays.asList("dummyIP")); Assert.assertEquals(deleted.getNodeIds().size(), 1); - Assert.assertEquals(String.format("%s:8080", IP), deleted.getNodeIds().get(0)); + Assert.assertEquals(INSTANCE_ID, deleted.getNodeIds().get(0)); } } diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/scaling/SimpleResourceManagementStrategyTest.java b/indexing-service/src/test/java/io/druid/indexing/overlord/scaling/SimpleResourceManagementStrategyTest.java index 6ffc6ae6222..aa41656aa90 100644 --- 
a/indexing-service/src/test/java/io/druid/indexing/overlord/scaling/SimpleResourceManagementStrategyTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/scaling/SimpleResourceManagementStrategyTest.java @@ -187,9 +187,7 @@ public class SimpleResourceManagementStrategyTest EasyMock.expect(autoScalingStrategy.ipToIdLookup(EasyMock.>anyObject())) .andReturn(Lists.newArrayList()).times(2); - EasyMock.expect(autoScalingStrategy.idToIpLookup(EasyMock.>anyObject())) - .andReturn(Lists.newArrayList()); - EasyMock.expect(autoScalingStrategy.terminate(EasyMock.>anyObject())) + EasyMock.expect(autoScalingStrategy.terminateWithIds(EasyMock.>anyObject())) .andReturn(null); EasyMock.expect(autoScalingStrategy.provision()).andReturn( new AutoScalingData(Lists.newArrayList("fake")) diff --git a/indexing-service/src/test/java/io/druid/indexing/worker/TaskAnnouncementTest.java b/indexing-service/src/test/java/io/druid/indexing/worker/TaskAnnouncementTest.java index e4c16b11c28..303780de3b2 100644 --- a/indexing-service/src/test/java/io/druid/indexing/worker/TaskAnnouncementTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/worker/TaskAnnouncementTest.java @@ -46,6 +46,7 @@ public class TaskAnnouncementTest null, null, new Period("PT10M"), + 1, IndexGranularity.HOUR, null ); diff --git a/kafka-eight/pom.xml b/kafka-eight/pom.xml index 2126d975fd9..5ebec47b519 100644 --- a/kafka-eight/pom.xml +++ b/kafka-eight/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT diff --git a/kafka-seven/pom.xml b/kafka-seven/pom.xml index c174067823c..6a26af7e73a 100644 --- a/kafka-seven/pom.xml +++ b/kafka-seven/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT @@ -39,7 +39,7 @@ kafka core-kafka - 0.7.2-mmx1 + 0.7.2-mmx4 log4j diff --git a/pom.xml b/pom.xml index 395ff15927f..990a1a75e5c 100644 --- a/pom.xml +++ b/pom.xml @@ -23,7 +23,7 @@ io.druid druid pom - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT druid druid @@ -41,7 +41,7 @@ UTF-8 0.25.2 2.3.0 - 0.1.7 + 0.1.8 @@ -59,6 +59,7 @@ kafka-seven kafka-eight rabbitmq + hll @@ -217,47 +218,47 @@ com.fasterxml.jackson.core jackson-annotations - 2.2.2 + 2.2.3 com.fasterxml.jackson.core jackson-core - 2.2.2 + 2.2.3 com.fasterxml.jackson.core jackson-databind - 2.2.2 + 2.2.3 com.fasterxml.jackson.datatype jackson-datatype-guava - 2.2.2 + 2.2.3 com.fasterxml.jackson.datatype jackson-datatype-joda - 2.2.2 + 2.2.3 com.fasterxml.jackson.dataformat jackson-dataformat-smile - 2.2.2 + 2.2.3 com.fasterxml.jackson.jaxrs jackson-jaxrs-json-provider - 2.2.2 + 2.2.3 org.codehaus.jackson jackson-core-asl - 1.9.11 + 1.9.13 org.codehaus.jackson jackson-mapper-asl - 1.9.11 + 1.9.13 org.hibernate diff --git a/processing/pom.xml b/processing/pom.xml index e105c320955..e12371ac4c1 100644 --- a/processing/pom.xml +++ b/processing/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT diff --git a/processing/src/main/java/io/druid/query/Query.java b/processing/src/main/java/io/druid/query/Query.java index f4ad958e75d..39da69c2b40 100644 --- a/processing/src/main/java/io/druid/query/Query.java +++ b/processing/src/main/java/io/druid/query/Query.java @@ -25,6 +25,7 @@ import com.metamx.common.guava.Sequence; import io.druid.query.groupby.GroupByQuery; import io.druid.query.metadata.metadata.SegmentMetadataQuery; import io.druid.query.search.search.SearchQuery; +import io.druid.query.select.SelectQuery; import io.druid.query.spec.QuerySegmentSpec; import io.druid.query.timeboundary.TimeBoundaryQuery; import 
io.druid.query.timeseries.TimeseriesQuery; @@ -42,6 +43,7 @@ import java.util.Map; @JsonSubTypes.Type(name = Query.TIME_BOUNDARY, value = TimeBoundaryQuery.class), @JsonSubTypes.Type(name = Query.GROUP_BY, value = GroupByQuery.class), @JsonSubTypes.Type(name = Query.SEGMENT_METADATA, value = SegmentMetadataQuery.class), + @JsonSubTypes.Type(name = Query.SELECT, value = SelectQuery.class), @JsonSubTypes.Type(name = Query.TOPN, value = TopNQuery.class) }) public interface Query @@ -51,6 +53,7 @@ public interface Query public static final String TIME_BOUNDARY = "timeBoundary"; public static final String GROUP_BY = "groupBy"; public static final String SEGMENT_METADATA = "segmentMetadata"; + public static final String SELECT = "select"; public static final String TOPN = "topN"; public String getDataSource(); diff --git a/processing/src/main/java/io/druid/query/select/EventHolder.java b/processing/src/main/java/io/druid/query/select/EventHolder.java new file mode 100644 index 00000000000..1ac3661d1f5 --- /dev/null +++ b/processing/src/main/java/io/druid/query/select/EventHolder.java @@ -0,0 +1,117 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.query.select; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.collect.Maps; +import org.joda.time.DateTime; + +import java.util.Map; + +/** + */ +public class EventHolder +{ + public static final String timestampKey = "timestamp"; + + private final String segmentId; + private final int offset; + private final Map event; + + @JsonCreator + public EventHolder( + @JsonProperty("segmentId") String segmentId, + @JsonProperty("offset") int offset, + @JsonProperty("event") Map event + ) + { + this.segmentId = segmentId; + this.offset = offset; + this.event = event; + } + + public DateTime getTimestamp() + { + return (DateTime) event.get(timestampKey); + } + + @JsonProperty + public String getSegmentId() + { + return segmentId; + } + + @JsonProperty + public int getOffset() + { + return offset; + } + + @JsonProperty + public Map getEvent() + { + return event; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + EventHolder that = (EventHolder) o; + + if (offset != that.offset) { + return false; + } + if (!Maps.difference(event, ((EventHolder) o).event).areEqual()) { + return false; + } + if (segmentId != null ? !segmentId.equals(that.segmentId) : that.segmentId != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() + { + int result = segmentId != null ? 
segmentId.hashCode() : 0; + result = 31 * result + offset; + result = 31 * result + (event != null ? event.hashCode() : 0); + return result; + } + + @Override + public String toString() + { + return "EventHolder{" + + "segmentId='" + segmentId + '\'' + + ", offset=" + offset + + ", event=" + event + + '}'; + } +} diff --git a/processing/src/main/java/io/druid/query/select/PagingSpec.java b/processing/src/main/java/io/druid/query/select/PagingSpec.java new file mode 100644 index 00000000000..7be4cf62746 --- /dev/null +++ b/processing/src/main/java/io/druid/query/select/PagingSpec.java @@ -0,0 +1,100 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.query.select; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.primitives.Ints; + +import java.nio.ByteBuffer; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + */ +public class PagingSpec +{ + private final LinkedHashMap pagingIdentifiers; + private final int threshold; + + @JsonCreator + public PagingSpec( + @JsonProperty("pagingIdentifiers") LinkedHashMap pagingIdentifiers, + @JsonProperty("threshold") int threshold + ) + { + this.pagingIdentifiers = pagingIdentifiers; + this.threshold = threshold; + } + + @JsonProperty + public Map getPagingIdentifiers() + { + return pagingIdentifiers; + } + + @JsonProperty + public int getThreshold() + { + return threshold; + } + + public byte[] getCacheKey() + { + final byte[][] pagingKeys = new byte[pagingIdentifiers.size()][]; + final byte[][] pagingValues = new byte[pagingIdentifiers.size()][]; + + int index = 0; + int pagingKeysSize = 0; + int pagingValuesSize = 0; + for (Map.Entry entry : pagingIdentifiers.entrySet()) { + pagingKeys[index] = entry.getKey().getBytes(); + pagingValues[index] = ByteBuffer.allocate(Ints.BYTES).putInt(entry.getValue()).array(); + pagingKeysSize += pagingKeys[index].length; + pagingValuesSize += Ints.BYTES; + index++; + } + + final byte[] thresholdBytes = ByteBuffer.allocate(Ints.BYTES).putInt(threshold).array(); + + final ByteBuffer queryCacheKey = ByteBuffer.allocate(pagingKeysSize + pagingValuesSize + thresholdBytes.length); + + for (byte[] pagingKey : pagingKeys) { + queryCacheKey.put(pagingKey); + } + + for (byte[] pagingValue : pagingValues) { + queryCacheKey.put(pagingValue); + } + + queryCacheKey.put(thresholdBytes); + + return queryCacheKey.array(); + } + + @Override + public String toString() + { + return "PagingSpec{" + + "pagingIdentifiers=" + pagingIdentifiers + + ", threshold=" + threshold + + '}'; + } +} diff --git a/processing/src/main/java/io/druid/query/select/SelectBinaryFn.java b/processing/src/main/java/io/druid/query/select/SelectBinaryFn.java new file mode 
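PagingSpec, added above, is what makes select queries resumable: pagingIdentifiers maps a segment identifier to the row offset to resume the scan from in that segment, and threshold caps how many rows come back per page. A small construction sketch, assuming druid-processing is on the classpath; the segment id string is illustrative.

    import io.druid.query.select.PagingSpec;
    import java.util.LinkedHashMap;

    class PagingSpecExample
    {
      public static void main(String[] args)
      {
        // First page: no identifiers yet, up to 3 rows per page.
        PagingSpec firstPage = new PagingSpec(null, 3);

        // Next page: resume each segment from the offset recorded in the previous result.
        LinkedHashMap<String, Integer> identifiers = new LinkedHashMap<String, Integer>();
        identifiers.put("testSegment", 2); // segment id -> row offset to resume from
        PagingSpec nextPage = new PagingSpec(identifiers, 3);

        System.out.println(firstPage + " then " + nextPage);
      }
    }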
100644 index 00000000000..7de46c26b6a --- /dev/null +++ b/processing/src/main/java/io/druid/query/select/SelectBinaryFn.java @@ -0,0 +1,77 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.query.select; + +import com.metamx.common.guava.nary.BinaryFn; +import io.druid.granularity.AllGranularity; +import io.druid.granularity.QueryGranularity; +import io.druid.query.Result; +import org.joda.time.DateTime; + +/** + */ +public class SelectBinaryFn + implements BinaryFn, Result, Result> +{ + private final QueryGranularity gran; + private final PagingSpec pagingSpec; + + public SelectBinaryFn( + QueryGranularity granularity, + PagingSpec pagingSpec + ) + { + this.gran = granularity; + this.pagingSpec = pagingSpec; + } + + @Override + public Result apply( + Result arg1, Result arg2 + ) + { + if (arg1 == null) { + return arg2; + } + + if (arg2 == null) { + return arg1; + } + + final DateTime timestamp = (gran instanceof AllGranularity) + ? arg1.getTimestamp() + : gran.toDateTime(gran.truncate(arg1.getTimestamp().getMillis())); + + SelectResultValueBuilder builder = new SelectResultValueBuilder(timestamp, pagingSpec.getThreshold()); + + SelectResultValue arg1Val = arg1.getValue(); + SelectResultValue arg2Val = arg2.getValue(); + + for (EventHolder event : arg1Val) { + builder.addEntry(event); + } + + for (EventHolder event : arg2Val) { + builder.addEntry(event); + } + + return builder.build(); + } +} diff --git a/processing/src/main/java/io/druid/query/select/SelectQuery.java b/processing/src/main/java/io/druid/query/select/SelectQuery.java new file mode 100644 index 00000000000..8c5eb2ba59f --- /dev/null +++ b/processing/src/main/java/io/druid/query/select/SelectQuery.java @@ -0,0 +1,149 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
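SelectBinaryFn above is the pairwise merge used when combining partial per-segment results: it buckets the first result's timestamp into the query granularity, pours both results' events into a SelectResultValueBuilder, and lets the paging threshold trim the combined set. A sketch of invoking it on two partial results, mirroring SelectBinaryFnTest further down and assuming druid-processing is on the classpath:

    import io.druid.granularity.QueryGranularity;
    import io.druid.query.Result;
    import io.druid.query.select.PagingSpec;
    import io.druid.query.select.SelectBinaryFn;
    import io.druid.query.select.SelectResultValue;

    class SelectMergeExample
    {
      static Result<SelectResultValue> merge(
          Result<SelectResultValue> fromSegmentA,
          Result<SelectResultValue> fromSegmentB
      )
      {
        // ALL granularity keeps the first result's timestamp; threshold 5 caps the merged page.
        SelectBinaryFn fn = new SelectBinaryFn(QueryGranularity.ALL, new PagingSpec(null, 5));
        return fn.apply(fromSegmentA, fromSegmentB);
      }
    }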
+ */ + +package io.druid.query.select; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import io.druid.granularity.QueryGranularity; +import io.druid.query.BaseQuery; +import io.druid.query.Query; +import io.druid.query.Result; +import io.druid.query.filter.DimFilter; +import io.druid.query.spec.QuerySegmentSpec; + +import java.util.List; +import java.util.Map; + +/** + */ +@JsonTypeName("select") +public class SelectQuery extends BaseQuery> +{ + private final DimFilter dimFilter; + private final QueryGranularity granularity; + private final List dimensions; + private final List metrics; + private final PagingSpec pagingSpec; + + @JsonCreator + public SelectQuery( + @JsonProperty("dataSource") String dataSource, + @JsonProperty("intervals") QuerySegmentSpec querySegmentSpec, + @JsonProperty("filter") DimFilter dimFilter, + @JsonProperty("granularity") QueryGranularity granularity, + @JsonProperty("dimensions") List dimensions, + @JsonProperty("metrics") List metrics, + @JsonProperty("pagingSpec") PagingSpec pagingSpec, + @JsonProperty("context") Map context + ) + { + super(dataSource, querySegmentSpec, context); + this.dimFilter = dimFilter; + this.granularity = granularity; + this.dimensions = dimensions; + this.metrics = metrics; + this.pagingSpec = pagingSpec; + } + + @Override + public boolean hasFilters() + { + return dimFilter != null; + } + + @Override + public String getType() + { + return Query.SELECT; + } + + @JsonProperty("filter") + public DimFilter getDimensionsFilter() + { + return dimFilter; + } + + @JsonProperty + public QueryGranularity getGranularity() + { + return granularity; + } + + @JsonProperty + public List getDimensions() + { + return dimensions; + } + + @JsonProperty + public PagingSpec getPagingSpec() + { + return pagingSpec; + } + + @JsonProperty + public List getMetrics() + { + return metrics; + } + + public SelectQuery withQuerySegmentSpec(QuerySegmentSpec querySegmentSpec) + { + return new SelectQuery( + getDataSource(), + querySegmentSpec, + dimFilter, + granularity, + dimensions, + metrics, + pagingSpec, + getContext() + ); + } + + public SelectQuery withOverriddenContext(Map contextOverrides) + { + return new SelectQuery( + getDataSource(), + getQuerySegmentSpec(), + dimFilter, + granularity, + dimensions, + metrics, + pagingSpec, + computeOverridenContext(contextOverrides) + ); + } + + @Override + public String toString() + { + return "SelectQuery{" + + "dataSource='" + getDataSource() + '\'' + + ", querySegmentSpec=" + getQuerySegmentSpec() + + ", dimFilter=" + dimFilter + + ", granularity=" + granularity + + ", dimensions=" + dimensions + + ", metrics=" + metrics + + ", pagingSpec=" + pagingSpec + + '}'; + } +} diff --git a/processing/src/main/java/io/druid/query/select/SelectQueryEngine.java b/processing/src/main/java/io/druid/query/select/SelectQueryEngine.java new file mode 100644 index 00000000000..3238ac01f7a --- /dev/null +++ b/processing/src/main/java/io/druid/query/select/SelectQueryEngine.java @@ -0,0 +1,167 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
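For reference, this is roughly what constructing the new query type looks like from Java, using the constructor just defined. This is a sketch under assumptions: the interval literal and dimension names are placeholders, the LegacySegmentSpec wiring mirrors the test imports below, and druid-processing plus joda-time are assumed on the classpath.

    import io.druid.granularity.QueryGranularity;
    import io.druid.query.select.PagingSpec;
    import io.druid.query.select.SelectQuery;
    import io.druid.query.spec.LegacySegmentSpec;
    import org.joda.time.Interval;
    import java.util.Arrays;

    class SelectQueryExample
    {
      static SelectQuery firstPage()
      {
        return new SelectQuery(
            "testing",                                        // dataSource
            new LegacySegmentSpec(new Interval("2011-01-12/2011-01-14")), // intervals (placeholder)
            null,                                             // filter
            QueryGranularity.ALL,                             // granularity
            Arrays.asList("quality", "placement"),            // dimensions (null/empty = all)
            Arrays.<String>asList(),                          // metrics (null/empty = all)
            new PagingSpec(null, 3),                          // first page, 3 rows
            null                                              // context
        );
      }
    }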
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.query.select; + +import com.google.common.base.Function; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.metamx.common.guava.BaseSequence; +import com.metamx.common.guava.Sequence; +import io.druid.query.QueryRunnerHelper; +import io.druid.query.Result; +import io.druid.segment.Cursor; +import io.druid.segment.DimensionSelector; +import io.druid.segment.ObjectColumnSelector; +import io.druid.segment.Segment; +import io.druid.segment.StorageAdapter; +import io.druid.segment.TimestampColumnSelector; +import io.druid.segment.data.IndexedInts; +import io.druid.segment.filter.Filters; +import org.joda.time.DateTime; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + */ +public class SelectQueryEngine +{ + public Sequence> process(final SelectQuery query, final Segment segment) + { + return new BaseSequence<>( + new BaseSequence.IteratorMaker, Iterator>>() + { + @Override + public Iterator> make() + { + final StorageAdapter adapter = segment.asStorageAdapter(); + + final Iterable dims; + if (query.getDimensions() == null || query.getDimensions().isEmpty()) { + dims = adapter.getAvailableDimensions(); + } else { + dims = query.getDimensions(); + } + + final Iterable metrics; + if (query.getMetrics() == null || query.getMetrics().isEmpty()) { + metrics = adapter.getAvailableMetrics(); + } else { + metrics = query.getMetrics(); + } + + return QueryRunnerHelper.makeCursorBasedQuery( + adapter, + query.getQuerySegmentSpec().getIntervals(), + Filters.convertDimensionFilters(query.getDimensionsFilter()), + query.getGranularity(), + new Function>() + { + @Override + public Result apply(Cursor cursor) + { + final SelectResultValueBuilder builder = new SelectResultValueBuilder( + cursor.getTime(), + query.getPagingSpec() + .getThreshold() + ); + + final TimestampColumnSelector timestampColumnSelector = cursor.makeTimestampColumnSelector(); + + final Map dimSelectors = Maps.newHashMap(); + for (String dim : dims) { + final DimensionSelector dimSelector = cursor.makeDimensionSelector(dim); + dimSelectors.put(dim, dimSelector); + } + + final Map metSelectors = Maps.newHashMap(); + for (String metric : metrics) { + final ObjectColumnSelector metricSelector = cursor.makeObjectColumnSelector(metric); + metSelectors.put(metric, metricSelector); + } + + int startOffset; + if (query.getPagingSpec().getPagingIdentifiers() == null) { + startOffset = 0; + } else { + Integer offset = query.getPagingSpec().getPagingIdentifiers().get(segment.getIdentifier()); + startOffset = (offset == null) ? 
0 : offset; + } + + cursor.advanceTo(startOffset); + + int offset = 0; + while (!cursor.isDone() && offset < query.getPagingSpec().getThreshold()) { + final Map theEvent = Maps.newLinkedHashMap(); + theEvent.put(EventHolder.timestampKey, new DateTime(timestampColumnSelector.getTimestamp())); + + for (Map.Entry dimSelector : dimSelectors.entrySet()) { + final String dim = dimSelector.getKey(); + final DimensionSelector selector = dimSelector.getValue(); + final IndexedInts vals = selector.getRow(); + + if (vals.size() == 1) { + final String dimVal = selector.lookupName(vals.get(0)); + theEvent.put(dim, dimVal); + } else { + List dimVals = Lists.newArrayList(); + for (int i = 0; i < vals.size(); ++i) { + dimVals.add(selector.lookupName(vals.get(i))); + } + theEvent.put(dim, dimVals); + } + } + + for (Map.Entry metSelector : metSelectors.entrySet()) { + final String metric = metSelector.getKey(); + final ObjectColumnSelector selector = metSelector.getValue(); + theEvent.put(metric, selector.get()); + } + + builder.addEntry( + new EventHolder( + segment.getIdentifier(), + startOffset + offset, + theEvent + ) + ); + cursor.advance(); + offset++; + } + + return builder.build(); + } + } + ).iterator(); + } + + @Override + public void cleanup(Iterator> toClean) + { + // https://github.com/metamx/druid/issues/128 + while (toClean.hasNext()) { + toClean.next(); + } + } + } + ); + } +} diff --git a/processing/src/main/java/io/druid/query/select/SelectQueryQueryToolChest.java b/processing/src/main/java/io/druid/query/select/SelectQueryQueryToolChest.java new file mode 100644 index 00000000000..9e5a365479c --- /dev/null +++ b/processing/src/main/java/io/druid/query/select/SelectQueryQueryToolChest.java @@ -0,0 +1,291 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
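The core of the engine's paging behavior above is: look up where the previous page stopped for this segment, skip the cursor forward with advanceTo, then emit at most threshold rows while recording absolute offsets. A stripped-down sketch of just that control flow, with simplified names rather than the actual engine classes:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    class PagingSketch
    {
      // rows stands in for a cursor over one segment; identifiers is pagingSpec.getPagingIdentifiers().
      static List<String> page(List<String> rows, Map<String, Integer> identifiers,
                               String segmentId, int threshold)
      {
        int startOffset = (identifiers == null || identifiers.get(segmentId) == null)
                          ? 0 : identifiers.get(segmentId);
        List<String> out = new ArrayList<String>();
        // cursor.advanceTo(startOffset) followed by at most `threshold` reads
        for (int offset = 0; offset < threshold && startOffset + offset < rows.size(); offset++) {
          // the EventHolder would carry (segmentId, startOffset + offset, event)
          out.add(rows.get(startOffset + offset));
        }
        return out;
      }
    }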
+ */ + +package io.druid.query.select; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Function; +import com.google.common.base.Functions; +import com.google.common.base.Joiner; +import com.google.common.collect.Ordering; +import com.google.common.collect.Sets; +import com.google.inject.Inject; +import com.metamx.common.guava.MergeSequence; +import com.metamx.common.guava.Sequence; +import com.metamx.common.guava.nary.BinaryFn; +import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.collections.OrderedMergeSequence; +import io.druid.granularity.QueryGranularity; +import io.druid.query.CacheStrategy; +import io.druid.query.IntervalChunkingQueryRunner; +import io.druid.query.Query; +import io.druid.query.QueryConfig; +import io.druid.query.QueryRunner; +import io.druid.query.QueryToolChest; +import io.druid.query.Result; +import io.druid.query.ResultGranularTimestampComparator; +import io.druid.query.ResultMergeQueryRunner; +import io.druid.query.aggregation.MetricManipulationFn; +import io.druid.query.filter.DimFilter; +import org.joda.time.DateTime; +import org.joda.time.Interval; +import org.joda.time.Minutes; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + */ +public class SelectQueryQueryToolChest extends QueryToolChest, SelectQuery> +{ + private static final byte SELECT_QUERY = 0x13; + + private static final Joiner COMMA_JOIN = Joiner.on(","); + private static final TypeReference OBJECT_TYPE_REFERENCE = + new TypeReference() + { + }; + + private static final TypeReference> TYPE_REFERENCE = + new TypeReference>() + { + }; + + private final QueryConfig config; + private final ObjectMapper jsonMapper; + + @Inject + public SelectQueryQueryToolChest(QueryConfig config, ObjectMapper jsonMapper) + { + this.config = config; + this.jsonMapper = jsonMapper; + } + + @Override + public QueryRunner> mergeResults(QueryRunner> queryRunner) + { + return new ResultMergeQueryRunner>(queryRunner) + { + @Override + protected Ordering> makeOrdering(Query> query) + { + return Ordering.from( + new ResultGranularTimestampComparator( + ((SelectQuery) query).getGranularity() + ) + ); + } + + @Override + protected BinaryFn, Result, Result> createMergeFn( + Query> input + ) + { + SelectQuery query = (SelectQuery) input; + return new SelectBinaryFn( + query.getGranularity(), + query.getPagingSpec() + ); + } + }; + } + + @Override + public Sequence> mergeSequences(Sequence>> seqOfSequences) + { + return new OrderedMergeSequence>(getOrdering(), seqOfSequences); + } + + @Override + public ServiceMetricEvent.Builder makeMetricBuilder(SelectQuery query) + { + int numMinutes = 0; + for (Interval interval : query.getIntervals()) { + numMinutes += Minutes.minutesIn(interval).getMinutes(); + } + + return new ServiceMetricEvent.Builder() + .setUser2(query.getDataSource()) + .setUser4("Select") + .setUser5(COMMA_JOIN.join(query.getIntervals())) + .setUser6(String.valueOf(query.hasFilters())) + .setUser9(Minutes.minutes(numMinutes).toString()); + } + + @Override + public Function, Result> makeMetricManipulatorFn( + final SelectQuery query, final MetricManipulationFn fn + ) + { + return Functions.identity(); + } + + @Override + public TypeReference> getResultTypeReference() + { + return TYPE_REFERENCE; + } + + @Override + public CacheStrategy, Object, SelectQuery> getCacheStrategy(final SelectQuery query) 
+ { + return new CacheStrategy, Object, SelectQuery>() + { + @Override + public byte[] computeCacheKey(SelectQuery query) + { + final DimFilter dimFilter = query.getDimensionsFilter(); + final byte[] filterBytes = dimFilter == null ? new byte[]{} : dimFilter.getCacheKey(); + final byte[] granularityBytes = query.getGranularity().cacheKey(); + + final Set dimensions = Sets.newTreeSet(); + if (query.getDimensions() != null) { + dimensions.addAll(query.getDimensions()); + } + + final byte[][] dimensionsBytes = new byte[dimensions.size()][]; + int dimensionsBytesSize = 0; + int index = 0; + for (String dimension : dimensions) { + dimensionsBytes[index] = dimension.getBytes(); + dimensionsBytesSize += dimensionsBytes[index].length; + ++index; + } + + + final Set metrics = Sets.newTreeSet(); + if (query.getMetrics() != null) { + metrics.addAll(query.getMetrics()); + } + + final byte[][] metricBytes = new byte[metrics.size()][]; + int metricBytesSize = 0; + index = 0; + for (String metric : metrics) { + metricBytes[index] = metric.getBytes(); + metricBytesSize += metricBytes[index].length; + ++index; + } + + final ByteBuffer queryCacheKey = ByteBuffer + .allocate( + 1 + + granularityBytes.length + + filterBytes.length + + query.getPagingSpec().getCacheKey().length + + dimensionsBytesSize + + metricBytesSize + ) + .put(SELECT_QUERY) + .put(granularityBytes) + .put(filterBytes) + .put(query.getPagingSpec().getCacheKey()); + + for (byte[] dimensionsByte : dimensionsBytes) { + queryCacheKey.put(dimensionsByte); + } + + for (byte[] metricByte : metricBytes) { + queryCacheKey.put(metricByte); + } + + return queryCacheKey.array(); + } + + @Override + public TypeReference getCacheObjectClazz() + { + return OBJECT_TYPE_REFERENCE; + } + + @Override + public Function, Object> prepareForCache() + { + return new Function, Object>() + { + @Override + public Object apply(final Result input) + { + return Arrays.asList( + input.getTimestamp().getMillis(), + input.getValue().getPagingIdentifiers(), + input.getValue().getEvents() + ); + } + }; + } + + @Override + public Function> pullFromCache() + { + return new Function>() + { + private final QueryGranularity granularity = query.getGranularity(); + + @Override + public Result apply(Object input) + { + List results = (List) input; + Iterator resultIter = results.iterator(); + + DateTime timestamp = granularity.toDateTime(((Number) resultIter.next()).longValue()); + + return new Result( + timestamp, + new SelectResultValue( + (Map) jsonMapper.convertValue( + resultIter.next(), new TypeReference>() + { + } + ), + (List) jsonMapper.convertValue( + resultIter.next(), new TypeReference>() + { + } + ) + ) + ); + } + }; + } + + @Override + public Sequence> mergeSequences(Sequence>> seqOfSequences) + { + return new MergeSequence>(getOrdering(), seqOfSequences); + } + }; + } + + @Override + public QueryRunner> preMergeQueryDecoration(QueryRunner> runner) + { + return new IntervalChunkingQueryRunner>(runner, config.getChunkPeriod()); + } + + public Ordering> getOrdering() + { + return Ordering.natural(); + } +} diff --git a/processing/src/main/java/io/druid/query/select/SelectQueryRunnerFactory.java b/processing/src/main/java/io/druid/query/select/SelectQueryRunnerFactory.java new file mode 100644 index 00000000000..6e995b15f44 --- /dev/null +++ b/processing/src/main/java/io/druid/query/select/SelectQueryRunnerFactory.java @@ -0,0 +1,106 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc.
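The cache strategy above stores each select result as a plain three-element list and rebuilds it on the way out, so the cached form does not depend on the Result or SelectResultValue classes themselves. A sketch of the shape of that cached value; the literal values are illustrative only.

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    class CachedSelectShape
    {
      public static void main(String[] args)
      {
        // prepareForCache() emits: [ timestampMillis, pagingIdentifiers, events ]
        List<Object> cached = Arrays.<Object>asList(
            1294790400000L,                              // result timestamp in millis
            Collections.singletonMap("testSegment", 2),  // segment id -> last offset
            Collections.emptyList()                      // list of event maps
        );
        // pullFromCache() walks the same list in order: timestamp, then
        // pagingIdentifiers, then events (the last two via jsonMapper.convertValue).
        System.out.println(cached);
      }
    }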
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.query.select; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.inject.Inject; +import com.metamx.common.ISE; +import com.metamx.common.guava.Sequence; +import io.druid.query.ChainedExecutionQueryRunner; +import io.druid.query.Query; +import io.druid.query.QueryConfig; +import io.druid.query.QueryRunner; +import io.druid.query.QueryRunnerFactory; +import io.druid.query.QueryToolChest; +import io.druid.query.Result; +import io.druid.segment.Segment; + +import java.util.concurrent.ExecutorService; + +/** + */ +public class SelectQueryRunnerFactory + implements QueryRunnerFactory, SelectQuery> +{ + public static SelectQueryRunnerFactory create(ObjectMapper jsonMapper) + { + return new SelectQueryRunnerFactory( + new SelectQueryQueryToolChest(new QueryConfig(), jsonMapper), + new SelectQueryEngine() + ); + } + + private final SelectQueryQueryToolChest toolChest; + private final SelectQueryEngine engine; + + @Inject + public SelectQueryRunnerFactory( + SelectQueryQueryToolChest toolChest, + SelectQueryEngine engine + ) + { + this.toolChest = toolChest; + this.engine = engine; + } + + @Override + public QueryRunner> createRunner(final Segment segment) + { + return new SelectQueryRunner(engine, segment); + } + + @Override + public QueryRunner> mergeRunners( + ExecutorService queryExecutor, Iterable>> queryRunners + ) + { + return new ChainedExecutionQueryRunner>( + queryExecutor, toolChest.getOrdering(), queryRunners + ); + } + + @Override + public QueryToolChest, SelectQuery> getToolchest() + { + return toolChest; + } + + private static class SelectQueryRunner implements QueryRunner> + { + private final SelectQueryEngine engine; + private final Segment segment; + + private SelectQueryRunner(SelectQueryEngine engine, Segment segment) + { + this.engine = engine; + this.segment = segment; + } + + @Override + public Sequence> run(Query> input) + { + if (!(input instanceof SelectQuery)) { + throw new ISE("Got a [%s] which isn't a %s", input.getClass(), SelectQuery.class); + } + + return engine.process((SelectQuery) input, segment); + } + } +} diff --git a/processing/src/main/java/io/druid/query/select/SelectResultValue.java b/processing/src/main/java/io/druid/query/select/SelectResultValue.java new file mode 100644 index 00000000000..d3ce5d6ef68 --- /dev/null +++ b/processing/src/main/java/io/druid/query/select/SelectResultValue.java @@ -0,0 +1,108 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
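SelectQueryRunnerFactory above follows the usual Druid factory shape: one runner per segment, merged across an executor by ChainedExecutionQueryRunner, with the static create() helper used by the tests to stand the whole thing up. A minimal wiring sketch, assuming druid-processing is on the classpath:

    import io.druid.jackson.DefaultObjectMapper;
    import io.druid.query.select.SelectQueryRunnerFactory;

    class SelectFactoryWiring
    {
      public static void main(String[] args)
      {
        // Tool chest and engine are built internally with a default QueryConfig.
        SelectQueryRunnerFactory factory = SelectQueryRunnerFactory.create(new DefaultObjectMapper());
        // Typical use: factory.createRunner(segment), then factory.mergeRunners(executor, runners).
        System.out.println(factory.getToolchest().getClass().getSimpleName());
      }
    }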
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.query.select; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonValue; +import com.google.common.base.Function; +import com.google.common.collect.Lists; +import com.metamx.common.ISE; + +import javax.annotation.Nullable; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + */ +public class SelectResultValue implements Iterable +{ + private final Map pagingIdentifiers; + private final List events; + + @JsonCreator + public SelectResultValue( + @JsonProperty("pagingIdentifiers") Map pagingIdentifiers, + @JsonProperty("events") List events) + { + this.pagingIdentifiers = pagingIdentifiers; + this.events = events; + } + + @JsonProperty + public Map getPagingIdentifiers() + { + return pagingIdentifiers; + } + + @JsonProperty + public List getEvents() + { + return events; + } + + @Override + public Iterator iterator() + { + return events.iterator(); + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SelectResultValue that = (SelectResultValue) o; + + if (events != null ? !events.equals(that.events) : that.events != null) { + return false; + } + if (pagingIdentifiers != null + ? !pagingIdentifiers.equals(that.pagingIdentifiers) + : that.pagingIdentifiers != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() + { + int result = pagingIdentifiers != null ? pagingIdentifiers.hashCode() : 0; + result = 31 * result + (events != null ? events.hashCode() : 0); + return result; + } + + @Override + public String toString() + { + return "SelectResultValue{" + + "pagingIdentifiers=" + pagingIdentifiers + + ", events=" + events + + '}'; + } +} diff --git a/processing/src/main/java/io/druid/query/select/SelectResultValueBuilder.java b/processing/src/main/java/io/druid/query/select/SelectResultValueBuilder.java new file mode 100644 index 00000000000..d417bddae8e --- /dev/null +++ b/processing/src/main/java/io/druid/query/select/SelectResultValueBuilder.java @@ -0,0 +1,98 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +package io.druid.query.select; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.MinMaxPriorityQueue; +import com.google.common.primitives.Longs; +import io.druid.query.Result; +import org.joda.time.DateTime; + +import java.util.Comparator; +import java.util.List; +import java.util.Map; + +/** + */ +public class SelectResultValueBuilder +{ + private static final Comparator comparator = new Comparator() + { + @Override + public int compare(EventHolder o1, EventHolder o2) + { + int retVal = Longs.compare(o1.getTimestamp().getMillis(), o2.getTimestamp().getMillis()); + + if (retVal == 0) { + retVal = o1.getSegmentId().compareTo(o2.getSegmentId()); + } + + if (retVal == 0) { + retVal = Integer.compare(o1.getOffset(), o2.getOffset()); + } + + return retVal; + } + }; + + private final DateTime timestamp; + + private MinMaxPriorityQueue pQueue = null; + + public SelectResultValueBuilder( + DateTime timestamp, + int threshold + ) + { + this.timestamp = timestamp; + + instantiatePQueue(threshold, comparator); + } + + public void addEntry( + EventHolder event + ) + { + pQueue.add(event); + } + + public Result build() + { + // Pull out top aggregated values + List values = Lists.newArrayListWithCapacity(pQueue.size()); + Map pagingIdentifiers = Maps.newLinkedHashMap(); + while (!pQueue.isEmpty()) { + EventHolder event = pQueue.remove(); + pagingIdentifiers.put(event.getSegmentId(), event.getOffset()); + values.add(event); + } + + return new Result( + timestamp, + new SelectResultValue(pagingIdentifiers, values) + ); + } + + private void instantiatePQueue(int threshold, final Comparator comparator) + { + this.pQueue = MinMaxPriorityQueue.orderedBy(comparator).maximumSize(threshold).create(); + } +} diff --git a/processing/src/main/java/io/druid/segment/ColumnSelectorFactory.java b/processing/src/main/java/io/druid/segment/ColumnSelectorFactory.java index 1e640591499..318f93af9a3 100644 --- a/processing/src/main/java/io/druid/segment/ColumnSelectorFactory.java +++ b/processing/src/main/java/io/druid/segment/ColumnSelectorFactory.java @@ -24,6 +24,7 @@ package io.druid.segment; */ public interface ColumnSelectorFactory { + public TimestampColumnSelector makeTimestampColumnSelector(); public DimensionSelector makeDimensionSelector(String dimensionName); public FloatColumnSelector makeFloatColumnSelector(String columnName); public ObjectColumnSelector makeObjectColumnSelector(String columnName); diff --git a/processing/src/main/java/io/druid/segment/Cursor.java b/processing/src/main/java/io/druid/segment/Cursor.java index 42ed6b337af..34070e3f15d 100644 --- a/processing/src/main/java/io/druid/segment/Cursor.java +++ b/processing/src/main/java/io/druid/segment/Cursor.java @@ -21,10 +21,12 @@ package io.druid.segment;import org.joda.time.DateTime; /** */ + public interface Cursor extends ColumnSelectorFactory { public DateTime getTime(); public void advance(); + public void advanceTo(int offset); public boolean isDone(); public void reset(); } diff --git a/processing/src/main/java/io/druid/segment/IncrementalIndexSegment.java b/processing/src/main/java/io/druid/segment/IncrementalIndexSegment.java index 9e04e2af836..f21f7f1fa09 100644 --- a/processing/src/main/java/io/druid/segment/IncrementalIndexSegment.java +++ b/processing/src/main/java/io/druid/segment/IncrementalIndexSegment.java @@ -30,18 +30,21 @@ import java.io.IOException; public class IncrementalIndexSegment implements Segment { private final IncrementalIndex index; + 
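SelectResultValueBuilder above keeps the merged page bounded: events go into a Guava MinMaxPriorityQueue capped at the paging threshold and ordered by (timestamp, segmentId, offset), so once capacity is hit the latest entries fall off the end. A tiny sketch of that bounding behavior in isolation, using integers in place of EventHolders:

    import com.google.common.collect.MinMaxPriorityQueue;
    import com.google.common.collect.Ordering;

    class BoundedQueueSketch
    {
      public static void main(String[] args)
      {
        MinMaxPriorityQueue<Integer> queue = MinMaxPriorityQueue
            .orderedBy(Ordering.<Integer>natural())
            .maximumSize(3)   // the paging threshold
            .create();
        for (int offset = 0; offset < 5; offset++) {
          queue.add(offset); // offsets 3 and 4 are evicted once capacity is reached
        }
        while (!queue.isEmpty()) {
          System.out.println(queue.remove()); // 0, 1, 2 in order
        }
      }
    }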
private final String segmentIdentifier; public IncrementalIndexSegment( - IncrementalIndex index + IncrementalIndex index, + String segmentIdentifier ) { this.index = index; + this.segmentIdentifier = segmentIdentifier; } @Override public String getIdentifier() { - throw new UnsupportedOperationException(); + return segmentIdentifier; } @Override diff --git a/processing/src/main/java/io/druid/segment/QueryableIndexStorageAdapter.java b/processing/src/main/java/io/druid/segment/QueryableIndexStorageAdapter.java index 72758ef0b14..86637cf0d4a 100644 --- a/processing/src/main/java/io/druid/segment/QueryableIndexStorageAdapter.java +++ b/processing/src/main/java/io/druid/segment/QueryableIndexStorageAdapter.java @@ -23,6 +23,7 @@ import com.google.common.base.Function; import com.google.common.base.Functions; import com.google.common.collect.ImmutableList; import com.google.common.collect.Maps; +import com.google.common.collect.Sets; import com.google.common.io.Closeables; import com.metamx.common.collect.MoreIterators; import com.metamx.common.guava.FunctionalIterable; @@ -77,6 +78,12 @@ public class QueryableIndexStorageAdapter implements StorageAdapter return index.getAvailableDimensions(); } + @Override + public Iterable getAvailableMetrics() + { + return Sets.difference(Sets.newHashSet(index.getColumnNames()), Sets.newHashSet(index.getAvailableDimensions())); + } + @Override public int getDimensionCardinality(String dimension) { @@ -224,6 +231,16 @@ public class QueryableIndexStorageAdapter implements StorageAdapter cursorOffset.increment(); } + @Override + public void advanceTo(int offset) + { + int count = 0; + while (count < offset && !isDone()) { + advance(); + count++; + } + } + @Override public boolean isDone() { @@ -236,6 +253,19 @@ public class QueryableIndexStorageAdapter implements StorageAdapter cursorOffset = initOffset.clone(); } + @Override + public TimestampColumnSelector makeTimestampColumnSelector() + { + return new TimestampColumnSelector() + { + @Override + public long getTimestamp() + { + return timestamps.getLongSingleValueRow(cursorOffset.getOffset()); + } + }; + } + @Override public DimensionSelector makeDimensionSelector(String dimension) { @@ -249,8 +279,7 @@ public class QueryableIndexStorageAdapter implements StorageAdapter if (column == null) { return null; - } - else if (columnDesc.getCapabilities().hasMultipleValues()) { + } else if (columnDesc.getCapabilities().hasMultipleValues()) { return new DimensionSelector() { @Override @@ -608,6 +637,12 @@ public class QueryableIndexStorageAdapter implements StorageAdapter ++currRow; } + @Override + public void advanceTo(int offset) + { + currRow += offset; + } + @Override public boolean isDone() { @@ -620,6 +655,19 @@ public class QueryableIndexStorageAdapter implements StorageAdapter currRow = initRow; } + @Override + public TimestampColumnSelector makeTimestampColumnSelector() + { + return new TimestampColumnSelector() + { + @Override + public long getTimestamp() + { + return timestamps.getLongSingleValueRow(currRow); + } + }; + } + @Override public DimensionSelector makeDimensionSelector(String dimension) { @@ -633,8 +681,7 @@ public class QueryableIndexStorageAdapter implements StorageAdapter if (dict == null) { return null; - } - else if (column.getCapabilities().hasMultipleValues()) { + } else if (column.getCapabilities().hasMultipleValues()) { return new DimensionSelector() { @Override diff --git a/processing/src/main/java/io/druid/segment/StorageAdapter.java 
b/processing/src/main/java/io/druid/segment/StorageAdapter.java index ef6ed7c6bd6..a799e3ed6f6 100644 --- a/processing/src/main/java/io/druid/segment/StorageAdapter.java +++ b/processing/src/main/java/io/druid/segment/StorageAdapter.java @@ -17,7 +17,9 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ -package io.druid.segment;import io.druid.segment.data.Indexed; +package io.druid.segment; + +import io.druid.segment.data.Indexed; import org.joda.time.DateTime; import org.joda.time.Interval; @@ -28,6 +30,7 @@ public interface StorageAdapter extends CursorFactory public String getSegmentIdentifier(); public Interval getInterval(); public Indexed getAvailableDimensions(); + public Iterable getAvailableMetrics(); public int getDimensionCardinality(String dimension); public DateTime getMinTime(); public DateTime getMaxTime(); diff --git a/processing/src/main/java/io/druid/segment/TimestampColumnSelector.java b/processing/src/main/java/io/druid/segment/TimestampColumnSelector.java new file mode 100644 index 00000000000..dc36f6a4064 --- /dev/null +++ b/processing/src/main/java/io/druid/segment/TimestampColumnSelector.java @@ -0,0 +1,27 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +package io.druid.segment; + +/** + */ +public interface TimestampColumnSelector +{ + public long getTimestamp(); +} diff --git a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java index 4f6c6016088..c13a1c3c588 100644 --- a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java +++ b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java @@ -45,6 +45,7 @@ import io.druid.segment.ColumnSelectorFactory; import io.druid.segment.DimensionSelector; import io.druid.segment.FloatColumnSelector; import io.druid.segment.ObjectColumnSelector; +import io.druid.segment.TimestampColumnSelector; import io.druid.segment.serde.ComplexMetricExtractor; import io.druid.segment.serde.ComplexMetricSerde; import io.druid.segment.serde.ComplexMetrics; @@ -197,6 +198,19 @@ public class IncrementalIndex implements Iterable aggs[i] = agg.factorize( new ColumnSelectorFactory() { + @Override + public TimestampColumnSelector makeTimestampColumnSelector() + { + return new TimestampColumnSelector() + { + @Override + public long getTimestamp() + { + return in.getTimestampFromEpoch(); + } + }; + } + @Override public FloatColumnSelector makeFloatColumnSelector(String columnName) { diff --git a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndexStorageAdapter.java b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndexStorageAdapter.java index fee9e728cb1..d0243f39123 100644 --- a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndexStorageAdapter.java +++ b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndexStorageAdapter.java @@ -1,3 +1,4 @@ + /* * Druid - a distributed column store. * Copyright (C) 2012, 2013 Metamarkets Group Inc. 
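TimestampColumnSelector, introduced above, is the small seam that lets the select engine (and the aggregator factories fed by IncrementalIndex) read a row's timestamp without going through a dimension or metric column. A sketch of what an implementation looks like over a hypothetical in-memory row holder; the holder class is not from the source.

    import io.druid.segment.TimestampColumnSelector;

    class InMemoryRow
    {
      private final long timestampMillis;

      InMemoryRow(long timestampMillis)
      {
        this.timestampMillis = timestampMillis;
      }

      TimestampColumnSelector timestampSelector()
      {
        return new TimestampColumnSelector()
        {
          @Override
          public long getTimestamp()
          {
            return timestampMillis; // millis since epoch for the current row
          }
        };
      }
    }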
@@ -38,6 +39,7 @@ import io.druid.segment.DimensionSelector; import io.druid.segment.FloatColumnSelector; import io.druid.segment.ObjectColumnSelector; import io.druid.segment.StorageAdapter; +import io.druid.segment.TimestampColumnSelector; import io.druid.segment.data.Indexed; import io.druid.segment.data.IndexedInts; import io.druid.segment.data.ListIndexed; @@ -87,6 +89,12 @@ public class IncrementalIndexStorageAdapter implements StorageAdapter return new ListIndexed(index.getDimensions(), String.class); } + @Override + public Iterable getAvailableMetrics() + { + return index.getMetricNames(); + } + @Override public int getDimensionCardinality(String dimension) { @@ -205,6 +213,16 @@ public class IncrementalIndexStorageAdapter implements StorageAdapter } } + @Override + public void advanceTo(int offset) + { + int count = 0; + while (count < offset && !isDone()) { + advance(); + count++; + } + } + @Override public boolean isDone() { @@ -237,6 +255,19 @@ public class IncrementalIndexStorageAdapter implements StorageAdapter } + @Override + public TimestampColumnSelector makeTimestampColumnSelector() + { + return new TimestampColumnSelector() + { + @Override + public long getTimestamp() + { + return currEntry.getKey().getTimestamp(); + } + }; + } + @Override public DimensionSelector makeDimensionSelector(String dimension) { diff --git a/processing/src/test/java/io/druid/query/QueryRunnerTestHelper.java b/processing/src/test/java/io/druid/query/QueryRunnerTestHelper.java index 58c1e97034c..2f51162f076 100644 --- a/processing/src/test/java/io/druid/query/QueryRunnerTestHelper.java +++ b/processing/src/test/java/io/druid/query/QueryRunnerTestHelper.java @@ -48,11 +48,13 @@ import java.util.List; */ public class QueryRunnerTestHelper { + public static final String segmentId= "testSegment"; public static final String dataSource = "testing"; public static final QueryGranularity dayGran = QueryGranularity.DAY; public static final QueryGranularity allGran = QueryGranularity.ALL; public static final String providerDimension = "proVider"; public static final String qualityDimension = "quality"; + public static final String placementDimension = "placement"; public static final String placementishDimension = "placementish"; public static final String indexMetric = "index"; public static final CountAggregatorFactory rowsCount = new CountAggregatorFactory("rows"); @@ -110,13 +112,13 @@ public class QueryRunnerTestHelper return Arrays.asList( new Object[][]{ { - makeQueryRunner(factory, new IncrementalIndexSegment(rtIndex)) + makeQueryRunner(factory, new IncrementalIndexSegment(rtIndex, segmentId)) }, { - makeQueryRunner(factory, new QueryableIndexSegment(null, mMappedTestIndex)) + makeQueryRunner(factory, new QueryableIndexSegment(segmentId, mMappedTestIndex)) }, { - makeQueryRunner(factory, new QueryableIndexSegment(null, mergedRealtimeIndex)) + makeQueryRunner(factory, new QueryableIndexSegment(segmentId, mergedRealtimeIndex)) } } ); diff --git a/processing/src/test/java/io/druid/query/metadata/SegmentAnalyzerTest.java b/processing/src/test/java/io/druid/query/metadata/SegmentAnalyzerTest.java index 55ed5887480..e1bc774ebfd 100644 --- a/processing/src/test/java/io/druid/query/metadata/SegmentAnalyzerTest.java +++ b/processing/src/test/java/io/druid/query/metadata/SegmentAnalyzerTest.java @@ -47,7 +47,7 @@ public class SegmentAnalyzerTest public void testIncrementalDoesNotWork() throws Exception { final List results = getSegmentAnalysises( - new 
IncrementalIndexSegment(TestIndex.getIncrementalTestIndex()) + new IncrementalIndexSegment(TestIndex.getIncrementalTestIndex(), null) ); Assert.assertEquals(0, results.size()); diff --git a/processing/src/test/java/io/druid/query/select/SelectBinaryFnTest.java b/processing/src/test/java/io/druid/query/select/SelectBinaryFnTest.java new file mode 100644 index 00000000000..5eb059deb30 --- /dev/null +++ b/processing/src/test/java/io/druid/query/select/SelectBinaryFnTest.java @@ -0,0 +1,224 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.query.select; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; +import com.metamx.common.ISE; +import io.druid.granularity.QueryGranularity; +import io.druid.query.Result; +import junit.framework.Assert; +import org.joda.time.DateTime; +import org.junit.Test; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; + +/** + */ +public class SelectBinaryFnTest +{ + private static final String segmentId1 = "testSegment"; + + private static final String segmentId2 = "testSegment"; + + @Test + public void testApply() throws Exception + { + SelectBinaryFn binaryFn = new SelectBinaryFn(QueryGranularity.ALL, new PagingSpec(null, 5)); + + Result res1 = new Result<>( + new DateTime("2013-01-01"), + new SelectResultValue( + ImmutableMap.of(), + Arrays.asList( + new EventHolder( + segmentId1, + 0, + ImmutableMap.of( + EventHolder.timestampKey, + new DateTime("2013-01-01T00"), + "dim", + "first" + ) + ), + new EventHolder( + segmentId1, + 1, + ImmutableMap.of( + EventHolder.timestampKey, + new DateTime("2013-01-01T03"), + "dim", + "fourth" + ) + ), + new EventHolder( + segmentId1, + 2, + ImmutableMap.of( + EventHolder.timestampKey, + new DateTime("2013-01-01T05"), + "dim", + "sixth" + ) + ) + ) + ) + ); + + + Result res2 = new Result<>( + new DateTime("2013-01-01"), + new SelectResultValue( + ImmutableMap.of(), + Arrays.asList( + new EventHolder( + segmentId2, + 0, + ImmutableMap.of( + EventHolder.timestampKey, + new DateTime("2013-01-01T00"), + "dim", + "second" + ) + ), + new EventHolder( + segmentId2, + 1, + ImmutableMap.of( + EventHolder.timestampKey, + new DateTime("2013-01-01T02"), + "dim", + "third" + ) + ), + new EventHolder( + segmentId2, + 2, + ImmutableMap.of( + EventHolder.timestampKey, + new DateTime("2013-01-01T04"), + "dim", + "fifth" + ) + ) + ) + ) + ); + + Result merged = binaryFn.apply(res1, res2); + + Assert.assertEquals(res1.getTimestamp(), merged.getTimestamp()); + + LinkedHashMap expectedPageIds = Maps.newLinkedHashMap(); + expectedPageIds.put(segmentId1, 0); + expectedPageIds.put(segmentId2, 0); + expectedPageIds.put(segmentId2, 1); + 
expectedPageIds.put(segmentId1, 1); + expectedPageIds.put(segmentId2, 2); + + Iterator exSegmentIter = expectedPageIds.keySet().iterator(); + Iterator acSegmentIter = merged.getValue().getPagingIdentifiers().keySet().iterator(); + + verifyIters(exSegmentIter, acSegmentIter); + + Iterator exOffsetIter = expectedPageIds.values().iterator(); + Iterator acOffsetIter = merged.getValue().getPagingIdentifiers().values().iterator(); + + verifyIters(exOffsetIter, acOffsetIter); + + List exEvents = Arrays.asList( + new EventHolder( + segmentId1, + 0, + ImmutableMap.of( + EventHolder.timestampKey, + new DateTime("2013-01-01T00"), "dim", "first" + ) + ), + new EventHolder( + segmentId2, + 0, + ImmutableMap.of( + EventHolder.timestampKey, + new DateTime("2013-01-01T00"), + "dim", + "second" + ) + ), + new EventHolder( + segmentId2, + 1, + ImmutableMap.of( + EventHolder.timestampKey, + new DateTime("2013-01-01T02"), + "dim", + "third" + ) + ), + new EventHolder( + segmentId1, + 1, + ImmutableMap.of( + EventHolder.timestampKey, + new DateTime("2013-01-01T03"), + "dim", + "fourth" + ) + ), + new EventHolder( + segmentId2, + 2, + ImmutableMap.of( + EventHolder.timestampKey, + new DateTime("2013-01-01T04"), + "dim", + "fifth" + ) + ) + ); + + List acEvents = merged.getValue().getEvents(); + + + verifyEvents(exEvents, acEvents); + } + + private void verifyIters(Iterator iter1, Iterator iter2) + { + while (iter1.hasNext()) { + Assert.assertEquals(iter1.next(), iter2.next()); + } + + if (iter2.hasNext()) { + throw new ISE("This should be empty!"); + } + } + + private void verifyEvents(List events1, List events2) + { + Iterator ex = events1.iterator(); + Iterator ac = events2.iterator(); + + verifyIters(ex, ac); + } +} diff --git a/processing/src/test/java/io/druid/query/select/SelectQueryRunnerTest.java b/processing/src/test/java/io/druid/query/select/SelectQueryRunnerTest.java new file mode 100644 index 00000000000..6c7b26d6059 --- /dev/null +++ b/processing/src/test/java/io/druid/query/select/SelectQueryRunnerTest.java @@ -0,0 +1,403 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
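Before the runner tests that follow: the intended client loop for select is to issue a page, read pagingIdentifiers off the result, feed them back as the next PagingSpec, and repeat until a short page comes back. This is a hedged sketch of that loop with the query plumbing elided; whether the caller must bump the returned offsets before resuming is not spelled out in this change, so that detail is deliberately left out.

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    class SelectPagingLoop
    {
      // Stand-in for issuing a select query and getting one page back.
      interface PageFetcher
      {
        PageResult fetch(Map<String, Integer> pagingIdentifiers, int threshold);
      }

      static class PageResult
      {
        final Map<String, Integer> pagingIdentifiers;
        final List<Map<String, Object>> events;

        PageResult(Map<String, Integer> pagingIdentifiers, List<Map<String, Object>> events)
        {
          this.pagingIdentifiers = pagingIdentifiers;
          this.events = events;
        }
      }

      static void readAll(PageFetcher fetcher, int threshold)
      {
        Map<String, Integer> cursor = null; // null means "first page"
        while (true) {
          PageResult page = fetcher.fetch(cursor, threshold);
          if (page.events.isEmpty()) {
            break;
          }
          // ... process page.events here ...
          cursor = new LinkedHashMap<String, Integer>(page.pagingIdentifiers);
          if (page.events.size() < threshold) {
            break; // short page: nothing more to read
          }
        }
      }
    }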
+ */ + +package io.druid.query.select; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.metamx.common.ISE; +import com.metamx.common.guava.Sequences; +import io.druid.jackson.DefaultObjectMapper; +import io.druid.query.QueryRunner; +import io.druid.query.QueryRunnerTestHelper; +import io.druid.query.Result; +import io.druid.query.filter.SelectorDimFilter; +import io.druid.query.spec.LegacySegmentSpec; +import org.joda.time.DateTime; +import org.joda.time.Interval; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + */ +@RunWith(Parameterized.class) +public class SelectQueryRunnerTest +{ + @Parameterized.Parameters + public static Collection constructorFeeder() throws IOException + { + return QueryRunnerTestHelper.makeQueryRunners( + SelectQueryRunnerFactory.create(new DefaultObjectMapper()) + ); + } + + private static final String providerLowercase = "provider"; + + private final QueryRunner runner; + + public SelectQueryRunnerTest( + QueryRunner runner + ) + { + this.runner = runner; + } + + @Test + public void testFullOnSelect() + { + SelectQuery query = new SelectQuery( + QueryRunnerTestHelper.dataSource, + QueryRunnerTestHelper.fullOnInterval, + null, + QueryRunnerTestHelper.allGran, + Lists.newArrayList(), + Lists.newArrayList(), + new PagingSpec(null, 3), + null + ); + + Iterable> results = Sequences.toList( + runner.run(query), + Lists.>newArrayList() + ); + + List> expectedResults = Arrays.asList( + new Result( + new DateTime("2011-01-12T00:00:00.000Z"), + new SelectResultValue( + ImmutableMap.of(QueryRunnerTestHelper.segmentId, 2), + Arrays.asList( + new EventHolder( + QueryRunnerTestHelper.segmentId, + 0, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-12T00:00:00.000Z")) + .put(providerLowercase, "spot") + .put(QueryRunnerTestHelper.qualityDimension, "automotive") + .put(QueryRunnerTestHelper.placementDimension, "preferred") + .put(QueryRunnerTestHelper.placementishDimension, Lists.newArrayList("a", "preferred")) + .put(QueryRunnerTestHelper.indexMetric, 100.000000F) + .build() + ), + new EventHolder( + QueryRunnerTestHelper.segmentId, + 1, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-12T00:00:00.000Z")) + .put(providerLowercase, "spot") + .put(QueryRunnerTestHelper.qualityDimension, "business") + .put(QueryRunnerTestHelper.placementDimension, "preferred") + .put(QueryRunnerTestHelper.placementishDimension, Lists.newArrayList("b", "preferred")) + .put(QueryRunnerTestHelper.indexMetric, 100.000000F) + .build() + ), + new EventHolder( + QueryRunnerTestHelper.segmentId, + 2, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-12T00:00:00.000Z")) + .put(providerLowercase, "spot") + .put(QueryRunnerTestHelper.qualityDimension, "entertainment") + .put(QueryRunnerTestHelper.placementDimension, "preferred") + .put(QueryRunnerTestHelper.placementishDimension, Lists.newArrayList("e", "preferred")) + .put(QueryRunnerTestHelper.indexMetric, 100.000000F) + .build() + ) + ) + ) + ) + ); + + verify(expectedResults, results); + } + + @Test + public void testSelectWithDimsAndMets() + { + SelectQuery query = new SelectQuery( + QueryRunnerTestHelper.dataSource, 
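+ // Remaining positional arguments, following the pattern of the other tests in this file:
+ // the interval (or segment spec), an optional dimension filter, the granularity, the
+ // dimensions and metrics to return, the PagingSpec, and a trailing null that is
+ // presumably the query context.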
+ QueryRunnerTestHelper.fullOnInterval, + null, + QueryRunnerTestHelper.allGran, + Lists.newArrayList(providerLowercase), + Lists.newArrayList(QueryRunnerTestHelper.indexMetric), + new PagingSpec(null, 3), + null + ); + + Iterable> results = Sequences.toList( + runner.run(query), + Lists.>newArrayList() + ); + + List> expectedResults = Arrays.asList( + new Result( + new DateTime("2011-01-12T00:00:00.000Z"), + new SelectResultValue( + ImmutableMap.of(QueryRunnerTestHelper.segmentId, 2), + Arrays.asList( + new EventHolder( + QueryRunnerTestHelper.segmentId, + 0, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-12T00:00:00.000Z")) + .put(providerLowercase, "spot") + .put(QueryRunnerTestHelper.indexMetric, 100.000000F) + .build() + ), + new EventHolder( + QueryRunnerTestHelper.segmentId, + 1, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-12T00:00:00.000Z")) + .put(providerLowercase, "spot") + .put(QueryRunnerTestHelper.indexMetric, 100.000000F) + .build() + ), + new EventHolder( + QueryRunnerTestHelper.segmentId, + 2, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-12T00:00:00.000Z")) + .put(providerLowercase, "spot") + .put(QueryRunnerTestHelper.indexMetric, 100.000000F) + .build() + ) + ) + ) + ) + ); + + verify(expectedResults, results); + } + + @Test + public void testSelectPagination() + { + SelectQuery query = new SelectQuery( + QueryRunnerTestHelper.dataSource, + QueryRunnerTestHelper.fullOnInterval, + null, + QueryRunnerTestHelper.allGran, + Lists.newArrayList(QueryRunnerTestHelper.qualityDimension), + Lists.newArrayList(QueryRunnerTestHelper.indexMetric), + new PagingSpec(Maps.newLinkedHashMap(ImmutableMap.of(QueryRunnerTestHelper.segmentId, 3)), 3), + null + ); + + Iterable> results = Sequences.toList( + runner.run(query), + Lists.>newArrayList() + ); + + List> expectedResults = Arrays.asList( + new Result( + new DateTime("2011-01-12T00:00:00.000Z"), + new SelectResultValue( + ImmutableMap.of(QueryRunnerTestHelper.segmentId, 5), + Arrays.asList( + new EventHolder( + QueryRunnerTestHelper.segmentId, + 3, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-12T00:00:00.000Z")) + .put(QueryRunnerTestHelper.qualityDimension, "health") + .put(QueryRunnerTestHelper.indexMetric, 100.000000F) + .build() + ), + new EventHolder( + QueryRunnerTestHelper.segmentId, + 4, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-12T00:00:00.000Z")) + .put(QueryRunnerTestHelper.qualityDimension, "mezzanine") + .put(QueryRunnerTestHelper.indexMetric, 100.000000F) + .build() + ), + new EventHolder( + QueryRunnerTestHelper.segmentId, + 5, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-12T00:00:00.000Z")) + .put(QueryRunnerTestHelper.qualityDimension, "news") + .put(QueryRunnerTestHelper.indexMetric, 100.000000F) + .build() + ) + ) + ) + ) + ); + + verify(expectedResults, results); + } + + @Test + public void testFullOnSelectWithFilter() + { + SelectQuery query = new SelectQuery( + QueryRunnerTestHelper.dataSource, + new LegacySegmentSpec(new Interval("2011-01-12/2011-01-14")), + new SelectorDimFilter(QueryRunnerTestHelper.providerDimension, "spot"), + QueryRunnerTestHelper.dayGran, + Lists.newArrayList(QueryRunnerTestHelper.qualityDimension), + Lists.newArrayList(QueryRunnerTestHelper.indexMetric), + new PagingSpec(Maps.newLinkedHashMap(ImmutableMap.of(QueryRunnerTestHelper.segmentId, 3)), 3), + null + 
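// PagingSpec's first argument maps a segment id to the offset to resume from and the
+ // second is the page size, so this query skips the first three rows of the matching
+ // segment and returns the next three -- hence the expected offsets 3, 4 and 5 below.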
); + + Iterable> results = Sequences.toList( + runner.run(query), + Lists.>newArrayList() + ); + + List> expectedResults = Arrays.asList( + new Result( + new DateTime("2011-01-12T00:00:00.000Z"), + new SelectResultValue( + ImmutableMap.of(QueryRunnerTestHelper.segmentId, 5), + Arrays.asList( + new EventHolder( + QueryRunnerTestHelper.segmentId, + 3, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-12T00:00:00.000Z")) + .put(QueryRunnerTestHelper.qualityDimension, "health") + .put(QueryRunnerTestHelper.indexMetric, 100.000000F) + .build() + ), + new EventHolder( + QueryRunnerTestHelper.segmentId, + 4, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-12T00:00:00.000Z")) + .put(QueryRunnerTestHelper.qualityDimension, "mezzanine") + .put(QueryRunnerTestHelper.indexMetric, 100.000000F) + .build() + ), + new EventHolder( + QueryRunnerTestHelper.segmentId, + 5, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-12T00:00:00.000Z")) + .put(QueryRunnerTestHelper.qualityDimension, "news") + .put(QueryRunnerTestHelper.indexMetric, 100.000000F) + .build() + ) + ) + ) + ), + new Result( + new DateTime("2011-01-13T00:00:00.000Z"), + new SelectResultValue( + ImmutableMap.of(QueryRunnerTestHelper.segmentId, 5), + Arrays.asList( + new EventHolder( + QueryRunnerTestHelper.segmentId, + 3, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-13T00:00:00.000Z")) + .put(QueryRunnerTestHelper.qualityDimension, "health") + .put(QueryRunnerTestHelper.indexMetric, 114.947403F) + .build() + ), + new EventHolder( + QueryRunnerTestHelper.segmentId, + 4, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-13T00:00:00.000Z")) + .put(QueryRunnerTestHelper.qualityDimension, "mezzanine") + .put(QueryRunnerTestHelper.indexMetric, 104.465767F) + .build() + ), + new EventHolder( + QueryRunnerTestHelper.segmentId, + 5, + new ImmutableMap.Builder() + .put(EventHolder.timestampKey, new DateTime("2011-01-13T00:00:00.000Z")) + .put(QueryRunnerTestHelper.qualityDimension, "news") + .put(QueryRunnerTestHelper.indexMetric, 102.851683F) + .build() + ) + ) + ) + ) + ); + + verify(expectedResults, results); + } + + private static void verify( + Iterable> expectedResults, + Iterable> actualResults + ) + { + Iterator> expectedIter = expectedResults.iterator(); + Iterator> actualIter = actualResults.iterator(); + + while (expectedIter.hasNext()) { + Result expected = expectedIter.next(); + Result actual = actualIter.next(); + + Assert.assertEquals(expected.getTimestamp(), actual.getTimestamp()); + + for (Map.Entry entry : expected.getValue().getPagingIdentifiers().entrySet()) { + Assert.assertEquals(entry.getValue(), actual.getValue().getPagingIdentifiers().get(entry.getKey())); + } + + Iterator expectedEvts = expected.getValue().getEvents().iterator(); + Iterator actualEvts = actual.getValue().getEvents().iterator(); + + while (expectedEvts.hasNext()) { + EventHolder exHolder = expectedEvts.next(); + EventHolder acHolder = actualEvts.next(); + + Assert.assertEquals(exHolder.getTimestamp(), acHolder.getTimestamp()); + Assert.assertEquals(exHolder.getOffset(), acHolder.getOffset()); + + for (Map.Entry ex : exHolder.getEvent().entrySet()) { + Object actVal = acHolder.getEvent().get(ex.getKey()); + + // work around for current II limitations + if (acHolder.getEvent().get(ex.getKey()) instanceof Double) { + actVal = ((Double) actVal).floatValue(); + } + 
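// The incremental index appears to hand float metrics back boxed as Doubles, while the
+ // expected events above use float literals, so narrow to float before comparing.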
Assert.assertEquals(ex.getValue(), actVal); + } + } + + if (actualEvts.hasNext()) { + throw new ISE("This event iterator should be exhausted!"); + } + } + + if (actualIter.hasNext()) { + throw new ISE("This iterator should be exhausted!"); + } + } +} diff --git a/processing/src/test/java/io/druid/query/timeseries/TimeseriesQueryRunnerBonusTest.java b/processing/src/test/java/io/druid/query/timeseries/TimeseriesQueryRunnerBonusTest.java index b31ff2bf1ea..d1497a19026 100644 --- a/processing/src/test/java/io/druid/query/timeseries/TimeseriesQueryRunnerBonusTest.java +++ b/processing/src/test/java/io/druid/query/timeseries/TimeseriesQueryRunnerBonusTest.java @@ -90,7 +90,7 @@ public class TimeseriesQueryRunnerBonusTest final QueryRunnerFactory factory = TimeseriesQueryRunnerFactory.create(); final QueryRunner> runner = makeQueryRunner( factory, - new IncrementalIndexSegment(index) + new IncrementalIndexSegment(index, null) ); TimeseriesQuery query = Druids.newTimeseriesQueryBuilder() diff --git a/processing/src/test/java/io/druid/query/topn/TopNQueryRunnerTestHelper.java b/processing/src/test/java/io/druid/query/topn/TopNQueryRunnerTestHelper.java index 977cd9e7cdf..97b837a4b48 100644 --- a/processing/src/test/java/io/druid/query/topn/TopNQueryRunnerTestHelper.java +++ b/processing/src/test/java/io/druid/query/topn/TopNQueryRunnerTestHelper.java @@ -48,7 +48,7 @@ public class TopNQueryRunnerTestHelper return Arrays.asList( new Object[][]{ { - makeQueryRunner(factory, new IncrementalIndexSegment(rtIndex)) + makeQueryRunner(factory, new IncrementalIndexSegment(rtIndex, null)) }, { makeQueryRunner(factory, new QueryableIndexSegment(null, mMappedTestIndex)) diff --git a/processing/src/test/java/io/druid/segment/filter/SpatialFilterBonusTest.java b/processing/src/test/java/io/druid/segment/filter/SpatialFilterBonusTest.java index 8e423dfcb47..0eb327972ee 100644 --- a/processing/src/test/java/io/druid/segment/filter/SpatialFilterBonusTest.java +++ b/processing/src/test/java/io/druid/segment/filter/SpatialFilterBonusTest.java @@ -83,7 +83,7 @@ public class SpatialFilterBonusTest return Arrays.asList( new Object[][]{ { - new IncrementalIndexSegment(rtIndex) + new IncrementalIndexSegment(rtIndex, null) }, { new QueryableIndexSegment(null, mMappedTestIndex) diff --git a/processing/src/test/java/io/druid/segment/filter/SpatialFilterTest.java b/processing/src/test/java/io/druid/segment/filter/SpatialFilterTest.java index b59d4a3d59f..d342c12c577 100644 --- a/processing/src/test/java/io/druid/segment/filter/SpatialFilterTest.java +++ b/processing/src/test/java/io/druid/segment/filter/SpatialFilterTest.java @@ -83,7 +83,7 @@ public class SpatialFilterTest return Arrays.asList( new Object[][]{ { - new IncrementalIndexSegment(rtIndex) + new IncrementalIndexSegment(rtIndex, null) }, { new QueryableIndexSegment(null, mMappedTestIndex) diff --git a/publications/whitepaper/vldb.cls b/publications/whitepaper/vldb.cls deleted file mode 100644 index 7ff7547658b..00000000000 --- a/publications/whitepaper/vldb.cls +++ /dev/null @@ -1,1400 +0,0 @@ -% VLDB.CLS - Version 1.8c -% -% based on: -% SIG-ALTERNATE.CLS - VERSION 1.8 -% "COMPATIBLE" WITH THE "ACM_PROC_ARTICLE-SP.CLS" V2.7SP -% Gerald Murray July 26th. 2005 -% -% ---- Start of 'updates' ---- -% -% Allowance made to switch default fonts between those systems using -% METAFONT and those using 'Type 1' or 'Truetype' fonts. -% See LINE NUMBER 252 for details. -% Also provided for enumerated/annotated Corollaries 'surrounded' by -% enumerated Theorems (line 841). 
-% Gerry November 11th. 1999 -% -% Made the Permission Statement / Conference Info / Copyright Info -% 'user definable' in the source .tex file OR automatic if -% not specified. -% -% Georgia fixed bug in sub-sub-section numbering in paragraphs (July 29th. 2002) -% JS/GM fix to vertical spacing before Proofs (July 30th. 2002) -% -% Footnotes inside table cells using \minipage (Oct. 2002) -% -% Enforced 'US letter' page size and updated PVLDB copyright (UR, Oct 2010) -% -% Made the file template-able, so vol/no information can be dynmacally generated. (AhmetSacan, Sep 2012) -% -% Added widow line penalties. (AhmetSacan, Sep 2012) -% ---- End of 'updates' ---- -% -\def\fileversion{v1.8c} % for VLDB's and ACM's tracking purposes -\def\filedate{July 26, 2005} % Gerry Murray's tracking data -\def\docdate {Tuesday 26th. July 2005} % Gerry Murray (with deltas to doc} -\usepackage[pdftex,letterpaper]{geometry} % fixed to US letter size for output (since version 1.8c - UR 2010) -\usepackage{epsfig} -\usepackage{amssymb} -\usepackage{amsmath} -\usepackage{amsfonts} -% -% VLDB DOCUMENT STYLE -% based on ACM's sig-alternate.cls, modified 31 Oct 2010 for PVLDB, -% with VLDB-specific copyright notice and fixed US letter paper size. -% -% SIG-ALTERNATE DOCUMENT STYLE -% G.K.M. Tobin August-October 1999 -% adapted from ARTICLE document style by Ken Traub, Olin Shivers -% also using elements of esub2acm.cls -% HEAVILY MODIFIED, SUBSEQUENTLY, BY GERRY MURRAY 2000 -% ARTICLE DOCUMENT STYLE -- Released 16 March 1988 -% for LaTeX version 2.09 -% Copyright (C) 1988 by Leslie Lamport -% -% -%%% sig-alternate.cls is an 'ALTERNATE' document style for producing -%%% two-column camera-ready pages for ACM conferences. -%%% THIS FILE DOES NOT STRICTLY ADHERE TO THE SIGS (BOARD-ENDORSED) -%%% PROCEEDINGS STYLE. It has been designed to produce a 'tighter' -%%% paper in response to concerns over page budgets. -%%% The main features of this style are: -%%% -%%% 1) Two columns. -%%% 2) Side and top margins of 4.5pc, bottom margin of 6pc, column gutter of -%%% 2pc, hence columns are 20pc wide and 55.5pc tall. (6pc =3D 1in, approx) -%%% 3) First page has title information, and an extra 6pc of space at the -%%% bottom of the first column for the ACM copyright notice. -%%% 4) Text is 9pt on 10pt baselines; titles (except main) are 9pt bold. -%%% 5) US letter paper size (since v1.8c) -%%% -%%% -%%% There are a few restrictions you must observe: -%%% -%%% 1) You cannot change the font size; ACM wants you to use 9pt. -%%% 3) You must start your paper with the \maketitle command. Prior to the -%%% \maketitle you must have \title and \author commands. If you have a -%%% \date command it will be ignored; no date appears on the paper, since -%%% the proceedings will have a date on the front cover. -%%% 4) Marginal paragraphs, tables of contents, lists of figures and tables, -%%% and page headings are all forbidden. -%%% 5) The `figure' environment will produce a figure one column wide; if you -%%% want one that is two columns wide, use `figure*'. -%%% -% -%%% Copyright Space: -%%% This style automatically reserves 1" blank space at the bottom of page 1/ -%%% column 1. This space can optionally be filled with some text using the -%%% \toappear{...} command. If used, this command must be BEFORE the \maketitle -%%% command. If this command is defined AND [preprint] is on, then the -%%% space is filled with the {...} text (at the bottom); otherwise, it is -%%% blank. 
If you use \toappearbox{...} instead of \toappear{...} then a -%%% box will be drawn around the text (if [preprint] is on). -%%% -%%% A typical usage looks like this: -%%% \toappear{To appear in the Ninth AES Conference on Medievil Lithuanian -%%% Embalming Technique, June 1991, Alfaretta, Georgia.} -%%% This will be included in the preprint, and left out of the conference -%%% version. -%%% -%%% WARNING: -%%% Some dvi-ps converters heuristically allow chars to drift from their -%%% true positions a few pixels. This may be noticeable with the 9pt sans-serif -%%% bold font used for section headers. -%%% You may turn this hackery off via the -e option: -%%% dvips -e 0 foo.dvi >foo.ps -%%% -\typeout{Document Class 'vldb' - based on 'sig-alternate' <26th. July '05>. Modified by G.K.M. Tobin/Gerry Murray} -\typeout{Based in part upon document Style `acmconf' <22 May 89>. Hacked 4/91 by} -\typeout{shivers@cs.cmu.edu, 4/93 by theobald@cs.mcgill.ca} -\typeout{Excerpts were taken from (Journal Style) 'esub2acm.cls'.} -\typeout{****** Bugs/comments/suggestions/technicalities to Gerry Murray -- murray@hq.acm.org ******} -\typeout{Questions on the style, SIGS policies, etc. to Adrienne Griscti griscti@acm.org} -\oddsidemargin 4.5pc -\evensidemargin 4.5pc -\advance\oddsidemargin by -1in % Correct for LaTeX gratuitousness -\advance\evensidemargin by -1in % Correct for LaTeX gratuitousness -\marginparwidth 0pt % Margin pars are not allowed. -\marginparsep 11pt % Horizontal space between outer margin and - % marginal note - - % Top of page: -\topmargin 4.5pc % Nominal distance from top of page to top of - % box containing running head. -\advance\topmargin by -1in % Correct for LaTeX gratuitousness -\headheight 0pt % Height of box containing running head. -\headsep 0pt % Space between running head and text. - % Bottom of page: -\footskip 30pt % Distance from baseline of box containing foot - % to baseline of last line of text. -\@ifundefined{footheight}{\newdimen\footheight}{}% this is for LaTeX2e -\footheight 12pt % Height of box containing running foot. - -%% Must redefine the top margin so there's room for headers and -%% page numbers if you are using the preprint option. Footers -%% are OK as is. Olin. -\advance\topmargin by -37pt % Leave 37pt above text for headers -\headheight 12pt % Height of box containing running head. -\headsep 25pt % Space between running head and text. - -\textheight 666pt % 9 1/4 column height -\textwidth 42pc % Width of text line. - % For two-column mode: -\columnsep 2pc % Space between columns -\columnseprule 0pt % Width of rule between columns. -\hfuzz 1pt % Allow some variation in column width, otherwise it's - % too hard to typeset in narrow columns. - -\footnotesep 5.6pt % Height of strut placed at the beginning of every - % footnote =3D height of normal \footnotesize strut, - % so no extra space between footnotes. - -\skip\footins 8.1pt plus 4pt minus 2pt % Space between last line of text and - % top of first footnote. -\floatsep 11pt plus 2pt minus 2pt % Space between adjacent floats moved - % to top or bottom of text page. -\textfloatsep 18pt plus 2pt minus 4pt % Space between main text and floats - % at top or bottom of page. -\intextsep 11pt plus 2pt minus 2pt % Space between in-text figures and - % text. -\@ifundefined{@maxsep}{\newdimen\@maxsep}{}% this is for LaTeX2e -\@maxsep 18pt % The maximum of \floatsep, - % \textfloatsep and \intextsep (minus - % the stretch and shrink). 
-\dblfloatsep 11pt plus 2pt minus 2pt % Same as \floatsep for double-column - % figures in two-column mode. -\dbltextfloatsep 18pt plus 2pt minus 4pt% \textfloatsep for double-column - % floats. -\@ifundefined{@dblmaxsep}{\newdimen\@dblmaxsep}{}% this is for LaTeX2e -\@dblmaxsep 18pt % The maximum of \dblfloatsep and - % \dbltexfloatsep. -\@fptop 0pt plus 1fil % Stretch at top of float page/column. (Must be - % 0pt plus ...) -\@fpsep 8pt plus 2fil % Space between floats on float page/column. -\@fpbot 0pt plus 1fil % Stretch at bottom of float page/column. (Must be - % 0pt plus ... ) -\@dblfptop 0pt plus 1fil % Stretch at top of float page. (Must be 0pt plus ...) -\@dblfpsep 8pt plus 2fil % Space between floats on float page. -\@dblfpbot 0pt plus 1fil % Stretch at bottom of float page. (Must be - % 0pt plus ... ) -\marginparpush 5pt % Minimum vertical separation between two marginal - % notes. - -\parskip 0pt plus 1pt % Extra vertical space between paragraphs. -\parindent 9pt % GM July 2000 / was 0pt - width of paragraph indentation. -\partopsep 2pt plus 1pt minus 1pt% Extra vertical space, in addition to - % \parskip and \topsep, added when user - % leaves blank line before environment. - -\@lowpenalty 51 % Produced by \nopagebreak[1] or \nolinebreak[1] -\@medpenalty 151 % Produced by \nopagebreak[2] or \nolinebreak[2] -\@highpenalty 301 % Produced by \nopagebreak[3] or \nolinebreak[3] - -\@beginparpenalty -\@lowpenalty % Before a list or paragraph environment. -\@endparpenalty -\@lowpenalty % After a list or paragraph environment. -\@itempenalty -\@lowpenalty % Between list items. - -% Try to prevent widow lines. -\clubpenalty=9996 -\widowpenalty=9999 -\brokenpenalty=4991 -\predisplaypenalty=10000 -\postdisplaypenalty=1549 -\displaywidowpenalty=1602 - -\@namedef{ds@10pt}{\@latexerr{The `10pt' option is not allowed in the `acmconf' - document style.}\@eha} -\@namedef{ds@11pt}{\@latexerr{The `11pt' option is not allowed in the `acmconf' - document style.}\@eha} -\@namedef{ds@12pt}{\@latexerr{The `12pt' option is not allowed in the `acmconf' - document style.}\@eha} - -\@options - -\lineskip 2pt % \lineskip is 1pt for all font sizes. 
-\normallineskip 2pt -\def\baselinestretch{1} - -\abovedisplayskip 9pt plus2pt minus4.5pt% -\belowdisplayskip \abovedisplayskip -\abovedisplayshortskip \z@ plus3pt% -\belowdisplayshortskip 5.4pt plus3pt minus3pt% -\let\@listi\@listI % Setting of \@listi added 9 Jun 87 - -\def\small{\@setsize\small{9pt}\viiipt\@viiipt -\abovedisplayskip 7.6pt plus 3pt minus 4pt% -\belowdisplayskip \abovedisplayskip -\abovedisplayshortskip \z@ plus2pt% -\belowdisplayshortskip 3.6pt plus2pt minus 2pt -\def\@listi{\leftmargin\leftmargini %% Added 22 Dec 87 -\topsep 4pt plus 2pt minus 2pt\parsep 2pt plus 1pt minus 1pt -\itemsep \parsep}} - -\def\footnotesize{\@setsize\footnotesize{9pt}\ixpt\@ixpt -\abovedisplayskip 6.4pt plus 2pt minus 4pt% -\belowdisplayskip \abovedisplayskip -\abovedisplayshortskip \z@ plus 1pt% -\belowdisplayshortskip 2.7pt plus 1pt minus 2pt -\def\@listi{\leftmargin\leftmargini %% Added 22 Dec 87 -\topsep 3pt plus 1pt minus 1pt\parsep 2pt plus 1pt minus 1pt -\itemsep \parsep}} - -\newcount\aucount -\newcount\originalaucount -\newdimen\auwidth -\auwidth=\textwidth -\newdimen\auskip -\newcount\auskipcount -\newdimen\auskip -\global\auskip=1pc -\newdimen\allauboxes -\allauboxes=\auwidth -\newtoks\addauthors -\newcount\addauflag -\global\addauflag=0 %Haven't shown additional authors yet - -\newtoks\subtitletext -\gdef\subtitle#1{\subtitletext={#1}} - -\gdef\additionalauthors#1{\addauthors={#1}} - -\gdef\numberofauthors#1{\global\aucount=#1 -\ifnum\aucount>3\global\originalaucount=\aucount \global\aucount=3\fi %g} -\global\auskipcount=\aucount\global\advance\auskipcount by 1 -\global\multiply\auskipcount by 2 -\global\multiply\auskip by \auskipcount -\global\advance\auwidth by -\auskip -\global\divide\auwidth by \aucount} - -% \and was modified to count the number of authors. GKMT 12 Aug 1999 -\def\alignauthor{% % \begin{tabular} -\end{tabular}% - \begin{tabular}[t]{p{\auwidth}}\centering}% - -% *** NOTE *** NOTE *** NOTE *** NOTE *** -% If you have 'font problems' then you may need -% to change these, e.g. 'arialb' instead of "arialbd". -% Gerry Murray 11/11/1999 -% *** OR ** comment out block A and activate block B or vice versa. 
-% ********************************************** -% -% -- Start of block A -- (Type 1 or Truetype fonts) -%\newfont{\secfnt}{timesbd at 12pt} % was timenrb originally - now is timesbd -%\newfont{\secit}{timesbi at 12pt} %13 Jan 00 gkmt -%\newfont{\subsecfnt}{timesi at 11pt} % was timenrri originally - now is timesi -%\newfont{\subsecit}{timesbi at 11pt} % 13 Jan 00 gkmt -- was times changed to timesbi gm 2/4/2000 -% % because "normal" is italic, "italic" is Roman -%\newfont{\ttlfnt}{arialbd at 18pt} % was arialb originally - now is arialbd -%\newfont{\ttlit}{arialbi at 18pt} % 13 Jan 00 gkmt -%\newfont{\subttlfnt}{arial at 14pt} % was arialr originally - now is arial -%\newfont{\subttlit}{ariali at 14pt} % 13 Jan 00 gkmt -%\newfont{\subttlbf}{arialbd at 14pt} % 13 Jan 00 gkmt -%\newfont{\aufnt}{arial at 12pt} % was arialr originally - now is arial -%\newfont{\auit}{ariali at 12pt} % 13 Jan 00 gkmt -%\newfont{\affaddr}{arial at 10pt} % was arialr originally - now is arial -%\newfont{\affaddrit}{ariali at 10pt} %13 Jan 00 gkmt -%\newfont{\eaddfnt}{arial at 12pt} % was arialr originally - now is arial -%\newfont{\ixpt}{times at 9pt} % was timenrr originally - now is times -%\newfont{\confname}{timesi at 8pt} % was timenrri - now is timesi -%\newfont{\crnotice}{times at 8pt} % was timenrr originally - now is times -%\newfont{\ninept}{times at 9pt} % was timenrr originally - now is times - -% ********************************************* -% -- End of block A -- -% -% -% -- Start of block B -- METAFONT -% +++++++++++++++++++++++++++++++++++++++++++++ -% Next (default) block for those using Metafont -% Gerry Murray 11/11/1999 -% *** THIS BLOCK FOR THOSE USING METAFONT ***** -% ********************************************* -\newfont{\secfnt}{ptmb at 12pt} -\newfont{\secit}{ptmbi at 12pt} %13 Jan 00 gkmt -\newfont{\subsecfnt}{ptmri at 11pt} -\newfont{\subsecit}{ptmbi at 11pt} % 13 Jan 00 gkmt -- was ptmr changed to ptmbi gm 2/4/2000 - % because "normal" is italic, "italic" is Roman -\newfont{\ttlfnt}{phvb at 18pt} -\newfont{\ttlit}{phvbo at 18pt} % GM 2/4/2000 -\newfont{\subttlfnt}{phvr at 14pt} -\newfont{\subttlit}{phvro at 14pt} % GM 2/4/2000 -\newfont{\subttlbf}{phvb at 14pt} % 13 Jan 00 gkmt -\newfont{\aufnt}{phvr at 12pt} -\newfont{\auit}{phvro at 12pt} % GM 2/4/2000 -\newfont{\affaddr}{phvr at 10pt} -\newfont{\affaddrit}{phvro at 10pt} % GM 2/4/2000 -\newfont{\eaddfnt}{phvr at 12pt} -\newfont{\ixpt}{ptmr at 9pt} -\newfont{\confname}{ptmri at 8pt} -\newfont{\crnotice}{ptmr at 8pt} -\newfont{\ninept}{ptmr at 9pt} -% +++++++++++++++++++++++++++++++++++++++++++++ -% -- End of block B -- - - -\def\email#1{{{\eaddfnt{\vskip 4pt#1}}}} - -\def\addauthorsection{\ifnum\originalaucount>3 - \section{Additional Authors}\the\addauthors - \fi} - -\newcount\savesection -\newcount\sectioncntr -\global\sectioncntr=1 - -\setcounter{secnumdepth}{3} - -\def\appendix{\par -\section*{APPENDIX} -\setcounter{section}{0} - \setcounter{subsection}{0} - \def\thesection{\Alph{section}} } - -\leftmargini 22.5pt -\leftmarginii 19.8pt % > \labelsep + width of '(m)' -\leftmarginiii 16.8pt % > \labelsep + width of 'vii.' -\leftmarginiv 15.3pt % > \labelsep + width of 'M.' 
-\leftmarginv 9pt -\leftmarginvi 9pt - -\leftmargin\leftmargini -\labelsep 4.5pt -\labelwidth\leftmargini\advance\labelwidth-\labelsep - -\def\@listI{\leftmargin\leftmargini \parsep 3.6pt plus 2pt minus 1pt% -\topsep 7.2pt plus 2pt minus 4pt% -\itemsep 3.6pt plus 2pt minus 1pt} - -\let\@listi\@listI -\@listi - -\def\@listii{\leftmargin\leftmarginii - \labelwidth\leftmarginii\advance\labelwidth-\labelsep - \topsep 3.6pt plus 2pt minus 1pt - \parsep 1.8pt plus 0.9pt minus 0.9pt - \itemsep \parsep} - -\def\@listiii{\leftmargin\leftmarginiii - \labelwidth\leftmarginiii\advance\labelwidth-\labelsep - \topsep 1.8pt plus 0.9pt minus 0.9pt - \parsep \z@ \partopsep 1pt plus 0pt minus 1pt - \itemsep \topsep} - -\def\@listiv{\leftmargin\leftmarginiv - \labelwidth\leftmarginiv\advance\labelwidth-\labelsep} - -\def\@listv{\leftmargin\leftmarginv - \labelwidth\leftmarginv\advance\labelwidth-\labelsep} - -\def\@listvi{\leftmargin\leftmarginvi - \labelwidth\leftmarginvi\advance\labelwidth-\labelsep} - -\def\labelenumi{\theenumi.} -\def\theenumi{\arabic{enumi}} - -\def\labelenumii{(\theenumii)} -\def\theenumii{\alph{enumii}} -\def\p@enumii{\theenumi} - -\def\labelenumiii{\theenumiii.} -\def\theenumiii{\roman{enumiii}} -\def\p@enumiii{\theenumi(\theenumii)} - -\def\labelenumiv{\theenumiv.} -\def\theenumiv{\Alph{enumiv}} -\def\p@enumiv{\p@enumiii\theenumiii} - -\def\labelitemi{$\bullet$} -\def\labelitemii{\bf --} -\def\labelitemiii{$\ast$} -\def\labelitemiv{$\cdot$} - -\def\verse{\let\\=\@centercr - \list{}{\itemsep\z@ \itemindent -1.5em\listparindent \itemindent - \rightmargin\leftmargin\advance\leftmargin 1.5em}\item[]} -\let\endverse\endlist - -\def\quotation{\list{}{\listparindent 1.5em - \itemindent\listparindent - \rightmargin\leftmargin \parsep 0pt plus 1pt}\item[]} -\let\endquotation=\endlist - -\def\quote{\list{}{\rightmargin\leftmargin}\item[]} -\let\endquote=\endlist - -\def\descriptionlabel#1{\hspace\labelsep \bf #1} -\def\description{\list{}{\labelwidth\z@ \itemindent-\leftmargin - \let\makelabel\descriptionlabel}} - -\let\enddescription\endlist - -\def\theequation{\arabic{equation}} - -\arraycolsep 4.5pt % Half the space between columns in an array environment. -\tabcolsep 5.4pt % Half the space between columns in a tabular environment. -\arrayrulewidth .4pt % Width of rules in array and tabular environment. -\doublerulesep 1.8pt % Space between adjacent rules in array or tabular env. - -\tabbingsep \labelsep % Space used by the \' command. (See LaTeX manual.) - -\skip\@mpfootins =\skip\footins - -\fboxsep =2.7pt % Space left between box and text by \fbox and \framebox. -\fboxrule =.4pt % Width of rules in box made by \fbox and \framebox. - -\def\thepart{\Roman{part}} % Roman numeral part numbers. 
-\def\thesection {\arabic{section}} -\def\thesubsection {\thesection.\arabic{subsection}} -%\def\thesubsubsection {\thesubsection.\arabic{subsubsection}} % GM 7/30/2002 -%\def\theparagraph {\thesubsubsection.\arabic{paragraph}} % GM 7/30/2002 -\def\thesubparagraph {\theparagraph.\arabic{subparagraph}} - -\def\@pnumwidth{1.55em} -\def\@tocrmarg {2.55em} -\def\@dotsep{4.5} -\setcounter{tocdepth}{3} - -\def\tableofcontents{\@latexerr{\tableofcontents: Tables of contents are not - allowed in the `acmconf' document style.}\@eha} - -\def\l@part#1#2{\addpenalty{\@secpenalty} - \addvspace{2.25em plus 1pt} % space above part line - \begingroup - \@tempdima 3em % width of box holding part number, used by - \parindent \z@ \rightskip \@pnumwidth %% \numberline - \parfillskip -\@pnumwidth - {\large \bf % set line in \large boldface - \leavevmode % TeX command to enter horizontal mode. - #1\hfil \hbox to\@pnumwidth{\hss #2}}\par - \nobreak % Never break after part entry - \endgroup} - -\def\l@section#1#2{\addpenalty{\@secpenalty} % good place for page break - \addvspace{1.0em plus 1pt} % space above toc entry - \@tempdima 1.5em % width of box holding section number - \begingroup - \parindent \z@ \rightskip \@pnumwidth - \parfillskip -\@pnumwidth - \bf % Boldface. - \leavevmode % TeX command to enter horizontal mode. - \advance\leftskip\@tempdima %% added 5 Feb 88 to conform to - \hskip -\leftskip %% 25 Jan 88 change to \numberline - #1\nobreak\hfil \nobreak\hbox to\@pnumwidth{\hss #2}\par - \endgroup} - - -\def\l@subsection{\@dottedtocline{2}{1.5em}{2.3em}} -\def\l@subsubsection{\@dottedtocline{3}{3.8em}{3.2em}} -\def\l@paragraph{\@dottedtocline{4}{7.0em}{4.1em}} -\def\l@subparagraph{\@dottedtocline{5}{10em}{5em}} - -\def\listoffigures{\@latexerr{\listoffigures: Lists of figures are not - allowed in the `acmconf' document style.}\@eha} - -\def\l@figure{\@dottedtocline{1}{1.5em}{2.3em}} - -\def\listoftables{\@latexerr{\listoftables: Lists of tables are not - allowed in the `acmconf' document style.}\@eha} -\let\l@table\l@figure - -\def\footnoterule{\kern-3\p@ - \hrule width .4\columnwidth - \kern 2.6\p@} % The \hrule has default height of .4pt . -% ------ -\long\def\@makefntext#1{\noindent -%\hbox to .5em{\hss$^{\@thefnmark}$}#1} % original -\hbox to .5em{\hss\textsuperscript{\@thefnmark}}#1} % C. Clifton / GM Oct. 2nd. 2002 -% ------- - -\long\def\@maketntext#1{\noindent -#1} - -\long\def\@maketitlenotetext#1#2{\noindent - \hbox to 1.8em{\hss$^{#1}$}#2} - -\setcounter{topnumber}{2} -\def\topfraction{.7} -\setcounter{bottomnumber}{1} -\def\bottomfraction{.3} -\setcounter{totalnumber}{3} -\def\textfraction{.2} -\def\floatpagefraction{.5} -\setcounter{dbltopnumber}{2} -\def\dbltopfraction{.7} -\def\dblfloatpagefraction{.5} - -% -\long\def\@makecaption#1#2{ - \vskip \baselineskip - \setbox\@tempboxa\hbox{\textbf{#1: #2}} - \ifdim \wd\@tempboxa >\hsize % IF longer than one line: - \textbf{#1: #2}\par % THEN set as ordinary paragraph. - \else % ELSE center. - \hbox to\hsize{\hfil\box\@tempboxa\hfil}\par - \fi} - -% - -\long\def\@makecaption#1#2{ - \vskip 10pt - \setbox\@tempboxa\hbox{\textbf{#1: #2}} - \ifdim \wd\@tempboxa >\hsize % IF longer than one line: - \textbf{#1: #2}\par % THEN set as ordinary paragraph. - \else % ELSE center. 
- \hbox to\hsize{\hfil\box\@tempboxa\hfil} - \fi} - -\@ifundefined{figure}{\newcounter {figure}} % this is for LaTeX2e - -\def\fps@figure{tbp} -\def\ftype@figure{1} -\def\ext@figure{lof} -\def\fnum@figure{Figure \thefigure} -\def\figure{\@float{figure}} -\let\endfigure\end@float -\@namedef{figure*}{\@dblfloat{figure}} -\@namedef{endfigure*}{\end@dblfloat} - -\@ifundefined{table}{\newcounter {table}} % this is for LaTeX2e - -\def\fps@table{tbp} -\def\ftype@table{2} -\def\ext@table{lot} -\def\fnum@table{Table \thetable} -\def\table{\@float{table}} -\let\endtable\end@float -\@namedef{table*}{\@dblfloat{table}} -\@namedef{endtable*}{\end@dblfloat} - -\newtoks\titleboxnotes -\newcount\titleboxnoteflag - -\def\maketitle{\par - \begingroup - \def\thefootnote{\fnsymbol{footnote}} - \def\@makefnmark{\hbox - to 0pt{$^{\@thefnmark}$\hss}} - \twocolumn[\@maketitle] -\@thanks - \endgroup - \setcounter{footnote}{0} - \let\maketitle\relax - \let\@maketitle\relax - \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\gdef\@subtitle{}\let\thanks\relax - \@copyrightspace} - -%% CHANGES ON NEXT LINES -\newif\if@ll % to record which version of LaTeX is in use - -\expandafter\ifx\csname LaTeXe\endcsname\relax % LaTeX2.09 is used -\else% LaTeX2e is used, so set ll to true -\global\@lltrue -\fi - -\if@ll - \NeedsTeXFormat{LaTeX2e} - \ProvidesClass{vldb} [2010/10/19 - V1.8b - based on sig-alternate V1.8 - based on acmproc.cls V1.3 ] - \RequirePackage{latexsym}% QUERY: are these two really needed? - \let\dooptions\ProcessOptions -\else - \let\dooptions\@options -\fi -%% END CHANGES - -\def\@height{height} -\def\@width{width} -\def\@minus{minus} -\def\@plus{plus} -\def\hb@xt@{\hbox to} -\newif\if@faircopy -\@faircopyfalse -\def\ds@faircopy{\@faircopytrue} - -\def\ds@preprint{\@faircopyfalse} - -\@twosidetrue -\@mparswitchtrue -\def\ds@draft{\overfullrule 5\p@} -%% CHANGE ON NEXT LINE -\dooptions - -\lineskip \p@ -\normallineskip \p@ -\def\baselinestretch{1} -\def\@ptsize{0} %needed for amssymbols.sty - -%% CHANGES ON NEXT LINES -\if@ll% allow use of old-style font change commands in LaTeX2e -\@maxdepth\maxdepth -% -\DeclareOldFontCommand{\rm}{\ninept\rmfamily}{\mathrm} -\DeclareOldFontCommand{\sf}{\normalfont\sffamily}{\mathsf} -\DeclareOldFontCommand{\tt}{\normalfont\ttfamily}{\mathtt} -\DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf} -\DeclareOldFontCommand{\it}{\normalfont\itshape}{\mathit} -\DeclareOldFontCommand{\sl}{\normalfont\slshape}{\@nomath\sl} -\DeclareOldFontCommand{\sc}{\normalfont\scshape}{\@nomath\sc} -\DeclareRobustCommand*{\cal}{\@fontswitch{\relax}{\mathcal}} -\DeclareRobustCommand*{\mit}{\@fontswitch{\relax}{\mathnormal}} -\fi -% -\if@ll - \renewcommand{\rmdefault}{cmr} % was 'ttm' -% Note! I have also found 'mvr' to work ESPECIALLY well. -% Gerry - October 1999 -% You may need to change your LV1times.fd file so that sc is -% mapped to cmcsc - -for smallcaps -- that is if you decide -% to change {cmr} to {times} above. 
(Not recommended) - \renewcommand{\@ptsize}{} - \renewcommand{\normalsize}{% - \@setfontsize\normalsize\@ixpt{10.5\p@}%\ninept% - \abovedisplayskip 6\p@ \@plus2\p@ \@minus\p@ - \belowdisplayskip \abovedisplayskip - \abovedisplayshortskip 6\p@ \@minus 3\p@ - \belowdisplayshortskip 6\p@ \@minus 3\p@ - \let\@listi\@listI - } -\else - \def\@normalsize{%changed next to 9 from 10 - \@setsize\normalsize{9\p@}\ixpt\@ixpt - \abovedisplayskip 6\p@ \@plus2\p@ \@minus\p@ - \belowdisplayskip \abovedisplayskip - \abovedisplayshortskip 6\p@ \@minus 3\p@ - \belowdisplayshortskip 6\p@ \@minus 3\p@ - \let\@listi\@listI - }% -\fi -\if@ll - \newcommand\scriptsize{\@setfontsize\scriptsize\@viipt{8\p@}} - \newcommand\tiny{\@setfontsize\tiny\@vpt{6\p@}} - \newcommand\large{\@setfontsize\large\@xiipt{14\p@}} - \newcommand\Large{\@setfontsize\Large\@xivpt{18\p@}} - \newcommand\LARGE{\@setfontsize\LARGE\@xviipt{20\p@}} - \newcommand\huge{\@setfontsize\huge\@xxpt{25\p@}} - \newcommand\Huge{\@setfontsize\Huge\@xxvpt{30\p@}} -\else - \def\scriptsize{\@setsize\scriptsize{8\p@}\viipt\@viipt} - \def\tiny{\@setsize\tiny{6\p@}\vpt\@vpt} - \def\large{\@setsize\large{14\p@}\xiipt\@xiipt} - \def\Large{\@setsize\Large{18\p@}\xivpt\@xivpt} - \def\LARGE{\@setsize\LARGE{20\p@}\xviipt\@xviipt} - \def\huge{\@setsize\huge{25\p@}\xxpt\@xxpt} - \def\Huge{\@setsize\Huge{30\p@}\xxvpt\@xxvpt} -\fi -\normalsize - -% make aubox hsize/number of authors up to 3, less gutter -% then showbox gutter showbox gutter showbox -- GKMT Aug 99 -\newbox\@acmtitlebox -\def\@maketitle{\newpage - \null - \setbox\@acmtitlebox\vbox{% -\baselineskip 20pt -\vskip 2em % Vertical space above title. - \begin{center} - {\ttlfnt \@title\par} % Title set in 18pt Helvetica (Arial) bold size. - \vskip 1.5em % Vertical space after title. -%This should be the subtitle. -{\subttlfnt \the\subtitletext\par}\vskip 1.25em%\fi - {\baselineskip 16pt\aufnt % each author set in \12 pt Arial, in a - \lineskip .5em % tabular environment - \begin{tabular}[t]{c}\@author - \end{tabular}\par} - \vskip 1.5em % Vertical space after author. 
- \end{center}} - \dimen0=\ht\@acmtitlebox - \advance\dimen0 by -12.75pc\relax % Increased space for title box -- KBT - \unvbox\@acmtitlebox - \ifdim\dimen0<0.0pt\relax\vskip-\dimen0\fi} - - -\newcount\titlenotecount -\global\titlenotecount=0 -\newtoks\tntoks -\newtoks\tntokstwo -\newtoks\tntoksthree -\newtoks\tntoksfour -\newtoks\tntoksfive - -\def\abstract{ -\ifnum\titlenotecount>0 % was =1 - \insert\footins{% - \reset@font\footnotesize - \interlinepenalty\interfootnotelinepenalty - \splittopskip\footnotesep - \splitmaxdepth \dp\strutbox \floatingpenalty \@MM - \hsize\columnwidth \@parboxrestore - \protected@edef\@currentlabel{% - }% - \color@begingroup -\ifnum\titlenotecount=1 - \@maketntext{% - \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\@finalstrut\strutbox}% -\fi -\ifnum\titlenotecount=2 - \@maketntext{% - \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\par\@finalstrut\strutbox}% -\@maketntext{% - \raisebox{4pt}{$\dagger$}\rule\z@\footnotesep\ignorespaces\the\tntokstwo\@finalstrut\strutbox}% -\fi -\ifnum\titlenotecount=3 - \@maketntext{% - \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\par\@finalstrut\strutbox}% -\@maketntext{% - \raisebox{4pt}{$\dagger$}\rule\z@\footnotesep\ignorespaces\the\tntokstwo\par\@finalstrut\strutbox}% -\@maketntext{% - \raisebox{4pt}{$\ddagger$}\rule\z@\footnotesep\ignorespaces\the\tntoksthree\@finalstrut\strutbox}% -\fi -\ifnum\titlenotecount=4 - \@maketntext{% - \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\par\@finalstrut\strutbox}% -\@maketntext{% - \raisebox{4pt}{$\dagger$}\rule\z@\footnotesep\ignorespaces\the\tntokstwo\par\@finalstrut\strutbox}% -\@maketntext{% - \raisebox{4pt}{$\ddagger$}\rule\z@\footnotesep\ignorespaces\the\tntoksthree\par\@finalstrut\strutbox}% -\@maketntext{% - \raisebox{4pt}{$\S$}\rule\z@\footnotesep\ignorespaces\the\tntoksfour\@finalstrut\strutbox}% -\fi -\ifnum\titlenotecount=5 - \@maketntext{% - \raisebox{4pt}{$\ast$}\rule\z@\footnotesep\ignorespaces\the\tntoks\par\@finalstrut\strutbox}% -\@maketntext{% - \raisebox{4pt}{$\dagger$}\rule\z@\footnotesep\ignorespaces\the\tntokstwo\par\@finalstrut\strutbox}% -\@maketntext{% - \raisebox{4pt}{$\ddagger$}\rule\z@\footnotesep\ignorespaces\the\tntoksthree\par\@finalstrut\strutbox}% -\@maketntext{% - \raisebox{4pt}{$\S$}\rule\z@\footnotesep\ignorespaces\the\tntoksfour\par\@finalstrut\strutbox}% -\@maketntext{% - \raisebox{4pt}{$\P$}\rule\z@\footnotesep\ignorespaces\the\tntoksfive\@finalstrut\strutbox}% -\fi - \color@endgroup} %g} -\fi -\setcounter{footnote}{0} -\section*{ABSTRACT}\normalsize%\ninept -} - -\def\endabstract{\if@twocolumn\else\endquotation\fi} - -\def\keywords{\if@twocolumn -\section*{Keywords} -\else \small -\quotation -\fi} - -\def\terms{\if@twocolumn -\section*{General Terms} -\else \small -\quotation -\fi} - -% -- Classification needs to be a bit smart due to optionals - Gerry/Georgia November 2nd. 
1999 -\newcount\catcount -\global\catcount=1 - -\def\category#1#2#3{% -\ifnum\catcount=1 -\section*{Categories and Subject Descriptors} -\advance\catcount by 1\else{\unskip; }\fi - \@ifnextchar [{\@category{#1}{#2}{#3}}{\@category{#1}{#2}{#3}[]}% -} - -\def\@category#1#2#3[#4]{% - \begingroup - \let\and\relax - #1 [\textbf{#2}]% - \if!#4!% - \if!#3!\else : #3\fi - \else - :\space - \if!#3!\else #3\kern\z@---\hskip\z@\fi - \textit{#4}% - \fi - \endgroup -} -% - -%%% This section (written by KBT) handles the 1" box in the lower left -%%% corner of the left column of the first page by creating a picture, -%%% and inserting the predefined string at the bottom (with a negative -%%% displacement to offset the space allocated for a non-existent -%%% caption). -%%% -\newtoks\copyrightnotice -\def\ftype@copyrightbox{8} -\def\@copyrightspace{ -\@float{copyrightbox}[b] -\begin{center} -\setlength{\unitlength}{1pc} -\begin{picture}(20,6) %Space for copyright notice -\put(0,-0.95){\crnotice{\@toappear}} -\end{picture} -\end{center} -\end@float} - -\def\@toappear{} % Default setting blank - commands below change this. -\long\def\toappear#1{\def\@toappear{\parbox[b]{20pc}{\baselineskip 9pt#1}}} -\def\toappearbox#1{\def\@toappear{\raisebox{5pt}{\framebox[20pc]{\parbox[b]{19pc}{#1}}}}} - -\newtoks\conf -\newtoks\confinfo -\def\conferenceinfo#1#2{\global\conf={#1}\global\confinfo{#2}} - - -\def\marginpar{\@latexerr{The \marginpar command is not allowed in the - `acmconf' document style.}\@eha} - -\mark{{}{}} % Initializes TeX's marks - -\def\today{\ifcase\month\or - January\or February\or March\or April\or May\or June\or - July\or August\or September\or October\or November\or December\fi - \space\number\day, \number\year} - -\def\@begintheorem#1#2{% - \parskip 0pt % GM July 2000 (for tighter spacing) - \trivlist - \item[% - \hskip 10\p@ - \hskip \labelsep - {{\sc #1}\hskip 5\p@\relax#2.}% - ] - \it -} -\def\@opargbegintheorem#1#2#3{% - \parskip 0pt % GM July 2000 (for tighter spacing) - \trivlist - \item[% - \hskip 10\p@ - \hskip \labelsep - {\sc #1\ #2\ % This mod by Gerry to enumerate corollaries - \setbox\@tempboxa\hbox{(#3)} % and bracket the 'corollary title' - \ifdim \wd\@tempboxa>\z@ % and retain the correct numbering of e.g. theorems - \hskip 5\p@\relax % if they occur 'around' said corollaries. - \box\@tempboxa % Gerry - Nov. 1999. - \fi.}% - ] - \it -} -\newif\if@qeded -\global\@qededfalse - -% -- original -%\def\proof{% -% \vspace{-\parskip} % GM July 2000 (for tighter spacing) -% \global\@qededfalse -% \@ifnextchar[{\@xproof}{\@proof}% -%} -% -- end of original - -% (JSS) Fix for vertical spacing bug - Gerry Murray July 30th. 
2002 -\def\proof{% -\vspace{-\lastskip}\vspace{-\parsep}\penalty-51% -\global\@qededfalse -\@ifnextchar[{\@xproof}{\@proof}% -} - -\def\endproof{% - \if@qeded\else\qed\fi - \endtrivlist -} -\def\@proof{% - \trivlist - \item[% - \hskip 10\p@ - \hskip \labelsep - {\sc Proof.}% - ] - \ignorespaces -} -\def\@xproof[#1]{% - \trivlist - \item[\hskip 10\p@\hskip \labelsep{\sc Proof #1.}]% - \ignorespaces -} -\def\qed{% - \unskip - \kern 10\p@ - \begingroup - \unitlength\p@ - \linethickness{.4\p@}% - \framebox(6,6){}% - \endgroup - \global\@qededtrue -} - -\def\newdef#1#2{% - \expandafter\@ifdefinable\csname #1\endcsname - {\@definecounter{#1}% - \expandafter\xdef\csname the#1\endcsname{\@thmcounter{#1}}% - \global\@namedef{#1}{\@defthm{#1}{#2}}% - \global\@namedef{end#1}{\@endtheorem}% - }% -} -\def\@defthm#1#2{% - \refstepcounter{#1}% - \@ifnextchar[{\@ydefthm{#1}{#2}}{\@xdefthm{#1}{#2}}% -} -\def\@xdefthm#1#2{% - \@begindef{#2}{\csname the#1\endcsname}% - \ignorespaces -} -\def\@ydefthm#1#2[#3]{% - \trivlist - \item[% - \hskip 10\p@ - \hskip \labelsep - {\it #2% - \savebox\@tempboxa{#3}% - \ifdim \wd\@tempboxa>\z@ - \ \box\@tempboxa - \fi.% - }]% - \ignorespaces -} -\def\@begindef#1#2{% - \trivlist - \item[% - \hskip 10\p@ - \hskip \labelsep - {\it #1\ \rm #2.}% - ]% -} -\def\theequation{\arabic{equation}} - -\newcounter{part} -\newcounter{section} -\newcounter{subsection}[section] -\newcounter{subsubsection}[subsection] -\newcounter{paragraph}[subsubsection] -\def\thepart{\Roman{part}} -\def\thesection{\arabic{section}} -\def\thesubsection{\thesection.\arabic{subsection}} -\def\thesubsubsection{\thesubsection.\arabic{subsubsection}} %removed \subsecfnt 29 July 2002 gkmt -\def\theparagraph{\thesubsubsection.\arabic{paragraph}} %removed \subsecfnt 29 July 2002 gkmt -\newif\if@uchead -\@ucheadfalse - -%% CHANGES: NEW NOTE -%% NOTE: OK to use old-style font commands below, since they were -%% suitably redefined for LaTeX2e -%% END CHANGES -\setcounter{secnumdepth}{3} -\def\part{% - \@startsection{part}{9}{\z@}{-10\p@ \@plus -4\p@ \@minus -2\p@} - {4\p@}{\normalsize\@ucheadtrue}% -} -\def\section{% - \@startsection{section}{1}{\z@}{-10\p@ \@plus -4\p@ \@minus -2\p@}% GM - {4\p@}{\baselineskip 14pt\secfnt\@ucheadtrue}% -} - -\def\subsection{% - \@startsection{subsection}{2}{\z@}{-8\p@ \@plus -2\p@ \@minus -\p@} - {4\p@}{\secfnt}% -} -\def\subsubsection{% - \@startsection{subsubsection}{3}{\z@}{-8\p@ \@plus -2\p@ \@minus -\p@}% - {4\p@}{\subsecfnt}% -} -\def\paragraph{% - \vskip 12pt\@startsection{paragraph}{3}{\z@}{6\p@ \@plus \p@}% - {-5\p@}{\subsecfnt}% -} -\let\@period=. 
-\def\@startsection#1#2#3#4#5#6{% - \if@noskipsec %gkmt, 11 aug 99 - \global\let\@period\@empty - \leavevmode - \global\let\@period.% - \fi - \par % - \@tempskipa #4\relax - \@afterindenttrue - \ifdim \@tempskipa <\z@ - \@tempskipa -\@tempskipa - \@afterindentfalse - \fi - \if@nobreak - \everypar{}% - \else - \addpenalty\@secpenalty - \addvspace\@tempskipa - \fi -\parskip=0pt % GM July 2000 (non numbered) section heads - \@ifstar - {\@ssect{#3}{#4}{#5}{#6}} - {\@dblarg{\@sect{#1}{#2}{#3}{#4}{#5}{#6}}}% -} -\def\@sect#1#2#3#4#5#6[#7]#8{% - \ifnum #2>\c@secnumdepth - \let\@svsec\@empty - \else - \refstepcounter{#1}% - \edef\@svsec{% - \begingroup - %\ifnum#2>2 \noexpand\rm \fi % changed to next 29 July 2002 gkmt - \ifnum#2>2 \noexpand#6 \fi - \csname the#1\endcsname - \endgroup - \ifnum #2=1\relax .\fi - \hskip 1em - }% - \fi - \@tempskipa #5\relax - \ifdim \@tempskipa>\z@ - \begingroup - #6\relax - \@hangfrom{\hskip #3\relax\@svsec}% - \begingroup - \interlinepenalty \@M - \if@uchead - \uppercase{#8}% - \else - #8% - \fi - \par - \endgroup - \endgroup - \csname #1mark\endcsname{#7}% - \vskip -12pt %gkmt, 11 aug 99 and GM July 2000 (was -14) - numbered section head spacing -\addcontentsline{toc}{#1}{% - \ifnum #2>\c@secnumdepth \else - \protect\numberline{\csname the#1\endcsname}% - \fi - #7% - }% - \else - \def\@svsechd{% - #6% - \hskip #3\relax - \@svsec - \if@uchead - \uppercase{#8}% - \else - #8% - \fi - \csname #1mark\endcsname{#7}% - \addcontentsline{toc}{#1}{% - \ifnum #2>\c@secnumdepth \else - \protect\numberline{\csname the#1\endcsname}% - \fi - #7% - }% - }% - \fi - \@xsect{#5}\hskip 1pt - \par -} -\def\@xsect#1{% - \@tempskipa #1\relax - \ifdim \@tempskipa>\z@ - \par - \nobreak - \vskip \@tempskipa - \@afterheading - \else - \global\@nobreakfalse - \global\@noskipsectrue - \everypar{% - \if@noskipsec - \global\@noskipsecfalse - \clubpenalty\@M - \hskip -\parindent - \begingroup - \@svsechd - \@period - \endgroup - \unskip - \@tempskipa #1\relax - \hskip -\@tempskipa - \else - \clubpenalty \@clubpenalty - \everypar{}% - \fi - }% - \fi - \ignorespaces -} -\def\@trivlist{% - \@topsepadd\topsep - \if@noskipsec - \global\let\@period\@empty - \leavevmode - \global\let\@period.% - \fi - \ifvmode - \advance\@topsepadd\partopsep - \else - \unskip - \par - \fi - \if@inlabel - \@noparitemtrue - \@noparlisttrue - \else - \@noparlistfalse - \@topsep\@topsepadd - \fi - \advance\@topsep \parskip - \leftskip\z@skip - \rightskip\@rightskip - \parfillskip\@flushglue - \@setpar{\if@newlist\else{\@@par}\fi} - \global\@newlisttrue - \@outerparskip\parskip -} - -%%% Actually, 'abbrev' works just fine as the default -%%% Bibliography style. 
- -\typeout{Using 'Abbrev' bibliography style} -\newcommand\bibyear[2]{% - \unskip\quad\ignorespaces#1\unskip - \if#2..\quad \else \quad#2 \fi -} -\newcommand{\bibemph}[1]{{\em#1}} -\newcommand{\bibemphic}[1]{{\em#1\/}} -\newcommand{\bibsc}[1]{{\sc#1}} -\def\@normalcite{% - \def\@cite##1##2{[##1\if@tempswa , ##2\fi]}% -} -\def\@citeNB{% - \def\@cite##1##2{##1\if@tempswa , ##2\fi}% -} -\def\@citeRB{% - \def\@cite##1##2{##1\if@tempswa , ##2\fi]}% -} -\def\start@cite#1#2{% - \edef\citeauthoryear##1##2##3{% - ###1% - \ifnum#2=\z@ \else\ ###2\fi - }% - \ifnum#1=\thr@@ - \let\@@cite\@citeyear - \else - \let\@@cite\@citenormal - \fi - \@ifstar{\@citeNB\@@cite}{\@normalcite\@@cite}% -} -\def\cite{\start@cite23} -\def\citeNP{\cite*} -\def\citeA{\start@cite10} -\def\citeANP{\citeA*} -\def\shortcite{\start@cite23} -\def\shortciteNP{\shortcite*} -\def\shortciteA{\start@cite20} -\def\shortciteANP{\shortciteA*} -\def\citeyear{\start@cite30} -\def\citeyearNP{\citeyear*} -\def\citeN{% - \@citeRB - \def\citeauthoryear##1##2##3{##1\ [##3% - \def\reserved@a{##1}% - \def\citeauthoryear####1####2####3{% - \def\reserved@b{####1}% - \ifx\reserved@a\reserved@b - ####3% - \else - \errmessage{Package acmart Error: author mismatch - in \string\citeN^^J^^J% - See the acmart package documentation for explanation}% - \fi - }% - }% - \@ifstar\@citeyear\@citeyear -} -\def\shortciteN{% - \@citeRB - \def\citeauthoryear##1##2##3{##2\ [##3% - \def\reserved@a{##2}% - \def\citeauthoryear####1####2####3{% - \def\reserved@b{####2}% - \ifx\reserved@a\reserved@b - ####3% - \else - \errmessage{Package acmart Error: author mismatch - in \string\shortciteN^^J^^J% - See the acmart package documentation for explanation}% - \fi - }% - }% - \@ifstar\@citeyear\@citeyear % GM July 2000 -} -\def\@citenormal{% - \@ifnextchar [{\@tempswatrue\@citex;} - {\@tempswafalse\@citex,[]}% % GM July 2000 -} -\def\@citeyear{% - \@ifnextchar [{\@tempswatrue\@citex,}% - {\@tempswafalse\@citex,[]}% -} -\def\@citex#1[#2]#3{% - \let\@citea\@empty - \@cite{% - \@for\@citeb:=#3\do{% - \@citea - \def\@citea{#1 }% - \edef\@citeb{\expandafter\@iden\@citeb}% - \if@filesw - \immediate\write\@auxout{\string\citation{\@citeb}}% - \fi - \@ifundefined{b@\@citeb}{% - {\bf ?}% - \@warning{% - Citation `\@citeb' on page \thepage\space undefined% - }% - }% - {\csname b@\@citeb\endcsname}% - }% - }{#2}% -} -\let\@biblabel\@gobble -\newdimen\bibindent -\setcounter{enumi}{1} -\bibindent=0em -\def\thebibliography#1{% -\ifnum\addauflag=0\addauthorsection\global\addauflag=1\fi - \section[References]{% <=== OPTIONAL ARGUMENT ADDED HERE - {References} % was uppercased but this affects pdf bookmarks (SP/GM October 2004) - \vskip -9pt % GM July 2000 (for tighter spacing) - \@mkboth{{\refname}}{{\refname}}% - }% - \list{[\arabic{enumi}]}{% - \settowidth\labelwidth{[#1]}% - \leftmargin\labelwidth - \advance\leftmargin\labelsep - \advance\leftmargin\bibindent - \parsep=0pt\itemsep=1pt % GM July 2000 - \itemindent -\bibindent - \listparindent \itemindent - \usecounter{enumi} - }% - \let\newblock\@empty - \raggedright % GM July 2000 - \sloppy - \sfcode`\.=1000\relax -} - - -\gdef\balancecolumns -{\vfill\eject -\global\@colht=\textheight -\global\ht\@cclv=\textheight -} - -\newcount\colcntr -\global\colcntr=0 -\newbox\savebox - -\gdef \@makecol {% -\global\advance\colcntr by 1 -\ifnum\colcntr>2 \global\colcntr=1\fi - \ifvoid\footins - \setbox\@outputbox \box\@cclv - \else - \setbox\@outputbox \vbox{% -\boxmaxdepth \@maxdepth - \@tempdima\dp\@cclv - \unvbox \@cclv - \vskip-\@tempdima - 
\vskip \skip\footins - \color@begingroup - \normalcolor - \footnoterule - \unvbox \footins - \color@endgroup - }% - \fi - \xdef\@freelist{\@freelist\@midlist}% - \global \let \@midlist \@empty - \@combinefloats - \ifvbox\@kludgeins - \@makespecialcolbox - \else - \setbox\@outputbox \vbox to\@colht {% -\@texttop - \dimen@ \dp\@outputbox - \unvbox \@outputbox - \vskip -\dimen@ - \@textbottom - }% - \fi - \global \maxdepth \@maxdepth -} -\def\titlenote{\@ifnextchar[\@xtitlenote{\stepcounter\@mpfn -\global\advance\titlenotecount by 1 -\ifnum\titlenotecount=1 - \raisebox{9pt}{$\ast$} -\fi -\ifnum\titlenotecount=2 - \raisebox{9pt}{$\dagger$} -\fi -\ifnum\titlenotecount=3 - \raisebox{9pt}{$\ddagger$} -\fi -\ifnum\titlenotecount=4 -\raisebox{9pt}{$\S$} -\fi -\ifnum\titlenotecount=5 -\raisebox{9pt}{$\P$} -\fi - \@titlenotetext -}} - -\long\def\@titlenotetext#1{\insert\footins{% -\ifnum\titlenotecount=1\global\tntoks={#1}\fi -\ifnum\titlenotecount=2\global\tntokstwo={#1}\fi -\ifnum\titlenotecount=3\global\tntoksthree={#1}\fi -\ifnum\titlenotecount=4\global\tntoksfour={#1}\fi -\ifnum\titlenotecount=5\global\tntoksfive={#1}\fi - \reset@font\footnotesize - \interlinepenalty\interfootnotelinepenalty - \splittopskip\footnotesep - \splitmaxdepth \dp\strutbox \floatingpenalty \@MM - \hsize\columnwidth \@parboxrestore - \protected@edef\@currentlabel{% - }% - \color@begingroup - \color@endgroup}} - -%%%%%%%%%%%%%%%%%%%%%%%%% -\ps@plain -\baselineskip=11pt -\let\thepage\relax % For NO page numbers - GM Nov. 30th. 1999 and July 2000 -\def\setpagenumber#1{\global\setcounter{page}{#1}} -\pagenumbering{arabic} % Arabic page numbers GM July 2000 -\twocolumn % Double column. -\flushbottom % Even bottom -- alas, does not balance columns at end of document -\pagestyle{plain} - -% Need Copyright Year and Copyright Data to be user definable (in .tex file). -% Gerry Nov. 30th. 1999 -\newtoks\copyrtyr -\newtoks\acmcopyr -\newtoks\boilerplate -\global\acmcopyr={X-XXXXX-XX-X/XX/XX} % Default - 5/11/2001 *** Gerry -\global\copyrtyr={200X} % Default - 3/3/2003 *** Gerry -\def\CopyrightYear#1{\global\copyrtyr{#1}} -\def\crdata#1{\global\acmcopyr{#1}} -\def\permission#1{\global\boilerplate{#1}} -% -%\global\boilerplate={Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. To copy otherwise, to republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee.} -%\newtoks\copyrightetc -%\global\copyrightetc{Copyright \the\copyrtyr\ ACM \the\acmcopyr\ ...\$5.00} - -\global\boilerplate={Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. To copy otherwise, to republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Articles from this volume were invited to present their results at The 38th International Conference on Very Large Data Bases, August 26th - 31st 2013, Riva del Garda, Trento, Italy. } - -\global\conf{Proceedings of the VLDB Endowment,} -\global\confinfo{Vol. 6, No. XXX} -\newtoks\copyrightetc -\global\copyrightetc{Copyright 2011 VLDB Endowment 2150-8097/11/11... 
\$ 10.00} - -%ahmetsacan, 20111024: disabled showing conference name and Vol/Issue/Month. We now stamp this on the papers ourselves. -%\toappear{\the\boilerplate\par -%{\confname{\the\conf}} \the\confinfo\par \the\copyrightetc. -%} -% -%% End of vldb.cls -- V1.8c - 05/15/2011 -- -%% Ahmet Sacan -- December 2011 (volume, issue, and dates are dynamically updated) -%% Uwe Roehm -- Oct-Dec 2010 & Jan-Apr 2011 -%% Gerry Murray -- Wednesday July 26th. 2005 diff --git a/rabbitmq/pom.xml b/rabbitmq/pom.xml index 99518779c42..8779721fb40 100644 --- a/rabbitmq/pom.xml +++ b/rabbitmq/pom.xml @@ -9,7 +9,7 @@ io.druid druid - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT diff --git a/s3-extensions/pom.xml b/s3-extensions/pom.xml index e264a7a42b8..6c8bfd24d6a 100644 --- a/s3-extensions/pom.xml +++ b/s3-extensions/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT diff --git a/server/pom.xml b/server/pom.xml index 6fbdc16f1b7..d8bcfcd82f1 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.6.52-SNAPSHOT + 0.6.53-SNAPSHOT diff --git a/server/src/main/java/io/druid/client/CachingClusteredClient.java b/server/src/main/java/io/druid/client/CachingClusteredClient.java index 31a644cf887..f5b868e30fb 100644 --- a/server/src/main/java/io/druid/client/CachingClusteredClient.java +++ b/server/src/main/java/io/druid/client/CachingClusteredClient.java @@ -313,7 +313,7 @@ public class CachingClusteredClient implements QueryRunner final MultipleSpecificSegmentSpec segmentSpec = new MultipleSpecificSegmentSpec(descriptors); List intervals = segmentSpec.getIntervals(); - if ("realtime".equals(server.getType()) || !populateCache || isBySegment) { + if (server.isRealtime() || !populateCache || isBySegment) { resultSeqToAdd = clientQueryable.run(query.withQuerySegmentSpec(segmentSpec)); } else { resultSeqToAdd = toolChest.mergeSequences( diff --git a/server/src/main/java/io/druid/client/DruidServer.java b/server/src/main/java/io/druid/client/DruidServer.java index 53acc501513..9cf0d992bd6 100644 --- a/server/src/main/java/io/druid/client/DruidServer.java +++ b/server/src/main/java/io/druid/client/DruidServer.java @@ -36,7 +36,10 @@ import java.util.concurrent.ConcurrentMap; */ public class DruidServer implements Comparable { + public static final int DEFAULT_PRIORITY = 0; + public static final int DEFAULT_NUM_REPLICANTS = 2; public static final String DEFAULT_TIER = "_default_tier"; + private static final Logger log = new Logger(DruidServer.class); private final Object lock = new Object(); @@ -59,7 +62,8 @@ public class DruidServer implements Comparable node.getHost(), config.getMaxSize(), type, - config.getTier() + config.getTier(), + DEFAULT_PRIORITY ); } @@ -69,10 +73,11 @@ public class DruidServer implements Comparable @JsonProperty("host") String host, @JsonProperty("maxSize") long maxSize, @JsonProperty("type") String type, - @JsonProperty("tier") String tier + @JsonProperty("tier") String tier, + @JsonProperty("priority") int priority ) { - this.metadata = new DruidServerMetadata(name, host, maxSize, type, tier); + this.metadata = new DruidServerMetadata(name, host, maxSize, type, tier, priority); this.dataSources = new ConcurrentHashMap(); this.segments = new ConcurrentHashMap(); @@ -118,6 +123,12 @@ public class DruidServer implements Comparable return metadata.getTier(); } + @JsonProperty + public int getPriority() + { + return metadata.getPriority(); + } + @JsonProperty public Map getSegments() { @@ -125,6 +136,11 @@ public class DruidServer implements Comparable 
     return Collections.unmodifiableMap(segments);
   }
 
+  public boolean isRealtime()
+  {
+    return getType().equalsIgnoreCase("realtime");
+  }
+
   public DataSegment getSegment(String segmentName)
   {
     return segments.get(segmentName);
diff --git a/server/src/main/java/io/druid/client/DruidServerConfig.java b/server/src/main/java/io/druid/client/DruidServerConfig.java
index 089c05ff021..f0ab6ded61d 100644
--- a/server/src/main/java/io/druid/client/DruidServerConfig.java
+++ b/server/src/main/java/io/druid/client/DruidServerConfig.java
@@ -32,7 +32,10 @@ public class DruidServerConfig
   private long maxSize = 0;
 
   @JsonProperty
-  private String tier = "_default_tier";
+  private String tier = DruidServer.DEFAULT_TIER;
+
+  @JsonProperty
+  private int priority = DruidServer.DEFAULT_PRIORITY;
 
   public long getMaxSize()
   {
@@ -43,4 +46,9 @@ public class DruidServerConfig
   {
     return tier;
   }
+
+  public int getPriority()
+  {
+    return priority;
+  }
 }
diff --git a/server/src/main/java/io/druid/client/selector/ConnectionCountServerSelectorStrategy.java b/server/src/main/java/io/druid/client/selector/ConnectionCountServerSelectorStrategy.java
index 8a75e5403cf..4376ac01137 100644
--- a/server/src/main/java/io/druid/client/selector/ConnectionCountServerSelectorStrategy.java
+++ b/server/src/main/java/io/druid/client/selector/ConnectionCountServerSelectorStrategy.java
@@ -20,10 +20,14 @@ package io.druid.client.selector;
 
 import com.google.common.primitives.Ints;
+import com.metamx.common.ISE;
+import io.druid.timeline.DataSegment;
 
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.Map;
 import java.util.Set;
+import java.util.TreeMap;
 
 public class ConnectionCountServerSelectorStrategy implements ServerSelectorStrategy
 {
@@ -37,8 +41,25 @@ public class ConnectionCountServerSelectorStra
   };
 
   @Override
-  public QueryableDruidServer pick(Set servers)
+  public QueryableDruidServer pick(
+      TreeMap> prioritizedServers, DataSegment segment
+  )
   {
-    return Collections.min(servers, comparator);
+    final Map.Entry> highestPriorityServers = prioritizedServers.pollLastEntry();
+
+    if (highestPriorityServers == null) {
+      return null;
+    }
+
+    final Set servers = highestPriorityServers.getValue();
+    final int size = servers.size();
+    switch (size) {
+      case 0:
+        throw new ISE("[%s] No servers in the highest-priority set; this should never happen", segment.getIdentifier());
+      case 1:
+        return highestPriorityServers.getValue().iterator().next();
+      default:
+        return Collections.min(servers, comparator);
+    }
   }
 }
diff --git a/server/src/main/java/io/druid/client/selector/RandomServerSelectorStrategy.java b/server/src/main/java/io/druid/client/selector/RandomServerSelectorStrategy.java
index 25e27ce50df..0e295211484 100644
--- a/server/src/main/java/io/druid/client/selector/RandomServerSelectorStrategy.java
+++ b/server/src/main/java/io/druid/client/selector/RandomServerSelectorStrategy.java
@@ -20,17 +20,36 @@ package io.druid.client.selector;
 
 import com.google.common.collect.Iterators;
+import com.metamx.common.ISE;
+import io.druid.timeline.DataSegment;
 
+import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.TreeMap;
 
 public class RandomServerSelectorStrategy implements ServerSelectorStrategy
 {
   private static final Random random = new Random();
 
   @Override
-  public QueryableDruidServer pick(Set servers)
+  public QueryableDruidServer pick(TreeMap> prioritizedServers, DataSegment segment)
   {
-    return Iterators.get(servers.iterator(), random.nextInt(servers.size()));
+    final Map.Entry> highestPriorityServers = prioritizedServers.pollLastEntry();
+
+    if (highestPriorityServers == null) {
+      return null;
+    }
+
+    final Set servers = highestPriorityServers.getValue();
+    final int size = servers.size();
+    switch (size) {
+      case 0:
+        throw new ISE("[%s] No servers in the highest-priority set; this should never happen", segment.getIdentifier());
+      case 1:
+        return highestPriorityServers.getValue().iterator().next();
+      default:
+        return Iterators.get(servers.iterator(), random.nextInt(size));
+    }
   }
 }
diff --git a/server/src/main/java/io/druid/client/selector/ServerSelector.java b/server/src/main/java/io/druid/client/selector/ServerSelector.java
index 3a7464a05c8..232a722e563 100644
--- a/server/src/main/java/io/druid/client/selector/ServerSelector.java
+++ b/server/src/main/java/io/druid/client/selector/ServerSelector.java
@@ -19,18 +19,21 @@ package io.druid.client.selector;
 
+import com.google.api.client.util.Maps;
 import com.google.common.collect.Sets;
-import com.google.common.primitives.Ints;
+import com.metamx.emitter.EmittingLogger;
 import io.druid.timeline.DataSegment;
 
-import java.util.Collections;
-import java.util.Comparator;
 import java.util.Set;
+import java.util.TreeMap;
 
 /**
 */
 public class ServerSelector implements DiscoverySelector
 {
+
+  private static final EmittingLogger log = new EmittingLogger(ServerSelector.class);
+
   private final Set servers = Sets.newHashSet();
 
   private final DataSegment segment;
@@ -76,12 +79,17 @@ public class ServerSelector implements DiscoverySelector
   public QueryableDruidServer pick()
   {
     synchronized (this) {
-      final int size = servers.size();
-      switch (size) {
-        case 0: return null;
-        case 1: return servers.iterator().next();
-        default: return strategy.pick(servers);
+      TreeMap> prioritizedServers = Maps.newTreeMap();
+      for (QueryableDruidServer server : servers) {
+        Set theServers = prioritizedServers.get(server.getServer().getPriority());
+        if (theServers == null) {
+          theServers = Sets.newHashSet();
+          prioritizedServers.put(server.getServer().getPriority(), theServers);
+        }
+        theServers.add(server);
       }
+
+      return strategy.pick(prioritizedServers, segment);
     }
   }
 }
diff --git a/server/src/main/java/io/druid/client/selector/ServerSelectorStrategy.java b/server/src/main/java/io/druid/client/selector/ServerSelectorStrategy.java
index d4684d28e08..065253061d8 100644
--- a/server/src/main/java/io/druid/client/selector/ServerSelectorStrategy.java +++ b/server/src/main/java/io/druid/client/selector/ServerSelectorStrategy.java @@ -21,8 +21,10 @@ package io.druid.client.selector; import com.fasterxml.jackson.annotation.JsonSubTypes; import com.fasterxml.jackson.annotation.JsonTypeInfo; +import io.druid.timeline.DataSegment; import java.util.Set; +import java.util.TreeMap; @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type", defaultImpl = RandomServerSelectorStrategy.class) @JsonSubTypes(value = { @@ -31,5 +33,5 @@ import java.util.Set; }) public interface ServerSelectorStrategy { - public QueryableDruidServer pick(Set servers); + public QueryableDruidServer pick(TreeMap> prioritizedServers, DataSegment segment); } diff --git a/server/src/main/java/io/druid/curator/announcement/Announcer.java b/server/src/main/java/io/druid/curator/announcement/Announcer.java index 8b017c41a24..3471ea60d87 100644 --- a/server/src/main/java/io/druid/curator/announcement/Announcer.java +++ b/server/src/main/java/io/druid/curator/announcement/Announcer.java @@ -63,6 +63,7 @@ public class Announcer private final PathChildrenCacheFactory factory; private final List> toAnnounce = Lists.newArrayList(); + private final List> toUpdate = Lists.newArrayList(); private final ConcurrentMap listeners = new MapMaker().makeMap(); private final ConcurrentMap> announcements = new MapMaker().makeMap(); private final List parentsIBuilt = new CopyOnWriteArrayList(); @@ -92,6 +93,11 @@ public class Announcer announce(pair.lhs, pair.rhs); } toAnnounce.clear(); + + for (Pair pair : toUpdate) { + update(pair.lhs, pair.rhs); + } + toUpdate.clear(); } } @@ -268,6 +274,13 @@ public class Announcer public void update(final String path, final byte[] bytes) { + synchronized (toAnnounce) { + if (!started) { + toUpdate.add(Pair.of(path, bytes)); + return; + } + } + final ZKPaths.PathAndNode pathAndNode = ZKPaths.getPathAndNode(path); final String parentPath = pathAndNode.getPath(); diff --git a/server/src/main/java/io/druid/db/DatabaseRuleManager.java b/server/src/main/java/io/druid/db/DatabaseRuleManager.java index 036acda23d6..dd4084d3ed5 100644 --- a/server/src/main/java/io/druid/db/DatabaseRuleManager.java +++ b/server/src/main/java/io/druid/db/DatabaseRuleManager.java @@ -23,6 +23,7 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Supplier; import com.google.common.base.Throwables; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.inject.Inject; @@ -31,14 +32,14 @@ import com.metamx.common.concurrent.ScheduledExecutors; import com.metamx.common.lifecycle.LifecycleStart; import com.metamx.common.lifecycle.LifecycleStop; import com.metamx.common.logger.Logger; +import io.druid.client.DruidServer; import io.druid.concurrent.Execs; import io.druid.guice.ManageLifecycle; import io.druid.guice.annotations.Json; -import io.druid.server.coordinator.rules.PeriodLoadRule; +import io.druid.server.coordinator.rules.ForeverLoadRule; import io.druid.server.coordinator.rules.Rule; import org.joda.time.DateTime; import org.joda.time.Duration; -import org.joda.time.Period; import org.skife.jdbi.v2.FoldController; import org.skife.jdbi.v2.Folder3; import org.skife.jdbi.v2.Handle; @@ -86,10 +87,11 @@ public class DatabaseRuleManager } final List defaultRules = Arrays.asList( - new PeriodLoadRule( - new Period("P5000Y"), - 2, - 
"_default_tier" + new ForeverLoadRule( + ImmutableMap.of( + DruidServer.DEFAULT_TIER, + DruidServer.DEFAULT_NUM_REPLICANTS + ) ) ); final String version = new DateTime().toString(); diff --git a/server/src/main/java/io/druid/db/DatabaseSegmentManager.java b/server/src/main/java/io/druid/db/DatabaseSegmentManager.java index 3d68ad46978..930322106f8 100644 --- a/server/src/main/java/io/druid/db/DatabaseSegmentManager.java +++ b/server/src/main/java/io/druid/db/DatabaseSegmentManager.java @@ -462,7 +462,8 @@ public class DatabaseSegmentManager } } - private String getSegmentsTable() { + private String getSegmentsTable() + { return dbTables.get().getSegmentsTable(); } } diff --git a/server/src/main/java/io/druid/guice/QueryRunnerFactoryModule.java b/server/src/main/java/io/druid/guice/QueryRunnerFactoryModule.java index fc1ab48fcfc..6f4dc80b059 100644 --- a/server/src/main/java/io/druid/guice/QueryRunnerFactoryModule.java +++ b/server/src/main/java/io/druid/guice/QueryRunnerFactoryModule.java @@ -31,6 +31,8 @@ import io.druid.query.metadata.SegmentMetadataQueryRunnerFactory; import io.druid.query.metadata.metadata.SegmentMetadataQuery; import io.druid.query.search.SearchQueryRunnerFactory; import io.druid.query.search.search.SearchQuery; +import io.druid.query.select.SelectQuery; +import io.druid.query.select.SelectQueryRunnerFactory; import io.druid.query.timeboundary.TimeBoundaryQuery; import io.druid.query.timeboundary.TimeBoundaryQueryRunnerFactory; import io.druid.query.timeseries.TimeseriesQuery; @@ -51,6 +53,7 @@ public class QueryRunnerFactoryModule extends QueryToolChestModule .put(TimeBoundaryQuery.class, TimeBoundaryQueryRunnerFactory.class) .put(SegmentMetadataQuery.class, SegmentMetadataQueryRunnerFactory.class) .put(GroupByQuery.class, GroupByQueryRunnerFactory.class) + .put(SelectQuery.class, SelectQueryRunnerFactory.class) .put(TopNQuery.class, TopNQueryRunnerFactory.class) .build(); diff --git a/server/src/main/java/io/druid/guice/QueryToolChestModule.java b/server/src/main/java/io/druid/guice/QueryToolChestModule.java index 4e103db29de..ed5b59591a9 100644 --- a/server/src/main/java/io/druid/guice/QueryToolChestModule.java +++ b/server/src/main/java/io/druid/guice/QueryToolChestModule.java @@ -34,6 +34,8 @@ import io.druid.query.metadata.metadata.SegmentMetadataQuery; import io.druid.query.search.SearchQueryQueryToolChest; import io.druid.query.search.search.SearchQuery; import io.druid.query.search.search.SearchQueryConfig; +import io.druid.query.select.SelectQuery; +import io.druid.query.select.SelectQueryQueryToolChest; import io.druid.query.timeboundary.TimeBoundaryQuery; import io.druid.query.timeboundary.TimeBoundaryQueryQueryToolChest; import io.druid.query.timeseries.TimeseriesQuery; @@ -55,6 +57,7 @@ public class QueryToolChestModule implements Module .put(TimeBoundaryQuery.class, TimeBoundaryQueryQueryToolChest.class) .put(SegmentMetadataQuery.class, SegmentMetadataQueryQueryToolChest.class) .put(GroupByQuery.class, GroupByQueryQueryToolChest.class) + .put(SelectQuery.class, SelectQueryQueryToolChest.class) .put(TopNQuery.class, TopNQueryQueryToolChest.class) .build(); diff --git a/server/src/main/java/io/druid/guice/StorageNodeModule.java b/server/src/main/java/io/druid/guice/StorageNodeModule.java index f50ab39bbb7..01f9563c7d5 100644 --- a/server/src/main/java/io/druid/guice/StorageNodeModule.java +++ b/server/src/main/java/io/druid/guice/StorageNodeModule.java @@ -67,7 +67,8 @@ public class StorageNodeModule implements Module node.getHost(), config.getMaxSize(), 
nodeType.getNodeType(), - config.getTier() + config.getTier(), + config.getPriority() ); } } diff --git a/server/src/main/java/io/druid/segment/realtime/FireHydrant.java b/server/src/main/java/io/druid/segment/realtime/FireHydrant.java index d7c82dbf95b..db99ff45719 100644 --- a/server/src/main/java/io/druid/segment/realtime/FireHydrant.java +++ b/server/src/main/java/io/druid/segment/realtime/FireHydrant.java @@ -33,11 +33,12 @@ public class FireHydrant public FireHydrant( IncrementalIndex index, - int count + int count, + String segmentIdentifier ) { this.index = index; - this.adapter = new IncrementalIndexSegment(index); + this.adapter = new IncrementalIndexSegment(index, segmentIdentifier); this.count = count; } diff --git a/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumber.java b/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumber.java new file mode 100644 index 00000000000..1367e21de88 --- /dev/null +++ b/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumber.java @@ -0,0 +1,197 @@ +package io.druid.segment.realtime.plumber; + +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.metamx.common.concurrent.ScheduledExecutors; +import com.metamx.emitter.EmittingLogger; +import com.metamx.emitter.service.ServiceEmitter; +import io.druid.common.guava.ThreadRenamingCallable; +import io.druid.query.QueryRunnerFactoryConglomerate; +import io.druid.segment.IndexGranularity; +import io.druid.segment.realtime.FireDepartmentMetrics; +import io.druid.segment.realtime.Schema; +import io.druid.server.coordination.DataSegmentAnnouncer; +import org.joda.time.DateTime; +import org.joda.time.Duration; +import org.joda.time.Period; + +import java.io.File; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; + +/** + */ +public class FlushingPlumber extends RealtimePlumber +{ + private static final EmittingLogger log = new EmittingLogger(FlushingPlumber.class); + + private final Duration flushDuration; + + private volatile ScheduledExecutorService flushScheduledExec = null; + private volatile boolean stopped = false; + + public FlushingPlumber( + Duration flushDuration, + Period windowPeriod, + File basePersistDirectory, + IndexGranularity segmentGranularity, + Schema schema, + FireDepartmentMetrics metrics, + RejectionPolicy rejectionPolicy, + ServiceEmitter emitter, + QueryRunnerFactoryConglomerate conglomerate, + DataSegmentAnnouncer segmentAnnouncer, + ExecutorService queryExecutorService, + VersioningPolicy versioningPolicy, + int maxPendingPersists + ) + { + super( + windowPeriod, + basePersistDirectory, + segmentGranularity, + schema, + metrics, + rejectionPolicy, + emitter, + conglomerate, + segmentAnnouncer, + queryExecutorService, + versioningPolicy, + null, + null, + null, + maxPendingPersists + ); + + this.flushDuration = flushDuration; + } + + @Override + public void startJob() + { + log.info("Starting job for %s", getSchema().getDataSource()); + + computeBaseDir(getSchema()).mkdirs(); + initializeExecutors(); + + if (flushScheduledExec == null) { + flushScheduledExec = Executors.newScheduledThreadPool( + 1, + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("flushing_scheduled_%d") + .build() + ); + } + + bootstrapSinksFromDisk(); + startFlushThread(); + } + + protected void 
flushAfterDuration(final long truncatedTime, final Sink sink) + { + log.info( + "Abandoning segment %s at %s", + sink.getSegment().getIdentifier(), + new DateTime().plusMillis((int) flushDuration.getMillis()) + ); + + ScheduledExecutors.scheduleWithFixedDelay( + flushScheduledExec, + flushDuration, + new Callable() + { + @Override + public ScheduledExecutors.Signal call() throws Exception + { + log.info("Abandoning segment %s", sink.getSegment().getIdentifier()); + abandonSegment(truncatedTime, sink); + return ScheduledExecutors.Signal.STOP; + } + } + ); + } + + private void startFlushThread() + { + final long truncatedNow = getSegmentGranularity().truncate(new DateTime()).getMillis(); + final long windowMillis = getWindowPeriod().toStandardDuration().getMillis(); + + log.info( + "Expect to run at [%s]", + new DateTime().plus( + new Duration(System.currentTimeMillis(), getSegmentGranularity().increment(truncatedNow) + windowMillis) + ) + ); + + ScheduledExecutors + .scheduleAtFixedRate( + flushScheduledExec, + new Duration(System.currentTimeMillis(), getSegmentGranularity().increment(truncatedNow) + windowMillis), + new Duration(truncatedNow, getSegmentGranularity().increment(truncatedNow)), + new ThreadRenamingCallable( + String.format( + "%s-flusher-%d", + getSchema().getDataSource(), + getSchema().getShardSpec().getPartitionNum() + ) + ) + { + @Override + public ScheduledExecutors.Signal doCall() + { + if (stopped) { + log.info("Stopping flusher thread"); + return ScheduledExecutors.Signal.STOP; + } + + long minTimestamp = getSegmentGranularity().truncate( + getRejectionPolicy().getCurrMaxTime().minus(windowMillis) + ).getMillis(); + + List> sinksToPush = Lists.newArrayList(); + for (Map.Entry entry : getSinks().entrySet()) { + final Long intervalStart = entry.getKey(); + if (intervalStart < minTimestamp) { + log.info("Adding entry[%s] to flush.", entry); + sinksToPush.add(entry); + } + } + + for (final Map.Entry entry : sinksToPush) { + flushAfterDuration(entry.getKey(), entry.getValue()); + } + + if (stopped) { + log.info("Stopping flusher thread"); + return ScheduledExecutors.Signal.STOP; + } else { + return ScheduledExecutors.Signal.REPEAT; + } + } + } + ); + } + + @Override + public void finishJob() + { + log.info("Stopping job"); + + for (final Map.Entry entry : getSinks().entrySet()) { + abandonSegment(entry.getKey(), entry.getValue()); + } + shutdownExecutors(); + + if (flushScheduledExec != null) { + flushScheduledExec.shutdown(); + } + + stopped = true; + } +} diff --git a/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumberSchool.java b/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumberSchool.java new file mode 100644 index 00000000000..eeb0b3c03ce --- /dev/null +++ b/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumberSchool.java @@ -0,0 +1,130 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.segment.realtime.plumber; + +import com.fasterxml.jackson.annotation.JacksonInject; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.base.Preconditions; +import com.metamx.emitter.EmittingLogger; +import com.metamx.emitter.service.ServiceEmitter; +import io.druid.guice.annotations.Processing; +import io.druid.query.QueryRunnerFactoryConglomerate; +import io.druid.segment.IndexGranularity; +import io.druid.segment.realtime.FireDepartmentMetrics; +import io.druid.segment.realtime.Schema; +import io.druid.server.coordination.DataSegmentAnnouncer; +import org.joda.time.Duration; +import org.joda.time.Period; + +import javax.validation.constraints.NotNull; +import java.io.File; +import java.util.concurrent.ExecutorService; + +/** + * This plumber just drops segments at the end of a flush duration instead of handing them off. It is only useful if you want to run + * a real time node without the rest of the Druid cluster. + */ +public class FlushingPlumberSchool implements PlumberSchool +{ + private static final EmittingLogger log = new EmittingLogger(FlushingPlumberSchool.class); + + private final Duration flushDuration; + private final Period windowPeriod; + private final File basePersistDirectory; + private final IndexGranularity segmentGranularity; + private final int maxPendingPersists; + + @JacksonInject + @NotNull + private volatile ServiceEmitter emitter; + + @JacksonInject + @NotNull + private volatile QueryRunnerFactoryConglomerate conglomerate = null; + + @JacksonInject + @NotNull + private volatile DataSegmentAnnouncer segmentAnnouncer = null; + + @JacksonInject + @NotNull + @Processing + private volatile ExecutorService queryExecutorService = null; + + private volatile VersioningPolicy versioningPolicy = null; + private volatile RejectionPolicyFactory rejectionPolicyFactory = null; + + @JsonCreator + public FlushingPlumberSchool( + @JsonProperty("flushDuration") Duration flushDuration, + @JsonProperty("windowPeriod") Period windowPeriod, + @JsonProperty("basePersistDirectory") File basePersistDirectory, + @JsonProperty("segmentGranularity") IndexGranularity segmentGranularity + ) + { + this.flushDuration = flushDuration; + this.windowPeriod = windowPeriod; + this.basePersistDirectory = basePersistDirectory; + this.segmentGranularity = segmentGranularity; + this.versioningPolicy = new IntervalStartVersioningPolicy(); + this.rejectionPolicyFactory = new ServerTimeRejectionPolicyFactory(); + // Workaround for Jackson issue where if maxPendingPersists is null, all JacksonInjects fail + this.maxPendingPersists = RealtimePlumberSchool.DEFAULT_MAX_PENDING_PERSISTS; + + Preconditions.checkNotNull(flushDuration, "FlushingPlumberSchool requires a flushDuration."); + Preconditions.checkNotNull(windowPeriod, "FlushingPlumberSchool requires a windowPeriod."); + Preconditions.checkNotNull(basePersistDirectory, "FlushingPlumberSchool requires a basePersistDirectory."); + Preconditions.checkNotNull(segmentGranularity, "FlushingPlumberSchool requires a segmentGranularity."); + } + + @Override + + public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics) + { + verifyState(); + + final RejectionPolicy rejectionPolicy = 
rejectionPolicyFactory.create(windowPeriod); + log.info("Creating plumber using rejectionPolicy[%s]", rejectionPolicy); + + return new FlushingPlumber( + flushDuration, + windowPeriod, + basePersistDirectory, + segmentGranularity, + schema, + metrics, + rejectionPolicy, + emitter, + conglomerate, + segmentAnnouncer, + queryExecutorService, + versioningPolicy, + maxPendingPersists + ); + } + + private void verifyState() + { + Preconditions.checkNotNull(conglomerate, "must specify a queryRunnerFactoryConglomerate to do this action."); + Preconditions.checkNotNull(segmentAnnouncer, "must specify a segmentAnnouncer to do this action."); + Preconditions.checkNotNull(emitter, "must specify a serviceEmitter to do this action."); + } +} diff --git a/server/src/main/java/io/druid/segment/realtime/plumber/PlumberSchool.java b/server/src/main/java/io/druid/segment/realtime/plumber/PlumberSchool.java index f1e186011da..a2495fba178 100644 --- a/server/src/main/java/io/druid/segment/realtime/plumber/PlumberSchool.java +++ b/server/src/main/java/io/druid/segment/realtime/plumber/PlumberSchool.java @@ -27,9 +27,10 @@ import io.druid.segment.realtime.Schema; /** */ @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") -@JsonSubTypes( - @JsonSubTypes.Type(name = "realtime", value = RealtimePlumberSchool.class) -) +@JsonSubTypes(value = { + @JsonSubTypes.Type(name = "realtime", value = RealtimePlumberSchool.class), + @JsonSubTypes.Type(name = "flushing", value = FlushingPlumberSchool.class) +}) public interface PlumberSchool { /** diff --git a/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java b/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java new file mode 100644 index 00000000000..2b4d1f5f44b --- /dev/null +++ b/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java @@ -0,0 +1,735 @@ +package io.druid.segment.realtime.plumber; + +import com.google.common.base.Function; +import com.google.common.base.Joiner; +import com.google.common.base.Throwables; +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.primitives.Ints; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.metamx.common.Pair; +import com.metamx.common.concurrent.ScheduledExecutors; +import com.metamx.common.guava.FunctionalIterable; +import com.metamx.emitter.EmittingLogger; +import com.metamx.emitter.service.ServiceEmitter; +import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.client.DruidServer; +import io.druid.client.ServerView; +import io.druid.common.guava.ThreadRenamingCallable; +import io.druid.common.guava.ThreadRenamingRunnable; +import io.druid.concurrent.Execs; +import io.druid.query.MetricsEmittingQueryRunner; +import io.druid.query.Query; +import io.druid.query.QueryRunner; +import io.druid.query.QueryRunnerFactory; +import io.druid.query.QueryRunnerFactoryConglomerate; +import io.druid.query.QueryToolChest; +import io.druid.query.SegmentDescriptor; +import io.druid.query.spec.SpecificSegmentQueryRunner; +import io.druid.query.spec.SpecificSegmentSpec; +import io.druid.segment.IndexGranularity; +import io.druid.segment.IndexIO; +import io.druid.segment.IndexMerger; +import io.druid.segment.QueryableIndex; +import io.druid.segment.QueryableIndexSegment; +import io.druid.segment.Segment; +import io.druid.segment.loading.DataSegmentPusher; +import 
io.druid.segment.realtime.FireDepartmentMetrics; +import io.druid.segment.realtime.FireHydrant; +import io.druid.segment.realtime.Schema; +import io.druid.segment.realtime.SegmentPublisher; +import io.druid.server.coordination.DataSegmentAnnouncer; +import io.druid.timeline.DataSegment; +import io.druid.timeline.TimelineObjectHolder; +import io.druid.timeline.VersionedIntervalTimeline; +import io.druid.timeline.partition.SingleElementPartitionChunk; +import org.apache.commons.io.FileUtils; +import org.joda.time.DateTime; +import org.joda.time.Duration; +import org.joda.time.Interval; +import org.joda.time.Period; + +import javax.annotation.Nullable; +import java.io.File; +import java.io.FilenameFilter; +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; + +/** + */ +public class RealtimePlumber implements Plumber +{ + private static final EmittingLogger log = new EmittingLogger(RealtimePlumber.class); + + private final Period windowPeriod; + private final File basePersistDirectory; + private final IndexGranularity segmentGranularity; + private final Schema schema; + private final FireDepartmentMetrics metrics; + private final RejectionPolicy rejectionPolicy; + private final ServiceEmitter emitter; + private final QueryRunnerFactoryConglomerate conglomerate; + private final DataSegmentAnnouncer segmentAnnouncer; + private final ExecutorService queryExecutorService; + private final VersioningPolicy versioningPolicy; + private final DataSegmentPusher dataSegmentPusher; + private final SegmentPublisher segmentPublisher; + private final ServerView serverView; + private final int maxPendingPersists; + + private final Object handoffCondition = new Object(); + private final Map sinks = Maps.newConcurrentMap(); + private final VersionedIntervalTimeline sinkTimeline = new VersionedIntervalTimeline( + String.CASE_INSENSITIVE_ORDER + ); + + private volatile boolean shuttingDown = false; + private volatile boolean stopped = false; + private volatile ExecutorService persistExecutor = null; + private volatile ScheduledExecutorService scheduledExecutor = null; + + public RealtimePlumber( + Period windowPeriod, + File basePersistDirectory, + IndexGranularity segmentGranularity, + Schema schema, + FireDepartmentMetrics metrics, + RejectionPolicy rejectionPolicy, + ServiceEmitter emitter, + QueryRunnerFactoryConglomerate conglomerate, + DataSegmentAnnouncer segmentAnnouncer, + ExecutorService queryExecutorService, + VersioningPolicy versioningPolicy, + DataSegmentPusher dataSegmentPusher, + SegmentPublisher segmentPublisher, + ServerView serverView, + int maxPendingPersists + ) + { + this.windowPeriod = windowPeriod; + this.basePersistDirectory = basePersistDirectory; + this.segmentGranularity = segmentGranularity; + this.schema = schema; + this.metrics = metrics; + this.rejectionPolicy = rejectionPolicy; + this.emitter = emitter; + this.conglomerate = conglomerate; + this.segmentAnnouncer = segmentAnnouncer; + this.queryExecutorService = queryExecutorService; + this.versioningPolicy = versioningPolicy; + this.dataSegmentPusher = dataSegmentPusher; + this.segmentPublisher = segmentPublisher; + this.serverView = serverView; + this.maxPendingPersists = maxPendingPersists; + } + + public Schema getSchema() + { + return schema; + } + + public Period getWindowPeriod() + { + return windowPeriod; + 
} + + public IndexGranularity getSegmentGranularity() + { + return segmentGranularity; + } + + public VersioningPolicy getVersioningPolicy() + { + return versioningPolicy; + } + + public RejectionPolicy getRejectionPolicy() + { + return rejectionPolicy; + } + + public Map getSinks() + { + return sinks; + } + + @Override + public void startJob() + { + computeBaseDir(schema).mkdirs(); + initializeExecutors(); + bootstrapSinksFromDisk(); + registerServerViewCallback(); + startPersistThread(); + } + + @Override + public Sink getSink(long timestamp) + { + if (!rejectionPolicy.accept(timestamp)) { + return null; + } + + final long truncatedTime = segmentGranularity.truncate(timestamp); + + Sink retVal = sinks.get(truncatedTime); + + if (retVal == null) { + final Interval sinkInterval = new Interval( + new DateTime(truncatedTime), + segmentGranularity.increment(new DateTime(truncatedTime)) + ); + + retVal = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval)); + + try { + segmentAnnouncer.announceSegment(retVal.getSegment()); + sinks.put(truncatedTime, retVal); + sinkTimeline.add(retVal.getInterval(), retVal.getVersion(), new SingleElementPartitionChunk(retVal)); + } + catch (IOException e) { + log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource()) + .addData("interval", retVal.getInterval()) + .emit(); + } + } + + return retVal; + } + + @Override + public QueryRunner getQueryRunner(final Query query) + { + final QueryRunnerFactory> factory = conglomerate.findFactory(query); + final QueryToolChest> toolchest = factory.getToolchest(); + + final Function, ServiceMetricEvent.Builder> builderFn = + new Function, ServiceMetricEvent.Builder>() + { + + @Override + public ServiceMetricEvent.Builder apply(@Nullable Query input) + { + return toolchest.makeMetricBuilder(query); + } + }; + + List> querySinks = Lists.newArrayList(); + for (Interval interval : query.getIntervals()) { + querySinks.addAll(sinkTimeline.lookup(interval)); + } + + return toolchest.mergeResults( + factory.mergeRunners( + queryExecutorService, + FunctionalIterable + .create(querySinks) + .transform( + new Function, QueryRunner>() + { + @Override + public QueryRunner apply(TimelineObjectHolder holder) + { + final Sink theSink = holder.getObject().getChunk(0).getObject(); + return new SpecificSegmentQueryRunner( + new MetricsEmittingQueryRunner( + emitter, + builderFn, + factory.mergeRunners( + MoreExecutors.sameThreadExecutor(), + Iterables.transform( + theSink, + new Function>() + { + @Override + public QueryRunner apply(FireHydrant input) + { + return factory.createRunner(input.getSegment()); + } + } + ) + ) + ), + new SpecificSegmentSpec( + new SegmentDescriptor( + holder.getInterval(), + theSink.getSegment().getVersion(), + theSink.getSegment().getShardSpec().getPartitionNum() + ) + ) + ); + } + } + ) + ) + ); + } + + @Override + public void persist(final Runnable commitRunnable) + { + final List> indexesToPersist = Lists.newArrayList(); + for (Sink sink : sinks.values()) { + if (sink.swappable()) { + indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval())); + } + } + + log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource()); + + persistExecutor.execute( + new ThreadRenamingRunnable(String.format("%s-incremental-persist", schema.getDataSource())) + { + @Override + public void doRun() + { + for (Pair pair : indexesToPersist) { + metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs)); + } + commitRunnable.run(); + } + } + ); + } + + // 
Submits persist-n-merge task for a Sink to the persistExecutor + private void persistAndMerge(final long truncatedTime, final Sink sink) + { + final String threadName = String.format( + "%s-%s-persist-n-merge", schema.getDataSource(), new DateTime(truncatedTime) + ); + persistExecutor.execute( + new ThreadRenamingRunnable(threadName) + { + @Override + public void doRun() + { + final Interval interval = sink.getInterval(); + + for (FireHydrant hydrant : sink) { + if (!hydrant.hasSwapped()) { + log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant, sink); + final int rowCount = persistHydrant(hydrant, schema, interval); + metrics.incrementRowOutputCount(rowCount); + } + } + + final File mergedTarget = new File(computePersistDir(schema, interval), "merged"); + if (mergedTarget.exists()) { + log.info("Skipping already-merged sink: %s", sink); + return; + } + + File mergedFile = null; + try { + List indexes = Lists.newArrayList(); + for (FireHydrant fireHydrant : sink) { + Segment segment = fireHydrant.getSegment(); + final QueryableIndex queryableIndex = segment.asQueryableIndex(); + log.info("Adding hydrant[%s]", fireHydrant); + indexes.add(queryableIndex); + } + + mergedFile = IndexMerger.mergeQueryableIndex( + indexes, + schema.getAggregators(), + mergedTarget + ); + + QueryableIndex index = IndexIO.loadIndex(mergedFile); + + DataSegment segment = dataSegmentPusher.push( + mergedFile, + sink.getSegment().withDimensions(Lists.newArrayList(index.getAvailableDimensions())) + ); + + segmentPublisher.publishSegment(segment); + } + catch (IOException e) { + log.makeAlert(e, "Failed to persist merged index[%s]", schema.getDataSource()) + .addData("interval", interval) + .emit(); + if (shuttingDown) { + // We're trying to shut down, and this segment failed to push. Let's just get rid of it. + abandonSegment(truncatedTime, sink); + } + } + + if (mergedFile != null) { + try { + log.info("Deleting Index File[%s]", mergedFile); + FileUtils.deleteDirectory(mergedFile); + } + catch (IOException e) { + log.warn(e, "Error deleting directory[%s]", mergedFile); + } + } + } + } + ); + } + + @Override + public void finishJob() + { + log.info("Shutting down..."); + + shuttingDown = true; + + for (final Map.Entry entry : sinks.entrySet()) { + persistAndMerge(entry.getKey(), entry.getValue()); + } + + while (!sinks.isEmpty()) { + try { + log.info( + "Cannot shut down yet! 
Sinks remaining: %s", + Joiner.on(", ").join( + Iterables.transform( + sinks.values(), + new Function() + { + @Override + public String apply(Sink input) + { + return input.getSegment().getIdentifier(); + } + } + ) + ) + ); + + synchronized (handoffCondition) { + while (!sinks.isEmpty()) { + handoffCondition.wait(); + } + } + } + catch (InterruptedException e) { + throw Throwables.propagate(e); + } + } + + shutdownExecutors(); + + stopped = true; + } + + protected void initializeExecutors() + { + if (persistExecutor == null) { + // use a blocking single threaded executor to throttle the firehose when write to disk is slow + persistExecutor = Execs.newBlockingSingleThreaded( + "plumber_persist_%d", maxPendingPersists + ); + } + if (scheduledExecutor == null) { + scheduledExecutor = Executors.newScheduledThreadPool( + 1, + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("plumber_scheduled_%d") + .build() + ); + } + } + + protected void shutdownExecutors() + { + // scheduledExecutor is shutdown here, but persistExecutor is shutdown when the + // ServerView sends it a new segment callback + if (scheduledExecutor != null) { + scheduledExecutor.shutdown(); + } + } + + protected void bootstrapSinksFromDisk() + { + File baseDir = computeBaseDir(schema); + if (baseDir == null || !baseDir.exists()) { + return; + } + + File[] files = baseDir.listFiles(); + if (files == null) { + return; + } + + for (File sinkDir : files) { + Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/")); + + //final File[] sinkFiles = sinkDir.listFiles(); + // To avoid reading and listing of "merged" dir + final File[] sinkFiles = sinkDir.listFiles( + new FilenameFilter() + { + @Override + public boolean accept(File dir, String fileName) + { + return !(Ints.tryParse(fileName) == null); + } + } + ); + Arrays.sort( + sinkFiles, + new Comparator() + { + @Override + public int compare(File o1, File o2) + { + try { + return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName())); + } + catch (NumberFormatException e) { + log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2); + return o1.compareTo(o2); + } + } + } + ); + + try { + List hydrants = Lists.newArrayList(); + for (File segmentDir : sinkFiles) { + log.info("Loading previously persisted segment at [%s]", segmentDir); + + // Although this has been tackled at start of this method. + // Just a doubly-check added to skip "merged" dir. from being added to hydrants + // If 100% sure that this is not needed, this check can be removed. 
+ if (Ints.tryParse(segmentDir.getName()) == null) { + continue; + } + + hydrants.add( + new FireHydrant( + new QueryableIndexSegment( + DataSegment.makeDataSegmentIdentifier( + schema.getDataSource(), + sinkInterval.getStart(), + sinkInterval.getEnd(), + versioningPolicy.getVersion(sinkInterval), + schema.getShardSpec() + ), + IndexIO.loadIndex(segmentDir) + ), + Integer.parseInt(segmentDir.getName()) + ) + ); + } + + Sink currSink = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval), hydrants); + sinks.put(sinkInterval.getStartMillis(), currSink); + sinkTimeline.add( + currSink.getInterval(), + currSink.getVersion(), + new SingleElementPartitionChunk(currSink) + ); + + segmentAnnouncer.announceSegment(currSink.getSegment()); + } + catch (IOException e) { + log.makeAlert(e, "Problem loading sink[%s] from disk.", schema.getDataSource()) + .addData("interval", sinkInterval) + .emit(); + } + } + } + + protected void startPersistThread() + { + final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis(); + final long windowMillis = windowPeriod.toStandardDuration().getMillis(); + + log.info( + "Expect to run at [%s]", + new DateTime().plus( + new Duration(System.currentTimeMillis(), segmentGranularity.increment(truncatedNow) + windowMillis) + ) + ); + + ScheduledExecutors + .scheduleAtFixedRate( + scheduledExecutor, + new Duration(System.currentTimeMillis(), segmentGranularity.increment(truncatedNow) + windowMillis), + new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)), + new ThreadRenamingCallable( + String.format( + "%s-overseer-%d", + schema.getDataSource(), + schema.getShardSpec().getPartitionNum() + ) + ) + { + @Override + public ScheduledExecutors.Signal doCall() + { + if (stopped) { + log.info("Stopping merge-n-push overseer thread"); + return ScheduledExecutors.Signal.STOP; + } + + log.info("Starting merge and push."); + + long minTimestamp = segmentGranularity.truncate( + rejectionPolicy.getCurrMaxTime().minus(windowMillis) + ).getMillis(); + + List> sinksToPush = Lists.newArrayList(); + for (Map.Entry entry : sinks.entrySet()) { + final Long intervalStart = entry.getKey(); + if (intervalStart < minTimestamp) { + log.info("Adding entry[%s] for merge and push.", entry); + sinksToPush.add(entry); + } + } + + for (final Map.Entry entry : sinksToPush) { + persistAndMerge(entry.getKey(), entry.getValue()); + } + + if (stopped) { + log.info("Stopping merge-n-push overseer thread"); + return ScheduledExecutors.Signal.STOP; + } else { + return ScheduledExecutors.Signal.REPEAT; + } + } + } + ); + } + + /** + * Unannounces a given sink and removes all local references to it. 
+ */ + protected void abandonSegment(final long truncatedTime, final Sink sink) + { + try { + segmentAnnouncer.unannounceSegment(sink.getSegment()); + FileUtils.deleteDirectory(computePersistDir(schema, sink.getInterval())); + log.info("Removing sinkKey %d for segment %s", truncatedTime, sink.getSegment().getIdentifier()); + sinks.remove(truncatedTime); + sinkTimeline.remove( + sink.getInterval(), + sink.getVersion(), + new SingleElementPartitionChunk<>(sink) + ); + synchronized (handoffCondition) { + handoffCondition.notifyAll(); + } + } + catch (IOException e) { + log.makeAlert(e, "Unable to abandon old segment for dataSource[%s]", schema.getDataSource()) + .addData("interval", sink.getInterval()) + .emit(); + } + } + + protected File computeBaseDir(Schema schema) + { + return new File(basePersistDirectory, schema.getDataSource()); + } + + protected File computePersistDir(Schema schema, Interval interval) + { + return new File(computeBaseDir(schema), interval.toString().replace("/", "_")); + } + + /** + * Persists the given hydrant and returns the number of rows persisted + * + * @param indexToPersist + * @param schema + * @param interval + * + * @return the number of rows persisted + */ + protected int persistHydrant(FireHydrant indexToPersist, Schema schema, Interval interval) + { + if (indexToPersist.hasSwapped()) { + log.info( + "DataSource[%s], Interval[%s], Hydrant[%s] already swapped. Ignoring request to persist.", + schema.getDataSource(), interval, indexToPersist + ); + return 0; + } + + log.info("DataSource[%s], Interval[%s], persisting Hydrant[%s]", schema.getDataSource(), interval, indexToPersist); + try { + int numRows = indexToPersist.getIndex().size(); + + File persistedFile = IndexMerger.persist( + indexToPersist.getIndex(), + new File(computePersistDir(schema, interval), String.valueOf(indexToPersist.getCount())) + ); + + indexToPersist.swapSegment( + new QueryableIndexSegment( + indexToPersist.getSegment().getIdentifier(), + IndexIO.loadIndex(persistedFile) + ) + ); + + return numRows; + } + catch (IOException e) { + log.makeAlert("dataSource[%s] -- incremental persist failed", schema.getDataSource()) + .addData("interval", interval) + .addData("count", indexToPersist.getCount()) + .emit(); + + throw Throwables.propagate(e); + } + } + + private void registerServerViewCallback() + { + serverView.registerSegmentCallback( + persistExecutor, + new ServerView.BaseSegmentCallback() + { + @Override + public ServerView.CallbackAction segmentAdded(DruidServer server, DataSegment segment) + { + if (stopped) { + log.info("Unregistering ServerViewCallback"); + persistExecutor.shutdown(); + return ServerView.CallbackAction.UNREGISTER; + } + + if (server.isRealtime()) { + return ServerView.CallbackAction.CONTINUE; + } + + log.debug("Checking segment[%s] on server[%s]", segment, server); + if (schema.getDataSource().equals(segment.getDataSource())) { + final Interval interval = segment.getInterval(); + for (Map.Entry entry : sinks.entrySet()) { + final Long sinkKey = entry.getKey(); + if (interval.contains(sinkKey)) { + final Sink sink = entry.getValue(); + log.info("Segment[%s] matches sink[%s] on server[%s]", segment, sink, server); + + final String segmentVersion = segment.getVersion(); + final String sinkVersion = sink.getSegment().getVersion(); + if (segmentVersion.compareTo(sinkVersion) >= 0) { + log.info("Segment version[%s] >= sink version[%s]", segmentVersion, sinkVersion); + abandonSegment(sinkKey, sink); + } + } + } + } + + return ServerView.CallbackAction.CONTINUE; + } + } 
+ ); + } +} diff --git a/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumberSchool.java b/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumberSchool.java index 9dc18257b68..f7d6398a194 100644 --- a/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumberSchool.java +++ b/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumberSchool.java @@ -22,83 +22,35 @@ package io.druid.segment.realtime.plumber; import com.fasterxml.jackson.annotation.JacksonInject; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.base.Function; -import com.google.common.base.Joiner; import com.google.common.base.Preconditions; -import com.google.common.base.Throwables; -import com.google.common.collect.Iterables; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.primitives.Ints; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.metamx.common.Pair; -import com.metamx.common.concurrent.ScheduledExecutors; -import com.metamx.common.guava.FunctionalIterable; import com.metamx.emitter.EmittingLogger; import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; -import io.druid.client.DruidServer; import io.druid.client.ServerView; -import io.druid.common.guava.ThreadRenamingCallable; -import io.druid.common.guava.ThreadRenamingRunnable; import io.druid.guice.annotations.Processing; -import io.druid.query.MetricsEmittingQueryRunner; -import io.druid.query.Query; -import io.druid.query.QueryRunner; -import io.druid.query.QueryRunnerFactory; import io.druid.query.QueryRunnerFactoryConglomerate; -import io.druid.query.QueryToolChest; -import io.druid.query.SegmentDescriptor; -import io.druid.query.spec.SpecificSegmentQueryRunner; -import io.druid.query.spec.SpecificSegmentSpec; import io.druid.segment.IndexGranularity; -import io.druid.segment.IndexIO; -import io.druid.segment.IndexMerger; -import io.druid.segment.QueryableIndex; -import io.druid.segment.QueryableIndexSegment; -import io.druid.segment.Segment; import io.druid.segment.loading.DataSegmentPusher; import io.druid.segment.realtime.FireDepartmentMetrics; -import io.druid.segment.realtime.FireHydrant; import io.druid.segment.realtime.Schema; import io.druid.segment.realtime.SegmentPublisher; import io.druid.server.coordination.DataSegmentAnnouncer; -import io.druid.timeline.DataSegment; -import io.druid.timeline.TimelineObjectHolder; -import io.druid.timeline.VersionedIntervalTimeline; -import io.druid.timeline.partition.SingleElementPartitionChunk; -import org.apache.commons.io.FileUtils; -import org.joda.time.DateTime; -import org.joda.time.Duration; -import org.joda.time.Interval; import org.joda.time.Period; -import javax.annotation.Nullable; import javax.validation.constraints.NotNull; import java.io.File; -import java.io.FilenameFilter; -import java.io.IOException; -import java.util.Arrays; -import java.util.Comparator; -import java.util.List; -import java.util.Map; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; /** */ public class RealtimePlumberSchool implements PlumberSchool { + public static final int DEFAULT_MAX_PENDING_PERSISTS = 2; + private static final EmittingLogger log = new EmittingLogger(RealtimePlumberSchool.class); private final 
Period windowPeriod; private final File basePersistDirectory; private final IndexGranularity segmentGranularity; - private final Object handoffCondition = new Object(); - - private volatile boolean shuttingDown = false; @JacksonInject @NotNull @@ -129,6 +81,7 @@ public class RealtimePlumberSchool implements PlumberSchool @Processing private volatile ExecutorService queryExecutorService = null; + private volatile int maxPendingPersists; private volatile VersioningPolicy versioningPolicy = null; private volatile RejectionPolicyFactory rejectionPolicyFactory = null; @@ -144,6 +97,8 @@ public class RealtimePlumberSchool implements PlumberSchool this.segmentGranularity = segmentGranularity; this.versioningPolicy = new IntervalStartVersioningPolicy(); this.rejectionPolicyFactory = new ServerTimeRejectionPolicyFactory(); + // Workaround for Jackson issue where if maxPendingPersists is null, all JacksonInjects fail + this.maxPendingPersists = RealtimePlumberSchool.DEFAULT_MAX_PENDING_PERSISTS; Preconditions.checkNotNull(windowPeriod, "RealtimePlumberSchool requires a windowPeriod."); Preconditions.checkNotNull(basePersistDirectory, "RealtimePlumberSchool requires a basePersistDirectory."); @@ -197,6 +152,11 @@ public class RealtimePlumberSchool implements PlumberSchool this.queryExecutorService = executorService; } + public void setDefaultMaxPendingPersists(int maxPendingPersists) + { + this.maxPendingPersists = maxPendingPersists; + } + @Override public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics) { @@ -205,572 +165,23 @@ public class RealtimePlumberSchool implements PlumberSchool final RejectionPolicy rejectionPolicy = rejectionPolicyFactory.create(windowPeriod); log.info("Creating plumber using rejectionPolicy[%s]", rejectionPolicy); - return new Plumber() - { - private volatile boolean stopped = false; - private volatile ExecutorService persistExecutor = null; - private volatile ScheduledExecutorService scheduledExecutor = null; - - private final Map sinks = Maps.newConcurrentMap(); - private final VersionedIntervalTimeline sinkTimeline = new VersionedIntervalTimeline( - String.CASE_INSENSITIVE_ORDER - ); - - @Override - public void startJob() - { - computeBaseDir(schema).mkdirs(); - initializeExecutors(); - bootstrapSinksFromDisk(); - registerServerViewCallback(); - startPersistThread(); - } - - @Override - public Sink getSink(long timestamp) - { - if (!rejectionPolicy.accept(timestamp)) { - return null; - } - - final long truncatedTime = segmentGranularity.truncate(timestamp); - - Sink retVal = sinks.get(truncatedTime); - - if (retVal == null) { - final Interval sinkInterval = new Interval( - new DateTime(truncatedTime), - segmentGranularity.increment(new DateTime(truncatedTime)) - ); - - retVal = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval)); - - try { - segmentAnnouncer.announceSegment(retVal.getSegment()); - sinks.put(truncatedTime, retVal); - sinkTimeline.add(retVal.getInterval(), retVal.getVersion(), new SingleElementPartitionChunk(retVal)); - } - catch (IOException e) { - log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource()) - .addData("interval", retVal.getInterval()) - .emit(); - } - } - - return retVal; - } - - @Override - public QueryRunner getQueryRunner(final Query query) - { - final QueryRunnerFactory> factory = conglomerate.findFactory(query); - final QueryToolChest> toolchest = factory.getToolchest(); - - final Function, ServiceMetricEvent.Builder> builderFn = - new Function, 
ServiceMetricEvent.Builder>() - { - - @Override - public ServiceMetricEvent.Builder apply(@Nullable Query input) - { - return toolchest.makeMetricBuilder(query); - } - }; - - List> querySinks = Lists.newArrayList(); - for (Interval interval : query.getIntervals()) { - querySinks.addAll(sinkTimeline.lookup(interval)); - } - - return toolchest.mergeResults( - factory.mergeRunners( - queryExecutorService, - FunctionalIterable - .create(querySinks) - .transform( - new Function, QueryRunner>() - { - @Override - public QueryRunner apply(TimelineObjectHolder holder) - { - final Sink theSink = holder.getObject().getChunk(0).getObject(); - return new SpecificSegmentQueryRunner( - new MetricsEmittingQueryRunner( - emitter, - builderFn, - factory.mergeRunners( - MoreExecutors.sameThreadExecutor(), - Iterables.transform( - theSink, - new Function>() - { - @Override - public QueryRunner apply(FireHydrant input) - { - return factory.createRunner(input.getSegment()); - } - } - ) - ) - ), - new SpecificSegmentSpec( - new SegmentDescriptor( - holder.getInterval(), - theSink.getSegment().getVersion(), - theSink.getSegment().getShardSpec().getPartitionNum() - ) - ) - ); - } - } - ) - ) - ); - } - - @Override - public void persist(final Runnable commitRunnable) - { - final List> indexesToPersist = Lists.newArrayList(); - for (Sink sink : sinks.values()) { - if (sink.swappable()) { - indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval())); - } - } - - log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource()); - - persistExecutor.execute( - new ThreadRenamingRunnable(String.format("%s-incremental-persist", schema.getDataSource())) - { - @Override - public void doRun() - { - for (Pair pair : indexesToPersist) { - metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs)); - } - commitRunnable.run(); - } - } - ); - } - - // Submits persist-n-merge task for a Sink to the persistExecutor - private void persistAndMerge(final long truncatedTime, final Sink sink) - { - final String threadName = String.format( - "%s-%s-persist-n-merge", schema.getDataSource(), new DateTime(truncatedTime) - ); - persistExecutor.execute( - new ThreadRenamingRunnable(threadName) - { - @Override - public void doRun() - { - final Interval interval = sink.getInterval(); - - for (FireHydrant hydrant : sink) { - if (!hydrant.hasSwapped()) { - log.info("Hydrant[%s] hasn't swapped yet, swapping. 
Sink[%s]", hydrant, sink); - final int rowCount = persistHydrant(hydrant, schema, interval); - metrics.incrementRowOutputCount(rowCount); - } - } - - final File mergedTarget = new File(computePersistDir(schema, interval), "merged"); - if (mergedTarget.exists()) { - log.info("Skipping already-merged sink: %s", sink); - return; - } - - File mergedFile = null; - try { - List indexes = Lists.newArrayList(); - for (FireHydrant fireHydrant : sink) { - Segment segment = fireHydrant.getSegment(); - final QueryableIndex queryableIndex = segment.asQueryableIndex(); - log.info("Adding hydrant[%s]", fireHydrant); - indexes.add(queryableIndex); - } - - mergedFile = IndexMerger.mergeQueryableIndex( - indexes, - schema.getAggregators(), - mergedTarget - ); - - QueryableIndex index = IndexIO.loadIndex(mergedFile); - - DataSegment segment = dataSegmentPusher.push( - mergedFile, - sink.getSegment().withDimensions(Lists.newArrayList(index.getAvailableDimensions())) - ); - - segmentPublisher.publishSegment(segment); - } - catch (IOException e) { - log.makeAlert(e, "Failed to persist merged index[%s]", schema.getDataSource()) - .addData("interval", interval) - .emit(); - if (shuttingDown) { - // We're trying to shut down, and this segment failed to push. Let's just get rid of it. - abandonSegment(truncatedTime, sink); - } - } - - if (mergedFile != null) { - try { - log.info("Deleting Index File[%s]", mergedFile); - FileUtils.deleteDirectory(mergedFile); - } - catch (IOException e) { - log.warn(e, "Error deleting directory[%s]", mergedFile); - } - } - } - } - ); - } - - @Override - public void finishJob() - { - log.info("Shutting down..."); - - shuttingDown = true; - - for (final Map.Entry entry : sinks.entrySet()) { - persistAndMerge(entry.getKey(), entry.getValue()); - } - - while (!sinks.isEmpty()) { - try { - log.info( - "Cannot shut down yet! 
Sinks remaining: %s", - Joiner.on(", ").join( - Iterables.transform( - sinks.values(), - new Function() - { - @Override - public String apply(Sink input) - { - return input.getSegment().getIdentifier(); - } - } - ) - ) - ); - - synchronized (handoffCondition) { - while (!sinks.isEmpty()) { - handoffCondition.wait(); - } - } - } - catch (InterruptedException e) { - throw Throwables.propagate(e); - } - } - - // scheduledExecutor is shutdown here, but persistExecutor is shutdown when the - // ServerView sends it a new segment callback - if (scheduledExecutor != null) { - scheduledExecutor.shutdown(); - } - - stopped = true; - } - - private void initializeExecutors() - { - if (persistExecutor == null) { - persistExecutor = Executors.newFixedThreadPool( - 1, - new ThreadFactoryBuilder() - .setDaemon(true) - .setNameFormat("plumber_persist_%d") - .build() - ); - } - if (scheduledExecutor == null) { - scheduledExecutor = Executors.newScheduledThreadPool( - 1, - new ThreadFactoryBuilder() - .setDaemon(true) - .setNameFormat("plumber_scheduled_%d") - .build() - ); - } - } - - private void bootstrapSinksFromDisk() - { - File baseDir = computeBaseDir(schema); - if (baseDir == null || !baseDir.exists()) { - return; - } - - File[] files = baseDir.listFiles(); - if (files == null) { - return; - } - - for (File sinkDir : files) { - Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/")); - - //final File[] sinkFiles = sinkDir.listFiles(); - // To avoid reading and listing of "merged" dir - final File[] sinkFiles = sinkDir.listFiles( - new FilenameFilter() - { - @Override - public boolean accept(File dir, String fileName) - { - return !(Ints.tryParse(fileName) == null); - } - } - ); - Arrays.sort( - sinkFiles, - new Comparator() - { - @Override - public int compare(File o1, File o2) - { - try { - return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName())); - } - catch (NumberFormatException e) { - log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2); - return o1.compareTo(o2); - } - } - } - ); - - try { - List hydrants = Lists.newArrayList(); - for (File segmentDir : sinkFiles) { - log.info("Loading previously persisted segment at [%s]", segmentDir); - - // Although this has been tackled at start of this method. - // Just a doubly-check added to skip "merged" dir. from being added to hydrants - // If 100% sure that this is not needed, this check can be removed. 
- if (Ints.tryParse(segmentDir.getName()) == null) { - continue; - } - - hydrants.add( - new FireHydrant( - new QueryableIndexSegment(null, IndexIO.loadIndex(segmentDir)), - Integer.parseInt(segmentDir.getName()) - ) - ); - } - - Sink currSink = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval), hydrants); - sinks.put(sinkInterval.getStartMillis(), currSink); - sinkTimeline.add( - currSink.getInterval(), - currSink.getVersion(), - new SingleElementPartitionChunk(currSink) - ); - - segmentAnnouncer.announceSegment(currSink.getSegment()); - } - catch (IOException e) { - log.makeAlert(e, "Problem loading sink[%s] from disk.", schema.getDataSource()) - .addData("interval", sinkInterval) - .emit(); - } - } - } - - private void registerServerViewCallback() - { - serverView.registerSegmentCallback( - persistExecutor, - new ServerView.BaseSegmentCallback() - { - @Override - public ServerView.CallbackAction segmentAdded(DruidServer server, DataSegment segment) - { - if (stopped) { - log.info("Unregistering ServerViewCallback"); - persistExecutor.shutdown(); - return ServerView.CallbackAction.UNREGISTER; - } - - if ("realtime".equals(server.getType())) { - return ServerView.CallbackAction.CONTINUE; - } - - log.debug("Checking segment[%s] on server[%s]", segment, server); - if (schema.getDataSource().equals(segment.getDataSource())) { - final Interval interval = segment.getInterval(); - for (Map.Entry entry : sinks.entrySet()) { - final Long sinkKey = entry.getKey(); - if (interval.contains(sinkKey)) { - final Sink sink = entry.getValue(); - log.info("Segment[%s] matches sink[%s] on server[%s]", segment, sink, server); - - final String segmentVersion = segment.getVersion(); - final String sinkVersion = sink.getSegment().getVersion(); - if (segmentVersion.compareTo(sinkVersion) >= 0) { - log.info("Segment version[%s] >= sink version[%s]", segmentVersion, sinkVersion); - abandonSegment(sinkKey, sink); - } - } - } - } - - return ServerView.CallbackAction.CONTINUE; - } - } - ); - } - - private void startPersistThread() - { - final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis(); - final long windowMillis = windowPeriod.toStandardDuration().getMillis(); - - log.info( - "Expect to run at [%s]", - new DateTime().plus( - new Duration(System.currentTimeMillis(), segmentGranularity.increment(truncatedNow) + windowMillis) - ) - ); - - ScheduledExecutors - .scheduleAtFixedRate( - scheduledExecutor, - new Duration(System.currentTimeMillis(), segmentGranularity.increment(truncatedNow) + windowMillis), - new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)), - new ThreadRenamingCallable( - String.format( - "%s-overseer-%d", - schema.getDataSource(), - schema.getShardSpec().getPartitionNum() - ) - ) - { - @Override - public ScheduledExecutors.Signal doCall() - { - if (stopped) { - log.info("Stopping merge-n-push overseer thread"); - return ScheduledExecutors.Signal.STOP; - } - - log.info("Starting merge and push."); - - long minTimestamp = segmentGranularity.truncate( - rejectionPolicy.getCurrMaxTime().minus(windowMillis) - ).getMillis(); - - List> sinksToPush = Lists.newArrayList(); - for (Map.Entry entry : sinks.entrySet()) { - final Long intervalStart = entry.getKey(); - if (intervalStart < minTimestamp) { - log.info("Adding entry[%s] for merge and push.", entry); - sinksToPush.add(entry); - } - } - - for (final Map.Entry entry : sinksToPush) { - persistAndMerge(entry.getKey(), entry.getValue()); - } - - if (stopped) { - log.info("Stopping 
merge-n-push overseer thread"); - return ScheduledExecutors.Signal.STOP; - } else { - return ScheduledExecutors.Signal.REPEAT; - } - } - } - ); - } - - /** - * Unannounces a given sink and removes all local references to it. - */ - private void abandonSegment(final long truncatedTime, final Sink sink) { - try { - segmentAnnouncer.unannounceSegment(sink.getSegment()); - FileUtils.deleteDirectory(computePersistDir(schema, sink.getInterval())); - log.info("Removing sinkKey %d for segment %s", truncatedTime, sink.getSegment().getIdentifier()); - sinks.remove(truncatedTime); - sinkTimeline.remove( - sink.getInterval(), - sink.getVersion(), - new SingleElementPartitionChunk<>(sink) - ); - synchronized (handoffCondition) { - handoffCondition.notifyAll(); - } - } - catch (Exception e) { - log.makeAlert(e, "Unable to abandon old segment for dataSource[%s]", schema.getDataSource()) - .addData("interval", sink.getInterval()) - .emit(); - } - } - }; - } - - private File computeBaseDir(Schema schema) - { - return new File(basePersistDirectory, schema.getDataSource()); - } - - private File computePersistDir(Schema schema, Interval interval) - { - return new File(computeBaseDir(schema), interval.toString().replace("/", "_")); - } - - /** - * Persists the given hydrant and returns the number of rows persisted - * - * @param indexToPersist - * @param schema - * @param interval - * - * @return the number of rows persisted - */ - private int persistHydrant(FireHydrant indexToPersist, Schema schema, Interval interval) - { - if (indexToPersist.hasSwapped()) { - log.info( - "DataSource[%s], Interval[%s], Hydrant[%s] already swapped. Ignoring request to persist.", - schema.getDataSource(), interval, indexToPersist - ); - return 0; - } - - log.info("DataSource[%s], Interval[%s], persisting Hydrant[%s]", schema.getDataSource(), interval, indexToPersist); - try { - int numRows = indexToPersist.getIndex().size(); - - File persistedFile = IndexMerger.persist( - indexToPersist.getIndex(), - new File(computePersistDir(schema, interval), String.valueOf(indexToPersist.getCount())) - ); - - indexToPersist.swapSegment(new QueryableIndexSegment(null, IndexIO.loadIndex(persistedFile))); - - return numRows; - } - catch (IOException e) { - log.makeAlert("dataSource[%s] -- incremental persist failed", schema.getDataSource()) - .addData("interval", interval) - .addData("count", indexToPersist.getCount()) - .emit(); - - throw Throwables.propagate(e); - } + return new RealtimePlumber( + windowPeriod, + basePersistDirectory, + segmentGranularity, + schema, + metrics, + rejectionPolicy, + emitter, + conglomerate, + segmentAnnouncer, + queryExecutorService, + versioningPolicy, + dataSegmentPusher, + segmentPublisher, + serverView, + maxPendingPersists + ); } private void verifyState() diff --git a/server/src/main/java/io/druid/segment/realtime/plumber/Sink.java b/server/src/main/java/io/druid/segment/realtime/plumber/Sink.java index ee6b50e27d0..e2759406eef 100644 --- a/server/src/main/java/io/druid/segment/realtime/plumber/Sink.java +++ b/server/src/main/java/io/druid/segment/realtime/plumber/Sink.java @@ -178,12 +178,12 @@ public class Sink implements Iterable FireHydrant old; if (currIndex == null) { // Only happens on initialization, cannot synchronize on null old = currIndex; - currIndex = new FireHydrant(newIndex, hydrants.size()); + currIndex = new FireHydrant(newIndex, hydrants.size(), getSegment().getIdentifier()); hydrants.add(currIndex); } else { synchronized (currIndex) { old = currIndex; - currIndex = new 
FireHydrant(newIndex, hydrants.size()); + currIndex = new FireHydrant(newIndex, hydrants.size(), getSegment().getIdentifier()); hydrants.add(currIndex); } } diff --git a/server/src/main/java/io/druid/server/bridge/Bridge.java b/server/src/main/java/io/druid/server/bridge/Bridge.java new file mode 100644 index 00000000000..8509221c564 --- /dev/null +++ b/server/src/main/java/io/druid/server/bridge/Bridge.java @@ -0,0 +1,36 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.server.bridge; + +import com.google.inject.BindingAnnotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + */ +@BindingAnnotation +@Target({ElementType.FIELD, ElementType.PARAMETER, ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +public @interface Bridge +{ +} diff --git a/server/src/main/java/io/druid/server/bridge/BridgeCuratorConfig.java b/server/src/main/java/io/druid/server/bridge/BridgeCuratorConfig.java new file mode 100644 index 00000000000..5f616eb49a1 --- /dev/null +++ b/server/src/main/java/io/druid/server/bridge/BridgeCuratorConfig.java @@ -0,0 +1,31 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.server.bridge; + +import io.druid.curator.CuratorConfig; +import org.skife.config.Config; + +/** + */ +public abstract class BridgeCuratorConfig extends CuratorConfig +{ + @Config("druid.bridge.zk.service.host") + public abstract String getParentZkHosts(); +} diff --git a/server/src/main/java/io/druid/server/bridge/BridgeQuerySegmentWalker.java b/server/src/main/java/io/druid/server/bridge/BridgeQuerySegmentWalker.java new file mode 100644 index 00000000000..d7e4674fab2 --- /dev/null +++ b/server/src/main/java/io/druid/server/bridge/BridgeQuerySegmentWalker.java @@ -0,0 +1,126 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.server.bridge; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Charsets; +import com.google.inject.Inject; +import com.metamx.common.guava.Sequence; +import com.metamx.common.guava.Sequences; +import com.metamx.common.logger.Logger; +import com.metamx.http.client.HttpClient; +import com.metamx.http.client.response.StatusResponseHandler; +import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.client.selector.Server; +import io.druid.curator.discovery.ServerDiscoverySelector; +import io.druid.guice.annotations.Global; +import io.druid.query.Query; +import io.druid.query.QueryRunner; +import io.druid.query.QuerySegmentWalker; +import io.druid.query.SegmentDescriptor; +import org.joda.time.Interval; + +import java.net.URL; +import java.util.List; + +/** + */ +public class BridgeQuerySegmentWalker implements QuerySegmentWalker +{ + private static final Logger log = new Logger(BridgeQuerySegmentWalker.class); + + private final ServerDiscoverySelector brokerSelector; + private final HttpClient httpClient; + private final ObjectMapper jsonMapper; + private final StatusResponseHandler responseHandler; + + @Inject + public BridgeQuerySegmentWalker( + ServerDiscoverySelector brokerSelector, + @Global HttpClient httpClient, + ObjectMapper jsonMapper + ) + { + this.brokerSelector = brokerSelector; + this.httpClient = httpClient; + this.jsonMapper = jsonMapper; + this.responseHandler = new StatusResponseHandler(Charsets.UTF_8); + } + + @Override + public QueryRunner getQueryRunnerForIntervals( + Query query, Iterable intervals + ) + { + return makeRunner(); + } + + @Override + public QueryRunner getQueryRunnerForSegments( + Query query, Iterable specs + ) + { + return makeRunner(); + } + + private QueryRunner makeRunner() + { + return new QueryRunner() + { + @Override + public Sequence run(Query query) + { + try { + Server instance = brokerSelector.pick(); + if (instance == null) { + return Sequences.empty(); + } + + final String url = String.format( + "http://%s/druid/v2/", + brokerSelector.pick().getHost() + ); + + StatusResponseHolder response = httpClient.post(new URL(url)) + .setContent( + "application/json", + jsonMapper.writeValueAsBytes(query) + ) + .go(responseHandler) + .get(); + + List results = jsonMapper.readValue( + response.getContent(), new TypeReference>() + { + } + ); + + return Sequences.simple(results); + } + catch (Exception e) { + log.error(e, "Exception with bridge query"); + + return Sequences.empty(); + } + } + }; + } +} diff --git a/server/src/main/java/io/druid/server/bridge/BridgeZkCoordinator.java b/server/src/main/java/io/druid/server/bridge/BridgeZkCoordinator.java new file mode 100644 index 
00000000000..400b270779c --- /dev/null +++ b/server/src/main/java/io/druid/server/bridge/BridgeZkCoordinator.java @@ -0,0 +1,132 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.server.bridge; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Throwables; +import com.google.inject.Inject; +import com.metamx.common.logger.Logger; +import io.druid.client.DruidServer; +import io.druid.client.ServerView; +import io.druid.concurrent.Execs; +import io.druid.db.DatabaseSegmentManager; +import io.druid.segment.realtime.DbSegmentPublisher; +import io.druid.server.coordination.BaseZkCoordinator; +import io.druid.server.coordination.DataSegmentChangeCallback; +import io.druid.server.coordination.DataSegmentChangeHandler; +import io.druid.server.coordination.DruidServerMetadata; +import io.druid.server.initialization.ZkPathsConfig; +import io.druid.timeline.DataSegment; +import org.apache.curator.framework.CuratorFramework; + +import java.util.concurrent.ExecutorService; + +/** + */ +public class BridgeZkCoordinator extends BaseZkCoordinator +{ + private static final Logger log = new Logger(BaseZkCoordinator.class); + + private final DbSegmentPublisher dbSegmentPublisher; + private final DatabaseSegmentManager databaseSegmentManager; + private final ServerView serverView; + + private final ExecutorService exec = Execs.singleThreaded("BridgeZkCoordinatorServerView-%s"); + + @Inject + public BridgeZkCoordinator( + ObjectMapper jsonMapper, + ZkPathsConfig zkPaths, + DruidServerMetadata me, + @Bridge CuratorFramework curator, + DbSegmentPublisher dbSegmentPublisher, + DatabaseSegmentManager databaseSegmentManager, + ServerView serverView + ) + { + super(jsonMapper, zkPaths, me, curator); + + this.dbSegmentPublisher = dbSegmentPublisher; + this.databaseSegmentManager = databaseSegmentManager; + this.serverView = serverView; + } + + @Override + public void loadLocalCache() + { + // do nothing + } + + @Override + public DataSegmentChangeHandler getDataSegmentChangeHandler() + { + return BridgeZkCoordinator.this; + } + + @Override + public void addSegment(final DataSegment segment, final DataSegmentChangeCallback callback) + { + try { + log.info("Publishing segment %s", segment.getIdentifier()); + dbSegmentPublisher.publishSegment(segment); + serverView.registerSegmentCallback( + exec, + new ServerView.BaseSegmentCallback() + { + @Override + public ServerView.CallbackAction segmentAdded( + DruidServer server, DataSegment theSegment + ) + { + if (theSegment.equals(segment)) { + callback.execute(); + } + return ServerView.CallbackAction.CONTINUE; + } + } + ); + } + catch (Exception e) { + throw Throwables.propagate(e); + } + } + + @Override + public void removeSegment(final 
DataSegment segment, final DataSegmentChangeCallback callback) + { + databaseSegmentManager.removeSegment(segment.getDataSource(), segment.getIdentifier()); + serverView.registerSegmentCallback( + exec, + new ServerView.BaseSegmentCallback() + { + @Override + public ServerView.CallbackAction segmentRemoved( + DruidServer server, DataSegment theSegment + ) + { + if (theSegment.equals(segment)) { + callback.execute(); + } + return ServerView.CallbackAction.CONTINUE; + } + } + ); + } +} diff --git a/server/src/main/java/io/druid/server/bridge/DruidClusterBridge.java b/server/src/main/java/io/druid/server/bridge/DruidClusterBridge.java new file mode 100644 index 00000000000..f4073a18678 --- /dev/null +++ b/server/src/main/java/io/druid/server/bridge/DruidClusterBridge.java @@ -0,0 +1,391 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.server.bridge; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Predicate; +import com.google.common.base.Throwables; +import com.google.common.collect.Maps; +import com.google.common.io.Closeables; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.inject.Inject; +import com.metamx.common.concurrent.ScheduledExecutorFactory; +import com.metamx.common.concurrent.ScheduledExecutors; +import com.metamx.common.guava.FunctionalIterable; +import com.metamx.common.lifecycle.LifecycleStart; +import com.metamx.common.lifecycle.LifecycleStop; +import com.metamx.emitter.EmittingLogger; +import io.druid.client.DruidServer; +import io.druid.client.ServerInventoryView; +import io.druid.client.ServerView; +import io.druid.concurrent.Execs; +import io.druid.curator.announcement.Announcer; +import io.druid.guice.ManageLifecycle; +import io.druid.guice.annotations.Self; +import io.druid.server.DruidNode; +import io.druid.server.coordination.AbstractDataSegmentAnnouncer; +import io.druid.server.coordination.DataSegmentAnnouncer; +import io.druid.server.coordination.DruidServerMetadata; +import io.druid.timeline.DataSegment; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.recipes.leader.LeaderLatch; +import org.apache.curator.framework.recipes.leader.LeaderLatchListener; +import org.apache.curator.utils.ZKPaths; + +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicReference; + +/** + */ +@ManageLifecycle +public class DruidClusterBridge +{ + public static final String BRIDGE_OWNER_NODE = "_BRIDGE"; + public static final String NODE_TYPE = "bridge"; + + 
private static final EmittingLogger log = new EmittingLogger(DruidClusterBridge.class); + + private final ObjectMapper jsonMapper; + private final DruidClusterBridgeConfig config; + private final ScheduledExecutorService exec; + private final DruidNode self; + + // Communicates to the ZK cluster that this bridge node is deployed at + private final CuratorFramework curator; + private final AtomicReference leaderLatch; + + // Communicates to the remote (parent) ZK cluster + private final BridgeZkCoordinator bridgeZkCoordinator; + private final Announcer announcer; + private final ServerInventoryView serverInventoryView; + + private final Map segments = Maps.newHashMap(); + private final Object lock = new Object(); + + private volatile boolean started = false; + private volatile boolean leader = false; + + @Inject + public DruidClusterBridge( + ObjectMapper jsonMapper, + DruidClusterBridgeConfig config, + ScheduledExecutorFactory scheduledExecutorFactory, + @Self DruidNode self, + CuratorFramework curator, + AtomicReference leaderLatch, + BridgeZkCoordinator bridgeZkCoordinator, + @Bridge Announcer announcer, + @Bridge final AbstractDataSegmentAnnouncer dataSegmentAnnouncer, + ServerInventoryView serverInventoryView + ) + { + this.jsonMapper = jsonMapper; + this.config = config; + this.bridgeZkCoordinator = bridgeZkCoordinator; + this.announcer = announcer; + this.serverInventoryView = serverInventoryView; + this.curator = curator; + this.leaderLatch = leaderLatch; + + this.exec = scheduledExecutorFactory.create(1, "Coordinator-Exec--%d"); + this.self = self; + + ExecutorService serverInventoryViewExec = Executors.newFixedThreadPool( + 1, + new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("DruidClusterBridge-ServerInventoryView-%d") + .build() + ); + + serverInventoryView.registerSegmentCallback( + serverInventoryViewExec, + new ServerView.BaseSegmentCallback() + { + @Override + public ServerView.CallbackAction segmentAdded( + DruidServer server, DataSegment segment + ) + { + try { + synchronized (lock) { + Integer count = segments.get(segment); + if (count == null) { + segments.put(segment, 1); + dataSegmentAnnouncer.announceSegment(segment); + } else { + segments.put(segment, count + 1); + } + } + } + catch (Exception e) { + throw Throwables.propagate(e); + } + + return ServerView.CallbackAction.CONTINUE; + } + + @Override + public ServerView.CallbackAction segmentRemoved(DruidServer server, DataSegment segment) + { + try { + synchronized (lock) { + serverRemovedSegment(dataSegmentAnnouncer, segment, server); + } + } + catch (Exception e) { + throw Throwables.propagate(e); + } + + return ServerView.CallbackAction.CONTINUE; + } + } + ); + + serverInventoryView.registerServerCallback( + serverInventoryViewExec, + new ServerView.ServerCallback() + { + @Override + public ServerView.CallbackAction serverRemoved(DruidServer server) + { + try { + for (DataSegment dataSegment : server.getSegments().values()) { + serverRemovedSegment(dataSegmentAnnouncer, dataSegment, server); + } + } + catch (Exception e) { + throw Throwables.propagate(e); + } + return ServerView.CallbackAction.CONTINUE; + } + } + + ); + } + + public boolean isLeader() + { + return leader; + } + + @LifecycleStart + public void start() + { + synchronized (lock) { + if (started) { + return; + } + started = true; + + createNewLeaderLatch(); + try { + leaderLatch.get().start(); + } + catch (Exception e) { + throw Throwables.propagate(e); + } + } + } + + private LeaderLatch createNewLeaderLatch() + { + final LeaderLatch 
newLeaderLatch = new LeaderLatch( + curator, ZKPaths.makePath(config.getConnectorPath(), BRIDGE_OWNER_NODE), self.getHost() + ); + + newLeaderLatch.addListener( + new LeaderLatchListener() + { + @Override + public void isLeader() + { + becomeLeader(); + } + + @Override + public void notLeader() + { + stopBeingLeader(); + } + }, + Execs.singleThreaded("CoordinatorLeader-%s") + ); + + return leaderLatch.getAndSet(newLeaderLatch); + } + + @LifecycleStop + public void stop() + { + synchronized (lock) { + if (!started) { + return; + } + + stopBeingLeader(); + + try { + leaderLatch.get().close(); + } + catch (IOException e) { + log.warn(e, "Unable to close leaderLatch, ignoring"); + } + + exec.shutdownNow(); + + started = false; + } + } + + private void becomeLeader() + { + synchronized (lock) { + if (!started) { + return; + } + + log.info("Go-Go Gadgetmobile! Starting bridge in %s", config.getStartDelay()); + try { + bridgeZkCoordinator.start(); + serverInventoryView.start(); + + ScheduledExecutors.scheduleWithFixedDelay( + exec, + config.getStartDelay(), + config.getPeriod(), + new Callable() + { + @Override + public ScheduledExecutors.Signal call() + { + if (leader) { + Iterable servers = FunctionalIterable + .create(serverInventoryView.getInventory()) + .filter( + new Predicate() + { + @Override + public boolean apply( + DruidServer input + ) + { + return !input.isRealtime(); + } + } + ); + + long totalMaxSize = 0; + for (DruidServer server : servers) { + totalMaxSize += server.getMaxSize(); + } + + if (totalMaxSize == 0) { + log.warn("No servers founds!"); + } else { + DruidServerMetadata me = new DruidServerMetadata( + self.getHost(), + self.getHost(), + totalMaxSize, + NODE_TYPE, + config.getTier(), + config.getPriority() + ); + + try { + final String path = ZKPaths.makePath(config.getAnnouncementsPath(), self.getHost()); + log.info("Updating [%s] to have a maxSize of[%,d] bytes", self.getHost(), totalMaxSize); + announcer.update(path, jsonMapper.writeValueAsBytes(me)); + } + catch (Exception e) { + throw Throwables.propagate(e); + } + } + } + if (leader) { // (We might no longer be leader) + return ScheduledExecutors.Signal.REPEAT; + } else { + return ScheduledExecutors.Signal.STOP; + } + } + } + ); + + leader = true; + } + catch (Exception e) { + log.makeAlert(e, "Exception becoming leader") + .emit(); + final LeaderLatch oldLatch = createNewLeaderLatch(); + Closeables.closeQuietly(oldLatch); + try { + leaderLatch.get().start(); + } + catch (Exception e1) { + // If an exception gets thrown out here, then the bridge will zombie out 'cause it won't be looking for + // the latch anymore. I don't believe it's actually possible for an Exception to throw out here, but + // Curator likes to have "throws Exception" on methods so it might happen... + log.makeAlert(e1, "I am a zombie") + .emit(); + } + } + } + } + + private void stopBeingLeader() + { + synchronized (lock) { + try { + log.info("I'll get you next time, Gadget. 
Next time!"); + + bridgeZkCoordinator.stop(); + serverInventoryView.stop(); + + leader = false; + } + catch (Exception e) { + log.makeAlert(e, "Unable to stopBeingLeader").emit(); + } + } + } + + private void serverRemovedSegment(DataSegmentAnnouncer dataSegmentAnnouncer, DataSegment segment, DruidServer server) + throws IOException + { + Integer count = segments.get(segment); + if (count != null) { + if (count == 1) { + dataSegmentAnnouncer.unannounceSegment(segment); + segments.remove(segment); + } else { + segments.put(segment, count - 1); + } + } else { + log.makeAlert("Trying to remove a segment that was never added?") + .addData("server", server.getHost()) + .addData("segmentId", segment.getIdentifier()) + .emit(); + } + } +} diff --git a/server/src/main/java/io/druid/server/bridge/DruidClusterBridgeConfig.java b/server/src/main/java/io/druid/server/bridge/DruidClusterBridgeConfig.java new file mode 100644 index 00000000000..5478a375577 --- /dev/null +++ b/server/src/main/java/io/druid/server/bridge/DruidClusterBridgeConfig.java @@ -0,0 +1,52 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.server.bridge; + +import io.druid.client.DruidServer; +import io.druid.server.initialization.ZkPathsConfig; +import org.joda.time.Duration; +import org.skife.config.Config; +import org.skife.config.Default; + +/** + */ +public abstract class DruidClusterBridgeConfig extends ZkPathsConfig +{ + @Config("druid.server.tier") + @Default(DruidServer.DEFAULT_TIER) + public abstract String getTier(); + + @Config("druid.bridge.startDelay") + @Default("PT300s") + public abstract Duration getStartDelay(); + + @Config("druid.bridge.period") + @Default("PT60s") + public abstract Duration getPeriod(); + + @Config("druid.bridge.broker.serviceName") + public abstract String getBrokerServiceName(); + + @Config("druid.server.priority") + public int getPriority() + { + return DruidServer.DEFAULT_PRIORITY; + } +} diff --git a/server/src/main/java/io/druid/server/coordination/BaseZkCoordinator.java b/server/src/main/java/io/druid/server/coordination/BaseZkCoordinator.java new file mode 100644 index 00000000000..33d3118e304 --- /dev/null +++ b/server/src/main/java/io/druid/server/coordination/BaseZkCoordinator.java @@ -0,0 +1,196 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.server.coordination; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Throwables; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.metamx.common.lifecycle.LifecycleStart; +import com.metamx.common.lifecycle.LifecycleStop; +import com.metamx.emitter.EmittingLogger; +import io.druid.server.initialization.ZkPathsConfig; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.recipes.cache.ChildData; +import org.apache.curator.framework.recipes.cache.PathChildrenCache; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; +import org.apache.curator.utils.ZKPaths; + +import java.io.IOException; + +/** + */ +public abstract class BaseZkCoordinator implements DataSegmentChangeHandler +{ + private static final EmittingLogger log = new EmittingLogger(ZkCoordinator.class); + + private final Object lock = new Object(); + + private final ObjectMapper jsonMapper; + private final ZkPathsConfig zkPaths; + private final DruidServerMetadata me; + private final CuratorFramework curator; + + private volatile PathChildrenCache loadQueueCache; + private volatile boolean started; + + public BaseZkCoordinator( + ObjectMapper jsonMapper, + ZkPathsConfig zkPaths, + DruidServerMetadata me, + CuratorFramework curator + ) + { + this.jsonMapper = jsonMapper; + this.zkPaths = zkPaths; + this.me = me; + this.curator = curator; + } + + @LifecycleStart + public void start() throws IOException + { + synchronized (lock) { + if (started) { + return; + } + + log.info("Starting zkCoordinator for server[%s]", me); + + final String loadQueueLocation = ZKPaths.makePath(zkPaths.getLoadQueuePath(), me.getName()); + final String servedSegmentsLocation = ZKPaths.makePath(zkPaths.getServedSegmentsPath(), me.getName()); + final String liveSegmentsLocation = ZKPaths.makePath(zkPaths.getLiveSegmentsPath(), me.getName()); + + loadQueueCache = new PathChildrenCache( + curator, + loadQueueLocation, + true, + true, + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("ZkCoordinator-%s").build() + ); + + try { + curator.newNamespaceAwareEnsurePath(loadQueueLocation).ensure(curator.getZookeeperClient()); + curator.newNamespaceAwareEnsurePath(servedSegmentsLocation).ensure(curator.getZookeeperClient()); + curator.newNamespaceAwareEnsurePath(liveSegmentsLocation).ensure(curator.getZookeeperClient()); + + loadLocalCache(); + + loadQueueCache.getListenable().addListener( + new PathChildrenCacheListener() + { + @Override + public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception + { + final ChildData child = event.getData(); + switch (event.getType()) { + case CHILD_ADDED: + final String path = child.getPath(); + final DataSegmentChangeRequest segment = jsonMapper.readValue( + child.getData(), DataSegmentChangeRequest.class + ); + + log.info("New node[%s] with segmentClass[%s]", path, segment.getClass()); + + try { + 
segment.go( + getDataSegmentChangeHandler(), + new DataSegmentChangeCallback() + { + boolean hasRun = false; + + @Override + public void execute() + { + try { + if (!hasRun) { + curator.delete().guaranteed().forPath(path); + log.info("Completed processing for node[%s]", path); + hasRun = true; + } + } + catch (Exception e) { + throw Throwables.propagate(e); + } + } + } + ); + } + catch (Exception e) { + try { + curator.delete().guaranteed().forPath(path); + } + catch (Exception e1) { + log.info(e1, "Failed to delete node[%s], but ignoring exception.", path); + } + + log.makeAlert(e, "Segment load/unload: uncaught exception.") + .addData("node", path) + .addData("nodeProperties", segment) + .emit(); + } + + break; + case CHILD_REMOVED: + log.info("%s was removed", event.getData().getPath()); + break; + default: + log.info("Ignoring event[%s]", event); + } + } + } + ); + loadQueueCache.start(); + } + catch (Exception e) { + Throwables.propagateIfPossible(e, IOException.class); + throw Throwables.propagate(e); + } + + started = true; + } + } + + @LifecycleStop + public void stop() + { + log.info("Stopping ZkCoordinator for [%s]", me); + synchronized (lock) { + if (!started) { + return; + } + + try { + loadQueueCache.close(); + } + catch (Exception e) { + throw Throwables.propagate(e); + } + finally { + loadQueueCache = null; + started = false; + } + } + } + + public abstract void loadLocalCache(); + + public abstract DataSegmentChangeHandler getDataSegmentChangeHandler(); +} diff --git a/server/src/main/java/io/druid/server/coordination/DataSegmentChangeCallback.java b/server/src/main/java/io/druid/server/coordination/DataSegmentChangeCallback.java new file mode 100644 index 00000000000..fe0cd038b7d --- /dev/null +++ b/server/src/main/java/io/druid/server/coordination/DataSegmentChangeCallback.java @@ -0,0 +1,8 @@ +package io.druid.server.coordination; + +/** + */ +public interface DataSegmentChangeCallback +{ + public void execute(); +} diff --git a/server/src/main/java/io/druid/server/coordination/DataSegmentChangeHandler.java b/server/src/main/java/io/druid/server/coordination/DataSegmentChangeHandler.java index 460e4b4256f..98f2bcd7b47 100644 --- a/server/src/main/java/io/druid/server/coordination/DataSegmentChangeHandler.java +++ b/server/src/main/java/io/druid/server/coordination/DataSegmentChangeHandler.java @@ -25,6 +25,6 @@ import io.druid.timeline.DataSegment; */ public interface DataSegmentChangeHandler { - public void addSegment(DataSegment segment); - public void removeSegment(DataSegment segment); + public void addSegment(DataSegment segment, DataSegmentChangeCallback callback); + public void removeSegment(DataSegment segment, DataSegmentChangeCallback callback); } diff --git a/server/src/main/java/io/druid/server/coordination/DataSegmentChangeRequest.java b/server/src/main/java/io/druid/server/coordination/DataSegmentChangeRequest.java index 0c29d1e1c75..c5fa72e6bc0 100644 --- a/server/src/main/java/io/druid/server/coordination/DataSegmentChangeRequest.java +++ b/server/src/main/java/io/druid/server/coordination/DataSegmentChangeRequest.java @@ -32,5 +32,5 @@ import com.fasterxml.jackson.annotation.JsonTypeInfo; }) public interface DataSegmentChangeRequest { - public void go(DataSegmentChangeHandler handler); + public void go(DataSegmentChangeHandler handler, DataSegmentChangeCallback callback); } diff --git a/server/src/main/java/io/druid/server/coordination/DruidServerMetadata.java b/server/src/main/java/io/druid/server/coordination/DruidServerMetadata.java index 
ecb7f33b2ea..6e564734c8c 100644 --- a/server/src/main/java/io/druid/server/coordination/DruidServerMetadata.java +++ b/server/src/main/java/io/druid/server/coordination/DruidServerMetadata.java @@ -31,6 +31,7 @@ public class DruidServerMetadata private final long maxSize; private final String tier; private final String type; + private final int priority; @JsonCreator public DruidServerMetadata( @@ -38,7 +39,8 @@ public class DruidServerMetadata @JsonProperty("host") String host, @JsonProperty("maxSize") long maxSize, @JsonProperty("type") String type, - @JsonProperty("tier") String tier + @JsonProperty("tier") String tier, + @JsonProperty("priority") int priority ) { this.name = name; @@ -46,6 +48,7 @@ public class DruidServerMetadata this.maxSize = maxSize; this.tier = tier; this.type = type; + this.priority = priority; } @JsonProperty @@ -78,6 +81,12 @@ public class DruidServerMetadata return type; } + @JsonProperty + public int getPriority() + { + return priority; + } + @Override public String toString() { @@ -87,6 +96,7 @@ public class DruidServerMetadata ", maxSize=" + maxSize + ", tier='" + tier + '\'' + ", type='" + type + '\'' + + ", priority='" + priority + '\'' + '}'; } } diff --git a/server/src/main/java/io/druid/server/coordination/SegmentChangeRequestDrop.java b/server/src/main/java/io/druid/server/coordination/SegmentChangeRequestDrop.java index 9b0fc755fb3..2514f22871e 100644 --- a/server/src/main/java/io/druid/server/coordination/SegmentChangeRequestDrop.java +++ b/server/src/main/java/io/druid/server/coordination/SegmentChangeRequestDrop.java @@ -46,9 +46,9 @@ public class SegmentChangeRequestDrop implements DataSegmentChangeRequest } @Override - public void go(DataSegmentChangeHandler handler) + public void go(DataSegmentChangeHandler handler, DataSegmentChangeCallback callback) { - handler.removeSegment(segment); + handler.removeSegment(segment, callback); } @Override diff --git a/server/src/main/java/io/druid/server/coordination/SegmentChangeRequestLoad.java b/server/src/main/java/io/druid/server/coordination/SegmentChangeRequestLoad.java index 2a66693b66f..11eba9684c0 100644 --- a/server/src/main/java/io/druid/server/coordination/SegmentChangeRequestLoad.java +++ b/server/src/main/java/io/druid/server/coordination/SegmentChangeRequestLoad.java @@ -39,9 +39,9 @@ public class SegmentChangeRequestLoad implements DataSegmentChangeRequest } @Override - public void go(DataSegmentChangeHandler handler) + public void go(DataSegmentChangeHandler handler, DataSegmentChangeCallback callback) { - handler.addSegment(segment); + handler.addSegment(segment, callback); } @JsonProperty diff --git a/server/src/main/java/io/druid/server/coordination/SegmentChangeRequestNoop.java b/server/src/main/java/io/druid/server/coordination/SegmentChangeRequestNoop.java index e713278b47c..7c51f3f0b6f 100644 --- a/server/src/main/java/io/druid/server/coordination/SegmentChangeRequestNoop.java +++ b/server/src/main/java/io/druid/server/coordination/SegmentChangeRequestNoop.java @@ -24,8 +24,8 @@ package io.druid.server.coordination; public class SegmentChangeRequestNoop implements DataSegmentChangeRequest { @Override - public void go(DataSegmentChangeHandler handler) + public void go(DataSegmentChangeHandler handler, DataSegmentChangeCallback callback) { - + // do nothing } } diff --git a/server/src/main/java/io/druid/server/coordination/ZkCoordinator.java b/server/src/main/java/io/druid/server/coordination/ZkCoordinator.java index 246415f57d0..28edb8ddca2 100644 --- 
a/server/src/main/java/io/druid/server/coordination/ZkCoordinator.java +++ b/server/src/main/java/io/druid/server/coordination/ZkCoordinator.java @@ -20,23 +20,14 @@ package io.druid.server.coordination; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Throwables; import com.google.common.collect.Lists; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.inject.Inject; -import com.metamx.common.lifecycle.LifecycleStart; -import com.metamx.common.lifecycle.LifecycleStop; import com.metamx.emitter.EmittingLogger; import io.druid.segment.loading.SegmentLoaderConfig; import io.druid.segment.loading.SegmentLoadingException; import io.druid.server.initialization.ZkPathsConfig; import io.druid.timeline.DataSegment; import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.framework.recipes.cache.ChildData; -import org.apache.curator.framework.recipes.cache.PathChildrenCache; -import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; -import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; -import org.apache.curator.utils.ZKPaths; import java.io.File; import java.io.IOException; @@ -44,23 +35,15 @@ import java.util.List; /** */ -public class ZkCoordinator implements DataSegmentChangeHandler +public class ZkCoordinator extends BaseZkCoordinator { private static final EmittingLogger log = new EmittingLogger(ZkCoordinator.class); - private final Object lock = new Object(); - private final ObjectMapper jsonMapper; private final SegmentLoaderConfig config; - private final ZkPathsConfig zkPaths; - private final DruidServerMetadata me; private final DataSegmentAnnouncer announcer; - private final CuratorFramework curator; private final ServerManager serverManager; - private volatile PathChildrenCache loadQueueCache; - private volatile boolean started; - @Inject public ZkCoordinator( ObjectMapper jsonMapper, @@ -72,129 +55,20 @@ public class ZkCoordinator implements DataSegmentChangeHandler ServerManager serverManager ) { + super(jsonMapper, zkPaths, me, curator); + this.jsonMapper = jsonMapper; this.config = config; - this.zkPaths = zkPaths; - this.me = me; this.announcer = announcer; - this.curator = curator; this.serverManager = serverManager; } - @LifecycleStart - public void start() throws IOException - { - log.info("Starting zkCoordinator for server[%s]", me); - synchronized (lock) { - if (started) { - return; - } - - final String loadQueueLocation = ZKPaths.makePath(zkPaths.getLoadQueuePath(), me.getName()); - final String servedSegmentsLocation = ZKPaths.makePath(zkPaths.getServedSegmentsPath(), me.getName()); - final String liveSegmentsLocation = ZKPaths.makePath(zkPaths.getLiveSegmentsPath(), me.getName()); - - loadQueueCache = new PathChildrenCache( - curator, - loadQueueLocation, - true, - true, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("ZkCoordinator-%s").build() - ); - - try { - config.getInfoDir().mkdirs(); - - curator.newNamespaceAwareEnsurePath(loadQueueLocation).ensure(curator.getZookeeperClient()); - curator.newNamespaceAwareEnsurePath(servedSegmentsLocation).ensure(curator.getZookeeperClient()); - curator.newNamespaceAwareEnsurePath(liveSegmentsLocation).ensure(curator.getZookeeperClient()); - - loadCache(); - - loadQueueCache.getListenable().addListener( - new PathChildrenCacheListener() - { - @Override - public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception - { - final ChildData child = event.getData(); - 
switch (event.getType()) { - case CHILD_ADDED: - final String path = child.getPath(); - final DataSegmentChangeRequest segment = jsonMapper.readValue( - child.getData(), DataSegmentChangeRequest.class - ); - - log.info("New node[%s] with segmentClass[%s]", path, segment.getClass()); - - try { - segment.go(ZkCoordinator.this); - curator.delete().guaranteed().forPath(path); - - log.info("Completed processing for node[%s]", path); - } - catch (Exception e) { - try { - curator.delete().guaranteed().forPath(path); - } - catch (Exception e1) { - log.info(e1, "Failed to delete node[%s], but ignoring exception.", path); - } - - log.makeAlert(e, "Segment load/unload: uncaught exception.") - .addData("node", path) - .addData("nodeProperties", segment) - .emit(); - } - - break; - case CHILD_REMOVED: - log.info("%s was removed", event.getData().getPath()); - break; - default: - log.info("Ignoring event[%s]", event); - } - } - } - ); - loadQueueCache.start(); - } - catch (Exception e) { - Throwables.propagateIfPossible(e, IOException.class); - throw Throwables.propagate(e); - } - - started = true; - } - } - - @LifecycleStop - public void stop() - { - log.info("Stopping ZkCoordinator with config[%s]", config); - synchronized (lock) { - if (!started) { - return; - } - - - try { - loadQueueCache.close(); - } - catch (Exception e) { - throw Throwables.propagate(e); - } - finally { - loadQueueCache = null; - started = false; - } - } - } - - private void loadCache() + @Override + public void loadLocalCache() { + final long start = System.currentTimeMillis(); File baseDir = config.getInfoDir(); - if (!baseDir.exists()) { + if (!baseDir.exists() && !config.getInfoDir().mkdirs()) { return; } @@ -221,11 +95,27 @@ public class ZkCoordinator implements DataSegmentChangeHandler } } - addSegments(cachedSegments); + addSegments( + cachedSegments, + new DataSegmentChangeCallback() + { + @Override + public void execute() + { + log.info("Cache load took %,d ms", System.currentTimeMillis() - start); + } + } + ); } @Override - public void addSegment(DataSegment segment) + public DataSegmentChangeHandler getDataSegmentChangeHandler() + { + return ZkCoordinator.this; + } + + @Override + public void addSegment(DataSegment segment, DataSegmentChangeCallback callback) { try { log.info("Loading segment %s", segment.getIdentifier()); @@ -235,7 +125,7 @@ public class ZkCoordinator implements DataSegmentChangeHandler loaded = serverManager.loadSegment(segment); } catch (Exception e) { - removeSegment(segment); + removeSegment(segment, callback); throw new SegmentLoadingException(e, "Exception loading segment[%s]", segment.getIdentifier()); } @@ -246,7 +136,7 @@ public class ZkCoordinator implements DataSegmentChangeHandler jsonMapper.writeValue(segmentInfoCacheFile, segment); } catch (IOException e) { - removeSegment(segment); + removeSegment(segment, callback); throw new SegmentLoadingException( e, "Failed to write to disk segment info cache file[%s]", segmentInfoCacheFile ); @@ -260,16 +150,18 @@ public class ZkCoordinator implements DataSegmentChangeHandler throw new SegmentLoadingException(e, "Failed to announce segment[%s]", segment.getIdentifier()); } } - } catch (SegmentLoadingException e) { log.makeAlert(e, "Failed to load segment for dataSource") .addData("segment", segment) .emit(); } + finally { + callback.execute(); + } } - public void addSegments(Iterable segments) + public void addSegments(Iterable segments, DataSegmentChangeCallback callback) { try { final List segmentFailures = Lists.newArrayList(); @@ -284,7 
+176,7 @@ public class ZkCoordinator implements DataSegmentChangeHandler } catch (Exception e) { log.error(e, "Exception loading segment[%s]", segment.getIdentifier()); - removeSegment(segment); + removeSegment(segment, callback); segmentFailures.add(segment.getIdentifier()); continue; } @@ -297,7 +189,7 @@ public class ZkCoordinator implements DataSegmentChangeHandler } catch (IOException e) { log.error(e, "Failed to write to disk segment info cache file[%s]", segmentInfoCacheFile); - removeSegment(segment); + removeSegment(segment, callback); segmentFailures.add(segment.getIdentifier()); continue; } @@ -326,11 +218,14 @@ public class ZkCoordinator implements DataSegmentChangeHandler .addData("segments", segments) .emit(); } + finally { + callback.execute(); + } } @Override - public void removeSegment(DataSegment segment) + public void removeSegment(DataSegment segment, DataSegmentChangeCallback callback) { try { serverManager.dropSegment(segment); @@ -347,26 +242,8 @@ public class ZkCoordinator implements DataSegmentChangeHandler .addData("segment", segment) .emit(); } - } - - public void removeSegments(Iterable segments) - { - try { - for (DataSegment segment : segments) { - serverManager.dropSegment(segment); - - File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier()); - if (!segmentInfoCacheFile.delete()) { - log.warn("Unable to delete segmentInfoCacheFile[%s]", segmentInfoCacheFile); - } - } - - announcer.unannounceSegments(segments); - } - catch (Exception e) { - log.makeAlert(e, "Failed to remove segments") - .addData("segments", segments) - .emit(); + finally { + callback.execute(); } } } diff --git a/server/src/main/java/io/druid/server/coordinator/DruidCoordinator.java b/server/src/main/java/io/druid/server/coordinator/DruidCoordinator.java index 71a4d0eb08c..b11d2758035 100644 --- a/server/src/main/java/io/druid/server/coordinator/DruidCoordinator.java +++ b/server/src/main/java/io/druid/server/coordinator/DruidCoordinator.java @@ -44,6 +44,7 @@ import io.druid.client.DruidDataSource; import io.druid.client.DruidServer; import io.druid.client.ServerInventoryView; import io.druid.client.indexing.IndexingServiceClient; +import io.druid.collections.CountingMap; import io.druid.common.config.JacksonConfigManager; import io.druid.concurrent.Execs; import io.druid.curator.discovery.ServiceAnnouncer; @@ -53,6 +54,15 @@ import io.druid.guice.ManageLifecycle; import io.druid.guice.annotations.Self; import io.druid.segment.IndexIO; import io.druid.server.DruidNode; +import io.druid.server.coordinator.helper.DruidCoordinatorBalancer; +import io.druid.server.coordinator.helper.DruidCoordinatorCleanup; +import io.druid.server.coordinator.helper.DruidCoordinatorHelper; +import io.druid.server.coordinator.helper.DruidCoordinatorLogger; +import io.druid.server.coordinator.helper.DruidCoordinatorRuleRunner; +import io.druid.server.coordinator.helper.DruidCoordinatorSegmentInfoLoader; +import io.druid.server.coordinator.helper.DruidCoordinatorSegmentMerger; +import io.druid.server.coordinator.rules.LoadRule; +import io.druid.server.coordinator.rules.Rule; import io.druid.server.initialization.ZkPathsConfig; import io.druid.timeline.DataSegment; import org.apache.curator.framework.CuratorFramework; @@ -63,7 +73,6 @@ import org.apache.curator.utils.ZKPaths; import org.joda.time.DateTime; import org.joda.time.Duration; -import javax.annotation.Nullable; import java.io.IOException; import java.util.Arrays; import java.util.List; @@ -72,6 +81,7 @@ import java.util.Set; 
import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; /** @@ -183,6 +193,46 @@ public class DruidCoordinator return leader; } + public Map getLoadManagementPeons() + { + return loadManagementPeons; + } + + public Map getReplicationStatus() + { + // find expected load per datasource + final CountingMap expectedSegmentsInCluster = new CountingMap<>(); + final DateTime now = new DateTime(); + for (DataSegment segment : getAvailableDataSegments()) { + List rules = databaseRuleManager.getRulesWithDefault(segment.getDataSource()); + for (Rule rule : rules) { + if (rule instanceof LoadRule && rule.appliesTo(segment, now)) { + for (Integer numReplicants : ((LoadRule) rule).getTieredReplicants().values()) { + expectedSegmentsInCluster.add(segment.getDataSource(), numReplicants); + } + break; + } + } + } + + // find segments currently loaded per datasource + CountingMap segmentsInCluster = new CountingMap<>(); + for (DruidServer druidServer : serverInventoryView.getInventory()) { + for (DataSegment segment : druidServer.getSegments().values()) { + segmentsInCluster.add(segment.getDataSource(), 1); + } + } + + // compare available segments with currently loaded + Map loadStatus = Maps.newHashMap(); + for (Map.Entry entry : expectedSegmentsInCluster.entrySet()) { + Long actual = segmentsInCluster.get(entry.getKey()).get(); + loadStatus.put(entry.getKey(), 100 * (actual == null ? 0.0D : (double) actual) / entry.getValue().get()); + } + + return loadStatus; + } + public Map getLoadStatus() { // find available segments @@ -323,7 +373,7 @@ public class DruidCoordinator new LoadPeonCallback() { @Override - protected void execute() + public void execute() { try { if (curator.checkExists().forPath(toServedSegPath) != null && @@ -527,7 +577,7 @@ public class DruidCoordinator if (leader) { theRunnable.run(); } - if (leader) { // (We might no longer be coordinator) + if (leader) { // (We might no longer be leader) return ScheduledExecutors.Signal.REPEAT; } else { return ScheduledExecutors.Signal.STOP; @@ -687,11 +737,11 @@ public class DruidCoordinator // Do coordinator stuff. DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder() - .withStartTime(startTime) - .withDatasources(databaseSegmentManager.getInventory()) - .withDynamicConfigs(dynamicConfigs.get()) - .withEmitter(emitter) - .build(); + .withStartTime(startTime) + .withDatasources(databaseSegmentManager.getInventory()) + .withDynamicConfigs(dynamicConfigs.get()) + .withEmitter(emitter) + .build(); for (DruidCoordinatorHelper helper : helpers) { @@ -724,10 +774,10 @@ public class DruidCoordinator { @Override public boolean apply( - @Nullable DruidServer input + DruidServer input ) { - return input.getType().equalsIgnoreCase("historical"); + return !input.isRealtime(); } } ); @@ -760,11 +810,11 @@ public class DruidCoordinator SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(cluster); // Stop peons for servers that aren't there anymore. 
- final Set disdappearedServers = Sets.newHashSet(loadManagementPeons.keySet()); + final Set disappeared = Sets.newHashSet(loadManagementPeons.keySet()); for (DruidServer server : servers) { - disdappearedServers.remove(server.getName()); + disappeared.remove(server.getName()); } - for (String name : disdappearedServers) { + for (String name : disappeared) { log.info("Removing listener for server[%s] which is no longer there.", name); LoadQueuePeon peon = loadManagementPeons.remove(name); peon.stop(); diff --git a/server/src/main/java/io/druid/server/coordinator/LoadPeonCallback.java b/server/src/main/java/io/druid/server/coordinator/LoadPeonCallback.java index ff0cea085ca..eaf3b0267d7 100644 --- a/server/src/main/java/io/druid/server/coordinator/LoadPeonCallback.java +++ b/server/src/main/java/io/druid/server/coordinator/LoadPeonCallback.java @@ -21,7 +21,7 @@ package io.druid.server.coordinator; /** */ -public abstract class LoadPeonCallback +public interface LoadPeonCallback { - protected abstract void execute(); + public void execute(); } diff --git a/server/src/main/java/io/druid/server/coordinator/LoadQueuePeon.java b/server/src/main/java/io/druid/server/coordinator/LoadQueuePeon.java index dcd9a77a8f8..8e3c2509697 100644 --- a/server/src/main/java/io/druid/server/coordinator/LoadQueuePeon.java +++ b/server/src/main/java/io/druid/server/coordinator/LoadQueuePeon.java @@ -19,6 +19,7 @@ package io.druid.server.coordinator; +import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Function; import com.google.common.collect.Collections2; @@ -103,6 +104,7 @@ public class LoadQueuePeon this.config = config; } + @JsonProperty public Set getSegmentsToLoad() { return new ConcurrentSkipListSet( @@ -120,6 +122,7 @@ public class LoadQueuePeon ); } + @JsonProperty public Set getSegmentsToDrop() { return new ConcurrentSkipListSet( diff --git a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorBalancer.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorBalancer.java similarity index 92% rename from server/src/main/java/io/druid/server/coordinator/DruidCoordinatorBalancer.java rename to server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorBalancer.java index 0ca4ff32fe8..bc960258400 100644 --- a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorBalancer.java +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorBalancer.java @@ -17,7 +17,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ -package io.druid.server.coordinator; +package io.druid.server.coordinator.helper; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -25,6 +25,14 @@ import com.google.common.collect.MinMaxPriorityQueue; import com.metamx.common.guava.Comparators; import com.metamx.emitter.EmittingLogger; import io.druid.client.DruidServer; +import io.druid.server.coordinator.BalancerSegmentHolder; +import io.druid.server.coordinator.BalancerStrategy; +import io.druid.server.coordinator.CoordinatorStats; +import io.druid.server.coordinator.DruidCoordinator; +import io.druid.server.coordinator.DruidCoordinatorRuntimeParams; +import io.druid.server.coordinator.LoadPeonCallback; +import io.druid.server.coordinator.LoadQueuePeon; +import io.druid.server.coordinator.ServerHolder; import io.druid.timeline.DataSegment; import org.joda.time.DateTime; @@ -163,7 +171,7 @@ public class DruidCoordinatorBalancer implements DruidCoordinatorHelper callback = new LoadPeonCallback() { @Override - protected void execute() + public void execute() { Map movingSegments = currentlyMovingSegments.get(toServer.getTier()); if (movingSegments != null) { diff --git a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorCleanup.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorCleanup.java similarity index 89% rename from server/src/main/java/io/druid/server/coordinator/DruidCoordinatorCleanup.java rename to server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorCleanup.java index 659391514aa..33438e204e1 100644 --- a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorCleanup.java +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorCleanup.java @@ -17,7 +17,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ -package io.druid.server.coordinator; +package io.druid.server.coordinator.helper; import com.google.common.collect.Maps; import com.google.common.collect.MinMaxPriorityQueue; @@ -25,6 +25,14 @@ import com.metamx.common.guava.Comparators; import com.metamx.common.logger.Logger; import io.druid.client.DruidDataSource; import io.druid.client.DruidServer; +import io.druid.server.coordinator.CoordinatorStats; +import io.druid.server.coordinator.DruidCluster; +import io.druid.server.coordinator.DruidCoordinator; +import io.druid.server.coordinator.DruidCoordinatorRuntimeParams; +import io.druid.server.coordinator.LoadPeonCallback; +import io.druid.server.coordinator.LoadQueuePeon; +import io.druid.server.coordinator.ServerHolder; +import io.druid.server.coordinator.helper.DruidCoordinatorHelper; import io.druid.timeline.DataSegment; import io.druid.timeline.TimelineObjectHolder; import io.druid.timeline.VersionedIntervalTimeline; @@ -69,7 +77,7 @@ public class DruidCoordinatorCleanup implements DruidCoordinatorHelper segment, new LoadPeonCallback() { @Override - protected void execute() + public void execute() { } } diff --git a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorHelper.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorHelper.java similarity index 89% rename from server/src/main/java/io/druid/server/coordinator/DruidCoordinatorHelper.java rename to server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorHelper.java index e6eb7bb3997..26709744ab9 100644 --- a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorHelper.java +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorHelper.java @@ -17,7 +17,9 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ -package io.druid.server.coordinator; +package io.druid.server.coordinator.helper; + +import io.druid.server.coordinator.DruidCoordinatorRuntimeParams; /** */ diff --git a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorLogger.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorLogger.java similarity index 96% rename from server/src/main/java/io/druid/server/coordinator/DruidCoordinatorLogger.java rename to server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorLogger.java index b6c8bb8c240..aa03e808f17 100644 --- a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorLogger.java +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorLogger.java @@ -17,7 +17,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ -package io.druid.server.coordinator; +package io.druid.server.coordinator.helper; import com.google.common.collect.Maps; import com.google.common.collect.MinMaxPriorityQueue; @@ -27,6 +27,11 @@ import com.metamx.emitter.service.ServiceMetricEvent; import io.druid.client.DruidDataSource; import io.druid.client.DruidServer; import io.druid.collections.CountingMap; +import io.druid.server.coordinator.CoordinatorStats; +import io.druid.server.coordinator.DruidCluster; +import io.druid.server.coordinator.DruidCoordinatorRuntimeParams; +import io.druid.server.coordinator.LoadQueuePeon; +import io.druid.server.coordinator.ServerHolder; import io.druid.timeline.DataSegment; import java.util.Map; diff --git a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorRuleRunner.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorRuleRunner.java similarity index 92% rename from server/src/main/java/io/druid/server/coordinator/DruidCoordinatorRuleRunner.java rename to server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorRuleRunner.java index 594889201cf..15edf27d428 100644 --- a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorRuleRunner.java +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorRuleRunner.java @@ -17,10 +17,15 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ -package io.druid.server.coordinator; +package io.druid.server.coordinator.helper; import com.metamx.emitter.EmittingLogger; import io.druid.db.DatabaseRuleManager; +import io.druid.server.coordinator.CoordinatorStats; +import io.druid.server.coordinator.DruidCluster; +import io.druid.server.coordinator.DruidCoordinator; +import io.druid.server.coordinator.DruidCoordinatorRuntimeParams; +import io.druid.server.coordinator.ReplicationThrottler; import io.druid.server.coordinator.rules.Rule; import io.druid.timeline.DataSegment; import org.joda.time.DateTime; diff --git a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorSegmentInfoLoader.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorSegmentInfoLoader.java similarity index 91% rename from server/src/main/java/io/druid/server/coordinator/DruidCoordinatorSegmentInfoLoader.java rename to server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorSegmentInfoLoader.java index 69f8ccdbc43..8980bfded90 100644 --- a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorSegmentInfoLoader.java +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorSegmentInfoLoader.java @@ -17,9 +17,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ -package io.druid.server.coordinator; +package io.druid.server.coordinator.helper; import com.metamx.common.logger.Logger; +import io.druid.server.coordinator.DruidCoordinator; +import io.druid.server.coordinator.DruidCoordinatorRuntimeParams; import io.druid.timeline.DataSegment; import java.util.Set; diff --git a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorSegmentMerger.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorSegmentMerger.java similarity index 98% rename from server/src/main/java/io/druid/server/coordinator/DruidCoordinatorSegmentMerger.java rename to server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorSegmentMerger.java index 839cda93563..d39d1bbaac9 100644 --- a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorSegmentMerger.java +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorSegmentMerger.java @@ -17,7 +17,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ -package io.druid.server.coordinator; +package io.druid.server.coordinator.helper; import com.google.common.base.Function; import com.google.common.base.Preconditions; @@ -33,6 +33,9 @@ import com.metamx.common.Pair; import com.metamx.common.guava.FunctionalIterable; import com.metamx.common.logger.Logger; import io.druid.client.indexing.IndexingServiceClient; +import io.druid.server.coordinator.CoordinatorStats; +import io.druid.server.coordinator.DatasourceWhitelist; +import io.druid.server.coordinator.DruidCoordinatorRuntimeParams; import io.druid.timeline.DataSegment; import io.druid.timeline.TimelineObjectHolder; import io.druid.timeline.VersionedIntervalTimeline; diff --git a/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorVersionConverter.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorVersionConverter.java new file mode 100644 index 00000000000..70bef7c2216 --- /dev/null +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorVersionConverter.java @@ -0,0 +1,66 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +package io.druid.server.coordinator.helper; + +import com.metamx.emitter.EmittingLogger; +import io.druid.client.indexing.IndexingServiceClient; +import io.druid.segment.IndexIO; +import io.druid.server.coordinator.DatasourceWhitelist; +import io.druid.server.coordinator.DruidCoordinatorRuntimeParams; +import io.druid.timeline.DataSegment; + +import java.util.concurrent.atomic.AtomicReference; + +public class DruidCoordinatorVersionConverter implements DruidCoordinatorHelper +{ + private static final EmittingLogger log = new EmittingLogger(DruidCoordinatorVersionConverter.class); + + + private final IndexingServiceClient indexingServiceClient; + private final AtomicReference whitelistRef; + + public DruidCoordinatorVersionConverter( + IndexingServiceClient indexingServiceClient, + AtomicReference whitelistRef + ) + { + this.indexingServiceClient = indexingServiceClient; + this.whitelistRef = whitelistRef; + } + + @Override + public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) + { + DatasourceWhitelist whitelist = whitelistRef.get(); + + for (DataSegment dataSegment : params.getAvailableSegments()) { + if (whitelist == null || whitelist.contains(dataSegment.getDataSource())) { + final Integer binaryVersion = dataSegment.getBinaryVersion(); + + if (binaryVersion == null || binaryVersion < IndexIO.CURRENT_VERSION_ID) { + log.info("Upgrading version on segment[%s]", dataSegment.getIdentifier()); + indexingServiceClient.upgradeSegment(dataSegment); + } + } + } + + return params; + } +} diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/scaling/TestAutoScalingStrategy.java b/server/src/main/java/io/druid/server/coordinator/rules/ForeverDropRule.java similarity index 66% rename from indexing-service/src/test/java/io/druid/indexing/overlord/scaling/TestAutoScalingStrategy.java rename to server/src/main/java/io/druid/server/coordinator/rules/ForeverDropRule.java index 8ef2e0513f6..510cb29c55a 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/scaling/TestAutoScalingStrategy.java +++ b/server/src/main/java/io/druid/server/coordinator/rules/ForeverDropRule.java @@ -17,35 +17,26 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ -package io.druid.indexing.overlord.scaling; +package io.druid.server.coordinator.rules; -import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; +import io.druid.timeline.DataSegment; +import org.joda.time.DateTime; /** */ -public class TestAutoScalingStrategy implements AutoScalingStrategy +public class ForeverDropRule extends DropRule { @Override - public AutoScalingData provision() + @JsonProperty + public String getType() { - return null; + return "dropForever"; } @Override - public AutoScalingData terminate(List ips) + public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) { - return null; - } - - @Override - public List ipToIdLookup(List ips) - { - return null; - } - - @Override - public List idToIpLookup(List nodeIds) - { - return null; + return true; } } diff --git a/server/src/main/java/io/druid/server/coordinator/rules/SizeDropRule.java b/server/src/main/java/io/druid/server/coordinator/rules/ForeverLoadRule.java similarity index 69% rename from server/src/main/java/io/druid/server/coordinator/rules/SizeDropRule.java rename to server/src/main/java/io/druid/server/coordinator/rules/ForeverLoadRule.java index 219d5559b69..2150150cb03 100644 --- a/server/src/main/java/io/druid/server/coordinator/rules/SizeDropRule.java +++ b/server/src/main/java/io/druid/server/coordinator/rules/ForeverLoadRule.java @@ -21,51 +21,49 @@ package io.druid.server.coordinator.rules; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.collect.Range; import io.druid.timeline.DataSegment; import org.joda.time.DateTime; +import java.util.Map; + /** */ -public class SizeDropRule extends DropRule +public class ForeverLoadRule extends LoadRule { - private final long low; - private final long high; - private final Range range; + private final Map tieredReplicants; @JsonCreator - public SizeDropRule( - @JsonProperty("low") long low, - @JsonProperty("high") long high + public ForeverLoadRule( + @JsonProperty("tieredReplicants") Map tieredReplicants ) { - this.low = low; - this.high = high; - this.range = Range.closedOpen(low, high); + this.tieredReplicants = tieredReplicants; } @Override @JsonProperty public String getType() { - return "dropBySize"; + return "loadForever"; } + @Override @JsonProperty - public long getLow() + public Map getTieredReplicants() { - return low; + return tieredReplicants; } - @JsonProperty - public long getHigh() + @Override + public int getNumReplicants(String tier) { - return high; + Integer retVal = tieredReplicants.get(tier); + return (retVal == null) ? 
0 : retVal; } @Override public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) { - return range.contains(segment.getSize()); + return true; } } diff --git a/server/src/main/java/io/druid/server/coordinator/rules/IntervalLoadRule.java b/server/src/main/java/io/druid/server/coordinator/rules/IntervalLoadRule.java index 4fe01521e66..6a1599dc4ad 100644 --- a/server/src/main/java/io/druid/server/coordinator/rules/IntervalLoadRule.java +++ b/server/src/main/java/io/druid/server/coordinator/rules/IntervalLoadRule.java @@ -21,11 +21,14 @@ package io.druid.server.coordinator.rules; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.collect.ImmutableMap; import com.metamx.common.logger.Logger; import io.druid.timeline.DataSegment; import org.joda.time.DateTime; import org.joda.time.Interval; +import java.util.Map; + /** */ public class IntervalLoadRule extends LoadRule @@ -33,19 +36,25 @@ public class IntervalLoadRule extends LoadRule private static final Logger log = new Logger(IntervalLoadRule.class); private final Interval interval; - private final Integer replicants; - private final String tier; + private final Map<String, Integer> tieredReplicants; @JsonCreator public IntervalLoadRule( @JsonProperty("interval") Interval interval, + @JsonProperty("load") Map<String, Integer> tieredReplicants, + // Replicants and tier are deprecated @JsonProperty("replicants") Integer replicants, @JsonProperty("tier") String tier ) { this.interval = interval; - this.replicants = (replicants == null) ? 2 : replicants; - this.tier = tier; + + + if (tieredReplicants != null) { + this.tieredReplicants = tieredReplicants; + } else { // Backwards compatible + this.tieredReplicants = ImmutableMap.of(tier, replicants); + } } @Override @@ -55,24 +64,17 @@ public class IntervalLoadRule extends LoadRule return "loadByInterval"; } - @Override @JsonProperty - public int getReplicants() + public Map<String, Integer> getTieredReplicants() { - return replicants; + return tieredReplicants; } @Override - public int getReplicants(String tier) + public int getNumReplicants(String tier) { - return (this.tier.equalsIgnoreCase(tier)) ? replicants : 0; - } - - @Override - @JsonProperty - public String getTier() - { - return tier; + final Integer retVal = tieredReplicants.get(tier); + return retVal == null ?
0 : retVal; } @JsonProperty diff --git a/server/src/main/java/io/druid/server/coordinator/rules/LoadRule.java b/server/src/main/java/io/druid/server/coordinator/rules/LoadRule.java index 792d76d01b2..f629318395b 100644 --- a/server/src/main/java/io/druid/server/coordinator/rules/LoadRule.java +++ b/server/src/main/java/io/druid/server/coordinator/rules/LoadRule.java @@ -19,20 +19,20 @@ package io.druid.server.coordinator.rules; +import com.google.api.client.util.Maps; import com.google.common.collect.Lists; import com.google.common.collect.MinMaxPriorityQueue; import com.metamx.emitter.EmittingLogger; +import io.druid.server.coordinator.BalancerStrategy; import io.druid.server.coordinator.CoordinatorStats; import io.druid.server.coordinator.DruidCoordinator; import io.druid.server.coordinator.DruidCoordinatorRuntimeParams; import io.druid.server.coordinator.LoadPeonCallback; -import io.druid.server.coordinator.BalancerStrategy; import io.druid.server.coordinator.ReplicationThrottler; import io.druid.server.coordinator.ServerHolder; import io.druid.timeline.DataSegment; import org.joda.time.DateTime; -import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -48,39 +48,50 @@ public abstract class LoadRule implements Rule { CoordinatorStats stats = new CoordinatorStats(); - int expectedReplicants = getReplicants(); - int totalReplicants = params.getSegmentReplicantLookup().getTotalReplicants(segment.getIdentifier(), getTier()); - int clusterReplicants = params.getSegmentReplicantLookup().getClusterReplicants(segment.getIdentifier(), getTier()); + final Map loadStatus = Maps.newHashMap(); + for (Map.Entry entry : getTieredReplicants().entrySet()) { + final String tier = entry.getKey(); + final int expectedReplicants = entry.getValue(); - MinMaxPriorityQueue serverQueue = params.getDruidCluster().getServersByTier(getTier()); - if (serverQueue == null) { - log.makeAlert("Tier[%s] has no servers! Check your cluster configuration!", getTier()).emit(); - return stats; + int totalReplicants = params.getSegmentReplicantLookup().getTotalReplicants(segment.getIdentifier(), tier); + + MinMaxPriorityQueue serverQueue = params.getDruidCluster().getServersByTier(tier); + if (serverQueue == null) { + log.makeAlert("Tier[%s] has no servers! 
Check your cluster configuration!", tier).emit(); + return stats; + } + + final List serverHolderList = Lists.newArrayList(serverQueue); + final DateTime referenceTimestamp = params.getBalancerReferenceTimestamp(); + final BalancerStrategy strategy = params.getBalancerStrategyFactory().createBalancerStrategy(referenceTimestamp); + if (params.getAvailableSegments().contains(segment)) { + stats.accumulate( + assign( + params.getReplicationManager(), + tier, + expectedReplicants, + totalReplicants, + strategy, + serverHolderList, + segment + ) + ); + } + + int clusterReplicants = params.getSegmentReplicantLookup() + .getClusterReplicants(segment.getIdentifier(), tier); + loadStatus.put(tier, expectedReplicants - clusterReplicants); } + // Remove over-replication + stats.accumulate(drop(loadStatus, segment, params)); - final List serverHolderList = new ArrayList(serverQueue); - final DateTime referenceTimestamp = params.getBalancerReferenceTimestamp(); - final BalancerStrategy strategy = params.getBalancerStrategyFactory().createBalancerStrategy(referenceTimestamp); - if (params.getAvailableSegments().contains(segment)) { - stats.accumulate( - assign( - params.getReplicationManager(), - expectedReplicants, - totalReplicants, - strategy, - serverHolderList, - segment - ) - ); - } - - stats.accumulate(drop(expectedReplicants, clusterReplicants, segment, params)); return stats; } private CoordinatorStats assign( final ReplicationThrottler replicationManager, + final String tier, final int expectedReplicants, int totalReplicants, final BalancerStrategy strategy, @@ -89,11 +100,12 @@ public abstract class LoadRule implements Rule ) { final CoordinatorStats stats = new CoordinatorStats(); + stats.addToTieredStat("assignedCount", tier, 0); while (totalReplicants < expectedReplicants) { boolean replicate = totalReplicants > 0; - if (replicate && !replicationManager.canCreateReplicant(getTier())) { + if (replicate && !replicationManager.canCreateReplicant(tier)) { break; } @@ -101,8 +113,8 @@ public abstract class LoadRule implements Rule if (holder == null) { log.warn( - "Not enough %s servers or node capacity to assign segment[%s]! Expected Replicants[%d]", - getTier(), + "Not enough [%s] servers or node capacity to assign segment[%s]! 
Expected Replicants[%d]", + tier, segment.getIdentifier(), expectedReplicants ); @@ -111,7 +123,7 @@ public abstract class LoadRule implements Rule if (replicate) { replicationManager.registerReplicantCreation( - getTier(), segment.getIdentifier(), holder.getServer().getHost() + tier, segment.getIdentifier(), holder.getServer().getHost() ); } @@ -120,10 +132,10 @@ public abstract class LoadRule implements Rule new LoadPeonCallback() { @Override - protected void execute() + public void execute() { replicationManager.unregisterReplicantCreation( - getTier(), + tier, segment.getIdentifier(), holder.getServer().getHost() ); @@ -131,7 +143,7 @@ public abstract class LoadRule implements Rule } ); - stats.addToTieredStat("assignedCount", getTier(), 1); + stats.addToTieredStat("assignedCount", tier, 1); ++totalReplicants; } @@ -139,30 +151,35 @@ public abstract class LoadRule implements Rule } private CoordinatorStats drop( - int expectedReplicants, - int clusterReplicants, + final Map loadStatus, final DataSegment segment, final DruidCoordinatorRuntimeParams params ) { CoordinatorStats stats = new CoordinatorStats(); - final ReplicationThrottler replicationManager = params.getReplicationManager(); if (!params.hasDeletionWaitTimeElapsed()) { return stats; } - // Make sure we have enough actual replicants in the cluster before doing anything - if (clusterReplicants < expectedReplicants) { - return stats; + // Make sure we have enough actual replicants in the correct tiers in the cluster before doing anything + for (Integer leftToLoad : loadStatus.values()) { + if (leftToLoad > 0) { + return stats; + } } - Map replicantsByType = params.getSegmentReplicantLookup().getClusterTiers(segment.getIdentifier()); + final ReplicationThrottler replicationManager = params.getReplicationManager(); - for (Map.Entry entry : replicantsByType.entrySet()) { - String tier = entry.getKey(); - int actualNumReplicantsForType = entry.getValue(); - int expectedNumReplicantsForType = getReplicants(tier); + // Find all instances of this segment across tiers + Map replicantsByTier = params.getSegmentReplicantLookup().getClusterTiers(segment.getIdentifier()); + + for (Map.Entry entry : replicantsByTier.entrySet()) { + final String tier = entry.getKey(); + int actualNumReplicantsForTier = entry.getValue(); + int expectedNumReplicantsForTier = getNumReplicants(tier); + + stats.addToTieredStat("droppedCount", tier, 0); MinMaxPriorityQueue serverQueue = params.getDruidCluster().get(tier); if (serverQueue == null) { @@ -171,7 +188,7 @@ public abstract class LoadRule implements Rule } List droppedServers = Lists.newArrayList(); - while (actualNumReplicantsForType > expectedNumReplicantsForType) { + while (actualNumReplicantsForTier > expectedNumReplicantsForTier) { final ServerHolder holder = serverQueue.pollLast(); if (holder == null) { log.warn("Wtf, holder was null? 
I have no servers serving [%s]?", segment.getIdentifier()); @@ -179,14 +196,14 @@ public abstract class LoadRule implements Rule } if (holder.isServingSegment(segment)) { - if (expectedNumReplicantsForType > 0) { // don't throttle unless we are removing extra replicants - if (!replicationManager.canDestroyReplicant(getTier())) { + if (expectedNumReplicantsForTier > 0) { // don't throttle unless we are removing extra replicants + if (!replicationManager.canDestroyReplicant(tier)) { serverQueue.add(holder); break; } replicationManager.registerReplicantTermination( - getTier(), + tier, segment.getIdentifier(), holder.getServer().getHost() ); @@ -197,17 +214,17 @@ public abstract class LoadRule implements Rule new LoadPeonCallback() { @Override - protected void execute() + public void execute() { replicationManager.unregisterReplicantTermination( - getTier(), + tier, segment.getIdentifier(), holder.getServer().getHost() ); } } ); - --actualNumReplicantsForType; + --actualNumReplicantsForTier; stats.addToTieredStat("droppedCount", tier, 1); } droppedServers.add(holder); @@ -218,9 +235,7 @@ public abstract class LoadRule implements Rule return stats; } - public abstract int getReplicants(); + public abstract Map getTieredReplicants(); - public abstract int getReplicants(String tier); - - public abstract String getTier(); + public abstract int getNumReplicants(String tier); } diff --git a/server/src/main/java/io/druid/server/coordinator/rules/PeriodLoadRule.java b/server/src/main/java/io/druid/server/coordinator/rules/PeriodLoadRule.java index a2048756894..bfad025b4a5 100644 --- a/server/src/main/java/io/druid/server/coordinator/rules/PeriodLoadRule.java +++ b/server/src/main/java/io/druid/server/coordinator/rules/PeriodLoadRule.java @@ -21,12 +21,15 @@ package io.druid.server.coordinator.rules; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.collect.ImmutableMap; import com.metamx.common.logger.Logger; import io.druid.timeline.DataSegment; import org.joda.time.DateTime; import org.joda.time.Interval; import org.joda.time.Period; +import java.util.Map; + /** */ public class PeriodLoadRule extends LoadRule @@ -34,19 +37,24 @@ public class PeriodLoadRule extends LoadRule private static final Logger log = new Logger(PeriodLoadRule.class); private final Period period; - private final Integer replicants; - private final String tier; + private final Map tieredReplicants; @JsonCreator public PeriodLoadRule( @JsonProperty("period") Period period, - @JsonProperty("replicants") Integer replicants, + @JsonProperty("tieredReplicants") Map tieredReplicants, + // The following two vars need to be deprecated + @JsonProperty("replicants") int replicants, @JsonProperty("tier") String tier ) { this.period = period; - this.replicants = (replicants == null) ? 2 : replicants; - this.tier = tier; + + if (tieredReplicants != null) { + this.tieredReplicants = tieredReplicants; + } else { // Backwards compatible + this.tieredReplicants = ImmutableMap.of(tier, replicants); + } } @Override @@ -62,22 +70,18 @@ public class PeriodLoadRule extends LoadRule return period; } + @Override @JsonProperty - public int getReplicants() + public Map getTieredReplicants() { - return replicants; + return tieredReplicants; } @Override - public int getReplicants(String tier) + public int getNumReplicants(String tier) { - return (this.tier.equalsIgnoreCase(tier)) ? 
replicants : 0; - } - - @JsonProperty - public String getTier() - { - return tier; + final Integer retVal = tieredReplicants.get(tier); + return retVal == null ? 0 : retVal; } @Override diff --git a/server/src/main/java/io/druid/server/coordinator/rules/Rule.java b/server/src/main/java/io/druid/server/coordinator/rules/Rule.java index 7c720e1aa2f..255c31b4f67 100644 --- a/server/src/main/java/io/druid/server/coordinator/rules/Rule.java +++ b/server/src/main/java/io/druid/server/coordinator/rules/Rule.java @@ -33,12 +33,10 @@ import org.joda.time.DateTime; @JsonSubTypes(value = { @JsonSubTypes.Type(name = "loadByPeriod", value = PeriodLoadRule.class), @JsonSubTypes.Type(name = "loadByInterval", value = IntervalLoadRule.class), + @JsonSubTypes.Type(name = "loadForever", value = ForeverLoadRule.class), @JsonSubTypes.Type(name = "dropByPeriod", value = PeriodDropRule.class), - @JsonSubTypes.Type(name = "dropByInterval", value = IntervalDropRule.class), - @JsonSubTypes.Type(name = "loadBySize", value = SizeLoadRule.class), - @JsonSubTypes.Type(name = "dropBySize", value = SizeDropRule.class) + @JsonSubTypes.Type(name = "dropByInterval", value = IntervalDropRule.class) }) - public interface Rule { public String getType(); diff --git a/server/src/main/java/io/druid/server/coordinator/rules/SizeLoadRule.java b/server/src/main/java/io/druid/server/coordinator/rules/SizeLoadRule.java deleted file mode 100644 index 6fdc10f822c..00000000000 --- a/server/src/main/java/io/druid/server/coordinator/rules/SizeLoadRule.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Druid - a distributed column store. - * Copyright (C) 2012, 2013 Metamarkets Group Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -package io.druid.server.coordinator.rules; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.collect.Range; -import io.druid.timeline.DataSegment; -import org.joda.time.DateTime; - -/** - */ -public class SizeLoadRule extends LoadRule -{ - private final long low; - private final long high; - private final Integer replicants; - private final String tier; - private final Range range; - - @JsonCreator - public SizeLoadRule( - @JsonProperty("low") long low, - @JsonProperty("high") long high, - @JsonProperty("replicants") Integer replicants, - @JsonProperty("tier") String tier - ) - { - this.low = low; - this.high = high; - this.replicants = replicants; - this.tier = tier; - this.range = Range.closedOpen(low, high); - } - - @Override - @JsonProperty - public int getReplicants() - { - return replicants; - } - - @Override - public int getReplicants(String tier) - { - return (this.tier.equalsIgnoreCase(tier)) ? 
replicants : 0; - } - - @Override - @JsonProperty - public String getTier() - { - return tier; - } - - @Override - public String getType() - { - return "loadBySize"; - } - - @JsonProperty - public long getLow() - { - return low; - } - - @JsonProperty - public long getHigh() - { - return high; - } - - @Override - public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp) - { - return range.contains(segment.getSize()); - } -} diff --git a/server/src/main/java/io/druid/server/http/BackwardsCompatibleCoordinatorResource.java b/server/src/main/java/io/druid/server/http/BackwardsCompatibleCoordinatorResource.java new file mode 100644 index 00000000000..0ea5329e910 --- /dev/null +++ b/server/src/main/java/io/druid/server/http/BackwardsCompatibleCoordinatorResource.java @@ -0,0 +1,62 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.server.http; + +import com.google.inject.Inject; +import io.druid.server.coordinator.DruidCoordinator; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.Response; + +/** + */ +@Deprecated +@Path("/coordinator") +public class BackwardsCompatibleCoordinatorResource +{ + private final DruidCoordinator coordinator; + + @Inject + public BackwardsCompatibleCoordinatorResource( + DruidCoordinator coordinator + ) + { + this.coordinator = coordinator; + } + + @GET + @Path("/leader") + @Produces("application/json") + public Response getLeader() + { + return Response.ok(coordinator.getCurrentLeader()).build(); + } + + @GET + @Path("/loadstatus") + @Produces("application/json") + public Response getLoadStatus( + ) + { + return Response.ok(coordinator.getLoadStatus()).build(); + } +} \ No newline at end of file diff --git a/server/src/main/java/io/druid/server/http/BackwardsCompatibleInfoResource.java b/server/src/main/java/io/druid/server/http/BackwardsCompatibleInfoResource.java index ed1cf580887..a7ebf36856a 100644 --- a/server/src/main/java/io/druid/server/http/BackwardsCompatibleInfoResource.java +++ b/server/src/main/java/io/druid/server/http/BackwardsCompatibleInfoResource.java @@ -19,6 +19,7 @@ package io.druid.server.http; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.inject.Inject; import io.druid.client.InventoryView; import io.druid.client.indexing.IndexingServiceClient; @@ -31,6 +32,7 @@ import javax.ws.rs.Path; /** */ +@Deprecated @Path("/static/info") public class BackwardsCompatibleInfoResource extends InfoResource { @@ -40,9 +42,17 @@ public class BackwardsCompatibleInfoResource extends InfoResource InventoryView serverInventoryView, DatabaseSegmentManager databaseSegmentManager, DatabaseRuleManager databaseRuleManager, - @Nullable IndexingServiceClient indexingServiceClient 
+ @Nullable IndexingServiceClient indexingServiceClient, + ObjectMapper jsonMapper ) { - super(coordinator, serverInventoryView, databaseSegmentManager, databaseRuleManager, indexingServiceClient); + super( + coordinator, + serverInventoryView, + databaseSegmentManager, + databaseRuleManager, + indexingServiceClient, + jsonMapper + ); } } diff --git a/server/src/main/java/io/druid/server/http/CoordinatorDynamicConfigsResource.java b/server/src/main/java/io/druid/server/http/CoordinatorDynamicConfigsResource.java index 7efa753053b..675818122e6 100644 --- a/server/src/main/java/io/druid/server/http/CoordinatorDynamicConfigsResource.java +++ b/server/src/main/java/io/druid/server/http/CoordinatorDynamicConfigsResource.java @@ -32,7 +32,7 @@ import javax.ws.rs.core.Response; /** */ -@Path("/coordinator/config") +@Path("/druid/coordinator/v1/config") public class CoordinatorDynamicConfigsResource { private final JacksonConfigManager manager; diff --git a/server/src/main/java/io/druid/server/http/CoordinatorResource.java b/server/src/main/java/io/druid/server/http/CoordinatorResource.java index 1c70dd39c8c..aea61681183 100644 --- a/server/src/main/java/io/druid/server/http/CoordinatorResource.java +++ b/server/src/main/java/io/druid/server/http/CoordinatorResource.java @@ -19,21 +19,23 @@ package io.druid.server.http; +import com.google.common.base.Function; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; import com.google.inject.Inject; import io.druid.server.coordinator.DruidCoordinator; -import io.druid.server.coordinator.LoadPeonCallback; +import io.druid.server.coordinator.LoadQueuePeon; +import io.druid.timeline.DataSegment; -import javax.ws.rs.Consumes; import javax.ws.rs.GET; -import javax.ws.rs.POST; import javax.ws.rs.Path; import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; import javax.ws.rs.core.Response; -import java.util.List; /** */ -@Path("/coordinator") +@Path("/druid/coordinator/v1") public class CoordinatorResource { private final DruidCoordinator coordinator; @@ -46,74 +48,64 @@ public class CoordinatorResource this.coordinator = coordinator; } - @POST - @Path("/move") - @Consumes("application/json") - public Response moveSegment(List segmentsToMove) + @GET + @Path("/leader") + @Produces("application/json") + public Response getLeader() { - Response resp = Response.status(Response.Status.OK).build(); - for (SegmentToMove segmentToMove : segmentsToMove) { - try { - coordinator.moveSegment( - segmentToMove.getFromServer(), - segmentToMove.getToServer(), - segmentToMove.getSegmentName(), - new LoadPeonCallback() - { - @Override - protected void execute() - { - return; - } - } - ); - } - catch (Exception e) { - resp = Response - .status(Response.Status.BAD_REQUEST) - .entity(e.getMessage()) - .build(); - break; - } - } - return resp; - } - - @POST - @Path("/drop") - @Consumes("application/json") - public Response dropSegment(List segmentsToDrop) - { - Response resp = Response.status(Response.Status.OK).build(); - for (SegmentToDrop segmentToDrop : segmentsToDrop) { - try { - coordinator.dropSegment( - segmentToDrop.getFromServer(), segmentToDrop.getSegmentName(), new LoadPeonCallback() - { - @Override - protected void execute() - { - return; - } - } - ); - } - catch (Exception e) { - resp = Response - .status(Response.Status.BAD_REQUEST) - .entity(e.getMessage()) - .build(); - break; - } - } - return resp; + return Response.ok(coordinator.getCurrentLeader()).build(); } @GET @Path("/loadstatus") @Produces("application/json") - 
public Response getLoadStatus() + public Response getLoadStatus( + @QueryParam("full") String full + ) { + if (full != null) { + return Response.ok(coordinator.getReplicationStatus()).build(); + } return Response.ok(coordinator.getLoadStatus()).build(); } + + @GET + @Path("/loadqueue") + @Produces("application/json") + public Response getLoadQueue( + @QueryParam("simple") String simple + ) + { + if (simple != null) { + return Response.ok( + Maps.transformValues( + coordinator.getLoadManagementPeons(), + new Function() + { + @Override + public Object apply(LoadQueuePeon input) + { + long loadSize = 0; + for (DataSegment dataSegment : input.getSegmentsToLoad()) { + loadSize += dataSegment.getSize(); + } + + long dropSize = 0; + for (DataSegment dataSegment : input.getSegmentsToDrop()) { + dropSize += dataSegment.getSize(); + } + + return new ImmutableMap.Builder<>() + .put("segmentsToLoad", input.getSegmentsToLoad().size()) + .put("segmentsToDrop", input.getSegmentsToDrop().size()) + .put("segmentsToLoadSize", loadSize) + .put("segmentsToDropSize", dropSize) + .build(); + } + } + ) + ).build(); + } + return Response.ok(coordinator.getLoadManagementPeons()).build(); + } } \ No newline at end of file diff --git a/server/src/main/java/io/druid/server/http/DBResource.java b/server/src/main/java/io/druid/server/http/DBResource.java new file mode 100644 index 00000000000..f979b76961b --- /dev/null +++ b/server/src/main/java/io/druid/server/http/DBResource.java @@ -0,0 +1,159 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +package io.druid.server.http; + +import com.google.common.base.Function; +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; +import com.google.inject.Inject; +import io.druid.client.DruidDataSource; +import io.druid.db.DatabaseSegmentManager; +import io.druid.timeline.DataSegment; + +import javax.annotation.Nullable; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Response; +import java.util.Collections; +import java.util.List; + +/** + */ +@Path("/druid/coordinator/v1/db") +public class DBResource +{ + private final DatabaseSegmentManager databaseSegmentManager; + + @Inject + public DBResource( + DatabaseSegmentManager databaseSegmentManager + ) + { + this.databaseSegmentManager = databaseSegmentManager; + } + + + @GET + @Path("/datasources") + @Produces("application/json") + public Response getDatabaseDataSources( + @QueryParam("full") String full, + @QueryParam("includeDisabled") String includeDisabled + ) + { + Response.ResponseBuilder builder = Response.status(Response.Status.OK); + if (includeDisabled != null) { + return builder.entity(databaseSegmentManager.getAllDatasourceNames()).build(); + } + if (full != null) { + return builder.entity(databaseSegmentManager.getInventory()).build(); + } + + List dataSourceNames = Lists.newArrayList( + Iterables.transform( + databaseSegmentManager.getInventory(), + new Function() + { + @Override + public String apply(@Nullable DruidDataSource dataSource) + { + return dataSource.getName(); + } + } + ) + ); + + Collections.sort(dataSourceNames); + + return builder.entity(dataSourceNames).build(); + } + + @GET + @Path("/datasources/{dataSourceName}") + @Produces("application/json") + public Response getDatabaseSegmentDataSource( + @PathParam("dataSourceName") final String dataSourceName + ) + { + DruidDataSource dataSource = databaseSegmentManager.getInventoryValue(dataSourceName); + if (dataSource == null) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + + return Response.status(Response.Status.OK).entity(dataSource).build(); + } + + @GET + @Path("/datasources/{dataSourceName}/segments") + @Produces("application/json") + public Response getDatabaseSegmentDataSourceSegments( + @PathParam("dataSourceName") String dataSourceName, + @QueryParam("full") String full + ) + { + DruidDataSource dataSource = databaseSegmentManager.getInventoryValue(dataSourceName); + if (dataSource == null) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + + Response.ResponseBuilder builder = Response.status(Response.Status.OK); + if (full != null) { + return builder.entity(dataSource.getSegments()).build(); + } + + return builder.entity( + Iterables.transform( + dataSource.getSegments(), + new Function() + { + @Override + public Object apply(@Nullable DataSegment segment) + { + return segment.getIdentifier(); + } + } + ) + ).build(); + } + + @GET + @Path("/datasources/{dataSourceName}/segments/{segmentId}") + @Produces("application/json") + public Response getDatabaseSegmentDataSourceSegment( + @PathParam("dataSourceName") String dataSourceName, + @PathParam("segmentId") String segmentId + ) + { + DruidDataSource dataSource = databaseSegmentManager.getInventoryValue(dataSourceName); + if (dataSource == null) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + + for (DataSegment segment : dataSource.getSegments()) { + if 
(segment.getIdentifier().equalsIgnoreCase(segmentId)) { + return Response.status(Response.Status.OK).entity(segment).build(); + } + } + return Response.status(Response.Status.NOT_FOUND).build(); + } +} diff --git a/server/src/main/java/io/druid/server/http/DatasourcesResource.java b/server/src/main/java/io/druid/server/http/DatasourcesResource.java new file mode 100644 index 00000000000..c5718782d77 --- /dev/null +++ b/server/src/main/java/io/druid/server/http/DatasourcesResource.java @@ -0,0 +1,325 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.server.http; + +import com.google.common.base.Function; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.inject.Inject; +import io.druid.client.DruidDataSource; +import io.druid.client.DruidServer; +import io.druid.client.InventoryView; +import io.druid.client.indexing.IndexingServiceClient; +import io.druid.db.DatabaseSegmentManager; +import io.druid.segment.IndexGranularity; +import io.druid.timeline.DataSegment; +import org.joda.time.Interval; + +import javax.annotation.Nullable; +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Response; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +/** + */ +@Path("/druid/coordinator/v1/datasources") +public class DatasourcesResource +{ + private static Map makeSimpleDatasource(DruidDataSource input) + { + return new ImmutableMap.Builder() + .put("name", input.getName()) + .put("properties", input.getProperties()) + .build(); + } + + private final InventoryView serverInventoryView; + private final DatabaseSegmentManager databaseSegmentManager; + private final IndexingServiceClient indexingServiceClient; + + @Inject + public DatasourcesResource( + InventoryView serverInventoryView, + DatabaseSegmentManager databaseSegmentManager, + @Nullable IndexingServiceClient indexingServiceClient + ) + { + this.serverInventoryView = serverInventoryView; + this.databaseSegmentManager = databaseSegmentManager; + this.indexingServiceClient = indexingServiceClient; + } + + @GET + @Produces("application/json") + public Response getQueryableDataSources( + @QueryParam("full") String full, + @QueryParam("simple") String simple, + @QueryParam("gran") String gran + ) + { + Response.ResponseBuilder builder = Response.status(Response.Status.OK); + if (full != null) { + return 
builder.entity(getDataSources()).build(); + } else if (simple != null) { + return builder.entity( + Lists.newArrayList( + Iterables.transform( + getDataSources(), + new Function>() + { + @Override + public Map apply(DruidDataSource dataSource) + { + return makeSimpleDatasource(dataSource); + } + } + ) + ) + ).build(); + } else if (gran != null) { + IndexGranularity granularity = IndexGranularity.fromString(gran); + // TODO + } + + return builder.entity( + Lists.newArrayList( + Iterables.transform( + getDataSources(), + new Function() + { + @Override + public String apply(DruidDataSource dataSource) + { + return dataSource.getName(); + } + } + ) + ) + ).build(); + } + + @DELETE + @Path("/{dataSourceName}") + public Response deleteDataSource( + @PathParam("dataSourceName") final String dataSourceName, + @QueryParam("kill") final String kill, + @QueryParam("interval") final String interval + ) + { + if (indexingServiceClient == null) { + return Response.status(Response.Status.OK).entity(ImmutableMap.of("error", "no indexing service found")).build(); + } + if (kill != null && Boolean.valueOf(kill)) { + indexingServiceClient.killSegments(dataSourceName, new Interval(interval)); + } else { + if (!databaseSegmentManager.removeDatasource(dataSourceName)) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + } + + return Response.status(Response.Status.OK).build(); + } + + @POST + @Path("/{dataSourceName}") + @Consumes("application/json") + public Response enableDataSource( + @PathParam("dataSourceName") final String dataSourceName + ) + { + if (!databaseSegmentManager.enableDatasource(dataSourceName)) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + + return Response.status(Response.Status.OK).build(); + } + + @GET + @Path("/{dataSourceName}/segments") + @Produces("application/json") + public Response getSegmentDataSourceSegments( + @PathParam("dataSourceName") String dataSourceName, + @QueryParam("full") String full + ) + { + DruidDataSource dataSource = getDataSource(dataSourceName.toLowerCase()); + if (dataSource == null) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + + Response.ResponseBuilder builder = Response.status(Response.Status.OK); + if (full != null) { + return builder.entity(dataSource.getSegments()).build(); + } + + return builder.entity( + Iterables.transform( + dataSource.getSegments(), + new Function() + { + @Override + public Object apply(@Nullable DataSegment segment) + { + return segment.getIdentifier(); + } + } + ) + ).build(); + } + + @GET + @Path("/{dataSourceName}/segments/{segmentId}") + @Produces("application/json") + public Response getSegmentDataSourceSegment( + @PathParam("dataSourceName") String dataSourceName, + @PathParam("segmentId") String segmentId + ) + { + DruidDataSource dataSource = getDataSource(dataSourceName.toLowerCase()); + if (dataSource == null) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + + for (DataSegment segment : dataSource.getSegments()) { + if (segment.getIdentifier().equalsIgnoreCase(segmentId)) { + return Response.status(Response.Status.OK).entity(segment).build(); + } + } + return Response.status(Response.Status.NOT_FOUND).build(); + } + + @DELETE + @Path("/{dataSourceName}/segments/{segmentId}") + public Response deleteDatasourceSegment( + @PathParam("dataSourceName") String dataSourceName, + @PathParam("segmentId") String segmentId + ) + { + if (!databaseSegmentManager.removeSegment(dataSourceName, segmentId)) { + return 
Response.status(Response.Status.NOT_FOUND).build(); + } + + return Response.status(Response.Status.OK).build(); + } + + @POST + @Path("/{dataSourceName}/segments/{segmentId}") + @Consumes("application/json") + public Response enableDatasourceSegment( + @PathParam("dataSourceName") String dataSourceName, + @PathParam("segmentId") String segmentId + ) + { + if (!databaseSegmentManager.enableSegment(segmentId)) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + + return Response.status(Response.Status.OK).build(); + } + + private DruidDataSource getDataSource(final String dataSourceName) + { + Iterable dataSources = + Iterables.concat( + Iterables.transform( + serverInventoryView.getInventory(), + new Function() + { + @Override + public DruidDataSource apply(DruidServer input) + { + return input.getDataSource(dataSourceName); + } + } + ) + ); + + List validDataSources = Lists.newArrayList(); + for (DruidDataSource dataSource : dataSources) { + if (dataSource != null) { + validDataSources.add(dataSource); + } + } + if (validDataSources.isEmpty()) { + return null; + } + + Map segmentMap = Maps.newHashMap(); + for (DruidDataSource dataSource : validDataSources) { + if (dataSource != null) { + Iterable segments = dataSource.getSegments(); + for (DataSegment segment : segments) { + segmentMap.put(segment.getIdentifier(), segment); + } + } + } + + return new DruidDataSource( + dataSourceName, + ImmutableMap.of() + ).addSegments(segmentMap); + } + + private Set getDataSources() + { + TreeSet dataSources = Sets.newTreeSet( + new Comparator() + { + @Override + public int compare(DruidDataSource druidDataSource, DruidDataSource druidDataSource1) + { + return druidDataSource.getName().compareTo(druidDataSource1.getName()); + } + } + ); + dataSources.addAll( + Lists.newArrayList( + Iterables.concat( + Iterables.transform( + serverInventoryView.getInventory(), + new Function>() + { + @Override + public Iterable apply(DruidServer input) + { + return input.getDataSources(); + } + } + ) + ) + ) + ); + return dataSources; + } +} diff --git a/server/src/main/java/io/druid/server/http/InfoResource.java b/server/src/main/java/io/druid/server/http/InfoResource.java index c702c4450ca..0786e51b34a 100644 --- a/server/src/main/java/io/druid/server/http/InfoResource.java +++ b/server/src/main/java/io/druid/server/http/InfoResource.java @@ -19,6 +19,9 @@ package io.druid.server.http; +import com.fasterxml.jackson.annotation.JacksonInject; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Function; import com.google.common.collect.Collections2; import com.google.common.collect.ImmutableMap; @@ -34,11 +37,13 @@ import io.druid.client.indexing.IndexingServiceClient; import io.druid.db.DatabaseRuleManager; import io.druid.db.DatabaseSegmentManager; import io.druid.server.coordinator.DruidCoordinator; +import io.druid.server.coordinator.rules.LoadRule; import io.druid.server.coordinator.rules.Rule; import io.druid.timeline.DataSegment; import org.joda.time.Interval; import javax.annotation.Nullable; +import javax.validation.constraints.NotNull; import javax.ws.rs.Consumes; import javax.ws.rs.DELETE; import javax.ws.rs.GET; @@ -57,6 +62,7 @@ import java.util.TreeSet; /** */ +@Deprecated @Path("/info") public class InfoResource { @@ -102,6 +108,7 @@ public class InfoResource private final DatabaseRuleManager databaseRuleManager; private final IndexingServiceClient indexingServiceClient; + private final ObjectMapper 
jsonMapper; @Inject public InfoResource( @@ -110,7 +117,8 @@ public class InfoResource DatabaseSegmentManager databaseSegmentManager, DatabaseRuleManager databaseRuleManager, @Nullable - IndexingServiceClient indexingServiceClient + IndexingServiceClient indexingServiceClient, + ObjectMapper jsonMapper ) { this.coordinator = coordinator; @@ -118,6 +126,7 @@ public class InfoResource this.databaseSegmentManager = databaseSegmentManager; this.databaseRuleManager = databaseRuleManager; this.indexingServiceClient = indexingServiceClient; + this.jsonMapper = jsonMapper; } @GET @@ -347,9 +356,49 @@ public class InfoResource @Produces("application/json") public Response getRules() { - return Response.status(Response.Status.OK) - .entity(databaseRuleManager.getAllRules()) - .build(); + // FUGLY, backwards compatibility + // This will def. be removed as part of the next release + return Response.ok().entity( + Maps.transformValues( + databaseRuleManager.getAllRules(), + new Function, Object>() + { + @Override + public Object apply(List rules) + { + return Lists.transform( + rules, + new Function() + { + @Override + public Object apply(Rule rule) + { + if (rule instanceof LoadRule) { + Map newRule = jsonMapper.convertValue( + rule, new TypeReference>() + { + } + ); + Set tiers = Sets.newHashSet(((LoadRule) rule).getTieredReplicants().keySet()); + String tier = DruidServer.DEFAULT_TIER; + if (tiers.size() > 1) { + tiers.remove(DruidServer.DEFAULT_TIER); + tier = tiers.iterator().next(); + } + + newRule.put("tier", tier); + newRule.put("replicants", ((LoadRule) rule).getNumReplicants(tier)); + + return newRule; + } + return rule; + } + } + ); + } + } + ) + ).build(); } @GET diff --git a/server/src/main/java/io/druid/server/http/RulesResource.java b/server/src/main/java/io/druid/server/http/RulesResource.java new file mode 100644 index 00000000000..bd6d98e925a --- /dev/null +++ b/server/src/main/java/io/druid/server/http/RulesResource.java @@ -0,0 +1,70 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +package io.druid.server.http; + +import com.google.inject.Inject; +import io.druid.db.DatabaseRuleManager; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Response; + +/** + */ +@Path("/druid/coordinator/v1/rules") +public class RulesResource +{ + private final DatabaseRuleManager databaseRuleManager; + + @Inject + public RulesResource( + DatabaseRuleManager databaseRuleManager + ) + { + this.databaseRuleManager = databaseRuleManager; + } + + @GET + @Produces("application/json") + public Response getRules() + { + return Response.ok(databaseRuleManager.getAllRules()).build(); + } + + @GET + @Path("/{dataSourceName}") + @Produces("application/json") + public Response getDatasourceRules( + @PathParam("dataSourceName") final String dataSourceName, + @QueryParam("full") final String full + + ) + { + if (full != null) { + return Response.ok(databaseRuleManager.getRulesWithDefault(dataSourceName)) + .build(); + } + return Response.ok(databaseRuleManager.getRules(dataSourceName)) + .build(); + } +} diff --git a/server/src/main/java/io/druid/server/http/ServersResource.java b/server/src/main/java/io/druid/server/http/ServersResource.java new file mode 100644 index 00000000000..cf6a7688a86 --- /dev/null +++ b/server/src/main/java/io/druid/server/http/ServersResource.java @@ -0,0 +1,188 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +package io.druid.server.http; + +import com.google.common.base.Function; +import com.google.common.collect.Collections2; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; +import com.google.inject.Inject; +import io.druid.client.DruidServer; +import io.druid.client.InventoryView; +import io.druid.timeline.DataSegment; + +import javax.annotation.Nullable; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Response; +import java.util.Map; + +/** + */ +@Path("/druid/coordinator/v1/servers") +public class ServersResource +{ + private static Map makeSimpleServer(DruidServer input) + { + return new ImmutableMap.Builder() + .put("host", input.getHost()) + .put("tier", input.getTier()) + .put("currSize", input.getCurrSize()) + .put("maxSize", input.getMaxSize()) + .build(); + } + + private final InventoryView serverInventoryView; + + @Inject + public ServersResource( + InventoryView serverInventoryView + ) + { + this.serverInventoryView = serverInventoryView; + } + + @GET + @Produces("application/json") + public Response getClusterServers( + @QueryParam("full") String full, + @QueryParam("simple") String simple + ) + { + Response.ResponseBuilder builder = Response.status(Response.Status.OK); + + if (full != null) { + return builder.entity(Lists.newArrayList(serverInventoryView.getInventory())).build(); + } else if (simple != null) { + return builder.entity( + Lists.newArrayList( + Iterables.transform( + serverInventoryView.getInventory(), + new Function>() + { + @Override + public Map apply(DruidServer input) + { + return makeSimpleServer(input); + } + } + ) + ) + ).build(); + } + + return builder.entity( + Lists.newArrayList( + Iterables.transform( + serverInventoryView.getInventory(), + new Function() + { + @Override + public String apply(DruidServer druidServer) + { + return druidServer.getHost(); + } + } + ) + ) + ).build(); + } + + @GET + @Path("/{serverName}") + @Produces("application/json") + public Response getServer( + @PathParam("serverName") String serverName, + @QueryParam("simple") String simple + ) + { + DruidServer server = serverInventoryView.getInventoryValue(serverName); + if (server == null) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + + Response.ResponseBuilder builder = Response.status(Response.Status.OK); + + if (simple != null) { + return builder.entity(makeSimpleServer(server)).build(); + } + + return builder.entity(server) + .build(); + } + + @GET + @Path("/{serverName}/segments") + @Produces("application/json") + public Response getServerSegments( + @PathParam("serverName") String serverName, + @QueryParam("full") String full + ) + { + Response.ResponseBuilder builder = Response.status(Response.Status.OK); + DruidServer server = serverInventoryView.getInventoryValue(serverName); + if (server == null) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + + if (full != null) { + return builder.entity(server.getSegments().values()).build(); + } + + return builder.entity( + Collections2.transform( + server.getSegments().values(), + new Function() + { + @Override + public String apply(@Nullable DataSegment segment) + { + return segment.getIdentifier(); + } + } + ) + ).build(); + } + + @GET + @Path("/{serverName}/segments/{segmentId}") + @Produces("application/json") + public Response getServerSegment( + @PathParam("serverName") 
String serverName, + @PathParam("segmentId") String segmentId + ) + { + DruidServer server = serverInventoryView.getInventoryValue(serverName); + if (server == null) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + + DataSegment segment = server.getSegment(segmentId); + if (segment == null) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + + return Response.status(Response.Status.OK).entity(segment).build(); + } +} diff --git a/server/src/main/java/io/druid/server/http/TiersResource.java b/server/src/main/java/io/druid/server/http/TiersResource.java new file mode 100644 index 00000000000..2ec84d79e32 --- /dev/null +++ b/server/src/main/java/io/druid/server/http/TiersResource.java @@ -0,0 +1,88 @@ +/* + * Druid - a distributed column store. + * Copyright (C) 2012, 2013 Metamarkets Group Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +package io.druid.server.http; + +import com.google.api.client.util.Maps; +import com.google.common.collect.HashBasedTable; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Sets; +import com.google.common.collect.Table; +import com.google.inject.Inject; +import io.druid.client.DruidServer; +import io.druid.client.InventoryView; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Response; +import java.util.Map; +import java.util.Set; + +/** + */ +@Path("/druid/coordinator/v1/tiers") +public class TiersResource +{ + private final InventoryView serverInventoryView; + + @Inject + public TiersResource( + InventoryView serverInventoryView + ) + { + this.serverInventoryView = serverInventoryView; + } + + @GET + @Produces("application/json") + public Response getTiers( + @QueryParam("simple") String simple + ) + { + Response.ResponseBuilder builder = Response.status(Response.Status.OK); + + if (simple != null) { + Map> metadata = Maps.newHashMap(); + for (DruidServer druidServer : serverInventoryView.getInventory()) { + Map tierMetadata = metadata.get(druidServer.getTier()); + + if (tierMetadata == null) { + tierMetadata = Maps.newHashMap(); + metadata.put(druidServer.getTier(), tierMetadata); + } + + Long currSize = tierMetadata.get("currSize"); + tierMetadata.put("currSize", (currSize == null) ? 0 : currSize + druidServer.getCurrSize()); + + Long maxSize = tierMetadata.get("maxSize"); + tierMetadata.put("maxSize", (maxSize == null) ? 
0 : maxSize + druidServer.getMaxSize()); + } + return builder.entity(metadata).build(); + } + + Set tiers = Sets.newHashSet(); + for (DruidServer server : serverInventoryView.getInventory()) { + tiers.add(server.getTier()); + } + + return builder.entity(tiers).build(); + } +} diff --git a/server/src/main/java/io/druid/server/initialization/ZkPathsConfig.java b/server/src/main/java/io/druid/server/initialization/ZkPathsConfig.java index ab296698548..8494b45e257 100644 --- a/server/src/main/java/io/druid/server/initialization/ZkPathsConfig.java +++ b/server/src/main/java/io/druid/server/initialization/ZkPathsConfig.java @@ -66,6 +66,12 @@ public abstract class ZkPathsConfig return defaultPath("coordinator"); } + @Config("druid.zk.paths.connectorPath") + public String getConnectorPath() + { + return defaultPath("connector"); + } + @Config("druid.zk.paths.indexer.announcementsPath") public String getIndexerAnnouncementPath() { diff --git a/server/src/main/resources/static/css/rules.css b/server/src/main/resources/static/css/rules.css index 384f312181e..dcd52e2706e 100644 --- a/server/src/main/resources/static/css/rules.css +++ b/server/src/main/resources/static/css/rules.css @@ -36,8 +36,9 @@ margin: 0 10px 0 10px; } -.delete_rule { +.delete_rule, .add_tier { border-style : none; color:#555; background-color:#eee; + cursor: pointer; } \ No newline at end of file diff --git a/server/src/main/resources/static/js/enable-0.0.1.js b/server/src/main/resources/static/js/enable-0.0.1.js index f25b1a53a54..65fd75fb1fa 100644 --- a/server/src/main/resources/static/js/enable-0.0.1.js +++ b/server/src/main/resources/static/js/enable-0.0.1.js @@ -21,7 +21,7 @@ $(document).ready(function() { var selected = $('#datasources option:selected').text(); $.ajax({ type: 'POST', - url:'/info/datasources/' + selected, + url:'/druid/coordinator/v1/datasources/' + selected, data: JSON.stringify(selected), contentType:"application/json; charset=utf-8", dataType:"json", @@ -50,7 +50,7 @@ $(document).ready(function() { var selected = $('#datasources option:selected').text(); $.ajax({ type: 'DELETE', - url:'/info/datasources/' + selected, + url:'/druid/coordinator/v1/datasources/' + selected, data: JSON.stringify(selected), contentType:"application/json; charset=utf-8", dataType:"json", @@ -70,12 +70,12 @@ $(document).ready(function() { } }); - $.getJSON("/info/db/datasources", function(enabled_datasources) { + $.getJSON("/druid/coordinator/v1/db/datasources", function(enabled_datasources) { $.each(enabled_datasources, function(index, datasource) { $('#enabled_datasources').append($('
<li>' + datasource + '</li>')); }); - $.getJSON("/info/db/datasources?includeDisabled", function(db_datasources) { + $.getJSON("/druid/coordinator/v1/db/datasources?includeDisabled", function(db_datasources) { var disabled_datasources = _.difference(db_datasources, enabled_datasources); $.each(disabled_datasources, function(index, datasource) { $('#disabled_datasources').append($('
  • ' + datasource + '
  • ')); diff --git a/server/src/main/resources/static/js/handlers-0.0.1.js b/server/src/main/resources/static/js/handlers-0.0.1.js index faa78f417b6..1cc16adfe4a 100644 --- a/server/src/main/resources/static/js/handlers-0.0.1.js +++ b/server/src/main/resources/static/js/handlers-0.0.1.js @@ -2,7 +2,7 @@ $(document).ready(function() { - var basePath = "/info/"; + var basePath = "/druid/coordinator/v1/"; var type = $('#select_type').attr('value') + ''; var view = $('#select_view').attr('value') + ''; diff --git a/server/src/main/resources/static/js/init-0.0.2.js b/server/src/main/resources/static/js/init-0.0.2.js index 914b9b1521a..c89ac19ce13 100644 --- a/server/src/main/resources/static/js/init-0.0.2.js +++ b/server/src/main/resources/static/js/init-0.0.2.js @@ -100,8 +100,8 @@ $(document).ready(function() { } // Execution stuff - $.get('/info/coordinator', function(data) { - $("#coordinator").html('Current Cluster Coordinator: ' + data.host); + $.get('/druid/coordinator/v1/leader', function(data) { + $("#coordinator").html('Current Cluster Coordinator Leader: ' + data.host); }); $('#move_segment').submit(function() { @@ -118,57 +118,10 @@ $(document).ready(function() { }); } -/* - $.ajax({ - url:"/coordinator/move", - type: "POST", - data: JSON.stringify(data), - contentType:"application/json; charset=utf-8", - dataType:"json", - error: function(xhr, status, error) { - alert(error + ": " + xhr.responseText); - }, - success: function(data, status, xhr) { - for (seg in CONSOLE.selected_segments) { - CONSOLE.selected_segments[seg].children('.server_host').text($('#move_segment > .to').val()); - } - } - }); -*/ return false; }); - -/*$ - ('#drop_segment').submit(function() { - var data = []; - - if ($.isEmptyObject(CONSOLE.selected_segments)) { - alert("Please select at least one segment"); - } - for (seg in CONSOLE.selected_segments) { - data.push({ - 'segmentName' : seg, - 'from' : CONSOLE.selected_segments[seg] - }); - } - - $.ajax({ - url:"/coordinator/drop", - type: "POST", - data: JSON.stringify(data), - contentType:"application/json; charset=utf-8", - dataType:"json", - error: function(xhr, status, error) { - alert(error + ": " + xhr.responseText); - } - }); - - return false; - }); -*/ - - $.get('/info/cluster', function(data) { + $.get('/druid/coordinator/v1/servers?full', function(data) { $('.loading').hide(); initTables(data); @@ -176,26 +129,5 @@ $(document).ready(function() { var oTable = []; initDataTable($('#servers'), oTable); initDataTable($('#segments'), oTable); - - // init select segments - /*$("#segments tbody").click(function(event) { - var el = $(event.target.parentNode); - var key = el.children('.segment_name').text(); - if (el.is("tr")) { - if (el.hasClass('row_selected')) { - el.removeClass('row_selected'); - delete CONSOLE.selected_segments[key]; - } else { - el.addClass('row_selected'); - CONSOLE.selected_segments[key] = el; - } - - var html =""; - for (segment in CONSOLE.selected_segments) { - html += segment + ' on ' + CONSOLE.selected_segments[segment].children('.server_host').text() + '
    '; - } - $('#selected_segments').html(html); - } - });*/ }); }); \ No newline at end of file diff --git a/server/src/main/resources/static/js/kill-0.0.1.js b/server/src/main/resources/static/js/kill-0.0.1.js index 666651128f1..5bc280ab513 100644 --- a/server/src/main/resources/static/js/kill-0.0.1.js +++ b/server/src/main/resources/static/js/kill-0.0.1.js @@ -22,7 +22,7 @@ $(document).ready(function() { var interval = $('#interval').val(); $.ajax({ type: 'DELETE', - url:'/info/datasources/' + selected +'?kill=true&interval=' + interval, + url:'/druid/coordinator/v1/datasources/' + selected +'?kill=true&interval=' + interval, contentType:"application/json; charset=utf-8", dataType:"json", error: function(xhr, status, error) { @@ -41,7 +41,7 @@ $(document).ready(function() { } }); - $.getJSON("/info/db/datasources?includeDisabled", function(data) { + $.getJSON("/druid/coordinator/v1/db/datasources?includeDisabled", function(data) { $.each(data, function(index, datasource) { $('#datasources').append($('').attr("value", datasource).text(datasource)); }); diff --git a/server/src/main/resources/static/js/rules-0.0.1.js b/server/src/main/resources/static/js/rules-0.0.1.js index fbe782b34c2..f8fb695a911 100644 --- a/server/src/main/resources/static/js/rules-0.0.1.js +++ b/server/src/main/resources/static/js/rules-0.0.1.js @@ -6,6 +6,8 @@ var ruleTypes = [ "loadByPeriod", "dropByInterval", "dropByPeriod", + "loadForever", + "dropForever", "JSON" ]; @@ -17,6 +19,7 @@ function makeRuleDiv(rule) { } else { retVal += makeRuleComponents(rule.type) + makeRuleBody(rule); } + retVal += ""; return retVal; } @@ -54,12 +57,18 @@ function makeRuleBody(rule) { case "loadByPeriod": retVal += makeLoadByPeriod(rule); break; + case "loadForever": + retVal += makeLoadForever(rule); + break; case "dropByInterval": retVal += makeDropByInterval(rule); break; case "dropByPeriod": retVal += makeDropByPeriod(rule); break; + case "dropForever": + retVal += ""; + break; case "JSON": retVal += makeJSON(); break; @@ -72,36 +81,67 @@ function makeRuleBody(rule) { } function makeLoadByInterval(rule) { - return "interval" + - "replicants" + - makeTiersDropdown(rule) - ; + var retVal = ""; + retVal += "interval"; + retVal += ""; + if (rule.tieredReplicants === undefined) { + retVal += makeTierLoad(null, 0); + } + for (var tier in rule.tieredReplicants) { + retVal += makeTierLoad(tier, rule.tieredReplicants[tier]); + } + return retVal; } function makeLoadByPeriod(rule) { - return "period" + - "replicants" + - makeTiersDropdown(rule) - ; + var retVal = ""; + retVal += "period"; + retVal += ""; + if (rule.tieredReplicants === undefined) { + retVal += makeTierLoad(null, 0); + } + for (var tier in rule.tieredReplicants) { + retVal += makeTierLoad(tier, rule.tieredReplicants[tier]); + } + return retVal; +} + +function makeLoadForever(rule) { + var retVal = ""; + retVal += ""; + if (rule.tieredReplicants === undefined) { + retVal += makeTierLoad(null, 0); + } + for (var tier in rule.tieredReplicants) { + retVal += makeTierLoad(tier, rule.tieredReplicants[tier]); + } + return retVal; +} + +function makeTierLoad(tier, val) { + return "
    " + + "replicants" + + makeTiersDropdown(tier) + + "
    "; } function makeDropByInterval(rule) { - return "interval"; + return "interval"; } function makeDropByPeriod(rule) { - return "period"; + return "period"; } function makeJSON() { return "JSON"; } -function makeTiersDropdown(rule) { +function makeTiersDropdown(selTier) { var retVal = "tier