diff --git a/examples/pom.xml b/examples/pom.xml
index 9ee7ba2b906..f72d357d880 100644
--- a/examples/pom.xml
+++ b/examples/pom.xml
@@ -32,7 +32,6 @@
-        <module>rand</module>
         <module>twitter</module>
diff --git a/examples/rand/pom.xml b/examples/rand/pom.xml
deleted file mode 100644
index dc4a2b1d08e..00000000000
--- a/examples/rand/pom.xml
+++ /dev/null
@@ -1,169 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>com.metamx.druid</groupId>
-    <artifactId>druid-examples-rand</artifactId>
-    <name>druid-examples-rand</name>
-    <description>druid-examples-rand</description>
-
-    <parent>
-        <groupId>com.metamx</groupId>
-        <artifactId>druid-examples</artifactId>
-        <version>0.4.1-SNAPSHOT</version>
-    </parent>
-
-    <dependencies>
-        <dependency>
-            <groupId>com.metamx.druid</groupId>
-            <artifactId>druid-realtime</artifactId>
-            <version>${project.parent.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.metamx.druid</groupId>
-            <artifactId>druid-server</artifactId>
-            <version>${project.parent.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.metamx.druid</groupId>
-            <artifactId>druid-common</artifactId>
-            <version>${project.parent.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.metamx</groupId>
-            <artifactId>emitter</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.metamx</groupId>
-            <artifactId>http-client</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.metamx</groupId>
-            <artifactId>java-util</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.metamx</groupId>
-            <artifactId>server-metrics</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.davekoelle</groupId>
-            <artifactId>alphanum</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.skife.config</groupId>
-            <artifactId>config-magic</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.inject</groupId>
-            <artifactId>guice</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.inject.extensions</groupId>
-            <artifactId>guice-servlet</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.ibm.icu</groupId>
-            <artifactId>icu4j</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-core</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.jaxrs</groupId>
-            <artifactId>jackson-jaxrs-json-provider</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-databind</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.dataformat</groupId>
-            <artifactId>jackson-dataformat-smile</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.jdbi</groupId>
-            <artifactId>jdbi</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.sun.jersey</groupId>
-            <artifactId>jersey-core</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.sun.jersey.contribs</groupId>
-            <artifactId>jersey-guice</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.sun.jersey</groupId>
-            <artifactId>jersey-server</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.mortbay.jetty</groupId>
-            <artifactId>jetty</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>joda-time</groupId>
-            <artifactId>joda-time</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>log4j</groupId>
-            <artifactId>log4j</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-log4j12</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.easymock</groupId>
-            <artifactId>easymock</artifactId>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <artifactId>maven-shade-plugin</artifactId>
-                <executions>
-                    <execution>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>shade</goal>
-                        </goals>
-                        <configuration>
-                            <outputFile>${project.build.directory}/${project.artifactId}-${project.version}-selfcontained.jar</outputFile>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-            <plugin>
-                <artifactId>maven-jar-plugin</artifactId>
-                <executions>
-                    <execution>
-                        <goals>
-                            <goal>test-jar</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-</project>
diff --git a/examples/rand/src/main/java/druid/examples/RealtimeStandaloneMain.java b/examples/rand/src/main/java/druid/examples/RealtimeStandaloneMain.java
deleted file mode 100644
index fa474bcb2f7..00000000000
--- a/examples/rand/src/main/java/druid/examples/RealtimeStandaloneMain.java
+++ /dev/null
@@ -1,83 +0,0 @@
-package druid.examples;
-
-import com.metamx.common.lifecycle.Lifecycle;
-import com.metamx.common.logger.Logger;
-import com.metamx.druid.client.DataSegment;
-import com.metamx.druid.coordination.DataSegmentAnnouncer;
-import com.metamx.druid.loading.DataSegmentPusher;
-import com.metamx.druid.log.LogLevelAdjuster;
-import com.metamx.druid.realtime.RealtimeNode;
-import com.metamx.druid.realtime.SegmentPublisher;
-
-import java.io.File;
-import java.io.IOException;
-
-/**
- * Standalone Demo Realtime process.
- * Created: 20121009T2050
- */
-public class RealtimeStandaloneMain
-{
-  private static final Logger log = new Logger(RealtimeStandaloneMain.class);
-
-  public static void main(String[] args) throws Exception
-  {
-    LogLevelAdjuster.register();
-
-    Lifecycle lifecycle = new Lifecycle();
-
-    RealtimeNode rn = RealtimeNode.builder().build();
-    lifecycle.addManagedInstance(rn);
-
-    DataSegmentAnnouncer dummySegmentAnnouncer =
-        new DataSegmentAnnouncer()
-        {
-          @Override
-          public void announceSegment(DataSegment segment) throws IOException
-          {
-            // do nothing
-          }
-
-          @Override
-          public void unannounceSegment(DataSegment segment) throws IOException
-          {
-            // do nothing
-          }
-        };
-    SegmentPublisher dummySegmentPublisher =
-        new SegmentPublisher()
-        {
-          @Override
-          public void publishSegment(DataSegment segment) throws IOException
-          {
-            // do nothing
-          }
-        };
-
-    // dummySegmentPublisher will not send updates to db because standalone demo has no db
-    rn.setAnnouncer(dummySegmentAnnouncer);
-    rn.setSegmentPublisher(dummySegmentPublisher);
-    rn.setDataSegmentPusher(
-        new DataSegmentPusher()
-        {
-          @Override
-          public DataSegment push(File file, DataSegment segment) throws IOException
-          {
-            return segment;
-          }
-        }
-    );
-
-
-    try {
-      lifecycle.start();
-    }
-    catch (Throwable t) {
-      log.info(t, "Throwable caught at startup, committing seppuku");
-      t.printStackTrace();
-      System.exit(2);
-    }
-
-    lifecycle.join();
-  }
-}
\ No newline at end of file
diff --git a/examples/rand/src/main/resources/runtime.properties b/examples/rand/src/main/resources/runtime.properties
deleted file mode 100644
index c9483846106..00000000000
--- a/examples/rand/src/main/resources/runtime.properties
+++ /dev/null
@@ -1,145 +0,0 @@
-# Properties for demo of Realtime Node in standalone mode.
-# To Use This: copy this file to runtime.properties and put directory containing it in classpath.
-#
-comment.origin=druid/examples/rand/src/main/resources/runtime.properties
-
-# S3 access
-com.metamx.aws.accessKey=
-com.metamx.aws.secretKey=
-
-# thread pool size for servicing queries
-druid.client.http.connections=10
-
-# JDBC connection string for metadata database
-druid.database.connectURI=
-druid.database.user=user
-druid.database.password=password
-# time between polling for metadata database
-druid.database.poll.duration=PT1M
-
-# table for segment metadata coordination, no default
-druid.database.segmentTable=prod_segments
-
-#in progress 20121010 #druid.database.taskTable=
-
-druid.emitter.period=PT60S
-
-druid.master.host
-# Poll period the master runs on
-druid.master.period=PT60S
-# Number of poll periods to wait for a node to come back before believing it is really gone
-druid.master.removedSegmentLifetime=1
-# Delay for the master to start its work, this should be sufficiently high so that the master can get all of the
-# information it needs from ZK before starting. It's a hack, but it works until we re-work our ZK integration.
-druid.master.startDelay=PT600S
-
-# Path on local FS for storage of segments; dir. will be created if needed
-druid.paths.indexCache=/tmp/rand_realtime/indexCache
-# Path on local FS for storage of segment metadata; dir. will be created if needed
-druid.paths.segmentInfoCache=/tmp/rand_realtime/segmentInfoCache
-
-# Path to schema definition file
-druid.request.logging.dir=/tmp/rand_realtime/log
-
-#druid.server.maxSize=0
-druid.server.maxSize=300000000000
-# =realtime or =historical (default)
-druid.server.type=realtime
-
-#
-# zookeeper (zk) znode paths (zpaths)
-#
-
-# base znode which establishes a unique namespace for a Druid ensemble.
-# Default is /druid if not set
-# This can also be set via parameter baseZkPath of the DruidSetup commandline
-# druid.zk.paths.base=
-
-# If these zpath properties like druid.zk.paths.*Path are overridden, then all must be
-# overridden together for upgrade safety reasons.
-# The commandline utility DruidSetup, which is used to set up properties on zookeeper,
-# will validate this. Also, these zpaths must start with / because they are not relative.
-
-# ZK znode path for service discovery within the cluster.
-# Default is value of druid.zk.paths.base + /announcements
-# druid.zk.paths.announcementsPath=/druid/announcements
-
-# Legacy znode path, must be set, but can be ignored
-#druid.zk.paths.indexesPath=/druid/indexes
-
-# Default is value of druid.zk.paths.base + /tasks
-##druid.zk.paths.indexer.tasksPath=/druid/tasks
-
-# Default is value of druid.zk.paths.base + /status
-#druid.zk.paths.indexer.statusPath=/druid/status
-
-# ZK path for load/drop protocol between Master/Compute
-# Default is value of druid.zk.paths.base + /loadQueue
-#druid.zk.paths.loadQueuePath=/druid/loadQueue
-
-# ZK path for Master leadership election
-# Default is value of druid.zk.paths.base + /master
-#druid.zk.paths.masterPath=/druid/master
-
-# ZK path for publishing served segments
-# Default is value of druid.zk.paths.base + /servedSegments
-#druid.zk.paths.servedSegmentsPath=/druid/servedSegments
-
-# Default is value of druid.zk.paths.base + /leaderLatch
-#druid.zk.paths.indexer.leaderLatchPath=/druid/leaderLatch
-
-# ZK path for properties stored in zookeeper
-# Default is value of druid.zk.paths.base + /properties
-#druid.zk.paths.propertiesPath=/druid/properties
-
-druid.host=127.0.0.1
-druid.port=8080
-
-#
-druid.http.numThreads=10
-# default is 5 min. (300000)
-#druid.http.maxIdleTimeMillis=300000
-
-# unknown # com.metamx.service=compute
-com.metamx.emitter.http=true
-com.metamx.emitter.logging=true
-com.metamx.emitter.logging.level=info
-com.metamx.metrics.emitter.period=PT60S
-
-# ZK quorum IPs; ZK coordinates in the form host1:port1[,host2:port2[, ...]]
-# if =none then do not contact zookeeper (only for RealtimeStandaloneMain examples)
-druid.zk.service.host=none
-
-# msec; high value means tolerate slow zk nodes, default is to wait about 3 weeks
-druid.zk.service.connectionTimeout=1000000
-
-druid.processing.formatString=processing_%s
-druid.processing.numThreads=3
-
-
-#
-# other properties found
-#
-druid.computation.buffer.size=10000000
-druid.merger.threads=1
-druid.merger.runner=remote
-druid.merger.whitelist.enabled=false
-druid.merger.whitelist.datasources=
-druid.merger.rowFlushBoundary=500000
-druid.indexer.retry.minWaitMillis=10000
-druid.indexer.retry.maxWaitMillis=60000
-druid.indexer.retry.maxRetryCount=10
-#emitting, opaque marker
-druid.service=foo
-# S3 dest for realtime indexer
-druid.pusher.s3.bucket=
-druid.pusher.s3.baseKey=
-
-druid.realtime.specFile=rand_realtime.spec
-
-#
-# Integration-Test Related
-#
-# is this for RAM? which process?
-druid.bard.cache.sizeInBytes=40000000
-#ignore#druid.bard.host=
diff --git a/examples/rand/query.body b/examples/twitter/rand/query.body
similarity index 100%
rename from examples/rand/query.body
rename to examples/twitter/rand/query.body
diff --git a/examples/rand/rand_realtime.spec b/examples/twitter/rand/rand_realtime.spec
similarity index 100%
rename from examples/rand/rand_realtime.spec
rename to examples/twitter/rand/rand_realtime.spec
diff --git a/examples/rand/run_client.sh b/examples/twitter/rand/run_client.sh
similarity index 100%
rename from examples/rand/run_client.sh
rename to examples/twitter/rand/run_client.sh
diff --git a/examples/rand/run_server.sh b/examples/twitter/rand/run_server.sh
similarity index 100%
rename from examples/rand/run_server.sh
rename to examples/twitter/rand/run_server.sh
diff --git a/examples/twitter/rand_realtime.spec b/examples/twitter/rand_realtime.spec
deleted file mode 100644
index 39323295205..00000000000
--- a/examples/twitter/rand_realtime.spec
+++ /dev/null
@@ -1,32 +0,0 @@
-[{
-    "schema": {
-        "dataSource": "randseq",
-        "aggregators": [
-            {"type": "count", "name": "events"},
-            {"type": "doubleSum", "name": "outColumn", "fieldName": "inColumn"}
-        ],
-        "indexGranularity": "minute",
-        "shardSpec": {"type": "none"}
-    },
-
-    "config": {
-        "maxRowsInMemory": 50000,
-        "intermediatePersistPeriod": "PT10m"
-    },
-
-    "firehose": {
-        "type": "rand",
-        "sleepUsec": 100000,
-        "maxGeneratedRows": 5000000,
-        "seed": 0,
-        "nTokens": 19,
-        "nPerSleep": 3
-    },
-
-    "plumber": {
-        "type": "realtime",
-        "windowPeriod": "PT5m",
-        "segmentGranularity": "hour",
-        "basePersistDirectory": "/tmp/rand_realtime/basePersist"
-    }
-}]
diff --git a/examples/twitter/group_by_query.body b/examples/twitter/twitter/group_by_query.body
similarity index 100%
rename from examples/twitter/group_by_query.body
rename to examples/twitter/twitter/group_by_query.body
diff --git a/examples/twitter/run_client.sh b/examples/twitter/twitter/run_client.sh
similarity index 100%
rename from examples/twitter/run_client.sh
rename to examples/twitter/twitter/run_client.sh
diff --git a/examples/twitter/run_server.sh b/examples/twitter/twitter/run_server.sh
similarity index 100%
rename from examples/twitter/run_server.sh
rename to examples/twitter/twitter/run_server.sh
diff --git a/examples/twitter/search_query.body b/examples/twitter/twitter/search_query.body
similarity index 100%
rename from examples/twitter/search_query.body
rename to examples/twitter/twitter/search_query.body
diff --git a/examples/twitter/twitter_realtime.spec b/examples/twitter/twitter/twitter_realtime.spec
similarity index 100%
rename from examples/twitter/twitter_realtime.spec
rename to examples/twitter/twitter/twitter_realtime.spec