refactor examples to use global runtime

fjy 2014-06-09 15:59:44 -07:00
parent 8340a1b0a1
commit 7233fce50c
9 changed files with 65 additions and 46 deletions

View File

@@ -2,8 +2,6 @@ druid.host=localhost
druid.service=broker
druid.port=8080
druid.zk.service.host=localhost
# Change these to make Druid faster
# Add more threads or larger buffer for faster groupBys
druid.processing.buffer.sizeBytes=100000000
druid.processing.numThreads=1

View File

@@ -2,10 +2,4 @@ druid.host=localhost
druid.service=coordinator
druid.port=8082
druid.zk.service.host=localhost
druid.db.connector.connectURI=jdbc\:mysql\://localhost\:3306/druid
druid.db.connector.user=druid
druid.db.connector.password=diurd
druid.coordinator.startDelay=PT70s

View File

@@ -0,0 +1,24 @@
# Extensions
druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.116","io.druid.extensions:druid-kafka-seven:0.6.116","io.druid.extensions:druid-rabbitmq:0.6.116", "io.druid.extensions:druid-s3-extensions:0.6.116"]
# Zookeeper
druid.zk.service.host=localhost
# Metadata Storage
druid.db.connector.connectURI=jdbc\:mysql\://localhost\:3306/druid
druid.db.connector.user=druid
druid.db.connector.password=diurd
# Deep storage
druid.storage.type=local
druid.storage.storage.storageDirectory=/tmp/druid/localStorage
# Indexing service discovery
druid.selectors.indexing.serviceName=overlord
# Monitoring (disabled for examples)
# druid.monitoring.monitors=["com.metamx.metrics.SysMonitor","com.metamx.metrics.JvmMonitor"]
# Metrics logging
druid.emitter=noop
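
With this new shared file, the per-service runtime.properties keep only what differs between nodes; everything common (Zookeeper, metadata storage, deep storage, extensions, the noop emitter) comes from here. Below is a minimal sketch of that layering idea in plain Java; the file paths and the merge mechanism are assumptions for illustration only, not Druid's actual property loading.

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

public class LayeredRuntimeProperties
{
  // Load the shared file first, then let the node-specific file override it.
  public static Properties load(String commonPath, String nodePath) throws IOException
  {
    Properties merged = new Properties();
    try (FileInputStream common = new FileInputStream(commonPath)) {
      merged.load(common);   // zk, metadata storage, deep storage, extensions, emitter
    }
    try (FileInputStream node = new FileInputStream(nodePath)) {
      merged.load(node);     // host, port, service name, per-node tuning
    }
    return merged;
  }

  public static void main(String[] args) throws IOException
  {
    // Hypothetical paths, for illustration only.
    Properties p = load("config/_common/runtime.properties", "config/broker/runtime.properties");
    System.out.println(p.getProperty("druid.service"));  // broker, from the node file
    System.out.println(p.getProperty("druid.emitter"));  // noop, inherited from the common file
  }
}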

View File

@@ -2,8 +2,6 @@ druid.host=localhost
druid.service=historical
druid.port=8081
druid.zk.service.host=localhost
druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.116"]
# Dummy read only AWS account (used to download example data)

View File

@@ -1,22 +1,8 @@
-server
-Xmx256m
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
druid.host=localhost
druid.port=8083
druid.service=overlord
-Ddruid.host=localhost
-Ddruid.port=8080
-Ddruid.service=overlord
-Ddruid.zk.service.host=localhost
-Ddruid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.116"]
-Ddruid.db.connector.connectURI=jdbc:mysql://localhost:3306/druid
-Ddruid.db.connector.user=druid
-Ddruid.db.connector.password=diurd
-Ddruid.selectors.indexing.serviceName=overlord
-Ddruid.indexer.queue.startDelay=PT0M
-Ddruid.indexer.runner.javaOpts="-server -Xmx256m"
-Ddruid.indexer.fork.property.druid.processing.numThreads=1
-Ddruid.indexer.fork.property.druid.computation.buffer.size=100000000
druid.indexer.queue.startDelay=PT0M
druid.indexer.runner.javaOpts="-server -Xmx256m"
druid.indexer.fork.property.druid.processing.numThreads=1
druid.indexer.fork.property.druid.computation.buffer.size=100000000

View File

@@ -2,19 +2,8 @@ druid.host=localhost
druid.service=realtime
druid.port=8083
druid.zk.service.host=localhost
druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.116","io.druid.extensions:druid-kafka-seven:0.6.116","io.druid.extensions:druid-rabbitmq:0.6.116"]
# Change this config to db to hand off to the rest of the Druid cluster
druid.publish.type=noop
# These configs are only required for real hand off
# druid.db.connector.connectURI=jdbc\:mysql\://localhost\:3306/druid
# druid.db.connector.user=druid
# druid.db.connector.password=diurd
druid.processing.buffer.sizeBytes=100000000
druid.processing.numThreads=1
druid.monitoring.monitors=["io.druid.segment.realtime.RealtimeMetricsMonitor"]

View File

@@ -73,7 +73,7 @@
<dependency>
<groupId>com.metamx</groupId>
<artifactId>emitter</artifactId>
<version>0.2.11</version>
<version>0.2.12</version>
</dependency>
<dependency>
<groupId>com.metamx</groupId>

View File

@@ -67,6 +67,7 @@ public class EmitterModule implements Module
{
String emitterType = props.getProperty(EMITTER_PROPERTY, "");
binder.install(new NoopEmitterModule());
binder.install(new LogEmitterModule());
binder.install(new HttpEmitterModule());
@@ -104,7 +105,7 @@
emitter = findEmitter(emitterType, emitterBindings);
if (emitter == null) {
emitter = findEmitter(LogEmitterModule.EMITTER_TYPE, emitterBindings);
emitter = findEmitter(NoopEmitterModule.EMITTER_TYPE, emitterBindings);
}
if (emitter == null) {
@@ -115,7 +116,7 @@
knownTypes.add(((Named) annotation).value());
}
}
throw new ISE("Uknown emitter type[%s]=[%s], known types[%s]", EMITTER_PROPERTY, emitterType, knownTypes);
throw new ISE("Unknown emitter type[%s]=[%s], known types[%s]", EMITTER_PROPERTY, emitterType, knownTypes);
}
}
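
The effect of the hunks above: when druid.emitter is unset or names an unbound type, the module now falls back to the noop emitter rather than the logging one, and the unknown-type error only fires if even that binding is missing. A simplified mirror of that selection order, with a plain map standing in for the Guice emitter bindings (class names and the map values are illustrative assumptions, not the real EmitterModule API):

import java.util.Map;

public class EmitterSelectionSketch
{
  // Same order as the change above: use the configured type if it is bound,
  // otherwise fall back to "noop", and only then report an unknown type.
  static String resolve(String configuredType, Map<String, String> bindings)
  {
    String emitter = bindings.get(configuredType);
    if (emitter == null) {
      emitter = bindings.get("noop");
    }
    if (emitter == null) {
      throw new IllegalStateException(
          "Unknown emitter type[druid.emitter]=[" + configuredType + "], known types" + bindings.keySet()
      );
    }
    return emitter;
  }

  public static void main(String[] args)
  {
    Map<String, String> bindings = Map.of("noop", "noop emitter", "logging", "logging emitter", "http", "http emitter");
    System.out.println(resolve("", bindings));         // noop emitter: an empty setting falls back to noop
    System.out.println(resolve("logging", bindings));  // logging emitter: an explicit setting still wins
  }
}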

View File

@@ -0,0 +1,29 @@
package io.druid.server.initialization;

import com.google.inject.Binder;
import com.google.inject.Module;
import com.google.inject.Provides;
import com.google.inject.name.Named;
import com.metamx.emitter.core.Emitter;
import com.metamx.emitter.core.NoopEmitter;
import io.druid.guice.ManageLifecycle;

/**
 */
public class NoopEmitterModule implements Module
{
  public static final String EMITTER_TYPE = "noop";

  @Override
  public void configure(Binder binder)
  {
  }

  @Provides
  @ManageLifecycle
  @Named(EMITTER_TYPE)
  public Emitter makeEmitter()
  {
    return new NoopEmitter();
  }
}
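
The module binds the "noop" name to com.metamx.emitter.core.NoopEmitter, which comes from the emitter 0.2.12 bump in the pom above. A hedged sketch of what such a no-op emitter amounts to; the method set here is an assumption about the Emitter interface, not the library's actual source:

import com.metamx.emitter.core.Emitter;
import com.metamx.emitter.core.Event;

import java.io.IOException;

// Sketch only: the method signatures are an assumption about the Emitter
// interface. Every call is a no-op, so with druid.emitter=noop the example
// nodes start without any metrics reporting and simply drop emitted events.
public class NoopEmitterSketch implements Emitter
{
  @Override
  public void start() {}

  @Override
  public void emit(Event event) {}

  @Override
  public void flush() throws IOException {}

  @Override
  public void close() throws IOException {}
}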