Merge pull request #902 from metamx/cache-config

Commonalize the cache config and change default to disable cache
Fangjin Yang 2014-12-04 11:30:24 -07:00
commit 332e4a4d1c
9 changed files with 24 additions and 11 deletions

View File

@@ -14,6 +14,10 @@ druid.metadata.storage.connector.password=diurd
 druid.storage.type=local
 druid.storage.storage.storageDirectory=/tmp/druid/localStorage
 
+# Cache (we use a simple 10mb heap-based local cache on the broker)
+druid.cache.type=local
+druid.cache.sizeInBytes=10000000
+
 # Indexing service discovery
 druid.selectors.indexing.serviceName=overlord
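The block added above turns on a small heap-based cache through the new common druid.cache.* prefix. As a rough sketch of what a "local" cache bounded by druid.cache.sizeInBytes amounts to (an illustration only, not Druid's actual implementation; the class name is invented and key replacement is ignored for brevity):

import java.util.LinkedHashMap;
import java.util.Map;

// Invented name; illustrates a heap map that evicts oldest entries
// once a byte budget (cf. druid.cache.sizeInBytes=10000000) is exceeded.
public class BoundedHeapCache extends LinkedHashMap<String, byte[]>
{
  private final long sizeInBytes;
  private long usedBytes = 0;

  public BoundedHeapCache(long sizeInBytes)
  {
    super(16, 0.75f, true); // access-order, for LRU-style eviction
    this.sizeInBytes = sizeInBytes;
  }

  @Override
  public byte[] put(String key, byte[] value)
  {
    usedBytes += value.length;
    return super.put(key, value);
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<String, byte[]> eldest)
  {
    // Called by put() after each insertion; drop the eldest entry
    // while we are over budget (one eviction per insertion).
    if (usedBytes > sizeInBytes) {
      usedBytes -= eldest.getValue().length;
      return true;
    }
    return false;
  }
}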

View File

@@ -1,6 +1,9 @@
 druid.host=localhost
-druid.service=broker
 druid.port=8080
+druid.service=broker
+
+druid.broker.cache.useCache=true
+druid.broker.cache.populateCache=true
 
 # Bump these up only for faster nested groupBy
 druid.processing.buffer.sizeBytes=100000000
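The broker keeps its own on/off switches under druid.broker.cache.*. As the flag names suggest, useCache gates the read path and populateCache the write path, so a node can serve results from cache without writing new entries, or the reverse. A minimal hypothetical sketch of that split (class and method names invented):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

public class CachingRunnerSketch
{
  private final Map<String, String> cache = new ConcurrentHashMap<>();
  private final boolean useCache;      // cf. druid.broker.cache.useCache
  private final boolean populateCache; // cf. druid.broker.cache.populateCache

  public CachingRunnerSketch(boolean useCache, boolean populateCache)
  {
    this.useCache = useCache;
    this.populateCache = populateCache;
  }

  public String run(String query, Function<String, String> compute)
  {
    if (useCache) {
      String cached = cache.get(query); // read path
      if (cached != null) {
        return cached;
      }
    }
    String result = compute.apply(query);
    if (populateCache) {
      cache.put(query, result);         // write path
    }
    return result;
  }
}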

View File

@@ -1,5 +1,7 @@
 druid.host=localhost
-druid.service=coordinator
 druid.port=8082
+druid.service=coordinator
 
+# The coordinator begins assignment operations after the start delay.
+# We override the default here to start things up faster for examples.
 druid.coordinator.startDelay=PT70s

View File

@@ -1,8 +1,9 @@
 druid.host=localhost
-druid.service=historical
 druid.port=8081
+druid.service=historical
 
-# Change these to make Druid faster
+# We can only scan 1 segment in parallel with these configs.
+# Our intermediate buffer is also very small so longer topNs will be slow.
 druid.processing.buffer.sizeBytes=100000000
 druid.processing.numThreads=1
 

View File

@@ -2,6 +2,7 @@ druid.host=localhost
 druid.port=8080
 druid.service=overlord
 
+# Run the overlord in local mode with a single peon to execute tasks
 druid.indexer.queue.startDelay=PT0M
 druid.indexer.runner.javaOpts="-server -Xmx256m"
 druid.indexer.fork.property.druid.processing.numThreads=1

View File

@@ -1,10 +1,12 @@
 druid.host=localhost
-druid.service=realtime
 druid.port=8083
+druid.service=realtime
 
-# Change this config to metadata to hand off to the rest of the Druid cluster
+# Change this config to 'metadata' to hand off to the rest of the Druid cluster
 druid.publish.type=noop
 
+# We can only scan 1 segment in parallel with these configs.
+# Our intermediate buffer is also very small so longer topNs will be slow.
 druid.processing.buffer.sizeBytes=100000000
 druid.processing.numThreads=1
 

View File

@@ -31,10 +31,10 @@ public class CacheConfig
   public static final String POPULATE_CACHE = "populateCache";
 
   @JsonProperty
-  private boolean useCache = true;
+  private boolean useCache = false;
 
   @JsonProperty
-  private boolean populateCache = true;
+  private boolean populateCache = false;
 
   @JsonProperty
   private List<String> unCacheable = Arrays.asList(Query.GROUP_BY, Query.SELECT);
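With the defaults flipped to false, an empty config now deserializes to a disabled cache, which is why the example broker config above opts in explicitly. A small Jackson round-trip demonstrating the new defaults (CacheConfigSketch is a stand-in for CacheConfig, and the string literals stand in for Query.GROUP_BY and Query.SELECT):

import java.util.Arrays;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;

public class CacheDefaultsDemo
{
  // Stand-in for CacheConfig with the new defaults.
  static class CacheConfigSketch
  {
    @JsonProperty
    boolean useCache = false;

    @JsonProperty
    boolean populateCache = false;

    @JsonProperty
    List<String> unCacheable = Arrays.asList("groupBy", "select");
  }

  public static void main(String[] args) throws Exception
  {
    ObjectMapper mapper = new ObjectMapper();

    // With no explicit properties, caching is now off...
    CacheConfigSketch byDefault = mapper.readValue("{}", CacheConfigSketch.class);
    System.out.println(byDefault.useCache);   // false

    // ...and must be switched on explicitly, as the broker example config does.
    CacheConfigSketch optIn =
        mapper.readValue("{\"useCache\":true,\"populateCache\":true}", CacheConfigSketch.class);
    System.out.println(optIn.useCache);       // true
  }
}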

View File

@@ -90,7 +90,7 @@ public class CliBroker extends ServerRunnable
     binder.bind(TimelineServerView.class).to(BrokerServerView.class).in(LazySingleton.class);
 
     binder.bind(Cache.class).toProvider(CacheProvider.class).in(ManageLifecycle.class);
-    JsonConfigProvider.bind(binder, "druid.broker.cache", CacheProvider.class);
+    JsonConfigProvider.bind(binder, "druid.cache", CacheProvider.class);
     JsonConfigProvider.bind(binder, "druid.broker.cache", CacheConfig.class);
     JsonConfigProvider.bind(binder, "druid.broker.select", TierSelectorStrategy.class);
     JsonConfigProvider.bind(binder, "druid.broker.select.tier.custom", CustomTierSelectorStrategyConfig.class);

View File

@@ -88,7 +88,7 @@ public class CliHistorical extends ServerRunnable
     LifecycleModule.register(binder, ZkCoordinator.class);
 
     binder.bind(Cache.class).toProvider(CacheProvider.class).in(ManageLifecycle.class);
-    JsonConfigProvider.bind(binder, "druid.historical.cache", CacheProvider.class);
+    JsonConfigProvider.bind(binder, "druid.cache", CacheProvider.class);
     JsonConfigProvider.bind(binder, "druid.historical.cache", CacheConfig.class);
     MetricsModule.register(binder, CacheMonitor.class);
   }