Merge pull request #2464 from gianm/print-properties

Make startup properties logging optional.
This commit is contained in:
Slim 2016-02-14 15:11:35 -06:00
commit 16092eb5e2
8 changed files with 119 additions and 13 deletions

View File

@ -74,6 +74,16 @@ The following path is used for service discovery. It is **not** affected by `dru
|--------|-----------|-------|
|`druid.discovery.curator.path`|Services announce themselves under this ZooKeeper path.|`/druid/discovery`|
### Startup Logging
All nodes can log debugging information on startup.
|Property|Description|Default|
|--------|-----------|-------|
|`druid.startup.logging.logProperties`|Log all properties on startup (from common.runtime.properties, runtime.properties, and the JVM command line).|false|
Note that some sensitive information may be logged if these settings are enabled.
### Request Logging
All nodes that can serve queries can also log the query requests they see.

View File

@ -30,6 +30,13 @@ druid.extensions.loadList=[]
# and uncomment the line below to point to your directory. # and uncomment the line below to point to your directory.
#druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies #druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies
#
# Logging
#
# Log all runtime properties on startup. Disable to avoid logging properties on startup:
druid.startup.logging.logProperties=true
# #
# Zookeeper # Zookeeper
# #
@ -41,6 +48,7 @@ druid.zk.paths.base=/druid
# Metadata storage # Metadata storage
# #
# For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over):
druid.metadata.storage.type=derby druid.metadata.storage.type=derby
druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
druid.metadata.storage.connector.host=localhost druid.metadata.storage.connector.host=localhost
@ -62,6 +70,7 @@ druid.metadata.storage.connector.port=1527
# Deep storage # Deep storage
# #
# For local disk (only viable in a cluster if this is a network mount):
druid.storage.type=local druid.storage.type=local
druid.storage.storageDirectory=var/druid/segments druid.storage.storageDirectory=var/druid/segments
@ -80,6 +89,7 @@ druid.storage.storageDirectory=var/druid/segments
# Indexing service logs # Indexing service logs
# #
# For local disk (only viable in a cluster if this is a network mount):
druid.indexer.logs.type=file druid.indexer.logs.type=file
druid.indexer.logs.directory=var/druid/indexing-logs druid.indexer.logs.directory=var/druid/indexing-logs
@ -105,4 +115,4 @@ druid.selectors.coordinator.serviceName=druid/coordinator
druid.monitoring.monitors=["com.metamx.metrics.JvmMonitor"] druid.monitoring.monitors=["com.metamx.metrics.JvmMonitor"]
druid.emitter=logging druid.emitter=logging
druid.emitter.logging.logLevel=debug druid.emitter.logging.logLevel=info

View File

@ -29,6 +29,13 @@ druid.extensions.loadList=["druid-kafka-eight", "druid-s3-extensions", "druid-hi
# and uncomment the line below to point to your directory. # and uncomment the line below to point to your directory.
#druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies #druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies
#
# Logging
#
# Log all runtime properties on startup. Disable to avoid logging properties on startup:
druid.startup.logging.logProperties=true
# #
# Zookeeper # Zookeeper
# #
@ -40,6 +47,7 @@ druid.zk.paths.base=/druid
# Metadata storage # Metadata storage
# #
# For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over):
druid.metadata.storage.type=derby druid.metadata.storage.type=derby
druid.metadata.storage.connector.connectURI=jdbc:derby://metadata.store.ip:1527/var/druid/metadata.db;create=true druid.metadata.storage.connector.connectURI=jdbc:derby://metadata.store.ip:1527/var/druid/metadata.db;create=true
druid.metadata.storage.connector.host=metadata.store.ip druid.metadata.storage.connector.host=metadata.store.ip
@ -61,6 +69,7 @@ druid.metadata.storage.connector.port=1527
# Deep storage # Deep storage
# #
# For local disk (only viable in a cluster if this is a network mount):
druid.storage.type=local druid.storage.type=local
druid.storage.storageDirectory=var/druid/segments druid.storage.storageDirectory=var/druid/segments
@ -79,12 +88,13 @@ druid.storage.storageDirectory=var/druid/segments
# Indexing service logs # Indexing service logs
# #
# For local disk (only viable in a cluster if this is a network mount):
druid.indexer.logs.type=file druid.indexer.logs.type=file
druid.indexer.logs.directory=var/druid/indexing-logs druid.indexer.logs.directory=var/druid/indexing-logs
# For HDFS (make sure to include the HDFS extension and that your Hadoop config files in the cp): # For HDFS (make sure to include the HDFS extension and that your Hadoop config files in the cp):
#druid.indexer.logs.type=hdfs #druid.indexer.logs.type=hdfs
#druid.indexer.logs.directory=hdfs://namenode.example.com:9000/druid/indexing-logs #druid.indexer.logs.directory=/druid/indexing-logs
# For S3: # For S3:
#druid.indexer.logs.type=s3 #druid.indexer.logs.type=s3

View File

@ -87,16 +87,6 @@ public class PropertiesModule implements Module
} }
} }
log.info(
"Loaded properties into JVM with processors[%,d], memory[%,d].",
Runtime.getRuntime().availableProcessors(),
Runtime.getRuntime().totalMemory()
);
for (String propertyName : Ordering.natural().sortedCopy(props.stringPropertyNames())) {
log.info("* %s: %s", propertyName, props.getProperty(propertyName));
}
binder.bind(Properties.class).toInstance(props); binder.bind(Properties.class).toInstance(props);
} }
} }

View File

@ -0,0 +1,33 @@
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.druid.guice;
import com.google.inject.Binder;
import com.google.inject.Module;
import io.druid.server.log.StartupLoggingConfig;
/**
 * Guice module that registers {@link StartupLoggingConfig}, binding it to the
 * {@code druid.startup.logging} property namespace so operators can control
 * what gets logged at node startup.
 */
public class StartupLoggingModule implements Module
{
  @Override
  public void configure(Binder binder)
  {
    // Delegate property-to-config mapping to Druid's JSON config machinery;
    // fields annotated with @JsonProperty on StartupLoggingConfig are populated
    // from druid.startup.logging.* runtime properties.
    JsonConfigProvider.bind(binder, "druid.startup.logging", StartupLoggingConfig.class);
  }
}

View File

@ -50,6 +50,7 @@ import io.druid.guice.QueryRunnerFactoryModule;
import io.druid.guice.QueryableModule; import io.druid.guice.QueryableModule;
import io.druid.guice.ServerModule; import io.druid.guice.ServerModule;
import io.druid.guice.ServerViewModule; import io.druid.guice.ServerViewModule;
import io.druid.guice.StartupLoggingModule;
import io.druid.guice.StorageNodeModule; import io.druid.guice.StorageNodeModule;
import io.druid.guice.annotations.Client; import io.druid.guice.annotations.Client;
import io.druid.guice.annotations.Json; import io.druid.guice.annotations.Json;
@ -302,7 +303,8 @@ public class Initialization
new CoordinatorDiscoveryModule(), new CoordinatorDiscoveryModule(),
new LocalDataStorageDruidModule(), new LocalDataStorageDruidModule(),
new FirehoseModule(), new FirehoseModule(),
new ParsersModule() new ParsersModule(),
new StartupLoggingModule()
); );
ModuleList actualModules = new ModuleList(baseInjector); ModuleList actualModules = new ModuleList(baseInjector);

View File

@ -0,0 +1,33 @@
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.druid.server.log;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
 * Configuration holder for startup-time logging behavior. Values are bound
 * from the {@code druid.startup.logging} property namespace (see
 * StartupLoggingModule).
 */
public class StartupLoggingConfig
{
  // Whether every runtime property should be printed on startup. Defaults to
  // false because property values may contain sensitive information.
  @JsonProperty
  private boolean logProperties = false;

  /**
   * @return true if all runtime properties should be logged during startup
   */
  public boolean isLogProperties()
  {
    return this.logProperties;
  }
}

View File

@ -20,6 +20,7 @@
package io.druid.cli; package io.druid.cli;
import com.google.common.base.Throwables; import com.google.common.base.Throwables;
import com.google.common.collect.Ordering;
import com.google.inject.Inject; import com.google.inject.Inject;
import com.google.inject.Injector; import com.google.inject.Injector;
import com.google.inject.Module; import com.google.inject.Module;
@ -27,8 +28,10 @@ import com.metamx.common.lifecycle.Lifecycle;
import com.metamx.common.logger.Logger; import com.metamx.common.logger.Logger;
import io.druid.initialization.Initialization; import io.druid.initialization.Initialization;
import io.druid.initialization.LogLevelAdjuster; import io.druid.initialization.LogLevelAdjuster;
import io.druid.server.log.StartupLoggingConfig;
import java.util.List; import java.util.List;
import java.util.Properties;
/** /**
*/ */
@ -68,6 +71,21 @@ public abstract class GuiceRunnable implements Runnable
try { try {
LogLevelAdjuster.register(); LogLevelAdjuster.register();
final Lifecycle lifecycle = injector.getInstance(Lifecycle.class); final Lifecycle lifecycle = injector.getInstance(Lifecycle.class);
final StartupLoggingConfig startupLoggingConfig = injector.getInstance(StartupLoggingConfig.class);
log.info(
"Starting up with processors[%,d], memory[%,d].",
Runtime.getRuntime().availableProcessors(),
Runtime.getRuntime().totalMemory()
);
if (startupLoggingConfig.isLogProperties()) {
final Properties props = injector.getInstance(Properties.class);
for (String propertyName : Ordering.natural().sortedCopy(props.stringPropertyNames())) {
log.info("* %s: %s", propertyName, props.getProperty(propertyName));
}
}
try { try {
lifecycle.start(); lifecycle.start();