Merge pull request #1011 from metamx/log4j2

Upgrade to log4j2
Xavier Léauté 2015-01-20 12:51:07 -08:00
commit c532d07635
9 changed files with 96 additions and 43 deletions


@@ -116,8 +116,21 @@
      <artifactId>jsr305</artifactId>
    </dependency>
    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-slf4j-impl</artifactId>
+    </dependency>
+    <!-- com.lmax.disruptor is optional in log4j-core, so we explicitly include it here -->
+    <dependency>
+      <groupId>com.lmax</groupId>
+      <artifactId>disruptor</artifactId>
    </dependency>

    <!-- Tests -->
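With log4j-slf4j-impl on the classpath, SLF4J calls made by libraries Druid depends on are routed into log4j2, so no calling code has to change. A minimal sketch of the caller's side, assuming the dependencies above; the class name is illustrative only, not from the Druid codebase:

```
// Minimal sketch: plain SLF4J calls end up in whatever appenders log4j2.xml
// configures, via the log4j-slf4j-impl binding added above.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jToLog4j2Example
{
  private static final Logger log = LoggerFactory.getLogger(Slf4jToLog4j2Example.class);

  public static void main(String[] args)
  {
    // {} placeholders are formatted lazily, only if the level is enabled.
    log.info("routed through log4j-slf4j-impl into log4j2, arg={}", 42);
  }
}
```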


@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
  ~ Druid - a distributed column store.
-  ~ Copyright (C) 2012, 2013, 2014 Metamarkets Group Inc.
+  ~ Copyright (C) 2012, 2013, 2014, 2015 Metamarkets Group Inc.
  ~
  ~ This program is free software; you can redistribute it and/or
  ~ modify it under the terms of the GNU General Public License
@@ -18,18 +18,15 @@
  ~ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  -->

-<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
-
-<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
-
-  <appender name="ConsoleAppender" class="org.apache.log4j.ConsoleAppender">
-    <layout class="org.apache.log4j.PatternLayout">
-      <param name="ConversionPattern" value="%d{ISO8601} %p [%t] %c - %m%n"/>
-    </layout>
-  </appender>
-
-  <root>
-    <priority value ="info" />
-    <appender-ref ref="ConsoleAppender"/>
-  </root>
-</log4j:configuration>
+<Configuration status="WARN">
+  <Appenders>
+    <Console name="Console" target="SYSTEM_OUT">
+      <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
+    </Console>
+  </Appenders>
+  <Loggers>
+    <Root level="info">
+      <AppenderRef ref="Console"/>
+    </Root>
+  </Loggers>
+</Configuration>
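The new configuration keeps the Root logger synchronous. To make every logger asynchronous instead, which is the main reason com.lmax:disruptor is added as a dependency, log4j2 provides the AsyncLoggerContextSelector, normally enabled with a JVM flag. A sketch under that assumption, not part of this commit:

```
// Sketch only: all-async logging via log4j2's context selector. Equivalent to passing
//   -DLog4jContextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector
// on the JVM command line. It requires com.lmax:disruptor on the classpath, and the
// property must be set before the first logger is created.
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class AllAsyncLoggingExample
{
  public static void main(String[] args)
  {
    System.setProperty(
        "Log4jContextSelector",
        "org.apache.logging.log4j.core.async.AsyncLoggerContextSelector"
    );
    Logger log = LogManager.getLogger(AllAsyncLoggingExample.class);
    log.info("queued on the disruptor ring buffer and written by a background thread");
  }
}
```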


@@ -15,4 +15,35 @@ The size of the JVM heap really depends on the type of Druid node you are running
The intermediate computation buffer specifies a buffer size for the storage of intermediate results. The computation engine in both the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed. The default size is 1073741824 bytes (1GB).

## What is server maxSize?
Server maxSize sets the maximum cumulative segment size (in bytes) that a node can hold. Changing this parameter will affect performance by controlling the memory/disk ratio on a node. Setting this parameter to a value greater than the total memory capacity on a node may cause disk paging to occur. This paging time introduces a query latency delay.
+
+## My logs are really chatty, can I set them to asynchronously write?
+Yes, using a `log4j2.xml` similar to the following causes some of the more chatty classes to write asynchronously:
+
+```
+<?xml version="1.0" encoding="UTF-8" ?>
+<Configuration status="WARN">
+  <Appenders>
+    <Console name="Console" target="SYSTEM_OUT">
+      <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
+    </Console>
+  </Appenders>
+  <Loggers>
+    <AsyncLogger name="io.druid.curator.inventory.CuratorInventoryManager" level="debug" additivity="false">
+      <AppenderRef ref="Console"/>
+    </AsyncLogger>
+    <AsyncLogger name="io.druid.client.BatchServerInventoryView" level="debug" additivity="false">
+      <AppenderRef ref="Console"/>
+    </AsyncLogger>
+    <!-- Make extra sure nobody adds logs in a bad way that can hurt performance -->
+    <AsyncLogger name="io.druid.client.ServerInventoryView" level="debug" additivity="false">
+      <AppenderRef ref="Console"/>
+    </AsyncLogger>
+    <AsyncLogger name="com.metamx.http.client.pool.ChannelResourceFactory" level="info" additivity="false">
+      <AppenderRef ref="Console"/>
+    </AsyncLogger>
+    <Root level="info">
+      <AppenderRef ref="Console"/>
+    </Root>
+  </Loggers>
+</Configuration>
+```
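Application code does not change when a logger listed in that FAQ example is switched to `<AsyncLogger>`; log4j2 routes by logger name, and the per-logger async path also needs the LMAX disruptor on the classpath. A small illustrative sketch (the class is hypothetical; the logger name is one of the entries above):

```
// Illustrative sketch: whether this logger is asynchronous is decided purely by the
// matching <AsyncLogger> entry in log4j2.xml, not by anything in the calling code.
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class AsyncByNameExample
{
  // Matches the <AsyncLogger name="io.druid.client.BatchServerInventoryView" ...> entry above.
  private static final Logger log = LogManager.getLogger("io.druid.client.BatchServerInventoryView");

  public static void main(String[] args)
  {
    log.debug("handled asynchronously because of the AsyncLogger entry in log4j2.xml");
  }
}
```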


@@ -43,6 +43,11 @@
      <groupId>net.java.dev.jets3t</groupId>
      <artifactId>jets3t</artifactId>
    </dependency>
+    <!-- jets3t requires log4j 1.2 compatibility -->
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-1.2-api</artifactId>
+    </dependency>
    <dependency>
      <groupId>com.amazonaws</groupId>
      <artifactId>aws-java-sdk</artifactId>
@@ -76,11 +81,6 @@
      <artifactId>easymock</artifactId>
      <scope>test</scope>
    </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <scope>test</scope>
-    </dependency>
  </dependencies>
  <build>
    <plugins>
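jets3t still logs through the legacy log4j 1.2 API, which is why the log4j-1.2-api bridge is added above. A hedged sketch of the kind of call the bridge handles; the class is illustrative, not from jets3t:

```
// Sketch: legacy org.apache.log4j calls (as made by jets3t) are satisfied by the
// log4j-1.2-api bridge and forwarded to log4j2, so the old log4j jar is not needed.
import org.apache.log4j.Logger;

public class Log4j12BridgeExample
{
  private static final Logger log = Logger.getLogger(Log4j12BridgeExample.class);

  public static void main(String[] args)
  {
    log.info("written via the log4j 1.2 bridge into the log4j2 appenders");
  }
}
```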

pom.xml

@@ -45,6 +45,7 @@
        <jetty.version>9.2.5.v20141112</jetty.version>
        <druid.api.version>0.3.1</druid.api.version>
        <jackson.version>2.4.4</jackson.version>
+        <log4j.version>2.1</log4j.version>
    </properties>

    <modules>
@@ -348,14 +349,29 @@
                <version>2.0.1</version>
            </dependency>
            <dependency>
-                <groupId>log4j</groupId>
-                <artifactId>log4j</artifactId>
-                <version>1.2.16</version>
+                <groupId>org.apache.logging.log4j</groupId>
+                <artifactId>log4j-api</artifactId>
+                <version>${log4j.version}</version>
            </dependency>
            <dependency>
-                <groupId>org.slf4j</groupId>
-                <artifactId>slf4j-log4j12</artifactId>
-                <version>1.6.2</version>
+                <groupId>org.apache.logging.log4j</groupId>
+                <artifactId>log4j-core</artifactId>
+                <version>${log4j.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.logging.log4j</groupId>
+                <artifactId>log4j-slf4j-impl</artifactId>
+                <version>${log4j.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.logging.log4j</groupId>
+                <artifactId>log4j-1.2-api</artifactId>
+                <version>${log4j.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>com.lmax</groupId>
+                <artifactId>disruptor</artifactId>
+                <version>3.3.0</version>
            </dependency>
            <dependency>
                <groupId>net.spy</groupId>
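With slf4j-log4j12 replaced by log4j-slf4j-impl in the managed dependencies above, a quick way to confirm which SLF4J binding actually ended up on a node's classpath is to print the logger factory; a small sketch, not part of this commit:

```
// Sketch: prints the concrete SLF4J logger factory class. With log4j-slf4j-impl it
// comes from the org.apache.logging.slf4j package; with the old slf4j-log4j12
// binding it would come from org.slf4j.impl.
import org.slf4j.LoggerFactory;

public class BindingCheck
{
  public static void main(String[] args)
  {
    System.out.println(LoggerFactory.getILoggerFactory().getClass().getName());
  }
}
```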


@@ -54,10 +54,6 @@
      <groupId>org.skife.config</groupId>
      <artifactId>config-magic</artifactId>
    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-    </dependency>
    <dependency>
      <groupId>com.google.protobuf</groupId>
      <artifactId>protobuf-java</artifactId>


@@ -118,7 +118,7 @@ public class BatchServerInventoryView extends ServerInventoryView<Set<DataSegment>>
  @Override
  protected DruidServer removeInnerInventory(final DruidServer container, String inventoryKey)
  {
-    log.info("Server[%s] removed container[%s]", container.getName(), inventoryKey);
+    log.debug("Server[%s] removed container[%s]", container.getName(), inventoryKey);

    Set<DataSegment> segments = zNodes.remove(inventoryKey);
    if (segments == null) {

@@ -293,7 +293,7 @@ public abstract class ServerInventoryView<InventoryType> implements ServerView,
      final DataSegment inventory
  )
  {
-    log.info("Server[%s] added segment[%s]", container.getName(), inventory.getIdentifier());
+    log.debug("Server[%s] added segment[%s]", container.getName(), inventory.getIdentifier());

    if (container.getSegment(inventory.getIdentifier()) != null) {
      log.warn(
@@ -321,7 +321,7 @@

  protected void removeSingleInventory(final DruidServer container, String inventoryKey)
  {
-    log.info("Server[%s] removed segment[%s]", container.getName(), inventoryKey);
+    log.debug("Server[%s] removed segment[%s]", container.getName(), inventoryKey);

    final DataSegment segment = container.getSegment(inventoryKey);
    if (segment == null) {


@@ -224,7 +224,7 @@
        containers.put(containerKey, new ContainerHolder(container, inventoryCache));

-        log.info("Starting inventory cache for %s, inventoryPath %s", containerKey, inventoryPath);
+        log.debug("Starting inventory cache for %s, inventoryPath %s", containerKey, inventoryPath);
        inventoryCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
        strategy.newContainer(container);
      }
@@ -244,7 +244,7 @@
        // This close() call actually calls shutdownNow() on the executor registered with the Cache object, it
        // better have its own executor or ignore shutdownNow() calls...
-        log.info("Closing inventory cache for %s. Also removing listeners.", containerKey);
+        log.debug("Closing inventory cache for %s. Also removing listeners.", containerKey);
        removed.getCache().getListenable().clear();
        removed.getCache().close();
        strategy.deadContainer(removed.getContainer());
@@ -263,7 +263,7 @@
        final ContainerClass container = strategy.deserializeContainer(child.getData());

-        log.info("Container[%s] updated.", child.getPath());
+        log.debug("Container[%s] updated.", child.getPath());

        ContainerHolder holder = containers.get(containerKey);
        if (holder == null) {
          log.warn("Container update[%s], but the old container didn't exist!? Ignoring.", child.getPath());
@@ -338,7 +338,7 @@
          case CHILD_ADDED: {
            final ChildData child = event.getData();
            final String inventoryKey = ZKPaths.getNodeFromPath(child.getPath());

-            log.info("CHILD_ADDED[%s] with version[%s]", child.getPath(), event.getData().getStat().getVersion());
+            log.debug("CHILD_ADDED[%s] with version[%s]", child.getPath(), event.getData().getStat().getVersion());

            final InventoryClass addedInventory = strategy.deserializeInventory(child.getData());
@@ -351,7 +351,7 @@
          case CHILD_UPDATED: {
            final ChildData child = event.getData();
            final String inventoryKey = ZKPaths.getNodeFromPath(child.getPath());

-            log.info("CHILD_UPDATED[%s] with version[%s]", child.getPath(), event.getData().getStat().getVersion());
+            log.debug("CHILD_UPDATED[%s] with version[%s]", child.getPath(), event.getData().getStat().getVersion());

            final InventoryClass updatedInventory = strategy.deserializeInventory(child.getData());
@@ -365,7 +365,7 @@
          case CHILD_REMOVED: {
            final ChildData child = event.getData();
            final String inventoryKey = ZKPaths.getNodeFromPath(child.getPath());
-            log.info("CHILD_REMOVED[%s] with version[%s]", child.getPath(), event.getData().getStat().getVersion());
+            log.debug("CHILD_REMOVED[%s] with version[%s]", child.getPath(), event.getData().getStat().getVersion());

            synchronized (holder) {
              holder.setContainer(strategy.removeInventory(holder.getContainer(), inventoryKey));
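The Java changes above demote chatty inventory messages from info to debug. When debug is disabled, such calls stay cheap as long as arguments are not built eagerly; a generic sketch using the log4j2 API directly (not Druid's own logger wrapper, which uses %s-style formatting), with placeholder values:

```
// Generic sketch (log4j2 API, not Druid's logger wrapper): a disabled debug call with
// parameterized arguments skips formatting entirely, and an explicit guard avoids
// computing expensive arguments at all.
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class DebugCostExample
{
  private static final Logger log = LogManager.getLogger(DebugCostExample.class);

  public static void main(String[] args)
  {
    // Cheap when debug is off: the message is never formatted.
    log.debug("Server[{}] removed segment[{}]", "historical-host:8083", "example-segment-id");

    // Guard when the argument itself is costly to compute.
    if (log.isDebugEnabled()) {
      log.debug("inventory snapshot: {}", buildExpensiveSnapshot());
    }
  }

  private static String buildExpensiveSnapshot()
  {
    return "snapshot-placeholder"; // stand-in for a costly serialization
  }
}
```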