diff --git a/common/pom.xml b/common/pom.xml
index 11f8ae8ee4b..f1814ea6809 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -116,8 +116,21 @@
             <artifactId>jsr305</artifactId>
         </dependency>
         <dependency>
-            <groupId>log4j</groupId>
-            <artifactId>log4j</artifactId>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-core</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-slf4j-impl</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.lmax</groupId>
+            <artifactId>disruptor</artifactId>
         </dependency>
diff --git a/common/src/main/resources/log4j.xml b/common/src/main/resources/log4j2.xml
similarity index 61%
rename from common/src/main/resources/log4j.xml
rename to common/src/main/resources/log4j2.xml
index 621c7a2ac9a..8e37260641c 100644
--- a/common/src/main/resources/log4j.xml
+++ b/common/src/main/resources/log4j2.xml
@@ -1,7 +1,7 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
-<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
-    <appender name="ConsoleAppender" class="org.apache.log4j.ConsoleAppender">
-        <layout class="org.apache.log4j.PatternLayout">
-            <param name="ConversionPattern" value="%d{ISO8601} %p [%t] %c - %m%n"/>
-        </layout>
-    </appender>
-    <root>
-        <priority value="info"/>
-        <appender-ref ref="ConsoleAppender"/>
-    </root>
-</log4j:configuration>
\ No newline at end of file
+<?xml version="1.0" encoding="UTF-8" ?>
+<Configuration status="WARN">
+    <Appenders>
+        <Console name="Console" target="SYSTEM_OUT">
+            <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
+        </Console>
+    </Appenders>
+    <Loggers>
+        <Root level="info">
+            <AppenderRef ref="Console"/>
+        </Root>
+    </Loggers>
+</Configuration>
diff --git a/docs/content/Performance-FAQ.md b/docs/content/Performance-FAQ.md
index c4bd7363a87..3a574c21777 100644
--- a/docs/content/Performance-FAQ.md
+++ b/docs/content/Performance-FAQ.md
@@ -15,4 +15,35 @@ The size of the JVM heap really depends on the type of Druid node you are runnin
The intermediate computation buffer specifies a buffer size for the storage of intermediate results. The computation engine in both the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed. The default size is 1073741824 bytes (1GB).
## What is server maxSize?
-Server maxSize sets the maximum cumulative segment size (in bytes) that a node can hold. Changing this parameter will affect performance by controlling the memory/disk ratio on a node. Setting this parameter to a value greater than the total memory capacity on a node and may cause disk paging to occur. This paging time introduces a query latency delay.
\ No newline at end of file
+Server maxSize sets the maximum cumulative segment size (in bytes) that a node can hold. Changing this parameter will affect performance by controlling the memory/disk ratio on a node. Setting this parameter to a value greater than the total memory capacity on a node may cause disk paging to occur, and that paging time introduces query latency.
+
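+For a rough illustration of how these two settings are usually expressed, a Historical node's `runtime.properties` might contain something like the following (the values here are only a sketch and should be tuned to your hardware rather than copied verbatim):
+
+```
+# intermediate computation buffer: 1GB off-heap scratch space per processing thread
+druid.processing.buffer.sizeBytes=1073741824
+# total segment bytes this node may hold on disk
+druid.server.maxSize=300000000000
+```
+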
+## My logs are really chatty, can I set them to write asynchronously?
+Yes. Using a `log4j2.xml` similar to the following causes some of the chattier classes to log asynchronously:
+```xml
+<?xml version="1.0" encoding="UTF-8" ?>
+<Configuration status="WARN">
+    <Appenders>
+        <Console name="Console" target="SYSTEM_OUT">
+            <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
+        </Console>
+    </Appenders>
+    <Loggers>
+        <AsyncLogger name="io.druid.curator.inventory.CuratorInventoryManager" level="debug" additivity="false">
+            <AppenderRef ref="Console"/>
+        </AsyncLogger>
+        <AsyncLogger name="io.druid.client.BatchServerInventoryView" level="debug" additivity="false">
+            <AppenderRef ref="Console"/>
+        </AsyncLogger>
+        <AsyncLogger name="io.druid.client.ServerInventoryView" level="debug" additivity="false">
+            <AppenderRef ref="Console"/>
+        </AsyncLogger>
+        <Root level="info">
+            <AppenderRef ref="Console"/>
+        </Root>
+    </Loggers>
+</Configuration>
+```
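+
+Asynchronous loggers in log4j2 are backed by the LMAX Disruptor, so `com.lmax:disruptor` must be on the classpath (the `common` module pulls it in). To make every logger asynchronous instead of listing classes one by one, log4j2 also supports selecting the async logger context for the whole JVM:
+```
+-DLog4jContextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector
+```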
\ No newline at end of file
diff --git a/extensions/s3-extensions/pom.xml b/extensions/s3-extensions/pom.xml
index 30962948b0a..c8c860717f5 100644
--- a/extensions/s3-extensions/pom.xml
+++ b/extensions/s3-extensions/pom.xml
@@ -43,6 +43,11 @@
             <groupId>net.java.dev.jets3t</groupId>
             <artifactId>jets3t</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-1.2-api</artifactId>
+        </dependency>
         <dependency>
             <groupId>com.amazonaws</groupId>
             <artifactId>aws-java-sdk</artifactId>
@@ -76,11 +81,6 @@
             <artifactId>easymock</artifactId>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>log4j</groupId>
-            <artifactId>log4j</artifactId>
-            <scope>test</scope>
-        </dependency>
diff --git a/pom.xml b/pom.xml
index 6a9ce9f524d..8cbc6d8274c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -45,6 +45,7 @@
         <jetty.version>9.2.5.v20141112</jetty.version>
         <druid.api.version>0.3.1</druid.api.version>
         <jackson.version>2.4.4</jackson.version>
+        <log4j.version>2.1</log4j.version>
@@ -348,14 +349,29 @@
                 <version>2.0.1</version>
             </dependency>
             <dependency>
-                <groupId>log4j</groupId>
-                <artifactId>log4j</artifactId>
-                <version>1.2.16</version>
+                <groupId>org.apache.logging.log4j</groupId>
+                <artifactId>log4j-api</artifactId>
+                <version>${log4j.version}</version>
             </dependency>
             <dependency>
-                <groupId>org.slf4j</groupId>
-                <artifactId>slf4j-log4j12</artifactId>
-                <version>1.6.2</version>
+                <groupId>org.apache.logging.log4j</groupId>
+                <artifactId>log4j-core</artifactId>
+                <version>${log4j.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.logging.log4j</groupId>
+                <artifactId>log4j-slf4j-impl</artifactId>
+                <version>${log4j.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.logging.log4j</groupId>
+                <artifactId>log4j-1.2-api</artifactId>
+                <version>${log4j.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>com.lmax</groupId>
+                <artifactId>disruptor</artifactId>
+                <version>3.3.0</version>
             </dependency>
             <dependency>
                 <groupId>net.spy</groupId>
diff --git a/processing/pom.xml b/processing/pom.xml
index 1d74887c994..e8ca1011a96 100644
--- a/processing/pom.xml
+++ b/processing/pom.xml
@@ -54,10 +54,6 @@
             <groupId>org.skife.config</groupId>
             <artifactId>config-magic</artifactId>
         </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-log4j12</artifactId>
-        </dependency>
         <dependency>
             <groupId>com.google.protobuf</groupId>
             <artifactId>protobuf-java</artifactId>
diff --git a/server/src/main/java/io/druid/client/BatchServerInventoryView.java b/server/src/main/java/io/druid/client/BatchServerInventoryView.java
index 647f73ba14b..ac30b6161e1 100644
--- a/server/src/main/java/io/druid/client/BatchServerInventoryView.java
+++ b/server/src/main/java/io/druid/client/BatchServerInventoryView.java
@@ -118,7 +118,7 @@ public class BatchServerInventoryView extends ServerInventoryView<Set<DataSegment>>
         final Set<DataSegment> segments = zNodes.remove(inventoryKey);
if (segments == null) {
diff --git a/server/src/main/java/io/druid/client/ServerInventoryView.java b/server/src/main/java/io/druid/client/ServerInventoryView.java
index 4e7979f1ca0..00e268effdc 100644
--- a/server/src/main/java/io/druid/client/ServerInventoryView.java
+++ b/server/src/main/java/io/druid/client/ServerInventoryView.java
@@ -293,7 +293,7 @@ public abstract class ServerInventoryView<InventoryType> implements ServerView,
final DataSegment inventory
)
{
- log.info("Server[%s] added segment[%s]", container.getName(), inventory.getIdentifier());
+ log.debug("Server[%s] added segment[%s]", container.getName(), inventory.getIdentifier());
if (container.getSegment(inventory.getIdentifier()) != null) {
log.warn(
@@ -321,7 +321,7 @@ public abstract class ServerInventoryView implements ServerView,
protected void removeSingleInventory(final DruidServer container, String inventoryKey)
{
- log.info("Server[%s] removed segment[%s]", container.getName(), inventoryKey);
+ log.debug("Server[%s] removed segment[%s]", container.getName(), inventoryKey);
final DataSegment segment = container.getSegment(inventoryKey);
if (segment == null) {
diff --git a/server/src/main/java/io/druid/curator/inventory/CuratorInventoryManager.java b/server/src/main/java/io/druid/curator/inventory/CuratorInventoryManager.java
index aaa4409221c..0bf0e227e96 100644
--- a/server/src/main/java/io/druid/curator/inventory/CuratorInventoryManager.java
+++ b/server/src/main/java/io/druid/curator/inventory/CuratorInventoryManager.java
@@ -224,7 +224,7 @@ public class CuratorInventoryManager
containers.put(containerKey, new ContainerHolder(container, inventoryCache));
- log.info("Starting inventory cache for %s, inventoryPath %s", containerKey, inventoryPath);
+ log.debug("Starting inventory cache for %s, inventoryPath %s", containerKey, inventoryPath);
inventoryCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
strategy.newContainer(container);
}
@@ -244,7 +244,7 @@ public class CuratorInventoryManager
// This close() call actually calls shutdownNow() on the executor registered with the Cache object, it
// better have its own executor or ignore shutdownNow() calls...
- log.info("Closing inventory cache for %s. Also removing listeners.", containerKey);
+ log.debug("Closing inventory cache for %s. Also removing listeners.", containerKey);
removed.getCache().getListenable().clear();
removed.getCache().close();
strategy.deadContainer(removed.getContainer());
@@ -263,7 +263,7 @@ public class CuratorInventoryManager
final ContainerClass container = strategy.deserializeContainer(child.getData());
- log.info("Container[%s] updated.", child.getPath());
+ log.debug("Container[%s] updated.", child.getPath());
ContainerHolder holder = containers.get(containerKey);
if (holder == null) {
log.warn("Container update[%s], but the old container didn't exist!? Ignoring.", child.getPath());
@@ -338,7 +338,7 @@ public class CuratorInventoryManager
case CHILD_ADDED: {
final ChildData child = event.getData();
final String inventoryKey = ZKPaths.getNodeFromPath(child.getPath());
- log.info("CHILD_ADDED[%s] with version[%s]", child.getPath(), event.getData().getStat().getVersion());
+ log.debug("CHILD_ADDED[%s] with version[%s]", child.getPath(), event.getData().getStat().getVersion());
final InventoryClass addedInventory = strategy.deserializeInventory(child.getData());
@@ -351,7 +351,7 @@ public class CuratorInventoryManager
case CHILD_UPDATED: {
final ChildData child = event.getData();
final String inventoryKey = ZKPaths.getNodeFromPath(child.getPath());
- log.info("CHILD_UPDATED[%s] with version[%s]", child.getPath(), event.getData().getStat().getVersion());
+ log.debug("CHILD_UPDATED[%s] with version[%s]", child.getPath(), event.getData().getStat().getVersion());
final InventoryClass updatedInventory = strategy.deserializeInventory(child.getData());
@@ -365,7 +365,7 @@ public class CuratorInventoryManager
case CHILD_REMOVED: {
final ChildData child = event.getData();
final String inventoryKey = ZKPaths.getNodeFromPath(child.getPath());
- log.info("CHILD_REMOVED[%s] with version[%s]", child.getPath(), event.getData().getStat().getVersion());
+ log.debug("CHILD_REMOVED[%s] with version[%s]", child.getPath(), event.getData().getStat().getVersion());
synchronized (holder) {
holder.setContainer(strategy.removeInventory(holder.getContainer(), inventoryKey));