diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e448ca6727d..8148052005b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -393,6 +393,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6838. Code cleanup for unnecessary INode replacement.
     (Jing Zhao via wheat9)
 
+    HDFS-6836. HDFS INFO logging is verbose & uses file appenders. (Xiaoyu
+    Yao via Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 86d88c2a44e..febf2de0c3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -687,7 +687,7 @@ class BlockSender implements java.io.Closeable {
     // Trigger readahead of beginning of file if configured.
     manageOsCache();
 
-    final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
+    final long startTime = ClientTraceLog.isDebugEnabled() ? System.nanoTime() : 0;
     try {
       int maxChunksPerPacket;
       int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN;
@@ -733,9 +733,9 @@ class BlockSender implements java.io.Closeable {
         sentEntireByteRange = true;
       }
     } finally {
-      if (clientTraceFmt != null) {
+      if ((clientTraceFmt != null) && ClientTraceLog.isDebugEnabled()) {
         final long endTime = System.nanoTime();
-        ClientTraceLog.info(String.format(clientTraceFmt, totalRead,
+        ClientTraceLog.debug(String.format(clientTraceFmt, totalRead,
             initialOffset, endTime - startTime));
       }
       close();
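
For context, a minimal standalone sketch of the guarded-logging pattern this patch applies: the per-read client trace line moves from INFO to DEBUG, and the String.format() cost is only paid when the client trace logger has DEBUG enabled. The class and method names below are illustrative, not part of the patch; commons-logging is assumed since that is the logging API BlockSender's ClientTraceLog uses on this branch.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class ClientTraceExample {
  // Illustrative stand-in for DataNode.ClientTraceLog (hypothetical logger name).
  private static final Log CLIENT_TRACE =
      LogFactory.getLog(ClientTraceExample.class.getName() + ".clienttrace");

  static void logReadTrace(String fmt, long totalRead, long initialOffset,
      long elapsedNanos) {
    // Guard the expensive String.format() so it is skipped entirely unless
    // DEBUG is on, mirroring the isDebugEnabled() check added in the patch.
    if (CLIENT_TRACE.isDebugEnabled()) {
      CLIENT_TRACE.debug(String.format(fmt, totalRead, initialOffset, elapsedNanos));
    }
  }
}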