From 225569ece229cec32f852f831fd337a139c44b1e Mon Sep 17 00:00:00 2001
From: Colin Patrick Mccabe
Date: Wed, 27 Aug 2014 13:39:40 -0700
Subject: [PATCH] HDFS-4486. Add log category for long-running DFSClient
 notices. Contributed by Zhe Zhang.

---
 .../apache/hadoop/crypto/OpensslCipher.java        |  2 ++
 .../crypto/random/OpensslSecureRandom.java         |  3 +++
 .../apache/hadoop/io/nativeio/NativeIO.java        |  7 ++---
 ...JniBasedUnixGroupsMappingWithFallback.java      |  3 ++-
 .../hadoop/util/PerformanceAdvisory.java           | 24 +++++++++++++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  3 +++
 .../hadoop/hdfs/BlockReaderFactory.java            | 27 +++++++++----------
 .../shortcircuit/DomainSocketFactory.java          |  4 ++-
 8 files changed, 53 insertions(+), 20 deletions(-)
 create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PerformanceAdvisory.java

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
index 264652b202a..2eb16ee4747 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 /**
  * OpenSSL cipher using JNI.
@@ -82,6 +83,7 @@ public final class OpensslCipher {
     String loadingFailure = null;
     try {
       if (!NativeCodeLoader.buildSupportsOpenssl()) {
+        PerformanceAdvisory.LOG.debug("Build does not support openssl");
         loadingFailure = "build does not support openssl.";
       } else {
         initIDs();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
index b1fa9883373..6c53a0a2179 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 /**
  * OpenSSL secure random using JNI.
@@ -67,6 +68,8 @@ public class OpensslSecureRandom extends Random {
 
   public OpensslSecureRandom() {
     if (!nativeEnabled) {
+      PerformanceAdvisory.LOG.debug("Build does not support openssl, " +
+          "falling back to Java SecureRandom.");
       fallback = new java.security.SecureRandom();
     }
   }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index fafa29543e0..53d31d6fb96 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -196,7 +197,7 @@ public class NativeIO {
         // This can happen if the user has an older version of libhadoop.so
         // installed - in this case we can continue without native IO
         // after warning
-        LOG.error("Unable to initialize NativeIO libraries", t);
+        PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
       }
     }
   }
@@ -574,7 +575,7 @@ public class NativeIO {
         // This can happen if the user has an older version of libhadoop.so
         // installed - in this case we can continue without native IO
         // after warning
-        LOG.error("Unable to initialize NativeIO libraries", t);
+        PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
       }
     }
   }
@@ -593,7 +594,7 @@ public class NativeIO {
         // This can happen if the user has an older version of libhadoop.so
         // installed - in this case we can continue without native IO
         // after warning
-        LOG.error("Unable to initialize NativeIO libraries", t);
+        PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
       }
     }
   }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
index 908ca1468d1..40333fcc5df 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 public class JniBasedUnixGroupsMappingWithFallback implements
     GroupMappingServiceProvider {
@@ -37,7 +38,7 @@ public class JniBasedUnixGroupsMappingWithFallback implements
     if (NativeCodeLoader.isNativeCodeLoaded()) {
       this.impl = new JniBasedUnixGroupsMapping();
     } else {
-      LOG.debug("Falling back to shell based");
+      PerformanceAdvisory.LOG.debug("Falling back to shell based");
       this.impl = new ShellBasedUnixGroupsMapping();
     }
     if (LOG.isDebugEnabled()){
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PerformanceAdvisory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PerformanceAdvisory.java
new file mode 100644
index 00000000000..306d47c805e
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PerformanceAdvisory.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class PerformanceAdvisory {
+  public static final Log LOG = LogFactory.getLog(PerformanceAdvisory.class);
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 77832433c1b..f3ecf075219 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -514,6 +514,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6899. Allow changing MiniDFSCluster volumes per DN and capacity
     per volume. (Arpit Agarwal)
 
+    HDFS-4486. Add log category for long-running DFSClient notices (Zhe Zhang
+    via Colin Patrick McCabe)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index d27bd6ef0d2..3fb442b94a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.PerformanceAdvisory;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -343,10 +344,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       return null;
     }
     if (clientContext.getDisableLegacyBlockReaderLocal()) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(this + ": can't construct BlockReaderLocalLegacy because " +
-            "disableLegacyBlockReaderLocal is set.");
-      }
+      PerformanceAdvisory.LOG.debug(this + ": can't construct " +
+          "BlockReaderLocalLegacy because " +
+          "disableLegacyBlockReaderLocal is set.");
       return null;
     }
     IOException ioe = null;
@@ -385,10 +385,8 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
           getPathInfo(inetSocketAddress, conf);
     }
     if (!pathInfo.getPathState().getUsableForShortCircuit()) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(this + ": " + pathInfo + " is not " +
-            "usable for short circuit; giving up on BlockReaderLocal.");
-      }
+      PerformanceAdvisory.LOG.debug(this + ": " + pathInfo + " is not " +
+          "usable for short circuit; giving up on BlockReaderLocal.");
       return null;
     }
     ShortCircuitCache cache = clientContext.getShortCircuitCache();
@@ -404,8 +402,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
     }
     if (info.getReplica() == null) {
       if (LOG.isTraceEnabled()) {
-        LOG.trace(this + ": failed to get ShortCircuitReplica. " +
-            "Cannot construct BlockReaderLocal via " + pathInfo.getPath());
+        PerformanceAdvisory.LOG.debug(this + ": failed to get " +
+            "ShortCircuitReplica. Cannot construct " +
+            "BlockReaderLocal via " + pathInfo.getPath());
       }
       return null;
     }
@@ -580,11 +579,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
           getPathInfo(inetSocketAddress, conf);
     }
     if (!pathInfo.getPathState().getUsableForDataTransfer()) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(this + ": not trying to create a remote block reader " +
-            "because the UNIX domain socket at " + pathInfo +
-            " is not usable.");
-      }
+      PerformanceAdvisory.LOG.debug(this + ": not trying to create a " +
+          "remote block reader because the UNIX domain socket at " +
+          pathInfo + " is not usable.");
       return null;
     }
     if (LOG.isTraceEnabled()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
index e067de7b4ad..5fd31a920cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.net.unix.DomainSocket;
 import com.google.common.base.Preconditions;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 public class DomainSocketFactory {
   private static final Log LOG = LogFactory.getLog(DomainSocketFactory.class);
@@ -105,7 +106,8 @@ public class DomainSocketFactory {
     }
 
     if (feature == null) {
-      LOG.debug("Both short-circuit local reads and UNIX domain socket are disabled.");
+      PerformanceAdvisory.LOG.debug(
+          "Both short-circuit local reads and UNIX domain socket are disabled.");
     } else {
       if (conf.getDomainSocketPath().isEmpty()) {
         throw new HadoopIllegalArgumentException(feature + " is enabled but "
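
Usage note (not part of the patch above): the point of the new PerformanceAdvisory
class is to route every "native code unavailable / short-circuit read disabled"
notice through one well-known log category, org.apache.hadoop.util.PerformanceAdvisory,
so operators can surface or silence all such advisories in one place instead of
per-class loggers. Below is a minimal sketch of how client code would log against
the category; the PerfAdvisoryExample class and its message are hypothetical, and
only PerformanceAdvisory.LOG itself comes from the patch.

    import org.apache.hadoop.util.PerformanceAdvisory;

    public class PerfAdvisoryExample {
      public void openReader() {
        // This message goes to the shared "org.apache.hadoop.util.PerformanceAdvisory"
        // logger rather than this class's own logger. A single log4j.properties
        // line therefore surfaces every performance advisory at once, e.g.:
        //   log4j.logger.org.apache.hadoop.util.PerformanceAdvisory=DEBUG
        // (standard log4j/commons-logging convention, not something this patch adds).
        PerformanceAdvisory.LOG.debug("Falling back to a slower code path.");
      }
    }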