diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 7d442bca414..bf165ab9f3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -35,6 +35,16 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+      </exclusions>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index eda135e114a..0e72b9868e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -317,10 +317,9 @@ public class DFSUtilClient {
if (address != null) {
InetSocketAddress isa = NetUtils.createSocketAddr(address);
if (isa.isUnresolved()) {
- LOG.warn("Namenode for " + nsId +
- " remains unresolved for ID " + nnId +
- ". Check your hdfs-site.xml file to " +
- "ensure namenodes are configured properly.");
+ LOG.warn("Namenode for {} remains unresolved for ID {}. Check your "
+ + "hdfs-site.xml file to ensure namenodes are configured "
+ + "properly.", nsId, nnId);
}
ret.put(nnId, isa);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
index 2624960d93a..02250095c9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
@@ -158,13 +158,11 @@ public class BlockStoragePolicy implements BlockStoragePolicySpi {
// remove excess storage types after fallback replacement.
diff(storageTypes, excess, null);
if (storageTypes.size() < expectedSize) {
- LOG.warn("Failed to place enough replicas: expected size is " + expectedSize
- + " but only " + storageTypes.size() + " storage types can be selected "
- + "(replication=" + replication
- + ", selected=" + storageTypes
- + ", unavailable=" + unavailables
- + ", removed=" + removed
- + ", policy=" + this + ")");
+        LOG.warn("Failed to place enough replicas: expected size is {}"
+            + " but only {} storage types can be selected (replication={},"
+            + " selected={}, unavailable={}, removed={}, policy={})",
+            expectedSize, storageTypes.size(), replication, storageTypes,
+            unavailables, removed, this);
}
return storageTypes;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
index 61bbe387b9c..41ec2f1c32d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
@@ -24,8 +24,6 @@ import javax.annotation.Nullable;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.InvalidRequestException;
@@ -41,7 +39,6 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CachePoolInfo {
- public static final Log LOG = LogFactory.getLog(CachePoolInfo.class);
/**
* Indicates that the pool does not have a maximum relative expiry.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
index 705ab4c803c..aff41792ffe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
@@ -21,8 +21,6 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegationTokenRenewer;
@@ -37,6 +35,8 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
@@ -121,11 +121,11 @@ final class TokenAspect {
private final DTSelecorByKind dtSelector;
private final T fs;
private boolean hasInitedToken;
- private final Log LOG;
+ private final Logger LOG;
private final Text serviceName;
TokenAspect(T fs, final Text serviceName, final Text kind) {
- this.LOG = LogFactory.getLog(fs.getClass());
+ this.LOG = LoggerFactory.getLogger(fs.getClass());
this.fs = fs;
this.dtSelector = new DTSelecorByKind(kind);
this.serviceName = serviceName;
@@ -140,8 +140,8 @@ final class TokenAspect {
if (token != null) {
fs.setDelegationToken(token);
addRenewAction(fs);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Created new DT for " + token.getService());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Created new DT for {}", token.getService());
}
}
hasInitedToken = true;
@@ -155,8 +155,8 @@ final class TokenAspect {
synchronized void initDelegationToken(UserGroupInformation ugi) {
     Token<?> token = selectDelegationToken(ugi);
if (token != null) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Found existing DT for " + token.getService());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Found existing DT for {}", token.getService());
}
fs.setDelegationToken(token);
hasInitedToken = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index e330adf4278..a5e02f234df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -28,8 +28,6 @@ import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLSocketFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -38,6 +36,8 @@ import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.ssl.SSLFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
@@ -47,7 +47,8 @@ import com.google.common.annotations.VisibleForTesting;
@InterfaceAudience.LimitedPrivate({ "HDFS" })
@InterfaceStability.Unstable
public class URLConnectionFactory {
- private static final Log LOG = LogFactory.getLog(URLConnectionFactory.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(URLConnectionFactory.class);
/**
* Timeout for socket connects and reads
@@ -154,16 +155,14 @@ public class URLConnectionFactory {
throws IOException, AuthenticationException {
if (isSpnego) {
if (LOG.isDebugEnabled()) {
- LOG.debug("open AuthenticatedURL connection" + url);
+ LOG.debug("open AuthenticatedURL connection {}", url);
}
UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab();
final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
return new AuthenticatedURL(new KerberosUgiAuthenticator(),
connConfigurator).openConnection(url, authToken);
} else {
- if (LOG.isDebugEnabled()) {
- LOG.debug("open URL connection");
- }
+ LOG.debug("open URL connection");
URLConnection connection = url.openConnection();
if (connection instanceof HttpURLConnection) {
connConfigurator.configure((HttpURLConnection) connection);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index fb5f029b270..4a01420e6f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -36,8 +36,6 @@ import java.util.StringTokenizer;
import javax.ws.rs.core.MediaType;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -81,6 +79,8 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelect
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.StringUtils;
import org.codehaus.jackson.map.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -89,7 +89,8 @@ import com.google.common.collect.Lists;
/** A FileSystem for HDFS over the web. */
public class WebHdfsFileSystem extends FileSystem
implements DelegationTokenRenewer.Renewable, TokenAspect.TokenManagementDelegator {
- public static final Log LOG = LogFactory.getLog(WebHdfsFileSystem.class);
+ public static final Logger LOG = LoggerFactory
+ .getLogger(WebHdfsFileSystem.class);
/** WebHdfs version. */
public static final int VERSION = 1;
/** Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file */
@@ -228,14 +229,14 @@ public class WebHdfsFileSystem extends FileSystem
// to get another token to match hdfs/rpc behavior
if (token != null) {
if(LOG.isDebugEnabled()) {
- LOG.debug("Using UGI token: " + token);
+ LOG.debug("Using UGI token: {}", token);
}
canRefreshDelegationToken = false;
} else {
token = getDelegationToken(null);
if (token != null) {
if(LOG.isDebugEnabled()) {
- LOG.debug("Fetched new token: " + token);
+ LOG.debug("Fetched new token: {}", token);
}
} else { // security is disabled
canRefreshDelegationToken = false;
@@ -252,7 +253,7 @@ public class WebHdfsFileSystem extends FileSystem
if (canRefreshDelegationToken) {
       Token<?> token = getDelegationToken(null);
if(LOG.isDebugEnabled()) {
- LOG.debug("Replaced expired token: " + token);
+ LOG.debug("Replaced expired token: {}", token);
}
setDelegationToken(token);
replaced = (token != null);
@@ -439,7 +440,7 @@ public class WebHdfsFileSystem extends FileSystem
final URL url = new URL(getTransportScheme(), nnAddr.getHostName(),
nnAddr.getPort(), path + '?' + query);
if (LOG.isTraceEnabled()) {
- LOG.trace("url=" + url);
+ LOG.trace("url={}", url);
}
return url;
}
@@ -476,7 +477,7 @@ public class WebHdfsFileSystem extends FileSystem
+ Param.toSortedString("&", parameters);
final URL url = getNamenodeURL(path, query);
if (LOG.isTraceEnabled()) {
- LOG.trace("url=" + url);
+ LOG.trace("url={}", url);
}
return url;
}
@@ -667,9 +668,9 @@ public class WebHdfsFileSystem extends FileSystem
a.action == RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY;
if (isRetry || isFailoverAndRetry) {
- LOG.info("Retrying connect to namenode: " + nnAddr
- + ". Already tried " + retry + " time(s); retry policy is "
- + retryPolicy + ", delay " + a.delayMillis + "ms.");
+ LOG.info("Retrying connect to namenode: {}. Already tried {}"
+ + " time(s); retry policy is {}, delay {}ms.", nnAddr, retry,
+ retryPolicy, a.delayMillis);
if (isFailoverAndRetry) {
resetStateToFailOver();
@@ -766,7 +767,7 @@ public class WebHdfsFileSystem extends FileSystem
final IOException ioe =
new IOException("Response decoding failure: "+e.toString(), e);
if (LOG.isDebugEnabled()) {
- LOG.debug(ioe);
+ LOG.debug("Response decoding failure: {}", e.toString(), e);
}
throw ioe;
} finally {
@@ -1221,7 +1222,7 @@ public class WebHdfsFileSystem extends FileSystem
}
} catch (IOException ioe) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Token cancel failed: " + ioe);
+ LOG.debug("Token cancel failed: ", ioe);
}
} finally {
super.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 14f4f9a3181..948e7250608 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -321,6 +321,9 @@ Release 2.8.0 - UNRELEASED
of Block in UnderReplicatedBlocks and PendingReplicationBlocks).
(Zhe Zhang via wang)
+ HDFS-6564. Use slf4j instead of common-logging in hdfs-client.
+ (Rakesh R via wheat9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than