Merge branch 'trunk' into HDFS-7240

commit b3a7f3b2df
@@ -162,10 +162,6 @@
           <groupId>org.apache.avro</groupId>
           <artifactId>avro</artifactId>
         </exclusion>
         <exclusion>
           <groupId>jline</groupId>
           <artifactId>jline</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 

@@ -86,8 +88,8 @@ public class TestCertificateUtil {
         + "Mzc1xA==";
     try {
       RSAPublicKey pk = CertificateUtil.parseRSAPublicKey(pem);
-      assertTrue(pk != null);
-      assertTrue(pk.getAlgorithm().equals("RSA"));
+      assertNotNull(pk);
+      assertEquals("RSA", pk.getAlgorithm());
     } catch (ServletException se) {
       fail("Should not have thrown ServletException");
     }

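The assertion swap above trades boolean checks for assertions that report both expected and actual values on failure. A minimal stand-alone illustration (class name and values are hypothetical, not from the commit):

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertTrue;

    public class AssertionStyleExample {
      public static void main(String[] args) {
        // Pretend a parsed key reported this algorithm.
        String algorithm = "DSA";
        // New style: fails with "expected:<RSA> but was:<DSA>".
        assertEquals("RSA", algorithm);
        // Old style: would fail with only a bare AssertionError.
        assertTrue(algorithm.equals("RSA"));
      }
    }
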
@@ -262,10 +262,6 @@
           <groupId>org.apache.zookeeper</groupId>
           <artifactId>zookeeper</artifactId>
           <exclusions>
             <exclusion>
               <groupId>jline</groupId>
               <artifactId>jline</artifactId>
             </exclusion>
-            <exclusion>
-              <groupId>org.jboss.netty</groupId>
-              <artifactId>netty</artifactId>

@@ -79,8 +79,6 @@ import javax.xml.transform.stream.StreamResult;
 
 import com.google.common.base.Charsets;
 import org.apache.commons.collections.map.UnmodifiableMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

@@ -98,6 +96,8 @@ import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.stax2.XMLInputFactory2;
 import org.codehaus.stax2.XMLStreamReader2;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 

@@ -192,11 +192,12 @@ import com.google.common.base.Strings;
 @InterfaceStability.Stable
 public class Configuration implements Iterable<Map.Entry<String,String>>,
                                       Writable {
-  private static final Log LOG =
-      LogFactory.getLog(Configuration.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Configuration.class);
 
-  private static final Log LOG_DEPRECATION =
-      LogFactory.getLog("org.apache.hadoop.conf.Configuration.deprecation");
+  private static final Logger LOG_DEPRECATION =
+      LoggerFactory.getLogger(
+          "org.apache.hadoop.conf.Configuration.deprecation");
 
   private boolean quietmode = true;
 

@@ -1667,7 +1668,15 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
   }
 
-  private long getTimeDurationHelper(String name, String vStr, TimeUnit unit) {
+  /**
+   * Return time duration in the given time unit. Valid units are encoded in
+   * properties as suffixes: nanoseconds (ns), microseconds (us), milliseconds
+   * (ms), seconds (s), minutes (m), hours (h), and days (d).
+   * @param name Property name
+   * @param vStr The string value with time unit suffix to be converted.
+   * @param unit Unit to convert the stored property, if it exists.
+   */
+  public long getTimeDurationHelper(String name, String vStr, TimeUnit unit) {
     vStr = vStr.trim();
     vStr = StringUtils.toLowerCase(vStr);
     ParsedTimeDuration vUnit = ParsedTimeDuration.unitFor(vStr);

@@ -2877,10 +2886,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       }
       return null;
     } catch (IOException e) {
-      LOG.fatal("error parsing conf " + name, e);
+      LOG.error("error parsing conf " + name, e);
       throw new RuntimeException(e);
     } catch (XMLStreamException e) {
-      LOG.fatal("error parsing conf " + name, e);
+      LOG.error("error parsing conf " + name, e);
       throw new RuntimeException(e);
     }
   }

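The Configuration changes show the two patterns this merge applies across hadoop-common: commons-logging declarations become SLF4J, and LOG.fatal calls become LOG.error because SLF4J has no fatal level. A minimal sketch of the idiom (class name is illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingExample {
      // Before: private static final Log LOG = LogFactory.getLog(LoggingExample.class);
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingExample.class);

      void demo(String name, Exception e) {
        // {} placeholders avoid string concatenation; a trailing Throwable
        // argument is logged with its stack trace.
        LOG.warn("failed for [{}]", name, e);
      }
    }

The now-public getTimeDurationHelper backs Configuration#getTimeDuration, which parses the unit suffixes listed in the new javadoc; hypothetical usage (the key name is made up):

    Configuration conf = new Configuration();
    conf.set("example.interval", "10m");  // ten minutes
    long seconds = conf.getTimeDuration("example.interval", 600,
        java.util.concurrent.TimeUnit.SECONDS);  // returns 600
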
@@ -22,9 +22,10 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
-import org.apache.commons.logging.*;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.Collection;

@@ -41,8 +42,8 @@ import java.util.Map;
 public abstract class ReconfigurableBase
     extends Configured implements Reconfigurable {
 
-  private static final Log LOG =
-      LogFactory.getLog(ReconfigurableBase.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReconfigurableBase.class);
   // Use for testing purpose.
   private ReconfigurationUtil reconfigurationUtil = new ReconfigurationUtil();
 

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.conf;
 
-import org.apache.commons.logging.*;
-
 import org.apache.commons.lang.StringEscapeUtils;
 
 import java.util.Collection;

@@ -33,6 +31,8 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A servlet for changing a node's configuration.

@@ -45,8 +45,8 @@ public class ReconfigurationServlet extends HttpServlet {
 
   private static final long serialVersionUID = 1L;
 
-  private static final Log LOG =
-      LogFactory.getLog(ReconfigurationServlet.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReconfigurationServlet.class);
 
   // the prefix used to fing the attribute holding the reconfigurable
   // for a given request

@@ -26,12 +26,12 @@ import javax.crypto.Cipher;
 import javax.crypto.spec.IvParameterSpec;
 import javax.crypto.spec.SecretKeySpec;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY;

@@ -42,8 +42,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
  */
 @InterfaceAudience.Private
 public class JceAesCtrCryptoCodec extends AesCtrCryptoCodec {
-  private static final Log LOG =
-      LogFactory.getLog(JceAesCtrCryptoCodec.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JceAesCtrCryptoCodec.class.getName());
 
   private Configuration conf;
   private String provider;

@@ -26,22 +26,22 @@ import java.security.GeneralSecurityException;
 import java.security.SecureRandom;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.crypto.random.OsSecureRandom;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Implement the AES-CTR crypto codec using JNI into OpenSSL.
  */
 @InterfaceAudience.Private
 public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec {
-  private static final Log LOG =
-      LogFactory.getLog(OpensslAesCtrCryptoCodec.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpensslAesCtrCryptoCodec.class.getName());
 
   private Configuration conf;
   private Random random;

@@ -26,13 +26,13 @@ import javax.crypto.IllegalBlockSizeException;
 import javax.crypto.NoSuchPaddingException;
 import javax.crypto.ShortBufferException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.util.PerformanceAdvisory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * OpenSSL cipher using JNI.

@@ -41,8 +41,8 @@ import org.apache.hadoop.util.PerformanceAdvisory;
  */
 @InterfaceAudience.Private
 public final class OpensslCipher {
-  private static final Log LOG =
-      LogFactory.getLog(OpensslCipher.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpensslCipher.class.getName());
   public static final int ENCRYPT_MODE = 1;
   public static final int DECRYPT_MODE = 0;
 

@@ -309,9 +309,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
    * - HOSTNAME = string
    * - PORT = integer
    *
-   * If multiple hosts are provider, the Factory will create a
-   * {@link LoadBalancingKMSClientProvider} that round-robins requests
-   * across the provided list of hosts.
+   * This will always create a {@link LoadBalancingKMSClientProvider}
+   * if the uri is correct.
    */
   @Override
   public KeyProvider createProvider(URI providerUri, Configuration conf)

@@ -338,30 +337,26 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
         }
         hostsPart = t[0];
       }
-      return createProvider(providerUri, conf, origUrl, port, hostsPart);
+      return createProvider(conf, origUrl, port, hostsPart);
     }
     return null;
   }
 
-  private KeyProvider createProvider(URI providerUri, Configuration conf,
+  private KeyProvider createProvider(Configuration conf,
       URL origUrl, int port, String hostsPart) throws IOException {
     String[] hosts = hostsPart.split(";");
-    if (hosts.length == 1) {
-      return new KMSClientProvider(providerUri, conf);
-    } else {
-      KMSClientProvider[] providers = new KMSClientProvider[hosts.length];
-      for (int i = 0; i < hosts.length; i++) {
-        try {
-          providers[i] =
-              new KMSClientProvider(
-                  new URI("kms", origUrl.getProtocol(), hosts[i], port,
-                      origUrl.getPath(), null, null), conf);
-        } catch (URISyntaxException e) {
-          throw new IOException("Could not instantiate KMSProvider..", e);
-        }
+    KMSClientProvider[] providers = new KMSClientProvider[hosts.length];
+    for (int i = 0; i < hosts.length; i++) {
+      try {
+        providers[i] =
+            new KMSClientProvider(
+                new URI("kms", origUrl.getProtocol(), hosts[i], port,
+                    origUrl.getPath(), null, null), conf);
+      } catch (URISyntaxException e) {
+        throw new IOException("Could not instantiate KMSProvider.", e);
       }
-      return new LoadBalancingKMSClientProvider(providers, conf);
     }
+    return new LoadBalancingKMSClientProvider(providers, conf);
   }
 

@@ -1078,7 +1073,11 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
       } catch (Exception e) {
-        throw new IOException(e);
+        if (e instanceof IOException) {
+          throw (IOException) e;
+        } else {
+          throw new IOException(e);
+        }
       }
     }
     return tokens;

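With the refactored factory, every well-formed kms:// URI now yields a LoadBalancingKMSClientProvider, even when only one host is listed. A hedged sketch of client-side construction (host names and port are made up):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class KmsClientExample {
      public static void main(String[] args) throws java.io.IOException {
        Configuration conf = new Configuration();
        // Semicolon-separated hosts in the authority fan out to one
        // KMSClientProvider per host behind the load balancer.
        KeyProvider provider = KeyProviderFactory.get(
            URI.create("kms://http@kms1.example.com;kms2.example.com:9600/kms"),
            conf);
        System.out.println(provider.getClass().getSimpleName());
      }
    }
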
@@ -19,6 +19,7 @@
 package org.apache.hadoop.crypto.key.kms;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.security.GeneralSecurityException;
 import java.security.NoSuchAlgorithmException;
 import java.util.Arrays;

@@ -31,14 +32,19 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 
 /**
  * A simple LoadBalancing KMSClientProvider that round-robins requests

@@ -68,6 +74,8 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
   private final KMSClientProvider[] providers;
   private final AtomicInteger currentIdx;
 
+  private RetryPolicy retryPolicy = null;
+
   public LoadBalancingKMSClientProvider(KMSClientProvider[] providers,
       Configuration conf) {
     this(shuffle(providers), Time.monotonicNow(), conf);

@@ -79,24 +87,79 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
     super(conf);
     this.providers = providers;
     this.currentIdx = new AtomicInteger((int)(seed % providers.length));
+    int maxNumRetries = conf.getInt(CommonConfigurationKeysPublic.
+        KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, providers.length);
+    int sleepBaseMillis = conf.getInt(CommonConfigurationKeysPublic.
+        KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY,
+        CommonConfigurationKeysPublic.
+            KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT);
+    int sleepMaxMillis = conf.getInt(CommonConfigurationKeysPublic.
+        KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY,
+        CommonConfigurationKeysPublic.
+            KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT);
+    Preconditions.checkState(maxNumRetries >= 0);
+    Preconditions.checkState(sleepBaseMillis >= 0);
+    Preconditions.checkState(sleepMaxMillis >= 0);
+    this.retryPolicy = RetryPolicies.failoverOnNetworkException(
+        RetryPolicies.TRY_ONCE_THEN_FAIL, maxNumRetries, 0, sleepBaseMillis,
+        sleepMaxMillis);
   }
 
   @VisibleForTesting
-  KMSClientProvider[] getProviders() {
+  public KMSClientProvider[] getProviders() {
     return providers;
   }
 
   private <T> T doOp(ProviderCallable<T> op, int currPos)
       throws IOException {
     if (providers.length == 0) {
       throw new IOException("No providers configured !");
     }
     IOException ex = null;
-    for (int i = 0; i < providers.length; i++) {
+    int numFailovers = 0;
+    for (int i = 0;; i++, numFailovers++) {
       KMSClientProvider provider = providers[(currPos + i) % providers.length];
       try {
         return op.call(provider);
+      } catch (AccessControlException ace) {
+        // No need to retry on AccessControlException
+        // and AuthorizationException.
+        // This assumes all the servers are configured with identical
+        // permissions and identical key acls.
+        throw ace;
       } catch (IOException ioe) {
-        LOG.warn("KMS provider at [{}] threw an IOException!! {}",
-            provider.getKMSUrl(), StringUtils.stringifyException(ioe));
+        LOG.warn("KMS provider at [{}] threw an IOException: ",
+            provider.getKMSUrl(), ioe);
         ex = ioe;
+
+        RetryAction action = null;
+        try {
+          action = retryPolicy.shouldRetry(ioe, 0, numFailovers, false);
+        } catch (Exception e) {
+          if (e instanceof IOException) {
+            throw (IOException)e;
+          }
+          throw new IOException(e);
+        }
+        if (action.action == RetryAction.RetryDecision.FAIL) {
+          LOG.warn("Aborting since the Request has failed with all KMS"
+              + " providers(depending on {}={} setting and numProviders={})"
+              + " in the group OR the exception is not recoverable",
+              CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY,
+              getConf().getInt(
+                  CommonConfigurationKeysPublic.
+                  KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, providers.length),
+              providers.length);
+          throw ex;
+        }
+        if (((numFailovers + 1) % providers.length) == 0) {
+          // Sleep only after we try all the providers for every cycle.
+          try {
+            Thread.sleep(action.delayMillis);
+          } catch (InterruptedException e) {
+            throw new InterruptedIOException("Thread Interrupted");
+          }
+        }
       } catch (Exception e) {
         if (e instanceof RuntimeException) {
           throw (RuntimeException)e;

@@ -105,12 +168,6 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       }
     }
-    if (ex != null) {
-      LOG.warn("Aborting since the Request has failed with all KMS"
-          + " providers in the group. !!");
-      throw ex;
-    }
-    throw new IOException("No providers configured !!");
   }
 
   private int nextIdx() {

@@ -159,15 +216,24 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
   // This request is sent to all providers in the load-balancing group
   @Override
   public void warmUpEncryptedKeys(String... keyNames) throws IOException {
+    Preconditions.checkArgument(providers.length > 0,
+        "No providers are configured");
+    boolean success = false;
+    IOException e = null;
     for (KMSClientProvider provider : providers) {
       try {
         provider.warmUpEncryptedKeys(keyNames);
+        success = true;
       } catch (IOException ioe) {
+        e = ioe;
         LOG.error(
             "Error warming up keys for provider with url"
-            + "[" + provider.getKMSUrl() + "]");
+            + "[" + provider.getKMSUrl() + "]", ioe);
       }
     }
+    if (!success && e != null) {
+      throw e;
+    }
   }
 
   // This request is sent to all providers in the load-balancing group

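The reworked doOp pairs round-robin failover with RetryPolicies.failoverOnNetworkException: every IOException consults the policy, and the loop sleeps only after a full pass over the group. A simplified, self-contained sketch of that control flow (not the Hadoop class itself):

    import java.io.IOException;

    class FailoverLoopSketch<P> {
      interface Call<Q, T> { T apply(Q provider) throws IOException; }

      // Try providers round-robin; back off once per full cycle; give up
      // after the retry budget. Mirrors doOp's shape, much simplified.
      <T> T doOp(P[] providers, Call<P, T> op, int maxFailovers,
          long delayMillis) throws IOException, InterruptedException {
        IOException last = null;
        for (int failovers = 0; failovers <= maxFailovers; failovers++) {
          P provider = providers[failovers % providers.length];
          try {
            return op.apply(provider);
          } catch (IOException ioe) {
            last = ioe;  // remember the failure, then fail over
            if ((failovers + 1) % providers.length == 0) {
              Thread.sleep(delayMillis);  // slept once per full cycle
            }
          }
        }
        throw last;  // retry budget exhausted
      }
    }
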
@@ -19,13 +19,13 @@ package org.apache.hadoop.crypto.random;
 
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.util.PerformanceAdvisory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * OpenSSL secure random using JNI.

@@ -44,8 +44,8 @@ import org.apache.hadoop.util.PerformanceAdvisory;
 @InterfaceAudience.Private
 public class OpensslSecureRandom extends Random {
   private static final long serialVersionUID = -7828193502768789584L;
-  private static final Log LOG =
-      LogFactory.getLog(OpensslSecureRandom.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpensslSecureRandom.class.getName());
 
   /** If native SecureRandom unavailable, use java SecureRandom */
   private java.security.SecureRandom fallback = null;

@@ -23,12 +23,12 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT;

@@ -39,7 +39,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
  */
 @InterfaceAudience.Private
 public class OsSecureRandom extends Random implements Closeable, Configurable {
-  public static final Log LOG = LogFactory.getLog(OsSecureRandom.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(OsSecureRandom.class);
 
   private static final long serialVersionUID = 6391500337172057900L;
 

@@ -112,7 +113,7 @@ public class OsSecureRandom extends Random implements Closeable, Configurable {
   @Override
   synchronized public void close() {
     if (stream != null) {
-      IOUtils.cleanup(LOG, stream);
+      IOUtils.cleanupWithLogger(LOG, stream);
       stream = null;
     }
   }

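IOUtils.cleanup takes a commons-logging Log, so classes moving to SLF4J also switch to IOUtils.cleanupWithLogger; both close every argument and log, rather than propagate, close() failures. A small sketch (file path is arbitrary):

    import java.io.FileInputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.IOUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class CleanupExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(CleanupExample.class);

      public static void main(String[] args) throws IOException {
        FileInputStream in = new FileInputStream("/etc/hosts");
        try {
          in.read();
        } finally {
          // Closes each Closeable, swallowing and logging any IOException.
          IOUtils.cleanupWithLogger(LOG, in);
        }
      }
    }
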
@@ -32,8 +32,6 @@ import java.util.NoSuchElementException;
 import java.util.StringTokenizer;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

@@ -52,6 +50,8 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class provides an interface for implementors of a Hadoop file system

@@ -66,7 +66,7 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public abstract class AbstractFileSystem {
-  static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);
+  static final Logger LOG = LoggerFactory.getLogger(AbstractFileSystem.class);
 
   /** Recording statistics per a file system class. */
   private static final Map<URI, Statistics>

@@ -27,14 +27,14 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Abstract Checksumed Fs.

@@ -110,8 +110,8 @@ public abstract class ChecksumFs extends FilterFs {
    * It verifies that data matches checksums.
    *******************************************************/
   private static class ChecksumFSInputChecker extends FSInputChecker {
-    public static final Log LOG
-        = LogFactory.getLog(FSInputChecker.class);
+    public static final Logger LOG =
+        LoggerFactory.getLogger(FSInputChecker.class);
     private static final int HEADER_LENGTH = 8;
 
     private ChecksumFs fs;

@@ -719,6 +719,35 @@ public class CommonConfigurationKeysPublic {
   /** Default value for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY (12 hrs)*/
   public static final int KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
 
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  /** Default value is the number of providers specified. */
+  public static final String KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY =
+      "hadoop.security.kms.client.failover.max.retries";
+
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY =
+      "hadoop.security.kms.client.failover.sleep.base.millis";
+  /** Default value is 100 ms. */
+  public static final int KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT = 100;
+
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY =
+      "hadoop.security.kms.client.failover.sleep.max.millis";
+  /** Default value is 2 secs. */
+  public static final int KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT = 2000;
+
   /**
    * @see
    * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">

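These keys feed the retry policy built in the LoadBalancingKMSClientProvider constructor above. A hedged example of overriding them programmatically (the values are arbitrary; the same properties can equally be set in core-site.xml):

    import org.apache.hadoop.conf.Configuration;

    public class KmsFailoverTuning {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Failover budget; defaults to the number of configured providers.
        conf.setInt("hadoop.security.kms.client.failover.max.retries", 4);
        // Exponential backoff between full passes: 250 ms base, 5 s cap.
        conf.setInt(
            "hadoop.security.kms.client.failover.sleep.base.millis", 250);
        conf.setInt(
            "hadoop.security.kms.client.failover.sleep.max.millis", 5000);
      }
    }
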
@@ -26,12 +26,12 @@ import java.util.concurrent.DelayQueue;
 import java.util.concurrent.Delayed;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A daemon thread that waits for the next file system to renew.

@@ -39,8 +39,8 @@ import org.apache.hadoop.util.Time;
 @InterfaceAudience.Private
 public class DelegationTokenRenewer
     extends Thread {
-  private static final Log LOG = LogFactory
-      .getLog(DelegationTokenRenewer.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(DelegationTokenRenewer.class);
 
   /** The renewable interface used by the renewer. */
   public interface Renewable {

@@ -243,7 +243,7 @@ public class DelegationTokenRenewer
         LOG.error("Interrupted while canceling token for " + fs.getUri()
             + "filesystem");
         if (LOG.isDebugEnabled()) {
-          LOG.debug(ie.getStackTrace());
+          LOG.debug("Exception in removeRenewAction: ", ie);
         }
       }
     }

@@ -22,11 +22,12 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.zip.Checksum;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.nio.ByteBuffer;
 import java.nio.IntBuffer;
 

@@ -37,8 +38,8 @@ import java.nio.IntBuffer;
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 abstract public class FSInputChecker extends FSInputStream {
-  public static final Log LOG
-      = LogFactory.getLog(FSInputChecker.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(FSInputChecker.class);
 
   /** The file name from which data is read from */
   protected Path file;

@@ -35,8 +35,6 @@ import java.util.Stack;
 import java.util.TreeSet;
 import java.util.Map.Entry;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

@@ -63,6 +61,8 @@ import org.apache.hadoop.util.ShutdownHookManager;
 
 import com.google.common.base.Preconditions;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The FileContext class provides an interface for users of the Hadoop

@@ -169,7 +169,7 @@ import org.apache.htrace.core.Tracer;
 @InterfaceStability.Stable
 public class FileContext {
 
-  public static final Log LOG = LogFactory.getLog(FileContext.class);
+  public static final Logger LOG = LoggerFactory.getLogger(FileContext.class);
   /**
    * Default permission for directory and symlink
    * In previous versions, this default permission was also used to

@@ -332,7 +332,7 @@ public class FileContext {
         }
       });
     } catch (InterruptedException ex) {
-      LOG.error(ex);
+      LOG.error(ex.toString());
       throw new IOException("Failed to get the AbstractFileSystem for path: "
           + uri, ex);
     }

@@ -446,7 +446,7 @@ public class FileContext {
     } catch (UnsupportedFileSystemException ex) {
       throw ex;
     } catch (IOException ex) {
-      LOG.error(ex);
+      LOG.error(ex.toString());
       throw new RuntimeException(ex);
     }
     return getFileContext(defaultAfs, aConf);

@@ -23,8 +23,6 @@ import org.apache.commons.collections.map.CaseInsensitiveMap;
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

@@ -35,6 +33,8 @@ import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;

@@ -71,7 +71,7 @@ import java.util.zip.ZipOutputStream;
 @InterfaceStability.Evolving
 public class FileUtil {
 
-  private static final Log LOG = LogFactory.getLog(FileUtil.class);
+  private static final Logger LOG = LoggerFactory.getLogger(FileUtil.class);
 
   /* The error code is defined in winutils to indicate insufficient
    * privilege to create symbolic links. This value need to keep in

@@ -767,7 +767,7 @@ public class FileUtil {
         entry = tis.getNextTarEntry();
       }
     } finally {
-      IOUtils.cleanup(LOG, tis, inputStream);
+      IOUtils.cleanupWithLogger(LOG, tis, inputStream);
     }
   }
 

@@ -1357,7 +1357,7 @@ public class FileUtil {
       bos = new BufferedOutputStream(fos);
       jos = new JarOutputStream(bos, jarManifest);
     } finally {
-      IOUtils.cleanup(LOG, jos, bos, fos);
+      IOUtils.cleanupWithLogger(LOG, jos, bos, fos);
     }
     String[] jarCp = {classPathJar.getCanonicalPath(),
         unexpandedWildcardClasspath.toString()};

@@ -24,8 +24,6 @@ import java.util.Arrays;
 import java.util.LinkedList;
 
 import org.apache.commons.lang.WordUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;

@@ -39,12 +37,14 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** Provide command line access to a FileSystem. */
 @InterfaceAudience.Private
 public class FsShell extends Configured implements Tool {
 
-  static final Log LOG = LogFactory.getLog(FsShell.class);
+  static final Logger LOG = LoggerFactory.getLogger(FsShell.class);
 
   private static final int MAX_LINE_WIDTH = 80;
 

@@ -22,7 +22,6 @@ import java.util.LinkedList;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.ChmodParser;

@@ -32,6 +31,7 @@ import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.fs.shell.FsCommand;
 import org.apache.hadoop.fs.shell.PathData;
 import org.apache.hadoop.util.Shell;
+import org.slf4j.Logger;
 
 /**
  * This class is the home for file permissions related commands.

@@ -41,7 +41,7 @@ import org.apache.hadoop.util.Shell;
 @InterfaceStability.Unstable
 public class FsShellPermissions extends FsCommand {
 
-  static Log LOG = FsShell.LOG;
+  static final Logger LOG = FsShell.LOG;
 
   /**
    * Register the permission related commands with the factory

@@ -23,18 +23,19 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Set;
 
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 class Globber {
-  public static final Log LOG = LogFactory.getLog(Globber.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(Globber.class.getName());
 
   private final FileSystem fs;
   private final FileContext fc;

@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.fs;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.LineReader;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.EOFException;
 import java.io.FileNotFoundException;

@@ -50,7 +50,8 @@ import java.util.*;
 
 public class HarFileSystem extends FileSystem {
 
-  private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HarFileSystem.class);
 
   public static final String METADATA_CACHE_ENTRIES_KEY = "fs.har.metadatacache.entries";
   public static final int METADATA_CACHE_ENTRIES_DEFAULT = 10;

@@ -1173,7 +1174,7 @@ public class HarFileSystem extends FileSystem {
       LOG.warn("Encountered exception ", ioe);
       throw ioe;
     } finally {
-      IOUtils.cleanup(LOG, lin, in);
+      IOUtils.cleanupWithLogger(LOG, lin, in);
     }
 
     FSDataInputStream aIn = fs.open(archiveIndexPath);

@@ -1198,7 +1199,7 @@ public class HarFileSystem extends FileSystem {
         }
       }
     } finally {
-      IOUtils.cleanup(LOG, aIn);
+      IOUtils.cleanupWithLogger(LOG, aIn);
     }
   }
 }

@@ -23,14 +23,15 @@ import java.util.*;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.commons.logging.*;
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** An implementation of a round-robin scheme for disk allocation for creating
  * files. The way it works is that it is kept track what disk was last

@@ -245,8 +246,8 @@ public class LocalDirAllocator {
 
   private static class AllocatorPerContext {
 
-    private final Log LOG =
-        LogFactory.getLog(AllocatorPerContext.class);
+    private static final Logger LOG =
+        LoggerFactory.getLogger(AllocatorPerContext.class);
 
     private Random dirIndexRandomizer = new Random();
     private String contextCfgItemName;

@@ -19,11 +19,12 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Provides a trash facility which supports pluggable Trash policies.

@@ -34,8 +35,8 @@ import org.apache.hadoop.conf.Configured;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class Trash extends Configured {
-  private static final org.apache.commons.logging.Log LOG =
-      LogFactory.getLog(Trash.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Trash.class);
 
   private TrashPolicy trashPolicy; // configured trash policy instance
 

@@ -30,8 +30,6 @@ import java.text.SimpleDateFormat;
 import java.util.Collection;
 import java.util.Date;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

@@ -41,6 +39,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** Provides a <i>trash</i> feature. Files are moved to a user's trash
  * directory, a subdirectory of their home directory named ".Trash". Files are

@@ -54,8 +54,8 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class TrashPolicyDefault extends TrashPolicy {
-  private static final Log LOG =
-      LogFactory.getLog(TrashPolicyDefault.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TrashPolicyDefault.class);
 
   private static final Path CURRENT = new Path("Current");
 

@@ -25,8 +25,6 @@ import java.net.URI;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.commons.net.ftp.FTP;
 import org.apache.commons.net.ftp.FTPClient;
 import org.apache.commons.net.ftp.FTPFile;

@@ -45,6 +43,8 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>

@@ -56,8 +56,8 @@ import org.apache.hadoop.util.Progressable;
 @InterfaceStability.Stable
 public class FTPFileSystem extends FileSystem {
 
-  public static final Log LOG = LogFactory
-      .getLog(FTPFileSystem.class);
+  public static final Logger LOG = LoggerFactory
+      .getLogger(FTPFileSystem.class);
 
   public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;
 

@@ -24,8 +24,6 @@ import java.io.InvalidObjectException;
 import java.io.ObjectInputValidation;
 import java.io.Serializable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

@@ -33,6 +31,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A class for file/directory permissions.

@@ -41,7 +41,7 @@ import org.apache.hadoop.io.WritableFactory;
 @InterfaceStability.Stable
 public class FsPermission implements Writable, Serializable,
     ObjectInputValidation {
-  private static final Log LOG = LogFactory.getLog(FsPermission.class);
+  private static final Logger LOG = LoggerFactory.getLogger(FsPermission.class);
   private static final long serialVersionUID = 0x2fe08564;
 
   static final WritableFactory FACTORY = new WritableFactory() {

@@ -23,19 +23,20 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.StringUtils;
 
 import com.jcraft.jsch.ChannelSftp;
 import com.jcraft.jsch.JSch;
 import com.jcraft.jsch.JSchException;
 import com.jcraft.jsch.Session;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** Concurrent/Multiple Connections. */
 class SFTPConnectionPool {
 
-  public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SFTPFileSystem.class);
   // Maximum number of allowed live connections. This doesn't mean we cannot
   // have more live connections. It means that when we have more
   // live connections than this threshold, any unused connection will be

@@ -26,8 +26,6 @@ import java.net.URLDecoder;
 import java.util.ArrayList;
 import java.util.Vector;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;

@@ -41,11 +39,14 @@ import com.jcraft.jsch.ChannelSftp;
 import com.jcraft.jsch.ChannelSftp.LsEntry;
 import com.jcraft.jsch.SftpATTRS;
 import com.jcraft.jsch.SftpException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** SFTP FileSystem. */
 public class SFTPFileSystem extends FileSystem {
 
-  public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SFTPFileSystem.class);
 
   private SFTPConnectionPool connectionPool;
   private URI uri;

@@ -27,8 +27,6 @@ import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

@@ -36,6 +34,8 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An abstract class for the execution of a file system command

@@ -59,7 +59,7 @@ abstract public class Command extends Configured {
   private int depth = 0;
   protected ArrayList<Exception> exceptions = new ArrayList<Exception>();
 
-  private static final Log LOG = LogFactory.getLog(Command.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Command.class);
 
   /** allows stdout to be captured if necessary */
   public PrintStream out = System.out;

|
|||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.ArrayBlockingQueue;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
|
@ -288,9 +292,113 @@ class CopyCommands {
|
|||
}
|
||||
|
||||
public static class CopyFromLocal extends Put {
|
||||
private ThreadPoolExecutor executor = null;
|
||||
private int numThreads = 1;
|
||||
|
||||
private static final int MAX_THREADS =
|
||||
Runtime.getRuntime().availableProcessors() * 2;
|
||||
public static final String NAME = "copyFromLocal";
|
||||
public static final String USAGE = Put.USAGE;
|
||||
public static final String DESCRIPTION = "Identical to the -put command.";
|
||||
public static final String USAGE =
|
||||
"[-f] [-p] [-l] [-d] [-t <thread count>] <localsrc> ... <dst>";
|
||||
public static final String DESCRIPTION =
|
||||
"Copy files from the local file system " +
|
||||
"into fs. Copying fails if the file already " +
|
||||
"exists, unless the -f flag is given.\n" +
|
||||
"Flags:\n" +
|
||||
" -p : Preserves access and modification times, ownership and the" +
|
||||
" mode.\n" +
|
||||
" -f : Overwrites the destination if it already exists.\n" +
|
||||
" -t <thread count> : Number of threads to be used, default is 1.\n" +
|
||||
" -l : Allow DataNode to lazily persist the file to disk. Forces" +
|
||||
" replication factor of 1. This flag will result in reduced" +
|
||||
" durability. Use with care.\n" +
|
||||
" -d : Skip creation of temporary file(<dst>._COPYING_).\n";
|
||||
|
||||
private void setNumberThreads(String numberThreadsString) {
|
||||
if (numberThreadsString == null) {
|
||||
numThreads = 1;
|
||||
} else {
|
||||
int parsedValue = Integer.parseInt(numberThreadsString);
|
||||
if (parsedValue <= 1) {
|
||||
numThreads = 1;
|
||||
} else if (parsedValue > MAX_THREADS) {
|
||||
numThreads = MAX_THREADS;
|
||||
} else {
|
||||
numThreads = parsedValue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void processOptions(LinkedList<String> args) throws IOException {
|
||||
CommandFormat cf =
|
||||
new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d");
|
||||
cf.addOptionWithValue("t");
|
||||
cf.parse(args);
|
||||
setNumberThreads(cf.getOptValue("t"));
|
||||
setOverwrite(cf.getOpt("f"));
|
||||
setPreserve(cf.getOpt("p"));
|
||||
setLazyPersist(cf.getOpt("l"));
|
||||
setDirectWrite(cf.getOpt("d"));
|
||||
getRemoteDestination(args);
|
||||
// should have a -r option
|
||||
setRecursive(true);
|
||||
}
|
||||
|
||||
private void copyFile(PathData src, PathData target) throws IOException {
|
||||
if (isPathRecursable(src)) {
|
||||
throw new PathIsDirectoryException(src.toString());
|
||||
}
|
||||
super.copyFileToTarget(src, target);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void copyFileToTarget(PathData src, PathData target)
|
||||
throws IOException {
|
||||
// if number of thread is 1, mimic put and avoid threading overhead
|
||||
if (numThreads == 1) {
|
||||
copyFile(src, target);
|
||||
return;
|
||||
}
|
||||
|
||||
Runnable task = () -> {
|
||||
try {
|
||||
copyFile(src, target);
|
||||
} catch (IOException e) {
|
||||
displayError(e);
|
||||
}
|
||||
};
|
||||
executor.submit(task);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void processArguments(LinkedList<PathData> args)
|
||||
throws IOException {
|
||||
executor = new ThreadPoolExecutor(numThreads, numThreads, 1,
|
||||
TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024),
|
||||
new ThreadPoolExecutor.CallerRunsPolicy());
|
||||
super.processArguments(args);
|
||||
|
||||
// issue the command and then wait for it to finish
|
||||
executor.shutdown();
|
||||
try {
|
||||
executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES);
|
||||
} catch (InterruptedException e) {
|
||||
executor.shutdownNow();
|
||||
displayError(e);
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public int getNumThreads() {
|
||||
return numThreads;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public ThreadPoolExecutor getExecutor() {
|
||||
return executor;
|
||||
}
|
||||
}
|
||||
|
||||
public static class CopyToLocal extends Get {
|
||||
|
|
|
@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
|
|||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.fs.PathIOException;
|
||||
import org.apache.hadoop.fs.PathExistsException;
|
||||
import org.apache.hadoop.fs.shell.CopyCommands.CopyFromLocal;
|
||||
import org.apache.hadoop.fs.shell.CopyCommands.Put;
|
||||
|
||||
/** Various commands for moving files */
|
||||
@InterfaceAudience.Private
|
||||
|
@ -41,7 +41,7 @@ class MoveCommands {
|
|||
/**
|
||||
* Move local files to a remote filesystem
|
||||
*/
|
||||
public static class MoveFromLocal extends CopyFromLocal {
|
||||
public static class MoveFromLocal extends Put {
|
||||
public static final String NAME = "moveFromLocal";
|
||||
public static final String USAGE = "<localsrc> ... <dst>";
|
||||
public static final String DESCRIPTION =
|
||||
|
|
|
@ -26,8 +26,6 @@ import java.util.concurrent.TimeUnit;
|
|||
import java.util.concurrent.locks.Lock;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
@ -47,6 +45,8 @@ import org.apache.zookeeper.KeeperException.Code;
|
|||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -141,7 +141,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
|
|||
@VisibleForTesting
|
||||
protected static final String BREADCRUMB_FILENAME = "ActiveBreadCrumb";
|
||||
|
||||
public static final Log LOG = LogFactory.getLog(ActiveStandbyElector.class);
|
||||
public static final Logger LOG =
|
||||
LoggerFactory.getLogger(ActiveStandbyElector.class);
|
||||
|
||||
private static final int SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE = 1000;
|
||||
|
||||
|
@ -712,7 +713,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
|
|||
}
|
||||
|
||||
private void fatalError(String errorMessage) {
|
||||
LOG.fatal(errorMessage);
|
||||
LOG.error(errorMessage);
|
||||
reset();
|
||||
appClient.notifyFatalError(errorMessage);
|
||||
}
|
||||
|
@ -824,10 +825,10 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
|
|||
createConnection();
|
||||
success = true;
|
||||
} catch(IOException e) {
|
||||
LOG.warn(e);
|
||||
LOG.warn(e.toString());
|
||||
sleepFor(5000);
|
||||
} catch(KeeperException e) {
|
||||
LOG.warn(e);
|
||||
LOG.warn(e.toString());
|
||||
sleepFor(5000);
|
||||
}
|
||||
++connectionRetryCount;
|
||||
|
@ -866,7 +867,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
|
|||
try {
|
||||
tempZk.close();
|
||||
} catch(InterruptedException e) {
|
||||
LOG.warn(e);
|
||||
LOG.warn(e.toString());
|
||||
}
|
||||
zkConnectionState = ConnectionState.TERMINATED;
|
||||
wantToBeInElection = false;
|
||||
|
|
|
@ -19,9 +19,6 @@ package org.apache.hadoop.ha;
|
|||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
@ -32,6 +29,8 @@ import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
|
|||
import org.apache.hadoop.ipc.RPC;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* The FailOverController is responsible for electing an active service
|
||||
|
@ -43,7 +42,8 @@ import com.google.common.base.Preconditions;
|
|||
@InterfaceStability.Evolving
|
||||
public class FailoverController {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(FailoverController.class);
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(FailoverController.class);
|
||||
|
||||
private final int gracefulFenceTimeout;
|
||||
private final int rpcTimeoutToNewActive;
|
||||
|
@ -252,7 +252,7 @@ public class FailoverController {
|
|||
} catch (FailoverFailedException ffe) {
|
||||
msg += ". Failback to " + fromSvc +
|
||||
" failed (" + ffe.getMessage() + ")";
|
||||
LOG.fatal(msg);
|
||||
LOG.error(msg);
|
||||
}
|
||||
}
|
||||
throw new FailoverFailedException(msg, cause);
|
||||
|
|
|
@ -28,8 +28,6 @@ import org.apache.commons.cli.Options;
|
|||
import org.apache.commons.cli.CommandLine;
|
||||
import org.apache.commons.cli.GnuParser;
|
||||
import org.apache.commons.cli.ParseException;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
@ -43,6 +41,8 @@ import org.apache.hadoop.util.ToolRunner;
|
|||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* A command-line tool for making calls in the HAServiceProtocol.
|
||||
|
@ -62,7 +62,7 @@ public abstract class HAAdmin extends Configured implements Tool {
|
|||
* operation, which is why it is not documented in the usage below.
|
||||
*/
|
||||
private static final String FORCEMANUAL = "forcemanual";
|
||||
private static final Log LOG = LogFactory.getLog(HAAdmin.class);
|
||||
private static final Logger LOG = LoggerFactory.getLogger(HAAdmin.class);
|
||||
|
||||
private int rpcTimeoutForChecks = -1;
|
||||
|
||||
|
@ -449,7 +449,7 @@ public abstract class HAAdmin extends Configured implements Tool {
|
|||
|
||||
if (cmdLine.hasOption(FORCEMANUAL)) {
|
||||
if (!confirmForceManual()) {
|
||||
LOG.fatal("Aborted");
|
||||
LOG.error("Aborted");
|
||||
return -1;
|
||||
}
|
||||
// Instruct the NNs to honor this request even if they're
|
||||
|
|
|
@@ -23,8 +23,6 @@ import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -35,6 +33,8 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.util.Daemon;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is a daemon which runs in a loop, periodically heartbeating
@@ -47,7 +47,7 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public class HealthMonitor {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       HealthMonitor.class);
 
   private Daemon daemon;
@@ -283,7 +283,7 @@ public class HealthMonitor {
     setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
       @Override
       public void uncaughtException(Thread t, Throwable e) {
-        LOG.fatal("Health monitor failed", e);
+        LOG.error("Health monitor failed", e);
         enterState(HealthMonitor.State.HEALTH_MONITOR_FAILED);
       }
     });

@@ -22,8 +22,6 @@ import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -31,6 +29,8 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class parses the configured list of fencing methods, and
@@ -61,7 +61,7 @@ public class NodeFencer {
   private static final Pattern HASH_COMMENT_RE =
     Pattern.compile("#.*$");
 
-  private static final Log LOG = LogFactory.getLog(NodeFencer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(NodeFencer.class);
 
   /**
   * Standard fencing methods included with Hadoop.

@@ -25,10 +25,10 @@ import java.io.OutputStreamWriter;
 import java.net.InetSocketAddress;
 import java.nio.charset.StandardCharsets;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Fencer method that uses PowerShell to remotely connect to a machine and kill
@@ -41,7 +41,8 @@ import org.apache.hadoop.util.StringUtils;
  */
 public class PowerShellFencer extends Configured implements FenceMethod {
 
-  private static final Log LOG = LogFactory.getLog(PowerShellFencer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(PowerShellFencer
+      .class);
 
 
   @Override

@@ -21,12 +21,12 @@ import java.io.IOException;
 import java.lang.reflect.Field;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.util.Shell;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Fencing method that runs a shell command. It should be specified
@@ -61,8 +61,7 @@ public class ShellCommandFencer
   private static final String TARGET_PREFIX = "target_";
 
   @VisibleForTesting
-  static Log LOG = LogFactory.getLog(
-      ShellCommandFencer.class);
+  static Logger LOG = LoggerFactory.getLogger(ShellCommandFencer.class);
 
   @Override
   public void checkArgs(String args) throws BadFencingConfigurationException {

@@ -23,8 +23,6 @@ import java.util.Collection;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -32,6 +30,8 @@ import com.jcraft.jsch.ChannelExec;
 import com.jcraft.jsch.JSch;
 import com.jcraft.jsch.JSchException;
 import com.jcraft.jsch.Session;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This fencing implementation sshes to the target node and uses
@@ -58,9 +58,8 @@ import com.jcraft.jsch.Session;
 public class SshFenceByTcpPort extends Configured
   implements FenceMethod {
 
-  static final Log LOG = LogFactory.getLog(
-      SshFenceByTcpPort.class);
+  static final Logger LOG = LoggerFactory.getLogger(SshFenceByTcpPort.class);
 
   static final String CONF_CONNECT_TIMEOUT_KEY =
     "dfs.ha.fencing.ssh.connect-timeout";
   private static final int CONF_CONNECT_TIMEOUT_DEFAULT =
@@ -271,7 +270,7 @@ public class SshFenceByTcpPort extends Configured
    * Adapter from JSch's logger interface to our log4j
    */
   private static class LogAdapter implements com.jcraft.jsch.Logger {
-    static final Log LOG = LogFactory.getLog(
+    static final Logger LOG = LoggerFactory.getLogger(
         SshFenceByTcpPort.class.getName() + ".jsch");
 
     @Override
@@ -284,9 +283,8 @@ public class SshFenceByTcpPort extends Configured
       case com.jcraft.jsch.Logger.WARN:
         return LOG.isWarnEnabled();
       case com.jcraft.jsch.Logger.ERROR:
-        return LOG.isErrorEnabled();
       case com.jcraft.jsch.Logger.FATAL:
-        return LOG.isFatalEnabled();
+        return LOG.isErrorEnabled();
       default:
         return false;
       }
@@ -305,10 +303,8 @@ public class SshFenceByTcpPort extends Configured
         LOG.warn(message);
         break;
       case com.jcraft.jsch.Logger.ERROR:
-        LOG.error(message);
-        break;
       case com.jcraft.jsch.Logger.FATAL:
-        LOG.fatal(message);
+        LOG.error(message);
         break;
       default:
         break;

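Because SLF4J also lacks isFatalEnabled() and fatal(), the LogAdapter above folds JSch's FATAL level into ERROR by letting the switch cases fall through. A self-contained sketch of the same adapter idea (the class and logger name here are hypothetical, not the patch's code):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class JschToSlf4j implements com.jcraft.jsch.Logger {
      private static final Logger LOG = LoggerFactory.getLogger("jsch");

      @Override
      public boolean isEnabled(int level) {
        switch (level) {
        case com.jcraft.jsch.Logger.DEBUG: return LOG.isDebugEnabled();
        case com.jcraft.jsch.Logger.INFO:  return LOG.isInfoEnabled();
        case com.jcraft.jsch.Logger.WARN:  return LOG.isWarnEnabled();
        case com.jcraft.jsch.Logger.ERROR: // fall through
        case com.jcraft.jsch.Logger.FATAL: return LOG.isErrorEnabled();
        default: return false;
        }
      }

      @Override
      public void log(int level, String message) {
        switch (level) {
        case com.jcraft.jsch.Logger.DEBUG: LOG.debug(message); break;
        case com.jcraft.jsch.Logger.INFO:  LOG.info(message);  break;
        case com.jcraft.jsch.Logger.WARN:  LOG.warn(message);  break;
        case com.jcraft.jsch.Logger.ERROR: // fall through
        case com.jcraft.jsch.Logger.FATAL: LOG.error(message); break;
        default: break;
        }
      }
    }
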
@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.ha;
 
+import org.slf4j.Logger;
+
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
 
-import org.apache.commons.logging.Log;
-
 /**
  * Class responsible for pumping the streams of the subprocess
  * out to log4j. stderr is pumped to WARN level and stdout is
@@ -35,7 +35,7 @@ class StreamPumper {
     STDOUT, STDERR;
   }
 
-  private final Log log;
+  private final Logger log;
 
   final Thread thread;
   final String logPrefix;
@@ -43,7 +43,7 @@ class StreamPumper {
   private final InputStream stream;
   private boolean started = false;
 
-  StreamPumper(final Log log, final String logPrefix,
+  StreamPumper(final Logger log, final String logPrefix,
       final InputStream stream, final StreamType type) {
     this.log = log;
     this.logPrefix = logPrefix;

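Changing the StreamPumper field and constructor parameter from Log to Logger also gives callers SLF4J's parameterized logging, which defers message construction until the level is known to be enabled. A hedged illustration (the class and method shown are not part of the actual patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class PumpLoggingExample {
      private final Logger log = LoggerFactory.getLogger(PumpLoggingExample.class);

      void logLine(String logPrefix, String line) {
        // The {} placeholders are formatted only if WARN is enabled,
        // avoiding string concatenation on the hot path.
        log.warn("{} - {}", logPrefix, line);
      }
    }
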
@@ -28,8 +28,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -56,11 +54,13 @@ import org.apache.zookeeper.data.ACL;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.LimitedPrivate("HDFS")
 public abstract class ZKFailoverController {
 
-  static final Log LOG = LogFactory.getLog(ZKFailoverController.class);
+  static final Logger LOG = LoggerFactory.getLogger(ZKFailoverController.class);
 
   public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
   private static final String ZK_SESSION_TIMEOUT_KEY = "ha.zookeeper.session-timeout.ms";
@@ -162,7 +162,7 @@ public abstract class ZKFailoverController {
 
   public int run(final String[] args) throws Exception {
     if (!localTarget.isAutoFailoverEnabled()) {
-      LOG.fatal("Automatic failover is not enabled for " + localTarget + "." +
+      LOG.error("Automatic failover is not enabled for " + localTarget + "." +
           " Please ensure that automatic failover is enabled in the " +
           "configuration before running the ZK failover controller.");
       return ERR_CODE_AUTO_FAILOVER_NOT_ENABLED;
@@ -184,7 +184,7 @@ public abstract class ZKFailoverController {
         }
       });
     } catch (RuntimeException rte) {
-      LOG.fatal("The failover controller encounters runtime error: " + rte);
+      LOG.error("The failover controller encounters runtime error: " + rte);
       throw (Exception)rte.getCause();
     }
   }
@@ -195,7 +195,7 @@ public abstract class ZKFailoverController {
     try {
       initZK();
     } catch (KeeperException ke) {
-      LOG.fatal("Unable to start failover controller. Unable to connect "
+      LOG.error("Unable to start failover controller. Unable to connect "
           + "to ZooKeeper quorum at " + zkQuorum + ". Please check the "
           + "configured value for " + ZK_QUORUM_KEY + " and ensure that "
           + "ZooKeeper is running.");
@@ -221,7 +221,7 @@ public abstract class ZKFailoverController {
     }
 
     if (!elector.parentZNodeExists()) {
-      LOG.fatal("Unable to start failover controller. "
+      LOG.error("Unable to start failover controller. "
           + "Parent znode does not exist.\n"
           + "Run with -formatZK flag to initialize ZooKeeper.");
       return ERR_CODE_NO_PARENT_ZNODE;
@@ -230,7 +230,7 @@ public abstract class ZKFailoverController {
     try {
       localTarget.checkFencingConfigured();
     } catch (BadFencingConfigurationException e) {
-      LOG.fatal("Fencing is not configured for " + localTarget + ".\n" +
+      LOG.error("Fencing is not configured for " + localTarget + ".\n" +
           "You must configure a fencing method before using automatic " +
           "failover.", e);
       return ERR_CODE_NO_FENCER;
@@ -376,7 +376,7 @@ public abstract class ZKFailoverController {
   }
 
   private synchronized void fatalError(String err) {
-    LOG.fatal("Fatal error occurred:" + err);
+    LOG.error("Fatal error occurred:" + err);
     fatalError = err;
     notifyAll();
   }
@@ -395,7 +395,7 @@ public abstract class ZKFailoverController {
 
     } catch (Throwable t) {
       String msg = "Couldn't make " + localTarget + " active";
-      LOG.fatal(msg, t);
+      LOG.error(msg, t);
 
       recordActiveAttempt(new ActiveAttemptRecord(false, msg + "\n" +
           StringUtils.stringifyException(t)));

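One caveat visible in the hunk at line 184 above: concatenating the exception into the message ("... + rte") records only rte.toString(). SLF4J's two-argument overload would preserve the stack trace; a small sketch of the difference (the class name is hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class ThrowableLoggingExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(ThrowableLoggingExample.class);

      void report(RuntimeException rte) {
        // As the patch does: message only, stack trace lost.
        LOG.error("The failover controller encounters runtime error: " + rte);
        // Alternative: pass the Throwable as the final argument.
        LOG.error("The failover controller encounters runtime error", rte);
      }
    }
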
@@ -19,8 +19,6 @@ package org.apache.hadoop.ha.protocolPB;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -42,6 +40,8 @@ import org.apache.hadoop.ipc.RPC;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is used on the server side. Calls come across the wire for the
@@ -61,7 +61,7 @@ public class HAServiceProtocolServerSideTranslatorPB implements
       TransitionToActiveResponseProto.newBuilder().build();
   private static final TransitionToStandbyResponseProto TRANSITION_TO_STANDBY_RESP =
       TransitionToStandbyResponseProto.newBuilder().build();
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       HAServiceProtocolServerSideTranslatorPB.class);
 
   public HAServiceProtocolServerSideTranslatorPB(HAServiceProtocol server) {

@@ -53,8 +53,6 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.sun.jersey.spi.container.servlet.ServletContainer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -103,6 +101,8 @@ import org.eclipse.jetty.util.MultiException;
 import org.eclipse.jetty.util.ssl.SslContextFactory;
 import org.eclipse.jetty.util.thread.QueuedThreadPool;
 import org.eclipse.jetty.webapp.WebAppContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Create a Jetty embedded server to answer http requests. The primary goal is
@@ -117,7 +117,7 @@ import org.eclipse.jetty.webapp.WebAppContext;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public final class HttpServer2 implements FilterContainer {
-  public static final Log LOG = LogFactory.getLog(HttpServer2.class);
+  public static final Logger LOG = LoggerFactory.getLogger(HttpServer2.class);
 
   public static final String HTTP_SCHEME = "http";
   public static final String HTTPS_SCHEME = "https";

@@ -29,11 +29,11 @@ import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.servlet.Filter;
 
@@ -47,7 +47,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_S
 public class StaticUserWebFilter extends FilterInitializer {
   static final String DEPRECATED_UGI_KEY = "dfs.web.ugi";
 
-  private static final Log LOG = LogFactory.getLog(StaticUserWebFilter.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(StaticUserWebFilter.class);
 
   static class User implements Principal {
     private final String name;

@@ -22,8 +22,6 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -36,6 +34,8 @@ import org.apache.hadoop.util.bloom.DynamicBloomFilter;
 import org.apache.hadoop.util.bloom.Filter;
 import org.apache.hadoop.util.bloom.Key;
 import org.apache.hadoop.util.hash.Hash;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_KEY;
@@ -52,7 +52,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOO
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class BloomMapFile {
-  private static final Log LOG = LogFactory.getLog(BloomMapFile.class);
+  private static final Logger LOG = LoggerFactory.getLogger(BloomMapFile.class);
   public static final String BLOOM_FILE_NAME = "bloom";
   public static final int HASH_COUNT = 5;
 

@@ -22,11 +22,10 @@ import java.nio.ByteOrder;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import sun.misc.Unsafe;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import com.google.common.primitives.Longs;
 import com.google.common.primitives.UnsignedBytes;
 
@@ -36,7 +35,7 @@ import com.google.common.primitives.UnsignedBytes;
  * class to be able to compare arrays that start at non-zero offsets.
  */
 abstract class FastByteComparisons {
-  static final Log LOG = LogFactory.getLog(FastByteComparisons.class);
+  static final Logger LOG = LoggerFactory.getLogger(FastByteComparisons.class);
 
   /**
   * Lexicographically compare two byte arrays.

@@ -32,13 +32,12 @@ import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -49,7 +48,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class IOUtils {
-  public static final Log LOG = LogFactory.getLog(IOUtils.class);
+  public static final Logger LOG = LoggerFactory.getLogger(IOUtils.class);
 
   /**
   * Copies from one stream to another.

@@ -23,8 +23,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -37,6 +35,8 @@ import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.util.Options;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_KEY;
@@ -60,7 +60,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SK
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class MapFile {
-  private static final Log LOG = LogFactory.getLog(MapFile.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MapFile.class);
 
   /** The name of the index file. */
   public static final String INDEX_FILE_NAME = "index";
@@ -1002,7 +1002,7 @@ public class MapFile {
       while (reader.next(key, value)) // copy all entries
         writer.append(key, value);
     } finally {
-      IOUtils.cleanup(LOG, writer, reader);
+      IOUtils.cleanupWithLogger(LOG, writer, reader);
     }
   }
 }

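IOUtils.cleanup takes a commons-logging Log, so call sites that now hold an org.slf4j.Logger switch to cleanupWithLogger, which keeps the same contract: close each Closeable and log, rather than propagate, any failure. A rough sketch of that contract (not the actual Hadoop implementation):

    import java.io.Closeable;
    import java.io.IOException;
    import org.slf4j.Logger;

    final class CleanupSketch {
      static void cleanupWithLogger(Logger log, Closeable... closeables) {
        for (Closeable c : closeables) {
          if (c == null) {
            continue;
          }
          try {
            c.close();
          } catch (IOException e) {
            // Swallow the exception; surface it at DEBUG only.
            if (log != null && log.isDebugEnabled()) {
              log.debug("Exception in closing " + c, e);
            }
          }
        }
      }
    }
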
@@ -23,8 +23,6 @@ import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -33,6 +31,8 @@ import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_WILLNEED;
 
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manages a pool of threads which can issue readahead requests on file descriptors.
@@ -40,7 +40,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class ReadaheadPool {
-  static final Log LOG = LogFactory.getLog(ReadaheadPool.class);
+  static final Logger LOG = LoggerFactory.getLogger(ReadaheadPool.class);
   private static final int POOL_SIZE = 4;
   private static final int MAX_POOL_SIZE = 16;
   private static final int CAPACITY = 1024;

@@ -25,7 +25,6 @@ import java.rmi.server.UID;
 import java.security.MessageDigest;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.*;
 import org.apache.hadoop.util.Options;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.Options.CreateOpts;
@@ -51,6 +50,8 @@ import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.MergeSort;
 import org.apache.hadoop.util.PriorityQueue;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -203,7 +204,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class SequenceFile {
-  private static final Log LOG = LogFactory.getLog(SequenceFile.class);
+  private static final Logger LOG = LoggerFactory.getLogger(SequenceFile.class);
 
   private SequenceFile() {} // no public ctor
 
@@ -1923,7 +1924,7 @@ public class SequenceFile {
         succeeded = true;
       } finally {
         if (!succeeded) {
-          IOUtils.cleanup(LOG, this.in);
+          IOUtils.cleanupWithLogger(LOG, this.in);
         }
       }
     }

@@ -25,9 +25,10 @@ import java.io.UTFDataFormatException;
 
 import org.apache.hadoop.util.StringUtils;
 
-import org.apache.commons.logging.*;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** A WritableComparable for strings that uses the UTF8 encoding.
  *
@@ -42,7 +43,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Stable
 public class UTF8 implements WritableComparable<UTF8> {
-  private static final Log LOG= LogFactory.getLog(UTF8.class);
+  private static final Logger LOG= LoggerFactory.getLogger(UTF8.class);
   private static final DataInputBuffer IBUF = new DataInputBuffer();
 
   private static final ThreadLocal<DataOutputBuffer> OBUF_FACTORY =

@@ -23,8 +23,6 @@ import java.util.Set;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +31,8 @@ import org.apache.hadoop.util.ReflectionUtils;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A global compressor/decompressor pool used to save and reuse
@@ -41,7 +41,7 @@ import com.google.common.cache.LoadingCache;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class CodecPool {
-  private static final Log LOG = LogFactory.getLog(CodecPool.class);
+  private static final Logger LOG = LoggerFactory.getLogger(CodecPool.class);
 
   /**
   * A global compressor pool used to save the expensive

@@ -19,8 +19,6 @@ package org.apache.hadoop.io.compress;
 
 import java.util.*;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -28,6 +26,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A factory that will find the correct codec for a given filename.
@@ -36,8 +36,8 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceStability.Evolving
 public class CompressionCodecFactory {
 
-  public static final Log LOG =
-      LogFactory.getLog(CompressionCodecFactory.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(CompressionCodecFactory.class.getName());
 
   private static final ServiceLoader<CompressionCodec> CODEC_PROVIDERS =
     ServiceLoader.load(CompressionCodec.class);

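Both factory styles seen in this patch resolve to the same logger: LoggerFactory.getLogger(Class) simply uses the class's fully qualified name, so passing CompressionCodecFactory.class.getName() is equivalent to passing the class itself. A tiny illustration (class name hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LoggerNamingExample {
      // These two loggers share the same name, and thus the same configuration.
      static final Logger BY_NAME =
          LoggerFactory.getLogger(LoggerNamingExample.class.getName());
      static final Logger BY_CLASS =
          LoggerFactory.getLogger(LoggerNamingExample.class);
    }
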
@@ -22,14 +22,14 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -37,7 +37,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class DefaultCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
-  private static final Log LOG = LogFactory.getLog(DefaultCodec.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DefaultCodec.class);
 
   Configuration conf;
 

@@ -24,9 +24,8 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Compressor} based on the popular
@@ -42,7 +41,8 @@ public class Bzip2Compressor implements Compressor {
   static final int DEFAULT_BLOCK_SIZE = 9;
   static final int DEFAULT_WORK_FACTOR = 30;
 
-  private static final Log LOG = LogFactory.getLog(Bzip2Compressor.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Bzip2Compressor.class);
 
   private long stream;
   private int blockSize;

@@ -23,9 +23,8 @@ import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.io.compress.Decompressor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Decompressor} based on the popular
@@ -36,7 +35,8 @@ import org.apache.commons.logging.LogFactory;
 public class Bzip2Decompressor implements Decompressor {
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
 
-  private static final Log LOG = LogFactory.getLog(Bzip2Decompressor.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Bzip2Decompressor.class);
 
   private long stream;
   private boolean conserveMemory;

@@ -18,12 +18,12 @@
 
 package org.apache.hadoop.io.compress.bzip2;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A collection of factories to create the right
@@ -31,7 +31,7 @@ import org.apache.hadoop.util.NativeCodeLoader;
  *
  */
 public class Bzip2Factory {
-  private static final Log LOG = LogFactory.getLog(Bzip2Factory.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Bzip2Factory.class);
 
   private static String bzip2LibraryName = "";
   private static boolean nativeBzip2Loaded;

@@ -22,19 +22,19 @@ import java.io.IOException;
 import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Compressor} based on the lz4 compression algorithm.
  * http://code.google.com/p/lz4/
  */
 public class Lz4Compressor implements Compressor {
-  private static final Log LOG =
-      LogFactory.getLog(Lz4Compressor.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Lz4Compressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
   private int directBufferSize;

@@ -22,18 +22,18 @@ import java.io.IOException;
 import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Decompressor} based on the lz4 compression algorithm.
  * http://code.google.com/p/lz4/
  */
 public class Lz4Decompressor implements Decompressor {
-  private static final Log LOG =
-      LogFactory.getLog(Lz4Compressor.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Lz4Compressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
   private int directBufferSize;

@@ -22,19 +22,19 @@ import java.io.IOException;
 import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Compressor} based on the snappy compression algorithm.
  * http://code.google.com/p/snappy/
  */
 public class SnappyCompressor implements Compressor {
-  private static final Log LOG =
-      LogFactory.getLog(SnappyCompressor.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SnappyCompressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
   private int directBufferSize;

@@ -22,19 +22,19 @@ import java.io.IOException;
 import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.io.compress.DirectDecompressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Decompressor} based on the snappy compression algorithm.
  * http://code.google.com/p/snappy/
  */
 public class SnappyDecompressor implements Decompressor {
-  private static final Log LOG =
-      LogFactory.getLog(SnappyDecompressor.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SnappyDecompressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
   private int directBufferSize;

@@ -23,9 +23,8 @@ import java.util.zip.Deflater;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A wrapper around java.util.zip.Deflater to make it conform
@@ -34,7 +33,8 @@ import org.apache.commons.logging.LogFactory;
  */
 public class BuiltInZlibDeflater extends Deflater implements Compressor {
 
-  private static final Log LOG = LogFactory.getLog(BuiltInZlibDeflater.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BuiltInZlibDeflater.class);
 
   public BuiltInZlibDeflater(int level, boolean nowrap) {
     super(level, nowrap);

@@ -25,9 +25,8 @@ import java.nio.ByteBuffer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.util.NativeCodeLoader;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Compressor} based on the popular
@@ -37,7 +36,8 @@ import org.apache.commons.logging.LogFactory;
  */
 public class ZlibCompressor implements Compressor {
 
-  private static final Log LOG = LogFactory.getLog(ZlibCompressor.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ZlibCompressor.class);
 
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
 

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.io.compress.zlib;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.Decompressor;
@@ -29,6 +27,8 @@ import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A collection of factories to create the right
@@ -36,8 +36,8 @@ import com.google.common.annotations.VisibleForTesting;
  *
  */
 public class ZlibFactory {
-  private static final Log LOG =
-      LogFactory.getLog(ZlibFactory.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ZlibFactory.class);
 
   private static boolean nativeZlibLoaded = false;
 

@@ -18,8 +18,6 @@
 package org.apache.hadoop.io.erasurecode;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.erasurecode.codec.ErasureCodec;
@@ -31,6 +29,8 @@ import org.apache.hadoop.io.erasurecode.coder.ErasureEncoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
@@ -48,7 +48,7 @@ import java.lang.reflect.InvocationTargetException;
 @InterfaceAudience.Private
 public final class CodecUtil {
 
-  private static final Log LOG = LogFactory.getLog(CodecUtil.class);
+  private static final Logger LOG = LoggerFactory.getLogger(CodecUtil.class);
 
   public static final String IO_ERASURECODE_CODEC = "io.erasurecode.codec.";
 

@@ -18,17 +18,17 @@
 
 package org.apache.hadoop.io.erasurecode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Erasure code native libraries (for now, Intel ISA-L) related utilities.
 */
 public final class ErasureCodeNative {
 
-  private static final Log LOG =
-      LogFactory.getLog(ErasureCodeNative.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ErasureCodeNative.class.getName());
 
   /**
   * The reason why ISA-L library is not available, or null if it is available.

@@ -30,8 +30,6 @@ import java.util.Arrays;
 import java.util.Map;
 import java.util.TreeMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -43,6 +41,8 @@ import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarComparator;
 import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarLong;
 import org.apache.hadoop.io.file.tfile.Compression.Algorithm;
 import org.apache.hadoop.io.file.tfile.Utils.Version;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Block Compressed file, the underlying physical storage layer for TFile.
@@ -54,7 +54,7 @@ final class BCFile {
   // the current version of BCFile impl, increment them (major or minor) made
   // enough changes
   static final Version API_VERSION = new Version((short) 1, (short) 0);
-  static final Log LOG = LogFactory.getLog(BCFile.class);
+  static final Logger LOG = LoggerFactory.getLogger(BCFile.class);
 
   /**
   * Prevent the instantiation of BCFile objects.

@@ -24,8 +24,6 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.CodecPool;
 import org.apache.hadoop.io.compress.CompressionCodec;
@@ -35,6 +33,8 @@ import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY;
@@ -44,7 +44,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
  * Compression related stuff.
 */
 final class Compression {
-  static final Log LOG = LogFactory.getLog(Compression.class);
+  static final Logger LOG = LoggerFactory.getLogger(Compression.class);
 
   /**
   * Prevent the instantiation of class.

@@ -29,8 +29,6 @@ import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Comparator;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -51,6 +49,8 @@ import org.apache.hadoop.io.file.tfile.CompareUtils.BytesComparator;
 import org.apache.hadoop.io.file.tfile.CompareUtils.MemcmpRawComparator;
 import org.apache.hadoop.io.file.tfile.Utils.Version;
 import org.apache.hadoop.io.serializer.JavaSerializationComparator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A TFile is a container of key-value pairs. Both keys and values are type-less
@@ -131,7 +131,7 @@ import org.apache.hadoop.io.serializer.JavaSerializationComparator;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class TFile {
-  static final Log LOG = LogFactory.getLog(TFile.class);
+  static final Logger LOG = LoggerFactory.getLogger(TFile.class);
 
   private static final String CHUNK_BUF_SIZE_ATTR = "tfile.io.chunk.size";
   private static final String FS_INPUT_BUF_SIZE_ATTR =
@@ -335,7 +335,7 @@ public class TFile {
         writerBCF.close();
       }
     } finally {
-      IOUtils.cleanup(LOG, blkAppender, writerBCF);
+      IOUtils.cleanupWithLogger(LOG, blkAppender, writerBCF);
       blkAppender = null;
       writerBCF = null;
       state = State.CLOSED;

@@ -25,8 +25,6 @@ import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -36,12 +34,14 @@ import org.apache.hadoop.io.file.tfile.BCFile.BlockRegion;
 import org.apache.hadoop.io.file.tfile.BCFile.MetaIndexEntry;
 import org.apache.hadoop.io.file.tfile.TFile.TFileIndexEntry;
 import org.apache.hadoop.io.file.tfile.Utils.Version;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Dumping the information of a TFile.
 */
 class TFileDumper {
-  static final Log LOG = LogFactory.getLog(TFileDumper.class);
+  static final Logger LOG = LoggerFactory.getLogger(TFileDumper.class);
 
   private TFileDumper() {
     // namespace object not constructable.
@@ -290,7 +290,7 @@ class TFileDumper {
         }
       }
     } finally {
-      IOUtils.cleanup(LOG, reader, fsdis);
+      IOUtils.cleanupWithLogger(LOG, reader, fsdis);
     }
   }
 }

@@ -40,9 +40,9 @@ import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.PerformanceAdvisory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import sun.misc.Unsafe;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -98,7 +98,7 @@ public class NativeIO {
        write. */
     public static int SYNC_FILE_RANGE_WAIT_AFTER = 4;
 
-    private static final Log LOG = LogFactory.getLog(NativeIO.class);
+    private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class);
 
     // Set to true via JNI if possible
     public static boolean fadvisePossible = false;
@@ -634,7 +634,7 @@ public class NativeIO {
     }
   }
 
-  private static final Log LOG = LogFactory.getLog(NativeIO.class);
+  private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class);
 
   private static boolean nativeLoaded = false;
 
@@ -940,10 +940,10 @@ public class NativeIO {
         position += transferred;
       }
     } finally {
-      IOUtils.cleanup(LOG, output);
-      IOUtils.cleanup(LOG, fos);
-      IOUtils.cleanup(LOG, input);
-      IOUtils.cleanup(LOG, fis);
+      IOUtils.cleanupWithLogger(LOG, output);
+      IOUtils.cleanupWithLogger(LOG, fos);
+      IOUtils.cleanupWithLogger(LOG, input);
+      IOUtils.cleanupWithLogger(LOG, fis);
     }
   }
 }

@@ -22,10 +22,10 @@ import java.io.IOException;
 import java.io.FileDescriptor;
 
 import org.apache.commons.lang.SystemUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A factory for creating shared file descriptors inside a given directory.
@@ -45,7 +45,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class SharedFileDescriptorFactory {
-  public static final Log LOG = LogFactory.getLog(SharedFileDescriptorFactory.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SharedFileDescriptorFactory.class);
   private final String prefix;
   private final String path;
 

@@ -32,8 +32,6 @@ import java.util.Map.Entry;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -41,6 +39,8 @@ import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>
@@ -49,7 +49,7 @@ import com.google.common.annotations.VisibleForTesting;
 */
 public class RetryPolicies {
 
-  public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
+  public static final Logger LOG = LoggerFactory.getLogger(RetryPolicies.class);
 
   /**
   * <p>

@@ -19,17 +19,17 @@ package org.apache.hadoop.io.retry;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.retry.RetryPolicies.MultipleLinearRandomRetry;
 import org.apache.hadoop.ipc.RemoteException;
 
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.ipc.RetriableException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class RetryUtils {
-  public static final Log LOG = LogFactory.getLog(RetryUtils.class);
+  public static final Logger LOG = LoggerFactory.getLogger(RetryUtils.class);
 
   /**
   * Return the default retry policy set in conf.

@@ -21,8 +21,6 @@ package org.apache.hadoop.io.serializer;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -31,6 +29,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.serializer.avro.AvroReflectSerialization;
 import org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>
@@ -41,8 +41,8 @@ import org.apache.hadoop.util.ReflectionUtils;
 @InterfaceStability.Evolving
 public class SerializationFactory extends Configured {
 
-  static final Log LOG =
-      LogFactory.getLog(SerializationFactory.class.getName());
+  static final Logger LOG =
+      LoggerFactory.getLogger(SerializationFactory.class.getName());
 
   private List<Serialization<?>> serializations = new ArrayList<Serialization<?>>();
 

@@ -28,20 +28,21 @@ import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Abstracts queue operations for different blocking queues.
 */
 public class CallQueueManager<E extends Schedulable>
     extends AbstractQueue<E> implements BlockingQueue<E> {
-  public static final Log LOG = LogFactory.getLog(CallQueueManager.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(CallQueueManager.class);
   // Number of checkpoints for empty queue.
   private static final int CHECKPOINT_NUM = 20;
   // Interval to check empty queue.

@@ -21,8 +21,6 @@ package org.apache.hadoop.ipc;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -57,6 +55,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.AsyncGet;
 import org.apache.htrace.core.Span;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.net.SocketFactory;
 import javax.security.sasl.Sasl;
@@ -84,7 +84,7 @@ import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
 @InterfaceStability.Evolving
 public class Client implements AutoCloseable {
 
-  public static final Log LOG = LogFactory.getLog(Client.class);
+  public static final Logger LOG = LoggerFactory.getLogger(Client.class);
 
   /** A counter for generating call IDs. */
   private static final AtomicInteger callIdCounter = new AtomicInteger();

@@ -33,11 +33,11 @@ import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.NotImplementedException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
 import org.apache.hadoop.metrics2.util.MBeans;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A queue with multiple levels for each priority.
@@ -50,7 +50,7 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
   public static final String IPC_CALLQUEUE_PRIORITY_LEVELS_KEY =
     "faircallqueue.priority-levels";
 
-  public static final Log LOG = LogFactory.getLog(FairCallQueue.class);
+  public static final Logger LOG = LoggerFactory.getLogger(FairCallQueue.class);
 
   /* The queues */
   private final ArrayList<BlockingQueue<E>> queues;

@@ -21,8 +21,6 @@ package org.apache.hadoop.ipc;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.*;
 import com.google.protobuf.Descriptors.MethodDescriptor;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -39,6 +37,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.AsyncGet;
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.net.SocketFactory;
 import java.io.IOException;
@@ -55,7 +55,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
 */
 @InterfaceStability.Evolving
 public class ProtobufRpcEngine implements RpcEngine {
-  public static final Log LOG = LogFactory.getLog(ProtobufRpcEngine.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(ProtobufRpcEngine.class);
   private static final ThreadLocal<AsyncGet<Message, Exception>>
       ASYNC_RETURN_MESSAGE = new ThreadLocal<>();
 

@ -38,8 +38,6 @@ import java.util.concurrent.atomic.AtomicBoolean;

import javax.net.SocketFactory;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

@ -60,6 +58,8 @@ import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time;

import com.google.protobuf.BlockingService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** A simple RPC mechanism.
*

@ -110,7 +110,7 @@ public class RPC {
Writable rpcRequest, long receiveTime) throws Exception ;
}

static final Log LOG = LogFactory.getLog(RPC.class);
static final Logger LOG = LoggerFactory.getLogger(RPC.class);

/**
* Get all superInterfaces that extend VersionedProtocol

@ -24,9 +24,9 @@ import com.google.common.base.Joiner;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Used to registry custom methods to refresh at runtime.

@ -34,7 +34,8 @@ import org.apache.hadoop.classification.InterfaceStability;
*/
@InterfaceStability.Unstable
public class RefreshRegistry {
public static final Log LOG = LogFactory.getLog(RefreshRegistry.class);
public static final Logger LOG =
LoggerFactory.getLogger(RefreshRegistry.class);

// Used to hold singleton instance
private static class RegistryHolder {

@ -22,8 +22,6 @@ import java.util.Arrays;
import java.util.UUID;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.metrics.RetryCacheMetrics;
import org.apache.hadoop.util.LightWeightCache;

@ -32,6 +30,8 @@ import org.apache.hadoop.util.LightWeightGSet.LinkedElement;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Maintains a cache of non-idempotent requests that have been successfully

@ -44,7 +44,7 @@ import com.google.common.base.Preconditions;
*/
@InterfaceAudience.Private
public class RetryCache {
public static final Log LOG = LogFactory.getLog(RetryCache.class);
public static final Logger LOG = LoggerFactory.getLogger(RetryCache.class);
private final RetryCacheMetrics retryCacheMetrics;
private static final int MAX_CAPACITY = 16;

@ -70,8 +70,6 @@ import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;

@ -125,6 +123,8 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ByteString;
import com.google.protobuf.CodedOutputStream;
import com.google.protobuf.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** An abstract IPC service. IPC calls take a single {@link Writable} as a
* parameter, and return a {@link Writable} as their value. A service runs on

@ -293,9 +293,9 @@ public abstract class Server {
}

public static final Log LOG = LogFactory.getLog(Server.class);
public static final Log AUDITLOG =
LogFactory.getLog("SecurityLogger."+Server.class.getName());
public static final Logger LOG = LoggerFactory.getLogger(Server.class);
public static final Logger AUDITLOG =
LoggerFactory.getLogger("SecurityLogger."+Server.class.getName());
private static final String AUTH_FAILED_FOR = "Auth failed for ";
private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for ";

@ -1113,7 +1113,7 @@ public abstract class Server {
} catch (IOException ex) {
LOG.error("Error in Reader", ex);
} catch (Throwable re) {
LOG.fatal("Bug in read selector!", re);
LOG.error("Bug in read selector!", re);
ExitUtil.terminate(1, "Bug in read selector!");
}
}

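The level change in the hunk above is forced by the API rather than by policy: commons-logging's Log exposes fatal(), while SLF4J's Logger tops out at error(). A short sketch of the resulting idiom (the method name is hypothetical; LOG and ExitUtil are as in the surrounding code):

// SLF4J has no fatal level, so former LOG.fatal() call sites log at
// error and keep the fatal-ness encoded in the explicit process exit.
void handleSelectorBug(Throwable re) {
  LOG.error("Bug in read selector!", re);
  ExitUtil.terminate(1, "Bug in read selector!");
}
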
@ -2692,7 +2692,7 @@ public abstract class Server {
}
} finally {
CurCall.set(null);
IOUtils.cleanup(LOG, traceScope);
IOUtils.cleanupWithLogger(LOG, traceScope);
}
}
LOG.debug(Thread.currentThread().getName() + ": exiting");

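The cleanup call changes because Hadoop's IOUtils.cleanup(Log, Closeable...) is typed against commons-logging; once LOG becomes an org.slf4j.Logger, the Logger-typed overload cleanupWithLogger is needed, with the same quiet-close behavior. A minimal sketch (class and method names hypothetical):

import java.io.Closeable;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class CleanupSketch {
  private static final Logger LOG = LoggerFactory.getLogger(CleanupSketch.class);

  void closeQuietly(Closeable resource) {
    // Closes the resource, logging rather than throwing any IOException.
    IOUtils.cleanupWithLogger(LOG, resource);
  }
}
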
@ -2701,7 +2701,7 @@ public abstract class Server {
}

@VisibleForTesting
void logException(Log logger, Throwable e, Call call) {
void logException(Logger logger, Throwable e, Call call) {
if (exceptionsHandler.isSuppressedLog(e.getClass())) {
return; // Log nothing.
}

@ -20,9 +20,9 @@ package org.apache.hadoop.ipc;

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Determines which queue to start reading from, occasionally drawing from

@ -43,8 +43,8 @@ public class WeightedRoundRobinMultiplexer implements RpcMultiplexer {
public static final String IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY =
"faircallqueue.multiplexer.weights";

public static final Log LOG =
LogFactory.getLog(WeightedRoundRobinMultiplexer.class);
public static final Logger LOG =
LoggerFactory.getLogger(WeightedRoundRobinMultiplexer.class);

private final int numQueues; // The number of queues under our provisioning

@ -28,8 +28,6 @@ import java.util.concurrent.atomic.AtomicBoolean;

import javax.net.SocketFactory;

import org.apache.commons.logging.*;

import org.apache.hadoop.io.*;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client.ConnectionId;

@ -43,12 +41,14 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** An RpcEngine implementation for Writable data. */
@InterfaceStability.Evolving
@Deprecated
public class WritableRpcEngine implements RpcEngine {
private static final Log LOG = LogFactory.getLog(RPC.class);
private static final Logger LOG = LoggerFactory.getLogger(RPC.class);

//writableRpcVersion should be updated if there is a change
//in format of the rpc messages.

@ -17,8 +17,6 @@
*/
package org.apache.hadoop.ipc.metrics;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RetryCache;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.annotation.Metric;

@ -26,6 +24,8 @@ import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* This class is for maintaining the various RetryCache-related statistics

@ -35,7 +35,7 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
@Metrics(about="Aggregate RetryCache metrics", context="rpc")
public class RetryCacheMetrics {

static final Log LOG = LogFactory.getLog(RetryCacheMetrics.class);
static final Logger LOG = LoggerFactory.getLogger(RetryCacheMetrics.class);
final MetricsRegistry registry;
final String name;

@ -17,14 +17,14 @@
*/
package org.apache.hadoop.ipc.metrics;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* This class is for maintaining RPC method related statistics

@ -37,7 +37,7 @@ public class RpcDetailedMetrics {
@Metric MutableRatesWithAggregation rates;
@Metric MutableRatesWithAggregation deferredRpcRates;

static final Log LOG = LogFactory.getLog(RpcDetailedMetrics.class);
static final Logger LOG = LoggerFactory.getLogger(RpcDetailedMetrics.class);
final MetricsRegistry registry;
final String name;

@ -45,7 +45,7 @@ public class RpcDetailedMetrics {
name = "RpcDetailedActivityForPort"+ port;
registry = new MetricsRegistry("rpcdetailed")
.tag("port", "RPC port", String.valueOf(port));
LOG.debug(registry.info());
LOG.debug(registry.info().toString());
}

public String name() { return name; }

@ -17,8 +17,6 @@
*/
package org.apache.hadoop.ipc.metrics;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.classification.InterfaceAudience;

@ -31,6 +29,8 @@ import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* This class is for maintaining the various RPC statistics

@ -40,7 +40,7 @@ import org.apache.hadoop.metrics2.lib.MutableRate;
@Metrics(about="Aggregate RPC metrics", context="rpc")
public class RpcMetrics {

static final Log LOG = LogFactory.getLog(RpcMetrics.class);
static final Logger LOG = LoggerFactory.getLogger(RpcMetrics.class);
final Server server;
final MetricsRegistry registry;
final String name;

@ -19,9 +19,9 @@ package org.apache.hadoop.jmx;

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.http.HttpServer2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;

@ -116,7 +116,8 @@ import java.util.Set;
*
*/
public class JMXJsonServlet extends HttpServlet {
private static final Log LOG = LogFactory.getLog(JMXJsonServlet.class);
private static final Logger LOG =
LoggerFactory.getLogger(JMXJsonServlet.class);
static final String ACCESS_CONTROL_ALLOW_METHODS =
"Access-Control-Allow-Methods";
static final String ACCESS_CONTROL_ALLOW_ORIGIN =

@ -106,7 +106,7 @@ class MBeanInfoBuilder implements MetricsVisitor {
}
++curRecNo;
}
MetricsSystemImpl.LOG.debug(attrs);
MetricsSystemImpl.LOG.debug(attrs.toString());
MBeanAttributeInfo[] attrsArray = new MBeanAttributeInfo[attrs.size()];
return new MBeanInfo(name, description, attrs.toArray(attrsArray),
null, null, null); // no ops/ctors/notifications

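The toString() additions here and in the RpcDetailedMetrics hunk above exist because commons-logging's debug(Object) accepted any object, while SLF4J's debug(...) takes a String or a format string plus arguments. Where the argument is costly to render, SLF4J's parameterized form is the usual alternative, since it defers toString() until debug is actually enabled; a sketch reusing the attrs variable from the hunk above:

// Direct port, as done in this merge; attrs is rendered unconditionally:
MetricsSystemImpl.LOG.debug(attrs.toString());

// Parameterized form; attrs.toString() only runs if debug is enabled:
MetricsSystemImpl.LOG.debug("{}", attrs);
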
@ -41,18 +41,18 @@ import org.apache.commons.configuration2.builder.fluent.Configurations;
import org.apache.commons.configuration2.builder.fluent.Parameters;
import org.apache.commons.configuration2.convert.DefaultListDelimiterHandler;
import org.apache.commons.configuration2.ex.ConfigurationException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsPlugin;
import org.apache.hadoop.metrics2.filter.GlobFilter;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Metrics configuration for MetricsSystemImpl
*/
class MetricsConfig extends SubsetConfiguration {
static final Log LOG = LogFactory.getLog(MetricsConfig.class);
static final Logger LOG = LoggerFactory.getLogger(MetricsConfig.class);

static final String DEFAULT_FILE_NAME = "hadoop-metrics2.properties";
static final String PREFIX_DEFAULT = "*.";

@ -121,7 +121,7 @@ class MetricsConfig extends SubsetConfiguration {
LOG.info("loaded properties from "+ fname);
LOG.debug(toString(cf));
MetricsConfig mc = new MetricsConfig(cf, prefix);
LOG.debug(mc);
LOG.debug(mc.toString());
return mc;
} catch (ConfigurationException e) {
// Commons Configuration defines the message text when file not found

@ -24,8 +24,6 @@ import java.util.concurrent.*;

import static com.google.common.base.Preconditions.*;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;

@ -36,13 +34,16 @@ import static org.apache.hadoop.metrics2.util.Contracts.*;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* An adapter class for metrics sink and associated filters
*/
class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {

private final Log LOG = LogFactory.getLog(MetricsSinkAdapter.class);
private static final Logger LOG =
LoggerFactory.getLogger(MetricsSinkAdapter.class);
private final String name, description, context;
private final MetricsSink sink;
private final MetricsFilter sourceFilter, recordFilter, metricFilter;

@ -207,7 +208,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
stopping = true;
sinkThread.interrupt();
if (sink instanceof Closeable) {
IOUtils.cleanup(LOG, (Closeable)sink);
IOUtils.cleanupWithLogger(LOG, (Closeable)sink);
}
try {
sinkThread.join();

@ -33,8 +33,6 @@ import static com.google.common.base.Preconditions.*;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsFilter;

@ -43,6 +41,8 @@ import org.apache.hadoop.metrics2.MetricsTag;
import static org.apache.hadoop.metrics2.impl.MetricsConfig.*;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.metrics2.util.Contracts.*;

@ -51,7 +51,8 @@ import static org.apache.hadoop.metrics2.util.Contracts.*;
*/
class MetricsSourceAdapter implements DynamicMBean {

private static final Log LOG = LogFactory.getLog(MetricsSourceAdapter.class);
private static final Logger LOG =
LoggerFactory.getLogger(MetricsSourceAdapter.class);

private final String prefix, name;
private final MetricsSource source;

@ -36,8 +36,6 @@ import com.google.common.annotations.VisibleForTesting;
import static com.google.common.base.Preconditions.*;

import org.apache.commons.configuration2.PropertiesConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.math3.util.ArithmeticUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;

@ -62,6 +60,8 @@ import org.apache.hadoop.metrics2.lib.MutableStat;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* A base class for metrics system singletons

@ -70,7 +70,7 @@ import org.apache.hadoop.util.Time;
@Metrics(context="metricssystem")
public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {

static final Log LOG = LogFactory.getLog(MetricsSystemImpl.class);
static final Logger LOG = LoggerFactory.getLogger(MetricsSystemImpl.class);
static final String MS_NAME = "MetricsSystem";
static final String MS_STATS_NAME = MS_NAME +",sub=Stats";
static final String MS_STATS_DESC = "Metrics system metrics";

@ -22,20 +22,21 @@ import java.lang.reflect.Method;

import static com.google.common.base.Preconditions.*;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.metrics2.util.Contracts.*;

/**
* Metric generated from a method, mostly used by annotation
*/
class MethodMetric extends MutableMetric {
private static final Log LOG = LogFactory.getLog(MethodMetric.class);
private static final Logger LOG = LoggerFactory.getLogger(MethodMetric.class);

private final Object obj;
private final Method method;

@ -24,8 +24,6 @@ import java.lang.reflect.Method;

import static com.google.common.base.Preconditions.*;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsException;

@ -34,6 +32,8 @@ import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Helper class to build {@link MetricsSource} object from annotations.

@ -49,7 +49,8 @@ import org.apache.hadoop.util.ReflectionUtils;
*/
@InterfaceAudience.Private
public class MetricsSourceBuilder {
private static final Log LOG = LogFactory.getLog(MetricsSourceBuilder.class);
private static final Logger LOG =
LoggerFactory.getLogger(MetricsSourceBuilder.class);

private final Object source;
private final MutableMetricsFactory factory;

@ -22,19 +22,20 @@ import java.lang.reflect.Field;
import java.lang.reflect.Method;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@InterfaceAudience.Private
@InterfaceStability.Evolving
public class MutableMetricsFactory {
private static final Log LOG = LogFactory.getLog(MutableMetricsFactory.class);
private static final Logger LOG =
LoggerFactory.getLogger(MutableMetricsFactory.class);

MutableMetric newForField(Field field, Metric annotation,
MetricsRegistry registry) {