diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index d913f3a2835..2a0e21be1dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -22,11 +22,10 @@ import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.EnumSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
@@ -42,6 +41,9 @@ import org.apache.htrace.TraceScope;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * BlockReaderLocal enables local short circuited reads. If the DFS client is on
  * the same machine as the datanode, then the client can read files directly
@@ -60,7 +62,7 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 class BlockReaderLocal implements BlockReader {
-  static final Log LOG = LogFactory.getLog(BlockReaderLocal.class);
+  static final Logger LOG = LoggerFactory.getLogger(BlockReaderLocal.class);
 
   private static final DirectBufferPool bufferPool = new DirectBufferPool();
 
@@ -88,7 +90,7 @@ class BlockReaderLocal implements BlockReader {
     public Builder setCachingStrategy(CachingStrategy cachingStrategy) {
       long readahead = cachingStrategy.getReadahead() != null ?
           cachingStrategy.getReadahead() :
-          DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT;
+          HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT;
       this.maxReadahead = (int)Math.min(Integer.MAX_VALUE, readahead);
       return this;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
index c16ffdf2e0e..eea3f067cf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
@@ -29,8 +29,6 @@ import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ReadOption;
@@ -45,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
+import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -55,6 +54,9 @@ import org.apache.htrace.Sampler;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * BlockReaderLocalLegacy enables local short circuited reads. If the DFS client is on
  * the same machine as the datanode, then the client can read files directly
@@ -79,7 +81,8 @@ import org.apache.htrace.TraceScope;
  */
 @InterfaceAudience.Private
 class BlockReaderLocalLegacy implements BlockReader {
-  private static final Log LOG = LogFactory.getLog(BlockReaderLocalLegacy.class);
+  private static final Logger LOG = LoggerFactory.getLogger(
+      BlockReaderLocalLegacy.class);
 
   //Stores the cache and proxy for a local datanode.
   private static class LocalDatanodeInfo {
@@ -112,7 +115,7 @@ class BlockReaderLocalLegacy implements BlockReader {
         proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
           @Override
           public ClientDatanodeProtocol run() throws Exception {
-            return DFSUtil.createClientDatanodeProtocolProxy(node, conf,
+            return DFSUtilClient.createClientDatanodeProtocolProxy(node, conf,
                 socketTimeout, connectToDnViaHostname);
           }
         });
@@ -244,7 +247,7 @@ class BlockReaderLocalLegacy implements BlockReader {
     } catch (IOException e) {
       // remove from cache
       localDatanodeInfo.removeBlockLocalPathInfo(blk);
-      DFSClient.LOG.warn("BlockReaderLocalLegacy: Removing " + blk
+      LOG.warn("BlockReaderLocalLegacy: Removing " + blk
           + " from cache because local file " + pathinfo.getBlockPath()
           + " could not be opened.");
       throw e;
@@ -689,7 +692,7 @@ class BlockReaderLocalLegacy implements BlockReader {
   @Override
   public synchronized void close() throws IOException {
-    IOUtils.cleanup(LOG, dataIn, checksumIn);
+    IOUtilsClient.cleanup(LOG, dataIn, checksumIn);
     if (slowReadBuff != null) {
       bufferPool.returnBuffer(slowReadBuff);
       slowReadBuff = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
index bf114631cf2..38369798268 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hdfs;
 
 import java.util.HashMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -32,6 +30,9 @@ import org.apache.hadoop.hdfs.util.ByteArrayManager;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * ClientContext contains context information for a client.
  *
@@ -40,7 +41,7 @@ import com.google.common.annotations.VisibleForTesting;
  */
 @InterfaceAudience.Private
 public class ClientContext {
-  private static final Log LOG = LogFactory.getLog(ClientContext.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ClientContext.class);
 
   /**
    * Global map of context names to caches contexts.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 3d0acb0792d..a89f5567442 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -22,22 +22,32 @@ import com.google.common.collect.Maps;
 import com.google.common.primitives.SignedBytes;
 import org.apache.commons.io.Charsets;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.net.SocketFactory;
+import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.text.SimpleDateFormat;
 import java.util.Collection;
 import java.util.Collections;
@@ -455,4 +465,62 @@ public class DFSUtilClient {
     localAddrMap.put(addr.getHostAddress(), local);
     return local;
   }
+
+  /** Create a {@link ClientDatanodeProtocol} proxy */
+  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
+      DatanodeID datanodeid, Configuration conf, int socketTimeout,
+      boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
+    return new ClientDatanodeProtocolTranslatorPB(datanodeid, conf, socketTimeout,
+        connectToDnViaHostname, locatedBlock);
+  }
+
+  /** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
+  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
+      DatanodeID datanodeid, Configuration conf, int socketTimeout,
+      boolean connectToDnViaHostname) throws IOException {
+    return new ClientDatanodeProtocolTranslatorPB(
+        datanodeid, conf, socketTimeout, connectToDnViaHostname);
+  }
+
+  /** Create a {@link ClientDatanodeProtocol} proxy */
+  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
+      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+      SocketFactory factory) throws IOException {
+    return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
+  }
+
+  /**
+   * Creates a new KeyProvider from the given Configuration.
+   *
+   * @param conf Configuration
+   * @return new KeyProvider, or null if no provider was found.
+   * @throws IOException if the KeyProvider is improperly specified in
+   *                     the Configuration
+   */
+  public static KeyProvider createKeyProvider(
+      final Configuration conf) throws IOException {
+    final String providerUriStr =
+        conf.getTrimmed(HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
+    // No provider set in conf
+    if (providerUriStr.isEmpty()) {
+      return null;
+    }
+    final URI providerUri;
+    try {
+      providerUri = new URI(providerUriStr);
+    } catch (URISyntaxException e) {
+      throw new IOException(e);
+    }
+    KeyProvider keyProvider = KeyProviderFactory.get(providerUri, conf);
+    if (keyProvider == null) {
+      throw new IOException("Could not instantiate KeyProvider from " +
+          HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI + " setting of '" +
+          providerUriStr + "'");
+    }
+    if (keyProvider.isTransient()) {
+      throw new IOException("KeyProvider " + keyProvider.toString()
+          + " was found but it is a transient provider.");
+    }
+    return keyProvider;
+  }
 }
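The three createClientDatanodeProtocolProxy overloads and createKeyProvider above move verbatim from DFSUtil, so purely client-side code no longer reaches into the server module. A minimal sketch of resolving a KeyProvider through the relocated helper; the standalone class and the KMS endpoint are illustrative assumptions, and only the configuration key and the DFSUtilClient call come from this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.hdfs.DFSUtilClient;

    public class KeyProviderLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same key that HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI names below.
        conf.set("dfs.encryption.key.provider.uri",
            "kms://http@kms.example.com:16000/kms"); // illustrative endpoint
        KeyProvider provider = DFSUtilClient.createKeyProvider(conf);
        // Returns null when the key is unset; throws for a malformed URI
        // or a transient provider, exactly as coded above.
        System.out.println(provider == null ? "no provider configured" : provider.toString());
      }
    }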
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ExternalBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExternalBlockReader.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ExternalBlockReader.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExternalBlockReader.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java
similarity index 89%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java
index a2b6c7e9a9d..05492e018fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java
@@ -21,14 +21,12 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.cache.Cache;
@@ -36,10 +34,13 @@ import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 @InterfaceAudience.Private
 public class KeyProviderCache {
 
-  public static final Log LOG = LogFactory.getLog(KeyProviderCache.class);
+  public static final Logger LOG = LoggerFactory.getLogger(KeyProviderCache.class);
 
   private final Cache<URI, KeyProvider> cache;
 
@@ -72,7 +73,7 @@ public class KeyProviderCache {
       return cache.get(kpURI, new Callable<KeyProvider>() {
         @Override
         public KeyProvider call() throws Exception {
-          return DFSUtil.createKeyProvider(conf);
+          return DFSUtilClient.createKeyProvider(conf);
         }
       });
     } catch (Exception e) {
@@ -83,11 +84,11 @@ public class KeyProviderCache {
   private URI createKeyProviderURI(Configuration conf) {
     final String providerUriStr =
-        conf.getTrimmed(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
+        conf.getTrimmed(HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
     // No provider set in conf
     if (providerUriStr.isEmpty()) {
       LOG.error("Could not find uri with key ["
-          + DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI
+          + HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI
          + "] to create a keyProvider !!");
       return null;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/PeerCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java
similarity index 95%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/PeerCache.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java
index 08b0468bfa8..55aa741ecf0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/PeerCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java
@@ -27,15 +27,16 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.LinkedListMultimap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A cache of input stream sockets to Data Node.
@@ -44,7 +45,7 @@ import org.apache.hadoop.util.Time;
 @InterfaceAudience.Private
 @VisibleForTesting
 public class PeerCache {
-  private static final Log LOG = LogFactory.getLog(PeerCache.class);
+  private static final Logger LOG = LoggerFactory.getLogger(PeerCache.class);
 
   private static class Key {
     final DatanodeID dnID;
@@ -188,7 +189,7 @@ public class PeerCache {
     if (peer.isClosed()) return;
     if (capacity <= 0) {
       // Cache disabled.
-      IOUtils.cleanup(LOG, peer);
+      IOUtilsClient.cleanup(LOG, peer);
       return;
     }
     putInternal(dnId, peer);
@@ -222,7 +223,7 @@ public class PeerCache {
           expiryPeriod) {
         break;
       }
-      IOUtils.cleanup(LOG, entry.getValue().getPeer());
+      IOUtilsClient.cleanup(LOG, entry.getValue().getPeer());
       iter.remove();
     }
   }
@@ -241,7 +242,7 @@ public class PeerCache {
           "capacity: " + capacity);
     }
     Entry<Key, Value> entry = iter.next();
-    IOUtils.cleanup(LOG, entry.getValue().getPeer());
+    IOUtilsClient.cleanup(LOG, entry.getValue().getPeer());
     iter.remove();
   }
 
@@ -269,7 +270,7 @@ public class PeerCache {
   @VisibleForTesting
   synchronized void clear() {
     for (Value value : multimap.values()) {
-      IOUtils.cleanup(LOG, value.getPeer());
+      IOUtilsClient.cleanup(LOG, value.getPeer());
     }
     multimap.clear();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/BlockReportOptions.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/BlockReportOptions.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/BlockReportOptions.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/BlockReportOptions.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 52a9339aa58..0a3aad15d7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -117,6 +117,11 @@ public interface HdfsClientConfigKeys {
       "dfs.datanode.hdfs-blocks-metadata.enabled";
   boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;
 
+  String DFS_DATANODE_KERBEROS_PRINCIPAL_KEY = "dfs.datanode.kerberos.principal";
+  String DFS_DATANODE_READAHEAD_BYTES_KEY = "dfs.datanode.readahead.bytes";
+  long DFS_DATANODE_READAHEAD_BYTES_DEFAULT = 4 * 1024 * 1024; // 4MB
+  String DFS_ENCRYPTION_KEY_PROVIDER_URI = "dfs.encryption.key.provider.uri";
+
   String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
       PREFIX + "replica.accessor.builder.classes";
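The four constants above give hdfs-client code its own copies of keys that previously lived only in the server-side DFSConfigKeys. A small sketch of reading the readahead setting through them, mirroring what the DNConf hunk later in this patch does (the wrapper class is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class ReadaheadSetting {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        long readahead = conf.getLong(
            HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
            HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
        // Prints 4194304 (the 4MB default) unless dfs.datanode.readahead.bytes is set.
        System.out.println(readahead);
      }
    }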
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
similarity index 96%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 3cf32920dd9..c8e0ed9042a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -20,13 +20,11 @@ package org.apache.hadoop.hdfs.protocol;
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.ReconfigurationTaskStatus;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
 import org.apache.hadoop.security.KerberosInfo;
@@ -38,7 +36,7 @@ import org.apache.hadoop.security.token.TokenInfo;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 @KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = HdfsClientConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 @TokenInfo(BlockTokenSelector.class)
 public interface ClientDatanodeProtocol {
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsBlocksMetadata.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsBlocksMetadata.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsBlocksMetadata.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsBlocksMetadata.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/InvalidEncryptionKeyException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/InvalidEncryptionKeyException.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/InvalidEncryptionKeyException.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/InvalidEncryptionKeyException.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolPB.java
similarity index 91%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolPB.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolPB.java
index 21073eb2e37..7e3f66b5489 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolPB.java
@@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.protocolPB;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
 import org.apache.hadoop.ipc.ProtocolInfo;
@@ -26,7 +26,7 @@ import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.TokenInfo;
 
 @KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = HdfsClientConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 @TokenInfo(BlockTokenSelector.class)
 @ProtocolInfo(protocolName =
     "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol",
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 9e4d36c6293..e335919a6f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -28,8 +28,6 @@ import javax.net.SocketFactory;
 
 import com.google.common.base.Optional;
 import com.google.common.collect.Maps;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -77,6 +75,8 @@ import com.google.common.primitives.Longs;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is the client side translator to translate the requests made on
@@ -88,8 +88,8 @@ import com.google.protobuf.ServiceException;
 public class ClientDatanodeProtocolTranslatorPB implements
     ProtocolMetaInterface, ClientDatanodeProtocol,
     ProtocolTranslator, Closeable {
-  public static final Log LOG = LogFactory
-      .getLog(ClientDatanodeProtocolTranslatorPB.class);
+  public static final Logger LOG = LoggerFactory
+      .getLogger(ClientDatanodeProtocolTranslatorPB.class);
 
   /** RpcController is not used and hence is set to null */
   private final static RpcController NULL_CONTROLLER = null;
@@ -226,7 +226,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
-    return new BlockLocalPathInfo(PBHelper.convert(resp.getBlock()),
+    return new BlockLocalPathInfo(PBHelperClient.convert(resp.getBlock()),
         resp.getLocalPath(), resp.getLocalMetaPath());
   }
 
@@ -294,7 +294,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
     GetDatanodeInfoResponseProto response;
     try {
       response = rpcProxy.getDatanodeInfo(NULL_CONTROLLER, VOID_GET_DATANODE_INFO);
-      return PBHelper.convert(response.getLocalInfo());
+      return PBHelperClient.convert(response.getLocalInfo());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index edf658ab4c0..d9215074876 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -23,12 +23,14 @@ import com.google.protobuf.CodedInputStream;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
@@ -185,6 +187,17 @@ public class PBHelperClient {
     return pinnings;
   }
 
+  public static ExtendedBlock convert(ExtendedBlockProto eb) {
+    if (eb == null) return null;
+    return new ExtendedBlock( eb.getPoolId(), eb.getBlockId(), eb.getNumBytes(),
+        eb.getGenerationStamp());
+  }
+
+  public static DatanodeLocalInfo convert(DatanodeLocalInfoProto proto) {
+    return new DatanodeLocalInfo(proto.getSoftwareVersion(),
+        proto.getConfigVersion(), proto.getUptime());
+  }
+
   static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
     if (di == null) return null;
     return convert(di);
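The two convert overloads move into PBHelperClient so that the client-side translators above no longer call into the server-only PBHelper. An illustrative round trip through the relocated converter, in the spirit of testConvertExtendedBlock near the end of this patch; the block values are made up:

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;

    public class ExtendedBlockRoundTrip {
      public static void main(String[] args) {
        // Pool id, block id, numBytes, generation stamp -- arbitrary sample values.
        ExtendedBlock b = new ExtendedBlock("BP-sample-pool", 1L, 100L, 2L);
        ExtendedBlockProto proto = PBHelperClient.convert(b); // proto-ward, already in PBHelperClient
        ExtendedBlock back = PBHelperClient.convert(proto);   // java-ward, moved by this patch
        System.out.println(b.equals(back)); // true
      }
    }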
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c7f17846889..41ac00442dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -513,6 +513,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8938. Extract BlockToMarkCorrupt and ReplicationWork as standalone
     classes from BlockManager. (Mingliang Liu via wheat9)
 
+    HDFS-8925. Move BlockReaderLocal to hdfs-client.
+    (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
index a1cd5556b10..cac5366838b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
@@ -350,7 +350,8 @@ class BlockStorageLocationUtil {
       TraceScope scope =
           Trace.startSpan("getHdfsBlocksMetadata", parentSpan);
       try {
-        cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
+        cdp = DFSUtilClient.createClientDatanodeProtocolProxy(
+            datanode, configuration,
             timeout, connectToDnViaHostname);
         metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
       } catch (IOException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 2278612d31b..cf9c11d0333 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -80,8 +80,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
   public static final String DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY = "dfs.datanode.balance.max.concurrent.moves";
   public static final int DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT = 5;
-  public static final String DFS_DATANODE_READAHEAD_BYTES_KEY = "dfs.datanode.readahead.bytes";
-  public static final long DFS_DATANODE_READAHEAD_BYTES_DEFAULT = 4 * 1024 * 1024; // 4MB
+  @Deprecated
+  public static final String DFS_DATANODE_READAHEAD_BYTES_KEY =
+      HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY;
+  @Deprecated
+  public static final long DFS_DATANODE_READAHEAD_BYTES_DEFAULT =
+      HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT;
   public static final String DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY = "dfs.datanode.drop.cache.behind.writes";
   public static final boolean DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT = false;
   public static final String DFS_DATANODE_SYNC_BEHIND_WRITES_KEY = "dfs.datanode.sync.behind.writes";
@@ -505,7 +509,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_WEB_UGI_KEY = "dfs.web.ugi";
   public static final String DFS_NAMENODE_STARTUP_KEY = "dfs.namenode.startup";
   public static final String DFS_DATANODE_KEYTAB_FILE_KEY = "dfs.datanode.keytab.file";
-  public static final String DFS_DATANODE_KERBEROS_PRINCIPAL_KEY = "dfs.datanode.kerberos.principal";
+  public static final String DFS_DATANODE_KERBEROS_PRINCIPAL_KEY =
+      HdfsClientConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
   @Deprecated
   public static final String DFS_DATANODE_USER_NAME_KEY = DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
   public static final String DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS = "dfs.datanode.shared.file.descriptor.paths";
@@ -602,7 +607,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class";
   public static final int DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT = 100;
   public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses";
-  public static final String DFS_ENCRYPTION_KEY_PROVIDER_URI = "dfs.encryption.key.provider.uri";
+  public static final String DFS_ENCRYPTION_KEY_PROVIDER_URI =
+      HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI;
 
   // Journal-node related configs. These are read on the JN side.
   public static final String DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir";
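Keeping @Deprecated aliases in DFSConfigKeys means existing server-side references still compile while the defining constants now live in HdfsClientConfigKeys. An illustrative check of that aliasing (the class below is not part of the patch):

    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class KeyAliasCheck {
      @SuppressWarnings("deprecation")
      public static void main(String[] args) {
        // Both names resolve to the same string, "dfs.datanode.readahead.bytes".
        System.out.println(DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY
            .equals(HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY)); // true
      }
    }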
connectToDnViaHostname); - } - - /** Create a {@link ClientDatanodeProtocol} proxy */ - public static ClientDatanodeProtocol createClientDatanodeProtocolProxy( - InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, - SocketFactory factory) throws IOException { - return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory); - } /** * Get nameservice Id for the {@link NameNode} based on namenode RPC address @@ -1484,41 +1455,6 @@ public class DFSUtil { } } - /** - * Creates a new KeyProvider from the given Configuration. - * - * @param conf Configuration - * @return new KeyProvider, or null if no provider was found. - * @throws IOException if the KeyProvider is improperly specified in - * the Configuration - */ - public static KeyProvider createKeyProvider( - final Configuration conf) throws IOException { - final String providerUriStr = - conf.getTrimmed(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, ""); - // No provider set in conf - if (providerUriStr.isEmpty()) { - return null; - } - final URI providerUri; - try { - providerUri = new URI(providerUriStr); - } catch (URISyntaxException e) { - throw new IOException(e); - } - KeyProvider keyProvider = KeyProviderFactory.get(providerUri, conf); - if (keyProvider == null) { - throw new IOException("Could not instantiate KeyProvider from " + - DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI + " setting of '" + - providerUriStr +"'"); - } - if (keyProvider.isTransient()) { - throw new IOException("KeyProvider " + keyProvider.toString() - + " was found but it is a transient provider."); - } - return keyProvider; - } - /** * Creates a new KeyProviderCryptoExtension by wrapping the * KeyProvider specified in the given Configuration. @@ -1530,7 +1466,7 @@ public class DFSUtil { */ public static KeyProviderCryptoExtension createKeyProviderCryptoExtension( final Configuration conf) throws IOException { - KeyProvider keyProvider = createKeyProvider(conf); + KeyProvider keyProvider = DFSUtilClient.createKeyProvider(conf); if (keyProvider == null) { return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java index 694f5212b99..85da414dccf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto; -import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto; @@ -115,7 +114,7 @@ public abstract class Receiver implements DataTransferProtocol { TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { - readBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()), + 
readBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()), PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), proto.getOffset(), @@ -136,7 +135,7 @@ public abstract class Receiver implements DataTransferProtocol { TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { - writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()), + writeBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()), PBHelperClient.convertStorageType(proto.getStorageType()), PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), @@ -167,7 +166,7 @@ public abstract class Receiver implements DataTransferProtocol { TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { - transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()), + transferBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()), PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), targets, @@ -186,7 +185,7 @@ public abstract class Receiver implements DataTransferProtocol { TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { - requestShortCircuitFds(PBHelper.convert(proto.getHeader().getBlock()), + requestShortCircuitFds(PBHelperClient.convert(proto.getHeader().getBlock()), PBHelper.convert(proto.getHeader().getToken()), slotId, proto.getMaxVersion(), proto.getSupportsReceiptVerification()); @@ -228,7 +227,7 @@ public abstract class Receiver implements DataTransferProtocol { TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { - replaceBlock(PBHelper.convert(proto.getHeader().getBlock()), + replaceBlock(PBHelperClient.convert(proto.getHeader().getBlock()), PBHelperClient.convertStorageType(proto.getStorageType()), PBHelper.convert(proto.getHeader().getToken()), proto.getDelHint(), @@ -244,7 +243,7 @@ public abstract class Receiver implements DataTransferProtocol { TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { - copyBlock(PBHelper.convert(proto.getHeader().getBlock()), + copyBlock(PBHelperClient.convert(proto.getHeader().getBlock()), PBHelper.convert(proto.getHeader().getToken())); } finally { if (traceScope != null) traceScope.close(); @@ -257,7 +256,7 @@ public abstract class Receiver implements DataTransferProtocol { TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { - blockChecksum(PBHelper.convert(proto.getHeader().getBlock()), + blockChecksum(PBHelperClient.convert(proto.getHeader().getBlock()), PBHelper.convert(proto.getHeader().getToken())); } finally { if (traceScope != null) traceScope.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java index a628287debd..357c9450d8c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java @@ -95,7 +95,7 @@ public class 
ClientDatanodeProtocolServerSideTranslatorPB implements throws ServiceException { long len; try { - len = impl.getReplicaVisibleLength(PBHelper.convert(request.getBlock())); + len = impl.getReplicaVisibleLength(PBHelperClient.convert(request.getBlock())); } catch (IOException e) { throw new ServiceException(e); } @@ -132,7 +132,7 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements throws ServiceException { BlockLocalPathInfo resp; try { - resp = impl.getBlockLocalPathInfo(PBHelper.convert(request.getBlock()), PBHelper.convert(request.getToken())); + resp = impl.getBlockLocalPathInfo(PBHelperClient.convert(request.getBlock()), PBHelper.convert(request.getToken())); } catch (IOException e) { throw new ServiceException(e); } @@ -150,7 +150,7 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements try { String poolId = request.getBlockPoolId(); - List> tokens = + List> tokens = new ArrayList>(request.getTokensCount()); for (TokenProto b : request.getTokensList()) { tokens.add(PBHelper.convert(b)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index a95f3978144..df8dbad6b83 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -477,7 +477,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements public AbandonBlockResponseProto abandonBlock(RpcController controller, AbandonBlockRequestProto req) throws ServiceException { try { - server.abandonBlock(PBHelper.convert(req.getB()), req.getFileId(), + server.abandonBlock(PBHelperClient.convert(req.getB()), req.getFileId(), req.getSrc(), req.getHolder()); } catch (IOException e) { throw new ServiceException(e); @@ -495,7 +495,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements LocatedBlock result = server.addBlock( req.getSrc(), req.getClientName(), - req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null, + req.hasPrevious() ? PBHelperClient.convert(req.getPrevious()) : null, (excl == null || excl.size() == 0) ? null : PBHelper.convert(excl .toArray(new DatanodeInfoProto[excl.size()])), req.getFileId(), (favor == null || favor.size() == 0) ? null : favor @@ -516,7 +516,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements List existingStorageIDsList = req.getExistingStorageUuidsList(); List excludesList = req.getExcludesList(); LocatedBlock result = server.getAdditionalDatanode(req.getSrc(), - req.getFileId(), PBHelper.convert(req.getBlk()), + req.getFileId(), PBHelperClient.convert(req.getBlk()), PBHelper.convert(existingList.toArray( new DatanodeInfoProto[existingList.size()])), existingStorageIDsList.toArray( @@ -538,7 +538,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements try { boolean result = server.complete(req.getSrc(), req.getClientName(), - req.hasLast() ? PBHelper.convert(req.getLast()) : null, + req.hasLast() ? PBHelperClient.convert(req.getLast()) : null, req.hasFileId() ? 
req.getFileId() : HdfsConstants.GRANDFATHER_INODE_ID); return CompleteResponseProto.newBuilder().setResult(result).build(); } catch (IOException e) { @@ -956,7 +956,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements throws ServiceException { try { LocatedBlockProto result = PBHelper.convert(server - .updateBlockForPipeline(PBHelper.convert(req.getBlock()), + .updateBlockForPipeline(PBHelperClient.convert(req.getBlock()), req.getClientName())); return UpdateBlockForPipelineResponseProto.newBuilder().setBlock(result) .build(); @@ -972,8 +972,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements List newNodes = req.getNewNodesList(); List newStorageIDs = req.getStorageIDsList(); server.updatePipeline(req.getClientName(), - PBHelper.convert(req.getOldBlock()), - PBHelper.convert(req.getNewBlock()), + PBHelperClient.convert(req.getOldBlock()), + PBHelperClient.convert(req.getNewBlock()), PBHelper.convert(newNodes.toArray(new DatanodeIDProto[newNodes.size()])), newStorageIDs.toArray(new String[newStorageIDs.size()])); return VOID_UPDATEPIPELINE_RESPONSE; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java index e133ec7923a..5964e151ece 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java @@ -281,7 +281,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements final List sidprotos = request.getNewTargetStoragesList(); final String[] storageIDs = sidprotos.toArray(new String[sidprotos.size()]); try { - impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()), + impl.commitBlockSynchronization(PBHelperClient.convert(request.getBlock()), request.getNewGenStamp(), request.getNewLength(), request.getCloseFile(), request.getDeleteBlock(), dns, storageIDs); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java index ba0a8fc24ac..fb67e36e5b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java @@ -76,7 +76,7 @@ public class InterDatanodeProtocolServerSideTranslatorPB implements final String storageID; try { storageID = impl.updateReplicaUnderRecovery( - PBHelper.convert(request.getBlock()), request.getRecoveryId(), + PBHelperClient.convert(request.getBlock()), request.getRecoveryId(), request.getNewBlockId(), request.getNewLength()); } catch (IOException e) { throw new ServiceException(e); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index dbb1861858d..75072ac42cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -23,9 +23,7 @@ import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos import static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto; import static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto; -import java.io.EOFException; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; @@ -110,7 +108,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsS import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto; -import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto; @@ -146,7 +143,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto; @@ -219,20 +215,16 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; -import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId; import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId; -import org.apache.hadoop.hdfs.util.ExactSizeInputStream; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.DataChecksum; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.primitives.Shorts; import com.google.protobuf.ByteString; -import com.google.protobuf.CodedInputStream; /** * Utilities for converting protobuf classes to and from implementation classes @@ -575,13 +567,7 @@ public class PBHelper { return new NamenodeCommand(cmd.getAction()); } } - - public static ExtendedBlock convert(ExtendedBlockProto eb) { - if (eb == null) return null; - return new ExtendedBlock( eb.getPoolId(), eb.getBlockId(), eb.getNumBytes(), - eb.getGenerationStamp()); - } - + public static RecoveringBlockProto convert(RecoveringBlock b) { if (b == null) { return null; @@ -595,7 +581,7 @@ public class PBHelper { } public static RecoveringBlock convert(RecoveringBlockProto b) { - ExtendedBlock block = convert(b.getBlock().getB()); + ExtendedBlock block = 
PBHelperClient.convert(b.getBlock().getB()); DatanodeInfo[] locs = convert(b.getBlock().getLocsList()); return (b.hasTruncateBlock()) ? new RecoveringBlock(block, locs, PBHelper.convert(b.getTruncateBlock())) : @@ -741,7 +727,7 @@ public class PBHelper { } } - LocatedBlock lb = new LocatedBlock(PBHelper.convert(proto.getB()), targets, + LocatedBlock lb = new LocatedBlock(PBHelperClient.convert(proto.getB()), targets, storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(), cachedLocs.toArray(new DatanodeInfo[0])); lb.setBlockToken(PBHelper.convert(proto.getBlockToken())); @@ -2120,12 +2106,6 @@ public class PBHelper { return builder.build(); } - public static DatanodeLocalInfo convert(DatanodeLocalInfoProto proto) { - return new DatanodeLocalInfo(proto.getSoftwareVersion(), - proto.getConfigVersion(), proto.getUptime()); - } - - private static AclEntryScopeProto convert(AclEntryScope v) { return AclEntryScopeProto.valueOf(v.ordinal()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java index 66fd5671ddd..9c25f5eb350 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java @@ -125,8 +125,8 @@ public class DNConf { DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT); readaheadLength = conf.getLong( - DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY, - DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT); + HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY, + HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT); dropCacheBehindWrites = conf.getBoolean( DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY, DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index ad4af878881..135d81a9f75 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -52,6 +52,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.shell.Command; import org.apache.hadoop.fs.shell.CommandFormat; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.HAUtilClient; import org.apache.hadoop.hdfs.client.BlockReportOptions; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -1940,7 +1941,7 @@ public class DFSAdmin extends FsShell { // Create the client ClientDatanodeProtocol dnProtocol = - DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf, + DFSUtilClient.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf, NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class)); return dnProtocol; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java index aad670a6717..1c4394897cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java @@ -247,7 +247,7 @@ public class 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
index aad670a6717..1c4394897cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
@@ -247,7 +247,7 @@ public class TestBlockReaderLocal {
   @Test
   public void testBlockReaderSimpleReads() throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), true,
-        DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
@@ -259,7 +259,7 @@ public class TestBlockReaderLocal {
   @Test
   public void testBlockReaderSimpleReadsNoChecksum() throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), false,
-        DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
@@ -297,14 +297,14 @@ public class TestBlockReaderLocal {
   @Test
   public void testBlockReaderLocalArrayReads2() throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalArrayReads2(),
-        true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
   public void testBlockReaderLocalArrayReads2NoChecksum()
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalArrayReads2(),
-        false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        false, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
@@ -341,7 +341,7 @@ public class TestBlockReaderLocal {
   public void testBlockReaderLocalByteBufferReads()
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalByteBufferReads(),
-        true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
@@ -349,7 +349,7 @@ public class TestBlockReaderLocal {
       throws IOException {
     runBlockReaderLocalTest(
         new TestBlockReaderLocalByteBufferReads(),
-        false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        false, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
@@ -473,7 +473,7 @@ public class TestBlockReaderLocal {
   public void testBlockReaderLocalReadCorruptStart()
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalReadCorruptStart(), true,
-        DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   private static class TestBlockReaderLocalReadCorrupt
@@ -524,14 +524,14 @@ public class TestBlockReaderLocal {
   public void testBlockReaderLocalReadCorrupt()
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalReadCorrupt(), true,
-        DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
   public void testBlockReaderLocalReadCorruptNoChecksum()
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalReadCorrupt(), false,
-        DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
@@ -576,14 +576,14 @@ public class TestBlockReaderLocal {
   public void testBlockReaderLocalWithMlockChanges()
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalWithMlockChanges(),
-        true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
   public void testBlockReaderLocalWithMlockChangesNoChecksum()
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalWithMlockChanges(),
-        false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        false, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
@@ -649,14 +649,14 @@ public class TestBlockReaderLocal {
   public void testBlockReaderLocalOnFileWithoutChecksum()
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalOnFileWithoutChecksum(),
-        true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
   public void testBlockReaderLocalOnFileWithoutChecksumNoChecksum()
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalOnFileWithoutChecksum(),
-        false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        false, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
@@ -677,14 +677,14 @@ public class TestBlockReaderLocal {
   public void testBlockReaderLocalReadZeroBytes()
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalReadZeroBytes(),
-        true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
   public void testBlockReaderLocalReadZeroBytesNoChecksum()
       throws IOException {
     runBlockReaderLocalTest(new TestBlockReaderLocalReadZeroBytes(),
-        false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+        false, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
   }
 
   @Test
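Aside (not part of the patch): every TestBlockReaderLocal hunk above makes the same one-line substitution in a checksum/no-checksum pair of tests. A hypothetical consolidation of one such pair, written as if it were a member of TestBlockReaderLocal (so runBlockReaderLocalTest, the helper class, and the imports already exist in scope):

  @Test
  public void testBlockReaderSimpleReadsBothChecksumModes()
      throws IOException {
    // Exercises the same runner with checksum verification on and off,
    // sourcing the readahead default from HdfsClientConfigKeys.
    for (boolean verifyChecksum : new boolean[] { true, false }) {
      runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), verifyChecksum,
          HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
    }
  }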
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
index 3deca170481..af28bd314f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
@@ -182,7 +182,7 @@ public class TestBlockReaderLocalLegacy {
     {
       final LocatedBlock lb = cluster.getNameNode().getRpcServer()
           .getBlockLocations(path.toString(), 0, 1).get(0);
-      proxy = DFSUtil.createClientDatanodeProtocolProxy(
+      proxy = DFSUtilClient.createClientDatanodeProtocolProxy(
           lb.getLocations()[0], conf, 60000, false);
       token = lb.getBlockToken();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 813b886b59c..3dd1917b2d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -856,7 +856,7 @@ public class TestDFSClientRetries {
 
     ClientDatanodeProtocol proxy = null;
     try {
-      proxy = DFSUtil.createClientDatanodeProtocolProxy(
+      proxy = DFSUtilClient.createClientDatanodeProtocolProxy(
           fakeDnId, conf, 500, false, fakeBlock);
 
       proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index f25fb1b0aab..3d2e8b945d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -333,12 +333,12 @@ public class TestPBHelper {
   public void testConvertExtendedBlock() {
     ExtendedBlock b = getExtendedBlock();
     ExtendedBlockProto bProto = PBHelperClient.convert(b);
-    ExtendedBlock b1 = PBHelper.convert(bProto);
+    ExtendedBlock b1 = PBHelperClient.convert(bProto);
     assertEquals(b, b1);
 
     b.setBlockId(-1);
     bProto = PBHelperClient.convert(b);
-    b1 = PBHelper.convert(bProto);
+    b1 = PBHelperClient.convert(bProto);
     assertEquals(b, b1);
   }
 
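Aside (not part of the patch): the test hunks above all swap DFSUtil for DFSUtilClient around the same probe -- build a ClientDatanodeProtocol proxy for a block's first datanode and ask for the replica's visible length. A sketch of that shape; obtaining the LocatedBlock is left to the caller (the tests use a MiniDFSCluster), and the RPC.stopProxy cleanup is an assumption from standard Hadoop IPC usage rather than something shown in the hunks:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.ipc.RPC;

public class VisibleLengthProbe {
  static long probe(LocatedBlock lb, Configuration conf) throws IOException {
    ClientDatanodeProtocol proxy = null;
    try {
      // Same four-argument overload as TestBlockReaderLocalLegacy above:
      // (datanode, conf, socket timeout in ms, connect-to-DN-via-hostname).
      proxy = DFSUtilClient.createClientDatanodeProtocolProxy(
          lb.getLocations()[0], conf, 60000, false);
      return proxy.getReplicaVisibleLength(lb.getBlock());
    } finally {
      if (proxy != null) {
        RPC.stopProxy(proxy);  // assumed cleanup; not shown in the hunks
      }
    }
  }
}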
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index d5a94268ddc..8308db0264f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -57,7 +57,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.Client
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
-import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.io.TestWritable;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -138,7 +138,7 @@ public class TestBlockToken {
       BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
       LOG.info("Got: " + id.toString());
       assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
-      sm.checkAccess(id, null, PBHelper.convert(req.getBlock()),
+      sm.checkAccess(id, null, PBHelperClient.convert(req.getBlock()),
          BlockTokenIdentifier.AccessMode.WRITE);
       result = id.getBlockId();
     }
@@ -259,7 +259,7 @@ public class TestBlockToken {
 
     ClientDatanodeProtocol proxy = null;
    try {
-      proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
+      proxy = DFSUtilClient.createClientDatanodeProtocolProxy(addr, ticket, conf,
          NetUtils.getDefaultSocketFactory(conf));
       assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
     } finally {
@@ -313,7 +313,7 @@ public class TestBlockToken {
     try {
       long endTime = Time.now() + 3000;
       while (Time.now() < endTime) {
-        proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
+        proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
            false, fakeBlock);
         assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
         if (proxy != null) {
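Aside (not part of the patch): the last TestBlockToken hunk wraps the migrated proxy call in a wall-clock retry loop. A hypothetical distillation of that pattern, using Hadoop's org.apache.hadoop.util.Time as the test does; the 3000 ms budget matches the hunk, and the Attempt interface is invented for this sketch. Note one deliberate divergence: this helper swallows failures and retries, whereas the original loop lets assertion failures propagate.

import org.apache.hadoop.util.Time;

public class DeadlineRetry {
  /** Invented for this sketch: one attempt of the operation under test. */
  interface Attempt {
    boolean run() throws Exception;
  }

  /** Retries until success or until the wall-clock budget is spent. */
  static boolean retryUntil(long budgetMs, Attempt attempt) {
    long endTime = Time.now() + budgetMs;  // e.g. budgetMs = 3000, as above
    while (Time.now() < endTime) {
      try {
        if (attempt.run()) {
          return true;
        }
      } catch (Exception e) {
        // Swallow and retry until the deadline expires.
      }
    }
    return false;
  }
}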
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index 0ee433f391b..b6b8a490bfb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.ClientContext;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -367,7 +367,7 @@ public class TestShortCircuitLocalRead {
     Token token = lb.get(0).getBlockToken();
     final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
     ClientDatanodeProtocol proxy =
-        DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
+        DFSUtilClient.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
     try {
       proxy.getBlockLocalPathInfo(blk, token);
       Assert.fail("The call should have failed as this user "