From 3cdd6a9a2db2794c1689298c9784bf87a4176584 Mon Sep 17 00:00:00 2001
From: Eli Collins
Date: Tue, 14 Aug 2012 22:15:57 +0000
Subject: [PATCH] HDFS-3150. Add option for clients to contact DNs via
 hostname. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1373143 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 +
 .../apache/hadoop/hdfs/BlockReaderLocal.java  | 17 +++---
 .../org/apache/hadoop/hdfs/DFSClient.java     | 33 +++++++++---
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java |  4 ++
 .../apache/hadoop/hdfs/DFSInputStream.java    | 16 ++++--
 .../apache/hadoop/hdfs/DFSOutputStream.java   | 13 ++---
 .../java/org/apache/hadoop/hdfs/DFSUtil.java  | 10 ++--
 .../hadoop/hdfs/protocol/DatanodeID.java      | 25 ++++++++-
 .../ClientDatanodeProtocolTranslatorPB.java   | 23 +++++---
 .../hadoop/hdfs/server/datanode/DNConf.java   |  6 ++-
 .../hadoop/hdfs/server/datanode/DataNode.java | 31 ++++++-----
 .../hdfs/server/datanode/DataXceiver.java     | 19 +++++--
 .../server/namenode/FileChecksumServlets.java |  2 +-
 .../src/main/resources/hdfs-default.xml       | 18 ++++++-
 .../apache/hadoop/hdfs/MiniDFSCluster.java    | 53 +++++++++++++++----
 .../hadoop/hdfs/TestDFSClientRetries.java     |  2 +-
 .../hdfs/TestDistributedFileSystem.java       |  1 -
 .../apache/hadoop/hdfs/TestFileCreation.java  | 29 ++++++++--
 .../hadoop/hdfs/TestHftpFileSystem.java       |  1 -
 .../hadoop/hdfs/TestMiniDFSCluster.java       | 23 ++++++++
 .../hdfs/TestShortCircuitLocalRead.java       |  4 +-
 .../security/token/block/TestBlockToken.java  |  2 +-
 .../server/datanode/DataNodeTestUtils.java    |  9 ++--
 .../impl/TestInterDatanodeProtocol.java       | 40 +++++++++++---
 24 files changed, 291 insertions(+), 92 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a4de5bd2501..ce764dc1c74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -26,6 +26,8 @@ Release 2.0.1-alpha - UNRELEASED
 
     HDFS-3637. Add support for encrypting the DataTransferProtocol. (atm)
 
+    HDFS-3150. Add option for clients to contact DNs via hostname. (eli)
+
   IMPROVEMENTS
 
     HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index 6db6e75198b..7d4fb7a0610 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -86,11 +86,11 @@ class BlockReaderLocal implements BlockReader {
     }
 
     private synchronized ClientDatanodeProtocol getDatanodeProxy(
-        DatanodeInfo node, Configuration conf, int socketTimeout)
-        throws IOException {
+        DatanodeInfo node, Configuration conf, int socketTimeout,
+        boolean connectToDnViaHostname) throws IOException {
       if (proxy == null) {
         proxy = DFSUtil.createClientDatanodeProtocolProxy(node, conf,
-            socketTimeout);
+            socketTimeout, connectToDnViaHostname);
       }
       return proxy;
     }
@@ -156,14 +156,16 @@ class BlockReaderLocal implements BlockReader {
    */
   static BlockReaderLocal newBlockReader(Configuration conf, String file,
       ExtendedBlock blk, Token<BlockTokenIdentifier> token, DatanodeInfo node,
-      int socketTimeout, long startOffset, long length) throws IOException {
+      int socketTimeout, long startOffset, long length,
+      boolean connectToDnViaHostname) throws IOException {
 
     LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node
         .getIpcPort());
     // check the cache first
     BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk);
     if (pathinfo == null) {
-      pathinfo = getBlockPathInfo(blk, node, conf, socketTimeout, token);
+      pathinfo = getBlockPathInfo(blk, node, conf, socketTimeout, token,
+          connectToDnViaHostname);
     }
 
     // check to see if the file exists. It may so happen that the
@@ -241,11 +243,12 @@ class BlockReaderLocal implements BlockReader {
 
   private static BlockLocalPathInfo getBlockPathInfo(ExtendedBlock blk,
       DatanodeInfo node, Configuration conf, int timeout,
-      Token<BlockTokenIdentifier> token) throws IOException {
+      Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname)
+      throws IOException {
     LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
     BlockLocalPathInfo pathinfo = null;
     ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(node,
-        conf, timeout);
+        conf, timeout, connectToDnViaHostname);
     try {
       // make RPC to local datanode to find local pathnames of blocks
       pathinfo = proxy.getBlockLocalPathInfo(blk, token);
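
The short-circuit path above only threads the new boolean through; the actual
IP-versus-hostname choice is made later in DatanodeID. For reference, a
call-site sketch against the new proxy signature (variable names here are
illustrative, not from this patch):

    // Hypothetical caller: open a ClientDatanodeProtocol proxy, honoring
    // dfs.client.use.datanode.hostname when picking the address to dial.
    boolean useHostname = conf.getBoolean(
        DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME,
        DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    ClientDatanodeProtocol proxy = DFSUtil.createClientDatanodeProtocolProxy(
        datanode, conf, socketTimeout, useHostname);
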
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 307237c523e..7351806f8c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -49,6 +49,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEF
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@@ -216,6 +218,7 @@ public class DFSClient implements java.io.Closeable {
     final String taskId;
     final FsPermission uMask;
     final boolean useLegacyBlockReader;
+    final boolean connectToDnViaHostname;
 
     Conf(Configuration conf) {
       maxFailoverAttempts = conf.getInt(
@@ -266,6 +269,8 @@ public class DFSClient implements java.io.Closeable {
       useLegacyBlockReader = conf.getBoolean(
           DFS_CLIENT_USE_LEGACY_BLOCKREADER,
           DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
+      connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
+          DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
     }
 
     private int getChecksumType(Configuration conf) {
@@ -476,6 +481,14 @@ public class DFSClient implements java.io.Closeable {
     return clientName;
   }
 
+  /**
+   * @return whether the client should use hostnames instead of IPs
+   *    when connecting to DataNodes
+   */
+  boolean connectToDnViaHostname() {
+    return dfsClientConf.connectToDnViaHostname;
+  }
+
   void checkOpen() throws IOException {
     if (!clientRunning) {
       IOException result = new IOException("Filesystem closed");
@@ -732,12 +745,12 @@ public class DFSClient implements java.io.Closeable {
    */
   static BlockReader getLocalBlockReader(Configuration conf,
       String src, ExtendedBlock blk, Token<BlockTokenIdentifier> accessToken,
-      DatanodeInfo chosenNode, int socketTimeout, long offsetIntoBlock)
-      throws InvalidToken, IOException {
+      DatanodeInfo chosenNode, int socketTimeout, long offsetIntoBlock,
+      boolean connectToDnViaHostname) throws InvalidToken, IOException {
     try {
       return BlockReaderLocal.newBlockReader(conf, src, blk, accessToken,
           chosenNode, socketTimeout, offsetIntoBlock, blk.getNumBytes()
-              - offsetIntoBlock);
+              - offsetIntoBlock, connectToDnViaHostname);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(InvalidToken.class,
           AccessControlException.class);
@@ -1464,7 +1477,8 @@ public class DFSClient implements java.io.Closeable {
   public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
     checkOpen();
     return getFileChecksum(src, namenode, socketFactory,
-        dfsClientConf.socketTimeout, getDataEncryptionKey());
+        dfsClientConf.socketTimeout, getDataEncryptionKey(),
+        dfsClientConf.connectToDnViaHostname);
   }
 
   @InterfaceAudience.Private
@@ -1510,7 +1524,8 @@ public class DFSClient implements java.io.Closeable {
    */
   public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
       ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout,
-      DataEncryptionKey encryptionKey) throws IOException {
+      DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
+      throws IOException {
     //get all block locations
     LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0,
         Long.MAX_VALUE);
     if (null == blockLocations) {
@@ -1548,9 +1563,11 @@ public class DFSClient implements java.io.Closeable {
         try {
           //connect to a datanode
           sock = socketFactory.createSocket();
-          NetUtils.connect(sock,
-              NetUtils.createSocketAddr(datanodes[j].getXferAddr()),
-              timeout);
+          String dnAddr = datanodes[j].getXferAddr(connectToDnViaHostname);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Connecting to datanode " + dnAddr);
+          }
+          NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
           sock.setSoTimeout(timeout);
 
           OutputStream unbufOut = NetUtils.getOutputStream(sock);
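
The client side is driven entirely by configuration; no public API changes.
A minimal sketch of how an application would opt in, assuming a standard
HDFS client classpath (the key string matches the new DFSConfigKeys entry):

    Configuration conf = new HdfsConfiguration();
    // Dial DNs by their advertised hostname rather than the IP the NN reports.
    conf.setBoolean("dfs.client.use.datanode.hostname", true);
    FileSystem fs = FileSystem.get(conf);
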
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c23005830a3..54a53b5d7c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -52,6 +52,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT = "DEFAULT";
   public static final String DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY = "dfs.client.socketcache.capacity";
   public static final int DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = 16;
+  public static final String DFS_CLIENT_USE_DN_HOSTNAME = "dfs.client.use.datanode.hostname";
+  public static final boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT = false;
 
   // HA related configuration
   public static final String DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX = "dfs.client.failover.proxy.provider";
@@ -81,6 +83,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_DATANODE_SYNC_BEHIND_WRITES_DEFAULT = false;
   public static final String DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY = "dfs.datanode.drop.cache.behind.reads";
   public static final boolean DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT = false;
+  public static final String DFS_DATANODE_USE_DN_HOSTNAME = "dfs.datanode.use.datanode.hostname";
+  public static final boolean DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT = false;
 
   public static final String DFS_NAMENODE_HTTP_PORT_KEY = "dfs.http.port";
   public static final int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 91026bea9a5..ee4dc8918c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -199,7 +199,8 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
       try {
         cdp = DFSUtil.createClientDatanodeProtocolProxy(
-            datanode, dfsClient.conf, dfsClient.getConf().socketTimeout, locatedblock);
+            datanode, dfsClient.conf, dfsClient.getConf().socketTimeout,
+            dfsClient.getConf().connectToDnViaHostname, locatedblock);
 
         final long n = cdp.getReplicaVisibleLength(locatedblock.getBlock());
 
@@ -716,8 +717,12 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
     DatanodeInfo[] nodes = block.getLocations();
     try {
       DatanodeInfo chosenNode = bestNode(nodes, deadNodes);
-      InetSocketAddress targetAddr =
-        NetUtils.createSocketAddr(chosenNode.getXferAddr());
+      final String dnAddr =
+          chosenNode.getXferAddr(dfsClient.connectToDnViaHostname());
+      if (DFSClient.LOG.isDebugEnabled()) {
+        DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
+      }
+      InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
       return new DNAddrPair(chosenNode, targetAddr);
     } catch (IOException ie) {
       String blockInfo = block.getBlock() + " file=" + src;
@@ -875,7 +880,8 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
 
       if (dfsClient.shouldTryShortCircuitRead(dnAddr)) {
         return DFSClient.getLocalBlockReader(dfsClient.conf, src, block,
-            blockToken, chosenNode, dfsClient.hdfsTimeout, startOffset);
+            blockToken, chosenNode, dfsClient.hdfsTimeout, startOffset,
+            dfsClient.connectToDnViaHostname());
       }
 
       IOException err = null;
@@ -1183,7 +1189,7 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
     throw new IOException("No live nodes contain current block");
   }
 
-  /** Utility class to encapsulate data node info and its ip address. */
+  /** Utility class to encapsulate data node info and its address. */
   static class DNAddrPair {
     DatanodeInfo info;
     InetSocketAddress addr;

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 53a13a0db1c..a291955c258 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -1100,7 +1100,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
         if (ie instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
           DFSClient.LOG.info("Will fetch a new encryption key and retry, "
               + "encryption key was invalid when connecting to "
-              + nodes[0].getXferAddr() + " : " + ie);
+              + nodes[0] + " : " + ie);
           // The encryption key used is invalid.
           refetchEncryptionKey--;
           dfsClient.clearDataEncryptionKey();
@@ -1112,7 +1112,8 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
         // find the datanode that matches
         if (firstBadLink.length() != 0) {
           for (int i = 0; i < nodes.length; i++) {
-            if (nodes[i].getXferAddr().equals(firstBadLink)) {
+            // NB: Unconditionally using the xfer addr w/o hostname
+            if (firstBadLink.equals(nodes[i].getXferAddr())) {
               errorIndex = i;
               break;
             }
@@ -1216,11 +1217,11 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
    */
   static Socket createSocketForPipeline(final DatanodeInfo first,
       final int length, final DFSClient client) throws IOException {
-    if(DFSClient.LOG.isDebugEnabled()) {
-      DFSClient.LOG.debug("Connecting to datanode " + first);
+    final String dnAddr = first.getXferAddr(client.connectToDnViaHostname());
+    if (DFSClient.LOG.isDebugEnabled()) {
+      DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
     }
-    final InetSocketAddress isa =
-      NetUtils.createSocketAddr(first.getXferAddr());
+    final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
     final Socket sock = client.socketFactory.createSocket();
     final int timeout = client.getDatanodeReadTimeout(length);
     NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(), timeout);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 54ec79d051a..20a5551253a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -847,17 +847,17 @@ public class DFSUtil {
   /** Create a {@link ClientDatanodeProtocol} proxy */
   public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
       DatanodeID datanodeid, Configuration conf, int socketTimeout,
-      LocatedBlock locatedBlock) throws IOException {
+      boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
     return new ClientDatanodeProtocolTranslatorPB(datanodeid, conf, socketTimeout,
-        locatedBlock);
+        connectToDnViaHostname, locatedBlock);
   }
 
   /** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
   static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
-      DatanodeID datanodeid, Configuration conf, int socketTimeout)
-      throws IOException {
+      DatanodeID datanodeid, Configuration conf, int socketTimeout,
+      boolean connectToDnViaHostname) throws IOException {
     return new ClientDatanodeProtocolTranslatorPB(
-        datanodeid, conf, socketTimeout);
+        datanodeid, conf, socketTimeout, connectToDnViaHostname);
   }
 
   /** Create a {@link ClientDatanodeProtocol} proxy */

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index 4ed9f5666ac..1a923153402 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -104,7 +104,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
   /**
    * @return IP:ipcPort string
    */
-  public String getIpcAddr() {
+  private String getIpcAddr() {
     return ipAddr + ":" + ipcPort;
   }
 
@@ -122,6 +122,29 @@ public class DatanodeID implements Comparable<DatanodeID> {
     return hostName + ":" + xferPort;
   }
 
+  /**
+   * @return hostname:ipcPort
+   */
+  private String getIpcAddrWithHostname() {
+    return hostName + ":" + ipcPort;
+  }
+
+  /**
+   * @param useHostname true to use the DN hostname, use the IP otherwise
+   * @return name:xferPort
+   */
+  public String getXferAddr(boolean useHostname) {
+    return useHostname ? getXferAddrWithHostname() : getXferAddr();
+  }
+
+  /**
+   * @param useHostname true to use the DN hostname, use the IP otherwise
+   * @return name:ipcPort
+   */
+  public String getIpcAddr(boolean useHostname) {
+    return useHostname ? getIpcAddrWithHostname() : getIpcAddr();
+  }
+
   /**
    * @return data storage ID.
    */
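
The DatanodeID change is the heart of the patch: the zero-argument getters
keep their old IP-based behavior, and the boolean-selecting overloads let each
call site pick the address form in one place. A small illustration (the
hostname, IP, and ports below are made-up values, not from this patch):

    // Given a DatanodeID dn advertising hostName "dn1.example.com",
    // ipAddr "10.0.0.1", xferPort 50010, and ipcPort 50020:
    //   dn.getXferAddr(false) -> "10.0.0.1:50010"        (old behavior)
    //   dn.getXferAddr(true)  -> "dn1.example.com:50010"
    //   dn.getIpcAddr(true)   -> "dn1.example.com:50020"
    static String dialAddress(DatanodeID dn, boolean useHostname) {
      return dn.getXferAddr(useHostname);
    }
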
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index d28dbff10b0..1f6c9b113ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -73,10 +73,10 @@ public class ClientDatanodeProtocolTranslatorPB implements
       RefreshNamenodesRequestProto.newBuilder().build();
 
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
-      Configuration conf, int socketTimeout, LocatedBlock locatedBlock)
-      throws IOException {
+      Configuration conf, int socketTimeout, boolean connectToDnViaHostname,
+      LocatedBlock locatedBlock) throws IOException {
     rpcProxy = createClientDatanodeProtocolProxy( datanodeid, conf,
-        socketTimeout, locatedBlock);
+        socketTimeout, connectToDnViaHostname, locatedBlock);
   }
 
   public ClientDatanodeProtocolTranslatorPB(InetSocketAddress addr,
@@ -90,11 +90,17 @@ public class ClientDatanodeProtocolTranslatorPB implements
    * @param datanodeid Datanode to connect to.
    * @param conf Configuration.
    * @param socketTimeout Socket timeout to use.
+   * @param connectToDnViaHostname connect to the Datanode using its hostname
    * @throws IOException
    */
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
-      Configuration conf, int socketTimeout) throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr());
+      Configuration conf, int socketTimeout, boolean connectToDnViaHostname)
+      throws IOException {
+    final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
+    InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
+    }
     rpcProxy = createClientDatanodeProtocolProxy(addr,
         UserGroupInformation.getCurrentUser(), conf,
         NetUtils.getDefaultSocketFactory(conf), socketTimeout);
@@ -102,10 +108,11 @@ public class ClientDatanodeProtocolTranslatorPB implements
 
   static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
       DatanodeID datanodeid, Configuration conf, int socketTimeout,
-      LocatedBlock locatedBlock) throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr());
+      boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
+    final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
+    InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
     if (LOG.isDebugEnabled()) {
-      LOG.debug("ClientDatanodeProtocol addr=" + addr);
+      LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
     }
 
     // Since we're creating a new UserGroupInformation here, we know that no

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index 3f37a7eea38..d4b0ffd1f85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -55,7 +55,7 @@ class DNConf {
   final boolean dropCacheBehindReads;
   final boolean syncOnClose;
   final boolean encryptDataTransfer;
-
+  final boolean connectToDnViaHostname;
 
   final long readaheadLength;
   final long heartBeatInterval;
@@ -97,7 +97,9 @@ class DNConf {
     dropCacheBehindReads = conf.getBoolean(
         DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY,
         DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT);
-
+    connectToDnViaHostname = conf.getBoolean(
+        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
+        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
 
     this.blockReportInterval = conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
         DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 60e8a9ec7ee..fbdae956f4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -279,6 +279,7 @@ public class DataNode extends Configured
   private Configuration conf;
 
   private final String userWithLocalPathAccess;
+  private boolean connectToDnViaHostname;
   ReadaheadPool readaheadPool;
 
   /**
@@ -299,8 +300,11 @@ public class DataNode extends Configured
            final SecureResources resources) throws IOException {
     super(conf);
 
-    this.userWithLocalPathAccess = conf
-        .get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
+    this.userWithLocalPathAccess =
+        conf.get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
+    this.connectToDnViaHostname = conf.getBoolean(
+        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
+        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
     try {
       hostName = getHostName(conf);
       LOG.info("Configured hostname is " + hostName);
@@ -882,7 +886,7 @@ public class DataNode extends Configured
   /**
    * NB: The datanode can perform data transfer on the streaming
    * address however clients are given the IPC IP address for data
-   * transfer, and that may be be a different address.
+   * transfer, and that may be a different address.
    *
    * @return socket address for data transfer
    */
@@ -929,12 +933,12 @@ public class DataNode extends Configured
   }
 
   public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
-      DatanodeID datanodeid, final Configuration conf, final int socketTimeout)
-      throws IOException {
-    final InetSocketAddress addr =
-      NetUtils.createSocketAddr(datanodeid.getIpcAddr());
-    if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
-      InterDatanodeProtocol.LOG.debug("InterDatanodeProtocol addr=" + addr);
+      DatanodeID datanodeid, final Configuration conf, final int socketTimeout,
+      final boolean connectToDnViaHostname) throws IOException {
+    final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
+    final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
     }
     final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
     try {
@@ -1390,8 +1394,11 @@ public class DataNode extends Configured
     final boolean isClient = clientname.length() > 0;
 
     try {
-      InetSocketAddress curTarget =
-        NetUtils.createSocketAddr(targets[0].getXferAddr());
+      final String dnAddr = targets[0].getXferAddr(connectToDnViaHostname);
+      InetSocketAddress curTarget = NetUtils.createSocketAddr(dnAddr);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Connecting to datanode " + dnAddr);
+      }
       sock = newSocket();
       NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
       sock.setSoTimeout(targets.length * dnConf.socketTimeout);
@@ -1847,7 +1854,7 @@ public class DataNode extends Configured
       DatanodeRegistration bpReg = bpos.bpRegistration;
       InterDatanodeProtocol datanode = bpReg.equals(id)?
          this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
-             dnConf.socketTimeout);
+             dnConf.socketTimeout, dnConf.connectToDnViaHostname);
       ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
       if (info != null &&
           info.getGenerationStamp() >= block.getGenerationStamp() &&

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 8547df94873..d0c5aaff892 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -86,7 +86,7 @@ class DataXceiver extends Receiver implements Runnable {
   private final DataNode datanode;
   private final DNConf dnConf;
   private final DataXceiverServer dataXceiverServer;
-
+  private final boolean connectToDnViaHostname;
   private long opStartTime;  //the start time of receiving an Op
   private final SocketInputWrapper socketIn;
   private OutputStream socketOut;
@@ -113,6 +113,7 @@ class DataXceiver extends Receiver implements Runnable {
     this.isLocal = s.getInetAddress().equals(s.getLocalAddress());
     this.datanode = datanode;
     this.dataXceiverServer = dataXceiverServer;
+    this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname;
     remoteAddress = s.getRemoteSocketAddress().toString();
     localAddress = s.getLocalSocketAddress().toString();
 
@@ -404,7 +405,10 @@ class DataXceiver extends Receiver implements Runnable {
       if (targets.length > 0) {
         InetSocketAddress mirrorTarget = null;
         // Connect to backup machine
-        mirrorNode = targets[0].getXferAddr();
+        mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Connecting to datanode " + mirrorNode);
+        }
         mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
         mirrorSock = datanode.newSocket();
         try {
@@ -457,7 +461,8 @@ class DataXceiver extends Receiver implements Runnable {
           if (isClient) {
             BlockOpResponseProto.newBuilder()
               .setStatus(ERROR)
-              .setFirstBadLink(mirrorNode)
+              // NB: Unconditionally using the xfer addr w/o hostname
+              .setFirstBadLink(targets[0].getXferAddr())
               .build()
               .writeDelimitedTo(replyOut);
             replyOut.flush();
@@ -729,8 +734,11 @@ class DataXceiver extends Receiver implements Runnable {
 
     try {
       // get the output stream to the proxy
-      InetSocketAddress proxyAddr =
-        NetUtils.createSocketAddr(proxySource.getXferAddr());
+      final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Connecting to datanode " + dnAddr);
+      }
+      InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
       proxySock = datanode.newSocket();
       NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
       proxySock.setSoTimeout(dnConf.socketTimeout);
@@ -891,6 +899,7 @@ class DataXceiver extends Receiver implements Runnable {
     if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
       DatanodeRegistration dnR =
         datanode.getDNRegistrationForBP(blk.getBlockPoolId());
+      // NB: Unconditionally using the xfer addr w/o hostname
       resp.setFirstBadLink(dnR.getXferAddr());
     }
     resp.build().writeDelimitedTo(out);
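
On the server side the flag is read once into DNConf and cached per
DataXceiver, while the two setFirstBadLink() sites deliberately keep the plain
IP-based xfer address so the value reported back to clients is stable
regardless of the setting. A DN opts in through its own configuration; a
minimal sketch (test-style, in-process setup rather than hdfs-site.xml):

    Configuration dnConf = new HdfsConfiguration();
    // DN-to-DN connections (pipeline mirrors, block moves, recovery)
    // dial the peer's advertised hostname instead of its IP.
    dnConf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, true);
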
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
index 39f9094e684..fd3b439355a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
@@ -127,7 +127,7 @@ public class FileChecksumServlets {
           datanode, conf, getUGI(request, conf));
       final ClientProtocol nnproxy = dfs.getNamenode();
       final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
-          path, nnproxy, socketFactory, socketTimeout, dfs.getDataEncryptionKey());
+          path, nnproxy, socketFactory, socketTimeout, dfs.getDataEncryptionKey(), false);
       MD5MD5CRC32FileChecksum.write(xml, checksum);
     } catch(IOException ioe) {
       writeXml(ioe, path, xml);

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 95e2f8c0814..16510168445 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -53,7 +53,7 @@
 <property>
   <name>dfs.datanode.address</name>
   <value>0.0.0.0:50010</value>
-  <description>The address where the datanode server will listen to.
+  <description>The datanode server address and port for data transfer.
   If the port is 0 then the server will start on a free port.
   </description>
 </property>
@@ -920,6 +920,22 @@
 </property>
 
+<property>
+  <name>dfs.client.use.datanode.hostname</name>
+  <value>false</value>
+  <description>Whether clients should use datanode hostnames when
+    connecting to datanodes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.use.datanode.hostname</name>
+  <value>false</value>
+  <description>Whether datanodes should use datanode hostnames when
+    connecting to other datanodes for data transfer.
+  </description>
+</property>
+
 <property>
   <name>dfs.client.local.interfaces</name>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index af33b070991..6dc4a8aa9a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -144,6 +144,7 @@ public class MiniDFSCluster {
     private boolean setupHostsFile = false;
     private MiniDFSNNTopology nnTopology = null;
     private boolean checkExitOnShutdown = true;
+    private boolean checkDataNodeHostConfig = false;
 
     public Builder(Configuration conf) {
       this.conf = conf;
@@ -253,6 +254,14 @@ public class MiniDFSCluster {
       return this;
     }
 
+    /**
+     * Default: false
+     */
+    public Builder checkDataNodeHostConfig(boolean val) {
+      this.checkDataNodeHostConfig = val;
+      return this;
+    }
+
     /**
      * Default: null
      */
@@ -316,7 +325,8 @@ public class MiniDFSCluster {
                        builder.waitSafeMode,
                        builder.setupHostsFile,
                        builder.nnTopology,
-                       builder.checkExitOnShutdown);
+                       builder.checkExitOnShutdown,
+                       builder.checkDataNodeHostConfig);
   }
 
   public class DataNodeProperties {
@@ -550,7 +560,7 @@ public class MiniDFSCluster {
     initMiniDFSCluster(conf, numDataNodes, format,
         manageNameDfsDirs, true, manageDataDfsDirs, operation, racks, hosts,
         simulatedCapacities, null, true, false,
-        MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true);
+        MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false);
   }
 
   private void initMiniDFSCluster(
@@ -560,7 +570,8 @@ public class MiniDFSCluster {
       StartupOption operation, String[] racks,
       String[] hosts, long[] simulatedCapacities, String clusterId,
      boolean waitSafeMode, boolean setupHostsFile,
-      MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown)
+      MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
+      boolean checkDataNodeHostConfig)
   throws IOException {
     ExitUtil.disableSystemExit();
 
@@ -616,7 +627,7 @@ public class MiniDFSCluster {
 
     // Start the DataNodes
     startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks,
-        hosts, simulatedCapacities, setupHostsFile);
+        hosts, simulatedCapacities, setupHostsFile, false, checkDataNodeHostConfig);
     waitClusterUp();
     //make sure ProxyUsers uses the latest conf
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@@ -957,7 +968,21 @@ public class MiniDFSCluster {
                              long[] simulatedCapacities,
                              boolean setupHostsFile) throws IOException {
     startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts,
-        simulatedCapacities, setupHostsFile, false);
+        simulatedCapacities, setupHostsFile, false, false);
+  }
+
+  /**
+   * @see MiniDFSCluster#startDataNodes(Configuration, int, boolean, StartupOption,
+   * String[], String[], long[], boolean, boolean, boolean)
+   */
+  public synchronized void startDataNodes(Configuration conf, int numDataNodes,
+      boolean manageDfsDirs, StartupOption operation,
+      String[] racks, String[] hosts,
+      long[] simulatedCapacities,
+      boolean setupHostsFile,
+      boolean checkDataNodeAddrConfig) throws IOException {
+    startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts,
+        simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false);
   }
 
   /**
@@ -983,19 +1008,25 @@ public class MiniDFSCluster {
    * @param simulatedCapacities array of capacities of the simulated data nodes
    * @param setupHostsFile add new nodes to dfs hosts files
    * @param checkDataNodeAddrConfig if true, only set DataNode port addresses if not already set in config
+   * @param checkDataNodeHostConfig if true, only set DataNode hostname key if not already set in config
    *
    * @throws IllegalStateException if NameNode has been shutdown
    */
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile,
-      boolean checkDataNodeAddrConfig) throws IOException {
+      boolean checkDataNodeAddrConfig,
+      boolean checkDataNodeHostConfig) throws IOException {
     if (operation == StartupOption.RECOVER) {
       return;
     }
-    conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
+    if (checkDataNodeHostConfig) {
+      conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
+    } else {
+      conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
+    }
 
     int curDatanodesNum = dataNodes.size();
     // for mincluster's the default initialDelay for BRs is 0
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 98c78ff98db..c16beed4576 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -763,7 +763,7 @@ public class TestDFSClientRetries {
 
     try {
       proxy = DFSUtil.createClientDatanodeProtocolProxy(
-          fakeDnId, conf, 500, fakeBlock);
+          fakeDnId, conf, 500, false, fakeBlock);
 
       proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
       fail ("Did not get expected exception: SocketTimeoutException");

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 2768b0c5d4e..825348d11cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -417,7 +417,6 @@ public class TestDistributedFileSystem {
     final Configuration conf = getTestConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
-    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
 
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem hdfs = cluster.getFileSystem();

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index dbf22b4392f..9391c00d658 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -171,7 +171,14 @@ public class TestFileCreation {
 
   @Test
   public void testFileCreation() throws IOException {
-    checkFileCreation(null);
+    checkFileCreation(null, false);
+  }
+
+  /** Same test but the client should use DN hostnames */
+  @Test
+  public void testFileCreationUsingHostname() throws IOException {
+    assumeTrue(System.getProperty("os.name").startsWith("Linux"));
+    checkFileCreation(null, true);
   }
 
   /** Same test but the client should bind to a local interface */
@@ -180,10 +187,10 @@ public class TestFileCreation {
     assumeTrue(System.getProperty("os.name").startsWith("Linux"));
 
     // The mini cluster listens on the loopback so we can use it here
-    checkFileCreation("lo");
+    checkFileCreation("lo", false);
 
     try {
-      checkFileCreation("bogus-interface");
+      checkFileCreation("bogus-interface", false);
       fail("Able to specify a bogus interface");
     } catch (UnknownHostException e) {
       assertEquals("No such interface bogus-interface", e.getMessage());
@@ -193,16 +200,28 @@ public class TestFileCreation {
   /**
    * Test if file creation and disk space consumption works right
    * @param netIf the local interface, if any, clients should use to access DNs
+   * @param useDnHostname whether the client should contact DNs by hostname
   */
-  public void checkFileCreation(String netIf) throws IOException {
+  public void checkFileCreation(String netIf, boolean useDnHostname)
+      throws IOException {
     Configuration conf = new HdfsConfiguration();
     if (netIf != null) {
       conf.set(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
     }
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
+    if (useDnHostname) {
+      // Since the mini cluster only listens on the loopback we have to
+      // ensure the hostname used to access DNs maps to the loopback. We
+      // do this by telling the DN to advertise localhost as its hostname
+      // instead of the default hostname.
+      conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
+    }
     if (simulatedStorage) {
       SimulatedFSDataset.setFactory(conf);
     }
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+      .checkDataNodeHostConfig(true)
+      .build();
     FileSystem fs = cluster.getFileSystem();
     try {

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
index 5e20d46e2c5..3fa5eaad0a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
@@ -92,7 +92,6 @@ public class TestHftpFileSystem {
     RAN.setSeed(seed);
 
     config = new Configuration();
-    config.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
 
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build();
     hdfs = cluster.getFileSystem();
     blockPoolId = cluster.getNamesystem().getBlockPoolId();

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index a7fd82aea0b..10cab0755d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
 
@@ -41,6 +42,7 @@ public class TestMiniDFSCluster {
   private static final String CLUSTER_2 = "cluster2";
   private static final String CLUSTER_3 = "cluster3";
   private static final String CLUSTER_4 = "cluster4";
+  private static final String CLUSTER_5 = "cluster5";
   protected String testDataPath;
   protected File testDataDir;
   @Before
@@ -125,4 +127,25 @@ public class TestMiniDFSCluster {
       }
     }
   }
+
+  /** MiniDFSCluster should not clobber dfs.datanode.hostname if requested */
+  @Test(timeout=100000)
+  public void testClusterSetDatanodeHostname() throws Throwable {
+    assumeTrue(System.getProperty("os.name").startsWith("Linux"));
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
+    File testDataCluster5 = new File(testDataPath, CLUSTER_5);
+    String c5Path = testDataCluster5.getAbsolutePath();
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
+    MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
+      .numDataNodes(1)
+      .checkDataNodeHostConfig(true)
+      .build();
+    try {
+      Assert.assertEquals("DataNode hostname config not respected", "MYHOST",
+          cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
+    } finally {
+      MiniDFSCluster.shutdownCluster(cluster5);
+    }
+  }
 }
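
The test pattern is the same in each suite: advertise "localhost" as the DN
hostname, so the hostname path still resolves to the loopback the mini
cluster binds, and ask the builder not to clobber that key. Condensed into
one sketch (a distillation of the pattern above, not a new test):

    Configuration conf = new HdfsConfiguration();
    // Client dials DNs by hostname; the DN advertises "localhost" so the
    // hostname resolves to the loopback the mini cluster listens on.
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, true);
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .checkDataNodeHostConfig(true)   // honor the hostname set above
        .build();
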
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
index 0710a6ccc94..5fed40cea41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
@@ -246,7 +246,7 @@ public class TestShortCircuitLocalRead {
       @Override
       public ClientDatanodeProtocol run() throws Exception {
         return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf,
-            60000);
+            60000, false);
       }
     });
 
@@ -264,7 +264,7 @@ public class TestShortCircuitLocalRead {
       @Override
       public ClientDatanodeProtocol run() throws Exception {
         return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf,
-            60000);
+            60000, false);
       }
     });
     try {

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index 4b9f2f4faaa..a9b55621887 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -304,7 +304,7 @@ public class TestBlockToken {
     long endTime = Time.now() + 3000;
     while (Time.now() < endTime) {
       proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
-          fakeBlock);
+          false, fakeBlock);
       assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
       if (proxy != null) {
         RPC.stopProxy(proxy);

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 2f9ed12099e..55b4bf57db6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -105,10 +105,13 @@ public class DataNodeTestUtils {
   }
 
   public static InterDatanodeProtocol createInterDatanodeProtocolProxy(
-      DataNode dn, DatanodeID datanodeid, final Configuration conf
-      ) throws IOException {
+      DataNode dn, DatanodeID datanodeid, final Configuration conf,
+      boolean connectToDnViaHostname) throws IOException {
+    if (connectToDnViaHostname != dn.getDnConf().connectToDnViaHostname) {
+      throw new AssertionError("Unexpected DN hostname configuration");
+    }
     return DataNode.createInterDataNodeProtocolProxy(datanodeid, conf,
-        dn.getDnConf().socketTimeout);
+        dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
   }
 
   public static void shutdownBlockScanner(DataNode dn) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
index 8ff6cb88866..b1733efa9f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
@@ -29,6 +29,7 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClientAdapter;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -59,6 +60,8 @@ import org.apache.hadoop.net.NetUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
+import static org.junit.Assume.assumeTrue;
+
 /**
  * This tests InterDataNodeProtocol for block handling.
  */
@@ -125,17 +128,42 @@ public class TestInterDatanodeProtocol {
     return blocks.get(blocks.size() - 1);
   }
 
+  /** Test block MD access via a DN */
+  @Test
+  public void testBlockMetaDataInfo() throws Exception {
+    checkBlockMetaDataInfo(false);
+  }
+
+  /** The same as above, but use hostnames for DN<->DN communication */
+  @Test
+  public void testBlockMetaDataInfoWithHostname() throws Exception {
+    assumeTrue(System.getProperty("os.name").startsWith("Linux"));
+    checkBlockMetaDataInfo(true);
+  }
+
   /**
    * The following test first creates a file.
    * It verifies the block information from a datanode.
-   * Then, it updates the block with new information and verifies again.
+   * Then, it updates the block with new information and verifies again.
+   * @param useDnHostname whether DNs should connect to other DNs by hostname
    */
-  @Test
-  public void testBlockMetaDataInfo() throws Exception {
+  private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {
     MiniDFSCluster cluster = null;
 
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
+    if (useDnHostname) {
+      // Since the mini cluster only listens on the loopback we have to
+      // ensure the hostname used to access DNs maps to the loopback. We
+      // do this by telling the DN to advertise localhost as its hostname
+      // instead of the default hostname.
+      conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
+    }
+
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3)
+        .checkDataNodeHostConfig(true)
+        .build();
       cluster.waitActive();
 
       //create a file
@@ -154,7 +182,7 @@ public class TestInterDatanodeProtocol {
       //connect to a data node
       DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
       InterDatanodeProtocol idp = DataNodeTestUtils.createInterDatanodeProtocolProxy(
-          datanode, datanodeinfo[0], conf);
+          datanode, datanodeinfo[0], conf, useDnHostname);
 
       //stop block scanner, so we could compare lastScanTime
       DataNodeTestUtils.shutdownBlockScanner(datanode);
@@ -364,7 +392,7 @@ public class TestInterDatanodeProtocol {
 
     try {
       proxy = DataNode.createInterDataNodeProtocolProxy(
-          dInfo, conf, 500);
+          dInfo, conf, 500, false);
       proxy.initReplicaRecovery(new RecoveringBlock(
           new ExtendedBlock("bpid", 1), null, 100));
       fail ("Expected SocketTimeoutException exception, but did not get.");
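
For completeness, the inter-datanode RPC path used during block recovery gets
the same treatment. A sketch of opening such a proxy with the new signature
(dnId and timeout are illustrative; a real caller inside the datanode package
would pass its DNConf values, as the DataNode.java hunk above does):

    // Open an InterDatanodeProtocol proxy to a peer DN, optionally by hostname.
    InterDatanodeProtocol proxy = DataNode.createInterDataNodeProtocolProxy(
        dnId, conf, socketTimeout, /* connectToDnViaHostname */ true);
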