diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
index 7c67b9887ca..c87faa70d34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
@@ -208,3 +208,5 @@ HDFS-2909. HA: Inaccessible shared edits dir not getting removed from FSImage st
 HDFS-2934. Allow configs to be scoped to all NNs in the nameservice. (todd)
 
 HDFS-2935. Shared edits dir property should be suffixed with nameservice and namenodeID (todd)
+
+HDFS-2928. ConfiguredFailoverProxyProvider should not create a NameNode proxy with an underlying retry proxy. (Uma Maheswara Rao G via atm)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 1c7afd40bac..52b1eb9f35e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -42,13 +42,17 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -57,6 +61,7 @@ import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
@@ -807,23 +812,16 @@ public class DFSUtil {
   /** Create a {@link NameNode} proxy */
   public static ClientProtocol createNamenode(InetSocketAddress nameNodeAddr,
       Configuration conf) throws IOException {
-    return createNamenode(nameNodeAddr, conf, 
+    return createNamenode(nameNodeAddr, conf,
         UserGroupInformation.getCurrentUser());
   }
 
   /** Create a {@link NameNode} proxy */
-  public static ClientProtocol createNamenode( InetSocketAddress nameNodeAddr,
+  public static ClientProtocol createNamenode(InetSocketAddress nameNodeAddr,
       Configuration conf, UserGroupInformation ugi) throws IOException {
-    /**
-     * Currently we have simply burnt-in support for a SINGLE
-     * protocol - protocolPB. This will be replaced
-     * by a way to pick the right protocol based on the
-     * version of the target server.
-     */
-    return new org.apache.hadoop.hdfs.protocolPB.
-        ClientNamenodeProtocolTranslatorPB(nameNodeAddr, conf, ugi);
+    return createNNProxyWithClientProtocol(nameNodeAddr, conf, ugi, true);
   }
-  
+
   /** Create a {@link ClientDatanodeProtocol} proxy */
   public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
       DatanodeID datanodeid, Configuration conf, int socketTimeout,
@@ -846,31 +844,115 @@ public class DFSUtil {
       SocketFactory factory) throws IOException {
     return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
   }
-  
+
   /**
-   * Build a NamenodeProtocol connection to the namenode and set up the retry
-   * policy
+   * Build a proxy connection to the namenode with NamenodeProtocol and set up
+   * the proxy with retry policy.
+   * @param address - namenode address
+   * @param conf - configuration
+   * @param ugi - User group information
+   * @return a proxy connection with NamenodeProtocol
+   * @throws IOException
    */
   public static NamenodeProtocolTranslatorPB createNNProxyWithNamenodeProtocol(
       InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
       throws IOException {
-    RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
-        TimeUnit.MILLISECONDS);
-    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
-        = new HashMap<Class<? extends Exception>, RetryPolicy>();
-    RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy,
-        exceptionToPolicyMap);
-    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
-    methodNameToPolicyMap.put("getBlocks", methodPolicy);
-    methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
-    RPC.setProtocolEngine(conf, NamenodeProtocolPB.class,
-        ProtobufRpcEngine.class);
-    NamenodeProtocolPB proxy = RPC.getProxy(NamenodeProtocolPB.class, RPC
-        .getProtocolVersion(NamenodeProtocolPB.class), address, ugi, conf,
-        NetUtils.getDefaultSocketFactory(conf));
-    NamenodeProtocolPB retryProxy = (NamenodeProtocolPB) RetryProxy.create(
-        NamenodeProtocolPB.class, proxy, methodNameToPolicyMap);
-    return new NamenodeProtocolTranslatorPB(retryProxy);
+    return createNNProxyWithNamenodeProtocol(address, conf, ugi, true);
+  }
+
+  /**
+   * Build a proxy connection to the namenode with NamenodeProtocol.
+   * @param address - namenode address
+   * @param conf - configuration
+   * @param ugi - User group information
+   * @param withRetries - indicates whether to create retry proxy or not
+   * @return a proxy connection with NamenodeProtocol
+   * @throws IOException
+   */
+  public static NamenodeProtocolTranslatorPB createNNProxyWithNamenodeProtocol(
+      InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
+      boolean withRetries) throws IOException {
+    NamenodeProtocolPB proxy = (NamenodeProtocolPB) createNameNodeProxy(
+        address, conf, ugi, NamenodeProtocolPB.class);
+    if (withRetries) { // create the proxy with retries
+      RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
+          TimeUnit.MILLISECONDS);
+      Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
+          = new HashMap<Class<? extends Exception>, RetryPolicy>();
+      RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy,
+          exceptionToPolicyMap);
+      Map<String, RetryPolicy> methodNameToPolicyMap
+          = new HashMap<String, RetryPolicy>();
+      methodNameToPolicyMap.put("getBlocks", methodPolicy);
+      methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
+      proxy = (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
+          proxy, methodNameToPolicyMap);
+    }
+    return new NamenodeProtocolTranslatorPB(proxy);
+  }
+
+  /**
+   * Build a proxy connection to the namenode with ClientProtocol.
+   * @param address - namenode address
+   * @param conf - configuration
+   * @param ugi - User group information
+   * @param withRetries - indicates whether to create retry proxy or not
+   * @return a proxy connection with ClientProtocol
+   * @throws IOException
+   */
+  public static ClientNamenodeProtocolTranslatorPB createNNProxyWithClientProtocol(
+      InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
+      boolean withRetries) throws IOException {
+    ClientNamenodeProtocolPB proxy
+        = (ClientNamenodeProtocolPB) createNameNodeProxy(address, conf, ugi,
+            ClientNamenodeProtocolPB.class);
+    if (withRetries) { // create the proxy with retries
+      proxy = createNameNodeProxyWithRetries(proxy);
+    }
+    return new ClientNamenodeProtocolTranslatorPB(proxy);
+  }
+
+  /**
+   * Creates the retry proxy by setting up the retry policy.
+   * @param proxy - non retry proxy connection
+   * @return a retry proxy connection
+   */
+  public static ClientNamenodeProtocolPB createNameNodeProxyWithRetries(
+      ClientNamenodeProtocolPB proxy) {
+    RetryPolicy createPolicy = RetryPolicies
+        .retryUpToMaximumCountWithFixedSleep(5,
+            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
+
+    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap
+        = new HashMap<Class<? extends Exception>, RetryPolicy>();
+    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
+        createPolicy);
+
+    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
+        = new HashMap<Class<? extends Exception>, RetryPolicy>();
+    exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
+        .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
+            remoteExceptionToPolicyMap));
+    RetryPolicy methodPolicy = RetryPolicies.retryByException(
+        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
+    Map<String, RetryPolicy> methodNameToPolicyMap
+        = new HashMap<String, RetryPolicy>();
+
+    methodNameToPolicyMap.put("create", methodPolicy);
+
+    ClientNamenodeProtocolPB retryProxy = (ClientNamenodeProtocolPB) RetryProxy
+        .create(ClientNamenodeProtocolPB.class, proxy, methodNameToPolicyMap);
+    return retryProxy;
+  }
+
+  @SuppressWarnings("unchecked")
+  private static Object createNameNodeProxy(InetSocketAddress address,
+      Configuration conf, UserGroupInformation ugi, Class xface)
+      throws IOException {
+    RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class);
+    Object proxy = RPC.getProxy(xface, RPC.getProtocolVersion(xface), address,
+        ugi, conf, NetUtils.getDefaultSocketFactory(conf));
+    return proxy;
   }
 
   /**
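For context, a minimal usage sketch of the DFSUtil entry points added above (not part of the patch; the host name, port, and wrapper class below are illustrative assumptions):

// Usage sketch: choosing between a bare ClientProtocol proxy and one wrapped
// with the method-level retry policy. The address and class name are made up.
import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.security.UserGroupInformation;

public class NNProxyUsageSketch {
  public static ClientProtocol newClientProxy(boolean underFailoverProvider)
      throws IOException {
    Configuration conf = new Configuration();
    InetSocketAddress nnAddr = new InetSocketAddress("nn.example.com", 8020);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // A failover proxy provider supplies its own retry/failover handling, so
    // it asks for a proxy without the underlying retry layer; ordinary
    // callers keep the retry behaviour for create().
    boolean withRetries = !underFailoverProvider;
    return DFSUtil.createNNProxyWithClientProtocol(nnAddr, conf, ugi,
        withRetries);
  }
}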
+   * @param address - namenode address
+   * @param conf - configuration
+   * @param ugi - User group information
+   * @param withRetries - indicates whether to create retry proxy or not
+   * @return a proxy connection with ClientProtocol
+   * @throws IOException
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index dd3dc723a65..c6dc3e3a2bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -22,9 +22,6 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -37,6 +34,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -55,28 +53,22 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
@@ -143,49 +135,29 @@ public class ClientNamenodeProtocolTranslatorPB implements
     ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator {
   final private ClientNamenodeProtocolPB rpcProxy;
 
-  private static ClientNamenodeProtocolPB createNamenode(
+  public ClientNamenodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
+      Configuration conf, UserGroupInformation ugi) throws IOException {
+    this(getNamenodeRetryProxy(nameNodeAddr, conf, ugi));
+  }
+
+  public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy)
+      throws IOException {
+    rpcProxy = proxy;
+  }
+
+  private static ClientNamenodeProtocolPB getNamenodeRetryProxy(
       InetSocketAddress nameNodeAddr, Configuration conf,
       UserGroupInformation ugi) throws IOException {
     RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
         ProtobufRpcEngine.class);
-    return RPC.getProxy(ClientNamenodeProtocolPB.class,
-        RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), nameNodeAddr, ugi, conf,
-        NetUtils.getSocketFactory(conf, ClientNamenodeProtocolPB.class));
+    ClientNamenodeProtocolPB proxy = RPC.getProxy(
+        ClientNamenodeProtocolPB.class, RPC
+            .getProtocolVersion(ClientNamenodeProtocolPB.class), nameNodeAddr,
+        ugi, conf, NetUtils.getSocketFactory(conf,
+            ClientNamenodeProtocolPB.class));
+    return DFSUtil.createNameNodeProxyWithRetries(proxy);
   }
-
-  /** Create a {@link NameNode} proxy */
-  static ClientNamenodeProtocolPB createNamenodeWithRetry(
-      ClientNamenodeProtocolPB rpcNamenode) {
-    RetryPolicy createPolicy = RetryPolicies
-        .retryUpToMaximumCountWithFixedSleep(5,
-            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-
-    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap
-        = new HashMap<Class<? extends Exception>, RetryPolicy>();
-    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
-        createPolicy);
-
-    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
-        new HashMap<Class<? extends Exception>, RetryPolicy>();
-    exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
-        .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
-            remoteExceptionToPolicyMap));
-    RetryPolicy methodPolicy = RetryPolicies.retryByException(
-        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
-    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
-
-    methodNameToPolicyMap.put("create", methodPolicy);
-
-    return (ClientNamenodeProtocolPB) RetryProxy.create(
-        ClientNamenodeProtocolPB.class, rpcNamenode, methodNameToPolicyMap);
-  }
-
-  public ClientNamenodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
-      Configuration conf, UserGroupInformation ugi) throws IOException {
-
-    rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
-  }
-  
+  
   public void close() {
     RPC.stopProxy(rpcProxy);
   }
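The retry wiring removed above now lives in DFSUtil.createNameNodeProxyWithRetries. As a stand-alone illustration of the RetryProxy/RetryPolicies pattern it uses (not part of the patch; the KeyValueStore interface and its retry settings are invented for the example):

// Sketch of the retry-wrapping pattern: a per-method policy map handed to
// RetryProxy.create, mirroring how "create" is wrapped for the NameNode proxy.
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryWrapSketch {
  interface KeyValueStore {
    String get(String key) throws IOException;
  }

  static KeyValueStore withRetries(KeyValueStore plain) {
    // Retry get() on IOException up to 5 times, 200 ms apart; all other
    // methods keep the default try-once-then-fail behaviour.
    RetryPolicy onIoError = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        5, 200, TimeUnit.MILLISECONDS);
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
        new HashMap<Class<? extends Exception>, RetryPolicy>();
    exceptionToPolicyMap.put(IOException.class, onIoError);
    RetryPolicy methodPolicy = RetryPolicies.retryByException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
    Map<String, RetryPolicy> methodNameToPolicyMap =
        new HashMap<String, RetryPolicy>();
    methodNameToPolicyMap.put("get", methodPolicy);
    return (KeyValueStore) RetryProxy.create(
        KeyValueStore.class, plain, methodNameToPolicyMap);
  }
}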
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index d2d0c00b557..6f6f88f9e8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -27,7 +27,6 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -118,11 +117,10 @@ public class ConfiguredFailoverProxyProvider implements
     try {
       if (NamenodeProtocol.class.equals(xface)) {
         current.namenode = DFSUtil.createNNProxyWithNamenodeProtocol(
-            current.address, conf, ugi);
+            current.address, conf, ugi, false);
       } else if (ClientProtocol.class.equals(xface)) {
-        // TODO(HA): This will create a NN proxy with an underlying retry
-        // proxy. We don't want this.
-        current.namenode = DFSUtil.createNamenode(current.address, conf, ugi);
+        current.namenode = DFSUtil.createNNProxyWithClientProtocol(
+            current.address, conf, ugi, false);
       } else {
         throw new IllegalStateException(
             "Upsupported protocol found when creating the proxy conection to NameNode. "