ConfiguredFailoverProxyProvider should not create a NameNode proxy with an underlying retry proxy. Contributed by Uma Maheswara Rao G.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1244845 13f79535-47bb-0310-9956-ffa450edef68
Commit: 83a922b55e (parent 0663b51ed4)
Author: Aaron Myers
Date: 2012-02-16 03:59:09 +00:00

4 changed files with 137 additions and 83 deletions
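The point of the change: in the HA client, retry and failover decisions are
meant to live in a single retry layer wrapped around the failover proxy
provider. If the provider's underlying NameNode proxy performs its own
retries, a failed call gets retried against the same (possibly dead)
NameNode instead of promptly failing over. A minimal sketch of that outer
wiring, assuming the RetryProxy/RetryPolicies APIs of this branch (the
helper name and policy choice are illustrative, not part of this commit):

    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.io.retry.FailoverProxyProvider;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryProxy;

    class HaClientWiring {
      // The ONE retry layer: on a network error, fail over to the other
      // NameNode (up to maxFailovers times); otherwise fail immediately.
      static ClientProtocol wireHaClientProxy(
          FailoverProxyProvider provider, int maxFailovers) {
        return (ClientProtocol) RetryProxy.create(ClientProtocol.class,
            provider,
            RetryPolicies.failoverOnNetworkException(
                RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailovers));
      }
    }

The proxies the provider hands back underneath (built in DFSUtil below)
must therefore be plain, non-retrying proxies, hence the new
withRetries=false path in the diffs that follow.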

File: CHANGES.HDFS-1623.txt

@@ -208,3 +208,5 @@ HDFS-2909. HA: Inaccessible shared edits dir not getting removed from FSImage st
 HDFS-2934. Allow configs to be scoped to all NNs in the nameservice. (todd)
 HDFS-2935. Shared edits dir property should be suffixed with nameservice and namenodeID (todd)
+
+HDFS-2928. ConfiguredFailoverProxyProvider should not create a NameNode proxy with an underlying retry proxy. (Uma Maheswara Rao G via atm)

File: org/apache/hadoop/hdfs/DFSUtil.java

@@ -42,13 +42,17 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -57,6 +61,7 @@ import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
@@ -814,14 +819,7 @@ public class DFSUtil {
   /** Create a {@link NameNode} proxy */
   public static ClientProtocol createNamenode(InetSocketAddress nameNodeAddr,
       Configuration conf, UserGroupInformation ugi) throws IOException {
-    /**
-     * Currently we have simply burnt-in support for a SINGLE
-     * protocol - protocolPB. This will be replaced
-     * by a way to pick the right protocol based on the
-     * version of the target server.
-     */
-    return new org.apache.hadoop.hdfs.protocolPB.
-        ClientNamenodeProtocolTranslatorPB(nameNodeAddr, conf, ugi);
+    return createNNProxyWithClientProtocol(nameNodeAddr, conf, ugi, true);
   }
 
   /** Create a {@link ClientDatanodeProtocol} proxy */
@@ -848,29 +846,113 @@
   }
 
   /**
-   * Build a NamenodeProtocol connection to the namenode and set up the retry
-   * policy
+   * Build a proxy connection to the namenode with NamenodeProtocol and set up
+   * the proxy with retry policy.
+   * @param address - namenode address
+   * @param conf - configuration
+   * @param ugi - User group information
+   * @return a proxy connection with NamenodeProtocol
+   * @throws - IOException
    */
   public static NamenodeProtocolTranslatorPB createNNProxyWithNamenodeProtocol(
       InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
       throws IOException {
-    RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
-        TimeUnit.MILLISECONDS);
-    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
-        = new HashMap<Class<? extends Exception>, RetryPolicy>();
-    RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy,
-        exceptionToPolicyMap);
-    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
-    methodNameToPolicyMap.put("getBlocks", methodPolicy);
-    methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
-    RPC.setProtocolEngine(conf, NamenodeProtocolPB.class,
-        ProtobufRpcEngine.class);
-    NamenodeProtocolPB proxy = RPC.getProxy(NamenodeProtocolPB.class, RPC
-        .getProtocolVersion(NamenodeProtocolPB.class), address, ugi, conf,
-        NetUtils.getDefaultSocketFactory(conf));
-    NamenodeProtocolPB retryProxy = (NamenodeProtocolPB) RetryProxy.create(
-        NamenodeProtocolPB.class, proxy, methodNameToPolicyMap);
-    return new NamenodeProtocolTranslatorPB(retryProxy);
+    return createNNProxyWithNamenodeProtocol(address, conf, ugi, true);
+  }
+
+  /**
+   * Build a proxy connection to the namenode with NamenodeProtocol.
+   * @param address - namenode address
+   * @param conf - configuration
+   * @param ugi - User group information
+   * @param withRetries - indicates whether to create retry proxy or not
+   * @return a proxy connection with NamenodeProtocol
+   * @throws - IOException
+   */
+  public static NamenodeProtocolTranslatorPB createNNProxyWithNamenodeProtocol(
+      InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
+      boolean withRetries) throws IOException {
+    NamenodeProtocolPB proxy = (NamenodeProtocolPB) createNameNodeProxy(
+        address, conf, ugi, NamenodeProtocolPB.class);
+    if (withRetries) { // create the proxy with retries
+      RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
+          TimeUnit.MILLISECONDS);
+      Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
+          = new HashMap<Class<? extends Exception>, RetryPolicy>();
+      RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy,
+          exceptionToPolicyMap);
+      Map<String, RetryPolicy> methodNameToPolicyMap
+          = new HashMap<String, RetryPolicy>();
+      methodNameToPolicyMap.put("getBlocks", methodPolicy);
+      methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
+      proxy = (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
+          proxy, methodNameToPolicyMap);
+    }
+    return new NamenodeProtocolTranslatorPB(proxy);
+  }
+
+  /**
+   * Build a proxy connection to the namenode with ClientProtocol.
+   * @param address - namenode address
+   * @param conf - configuration
+   * @param ugi - User group information
+   * @param withRetries - indicates whether to create retry proxy or not
+   * @return a proxy connection with ClientProtocol
+   * @throws IOException
+   */
+  public static ClientNamenodeProtocolTranslatorPB createNNProxyWithClientProtocol(
+      InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
+      boolean withRetries) throws IOException {
+    ClientNamenodeProtocolPB proxy
+        = (ClientNamenodeProtocolPB) createNameNodeProxy(address, conf, ugi,
+            ClientNamenodeProtocolPB.class);
+    if (withRetries) { // create the proxy with retries
+      proxy = createNameNodeProxyWithRetries(proxy);
+    }
+    return new ClientNamenodeProtocolTranslatorPB(proxy);
+  }
+
+  /**
+   * Creates the retry proxy by setting up the retry policy.
+   * @param proxy - non retry proxy connection
+   * @return a retry proxy connection
+   */
+  public static ClientNamenodeProtocolPB createNameNodeProxyWithRetries(
+      ClientNamenodeProtocolPB proxy) {
+    RetryPolicy createPolicy = RetryPolicies
+        .retryUpToMaximumCountWithFixedSleep(5,
+            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
+    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap
+        = new HashMap<Class<? extends Exception>, RetryPolicy>();
+    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
+        createPolicy);
+    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
+        = new HashMap<Class<? extends Exception>, RetryPolicy>();
+    exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
+        .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
+            remoteExceptionToPolicyMap));
+    RetryPolicy methodPolicy = RetryPolicies.retryByException(
+        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
+    Map<String, RetryPolicy> methodNameToPolicyMap
+        = new HashMap<String, RetryPolicy>();
+    methodNameToPolicyMap.put("create", methodPolicy);
+    ClientNamenodeProtocolPB retryProxy = (ClientNamenodeProtocolPB) RetryProxy
+        .create(ClientNamenodeProtocolPB.class, proxy, methodNameToPolicyMap);
+    return retryProxy;
+  }
+
+  @SuppressWarnings("unchecked")
+  private static Object createNameNodeProxy(InetSocketAddress address,
+      Configuration conf, UserGroupInformation ugi, Class xface)
+      throws IOException {
+    RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class);
+    Object proxy = RPC.getProxy(xface, RPC.getProtocolVersion(xface), address,
+        ugi, conf, NetUtils.getDefaultSocketFactory(conf));
+    return proxy;
   }
 
   /**
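Net effect in DFSUtil, as a usage sketch (the variable names are assumed;
the method calls are the ones added above): single-NameNode callers keep
the method-level retry policies through the old entry points, while the
failover provider can now request a bare proxy.

    // Non-HA client path: retries stay on (the convenience overloads
    // above pass withRetries=true).
    ClientProtocol direct = DFSUtil.createNamenode(nnAddr, conf, ugi);

    // HA path: a bare proxy with no retry layer underneath; the failover
    // layer above ConfiguredFailoverProxyProvider owns all retry behavior.
    ClientNamenodeProtocolTranslatorPB bare =
        DFSUtil.createNNProxyWithClientProtocol(nnAddr, conf, ugi, false);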

File: org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -22,9 +22,6 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -37,6 +34,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -55,28 +53,22 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
@@ -143,47 +135,27 @@ public class ClientNamenodeProtocolTranslatorPB implements
     ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator {
   final private ClientNamenodeProtocolPB rpcProxy;
 
-  private static ClientNamenodeProtocolPB createNamenode(
+  public ClientNamenodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
+      Configuration conf, UserGroupInformation ugi) throws IOException {
+    this(getNamenodeRetryProxy(nameNodeAddr, conf, ugi));
+  }
+
+  public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy)
+      throws IOException {
+    rpcProxy = proxy;
+  }
+
+  private static ClientNamenodeProtocolPB getNamenodeRetryProxy(
       InetSocketAddress nameNodeAddr, Configuration conf,
       UserGroupInformation ugi) throws IOException {
     RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
         ProtobufRpcEngine.class);
-    return RPC.getProxy(ClientNamenodeProtocolPB.class,
-        RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), nameNodeAddr, ugi, conf,
-        NetUtils.getSocketFactory(conf, ClientNamenodeProtocolPB.class));
-  }
-
-  /** Create a {@link NameNode} proxy */
-  static ClientNamenodeProtocolPB createNamenodeWithRetry(
-      ClientNamenodeProtocolPB rpcNamenode) {
-    RetryPolicy createPolicy = RetryPolicies
-        .retryUpToMaximumCountWithFixedSleep(5,
-            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap
-        = new HashMap<Class<? extends Exception>, RetryPolicy>();
-    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
-        createPolicy);
-    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
-        new HashMap<Class<? extends Exception>, RetryPolicy>();
-    exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
-        .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
-            remoteExceptionToPolicyMap));
-    RetryPolicy methodPolicy = RetryPolicies.retryByException(
-        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
-    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
-    methodNameToPolicyMap.put("create", methodPolicy);
-    return (ClientNamenodeProtocolPB) RetryProxy.create(
-        ClientNamenodeProtocolPB.class, rpcNamenode, methodNameToPolicyMap);
-  }
-
-  public ClientNamenodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
-      Configuration conf, UserGroupInformation ugi) throws IOException {
-    rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
+    ClientNamenodeProtocolPB proxy = RPC.getProxy(
+        ClientNamenodeProtocolPB.class, RPC
+            .getProtocolVersion(ClientNamenodeProtocolPB.class), nameNodeAddr,
+        ugi, conf, NetUtils.getSocketFactory(conf,
+            ClientNamenodeProtocolPB.class));
+    return DFSUtil.createNameNodeProxyWithRetries(proxy);
   }
 
   public void close() {
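The translator now exposes two constructors: the address-based one
preserves the old behavior by routing through
DFSUtil.createNameNodeProxyWithRetries(), and the new proxy-based one lets
a caller inject whatever proxy it wants, retrying or not. A sketch,
assuming nnAddr, conf, ugi, and bareProxy are in scope:

    // Old behavior preserved: the "create" retry policy is wired in.
    ClientNamenodeProtocolTranslatorPB retrying =
        new ClientNamenodeProtocolTranslatorPB(nnAddr, conf, ugi);

    // New: the caller supplies the proxy, so no hidden retry layer is added.
    ClientNamenodeProtocolTranslatorPB plain =
        new ClientNamenodeProtocolTranslatorPB(bareProxy);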

File: ConfiguredFailoverProxyProvider.java

@@ -27,7 +27,6 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -118,11 +117,10 @@ public class ConfiguredFailoverProxyProvider<T> implements
     try {
       if (NamenodeProtocol.class.equals(xface)) {
         current.namenode = DFSUtil.createNNProxyWithNamenodeProtocol(
-            current.address, conf, ugi);
+            current.address, conf, ugi, false);
       } else if (ClientProtocol.class.equals(xface)) {
-        // TODO(HA): This will create a NN proxy with an underlying retry
-        // proxy. We don't want this.
-        current.namenode = DFSUtil.createNamenode(current.address, conf, ugi);
+        current.namenode = DFSUtil.createNNProxyWithClientProtocol(
+            current.address, conf, ugi, false);
       } else {
         throw new IllegalStateException(
             "Upsupported protocol found when creating the proxy conection to NameNode. "