HDFS-2958. Sweep for remaining proxy construction which doesn't go through failover path.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1294811 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 2012-02-28 20:09:18 +00:00
parent 1ab31b1715
commit c69dfdd5e1
26 changed files with 507 additions and 466 deletions
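Before the per-file diffs, a minimal sketch of the client-side pattern this commit converges on, assuming only the API shown below: every remote NameNode proxy is obtained through the new NameNodeProxies.createProxy(), which hands back a failover-capable proxy when the URI names a configured logical nameservice and a plain RPC proxy otherwise. The URI value and class name are illustrative, not part of the commit.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class CreateProxySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical URI; a logical nameservice here selects the failover path.
    URI nameNodeUri = URI.create("hdfs://my-nameservice");
    NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo =
        NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class);
    ClientProtocol namenode = proxyInfo.getProxy();
    // The wrapper also carries the delegation token service for this proxy.
    System.out.println(proxyInfo.getDelegationTokenService());
  }
}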


@@ -236,3 +236,5 @@ HDFS-2904. Client support for getting delegation tokens. (todd)
HDFS-3013. HA: NameNode format doesn't pick up dfs.namenode.name.dir.NameServiceId configuration (Mingjie Lai via todd)
HDFS-3019. Fix silent failure of TestEditLogJournalFailures (todd)
HDFS-2958. Sweep for remaining proxy construction which doesn't go through failover path. (atm)


@@ -62,7 +62,6 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.hdfs.HAUtil.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -325,8 +324,8 @@ public class DFSClient implements java.io.Closeable {
} else {
Preconditions.checkArgument(nameNodeUri != null,
"null URI");
ProxyAndInfo<ClientProtocol> proxyInfo =
HAUtil.createProxy(conf, nameNodeUri, ClientProtocol.class);
NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo =
NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class);
this.dtService = proxyInfo.getDelegationTokenService();
this.namenode = proxyInfo.getProxy();
}
@@ -694,8 +693,8 @@
"a failover proxy provider configured.");
}
ProxyAndInfo<ClientProtocol> info =
HAUtil.createProxy(conf, uri, ClientProtocol.class);
NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
assert info.getDelegationTokenService().equals(token.getService()) :
"Returned service '" + info.getDelegationTokenService().toString() +
"' doesn't match expected service '" +


@@ -28,12 +28,10 @@ import java.security.SecureRandom;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.StringTokenizer;
import java.util.concurrent.TimeUnit;
import javax.net.SocketFactory;
@@ -42,26 +40,15 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
@@ -801,26 +788,6 @@ public class DFSUtil {
public static int roundBytesToGB(long bytes) {
return Math.round((float)bytes/ 1024 / 1024 / 1024);
}
/** Create a {@link NameNode} proxy */
public static ClientProtocol createNamenode(Configuration conf)
throws IOException {
return createNamenode(NameNode.getAddress(conf), conf);
}
/** Create a {@link NameNode} proxy */
public static ClientProtocol createNamenode(InetSocketAddress nameNodeAddr,
Configuration conf) throws IOException {
return createNamenode(nameNodeAddr, conf,
UserGroupInformation.getCurrentUser());
}
/** Create a {@link NameNode} proxy */
public static ClientProtocol createNamenode(InetSocketAddress nameNodeAddr,
Configuration conf, UserGroupInformation ugi) throws IOException {
return createNNProxyWithClientProtocol(nameNodeAddr, conf, ugi, true);
}
/** Create a {@link ClientDatanodeProtocol} proxy */
public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
@@ -845,116 +812,6 @@
return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
}
/**
* Build a proxy connection to the namenode with NamenodeProtocol and set up
* the proxy with retry policy.
* @param address - namenode address
* @param conf - configuration
* @param ugi - User group information
* @return a proxy connection with NamenodeProtocol
* @throws IOException
*/
public static NamenodeProtocolTranslatorPB createNNProxyWithNamenodeProtocol(
InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
throws IOException {
return createNNProxyWithNamenodeProtocol(address, conf, ugi, true);
}
/**
* Build a proxy connection to the namenode with NamenodeProtocol.
* @param address - namenode address
* @param conf - configuration
* @param ugi - User group information
* @param withRetries - indicates whether to create retry proxy or not
* @return a proxy connection with NamenodeProtocol
* @throws IOException
*/
public static NamenodeProtocolTranslatorPB createNNProxyWithNamenodeProtocol(
InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
boolean withRetries) throws IOException {
NamenodeProtocolPB proxy = (NamenodeProtocolPB) createNameNodeProxy(
address, conf, ugi, NamenodeProtocolPB.class);
if (withRetries) { // create the proxy with retries
RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
TimeUnit.MILLISECONDS);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
= new HashMap<Class<? extends Exception>, RetryPolicy>();
RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy,
exceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap
= new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("getBlocks", methodPolicy);
methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
proxy = (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
proxy, methodNameToPolicyMap);
}
return new NamenodeProtocolTranslatorPB(proxy);
}
/**
* Build a proxy connection to the namenode with ClientProtocol.
* @param address - namenode address
* @param conf - configuration
* @param ugi - User group information
* @param withRetries - indicates whether to create retry proxy or not
* @return a proxy connection with ClientProtocol
* @throws IOException
*/
public static ClientNamenodeProtocolTranslatorPB createNNProxyWithClientProtocol(
InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
boolean withRetries) throws IOException {
ClientNamenodeProtocolPB proxy
= (ClientNamenodeProtocolPB) createNameNodeProxy(address, conf, ugi,
ClientNamenodeProtocolPB.class);
if (withRetries) { // create the proxy with retries
proxy = createNameNodeProxyWithRetries(proxy);
}
return new ClientNamenodeProtocolTranslatorPB(proxy);
}
/**
* Creates the retry proxy by setting up the retry policy.
* @param proxy - non retry proxy connection
* @return a retry proxy connection
*/
public static ClientNamenodeProtocolPB createNameNodeProxyWithRetries(
ClientNamenodeProtocolPB proxy) {
RetryPolicy createPolicy = RetryPolicies
.retryUpToMaximumCountWithFixedSleep(5,
HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap
= new HashMap<Class<? extends Exception>, RetryPolicy>();
remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
createPolicy);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
= new HashMap<Class<? extends Exception>, RetryPolicy>();
exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
.retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
remoteExceptionToPolicyMap));
RetryPolicy methodPolicy = RetryPolicies.retryByException(
RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap
= new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("create", methodPolicy);
ClientNamenodeProtocolPB retryProxy = (ClientNamenodeProtocolPB) RetryProxy
.create(ClientNamenodeProtocolPB.class, proxy, methodNameToPolicyMap);
return retryProxy;
}
@SuppressWarnings("unchecked")
private static Object createNameNodeProxy(InetSocketAddress address,
Configuration conf, UserGroupInformation ugi, Class xface)
throws IOException {
RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class);
Object proxy = RPC.getProxy(xface, RPC.getProtocolVersion(xface), address,
ugi, conf, NetUtils.getDefaultSocketFactory(conf));
return proxy;
}
/**
* Get nameservice Id for the {@link NameNode} based on namenode RPC address
* matching the local node address.


@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
@@ -31,18 +30,11 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient.Conf;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -186,70 +178,6 @@ public class HAUtil {
conf.setBoolean("dfs.ha.allow.stale.reads", val);
}
/** Creates the Failover proxy provider instance*/
@SuppressWarnings("unchecked")
private static <T> FailoverProxyProvider<T> createFailoverProxyProvider(
Configuration conf, Class<FailoverProxyProvider<T>> failoverProxyProviderClass,
Class<T> xface, URI nameNodeUri) throws IOException {
Preconditions.checkArgument(
xface.isAssignableFrom(NamenodeProtocols.class),
"Interface %s is not a NameNode protocol", xface);
try {
Constructor<FailoverProxyProvider<T>> ctor = failoverProxyProviderClass
.getConstructor(Configuration.class, URI.class, Class.class);
FailoverProxyProvider<?> provider = ctor.newInstance(conf, nameNodeUri,
xface);
return (FailoverProxyProvider<T>) provider;
} catch (Exception e) {
String message = "Couldn't create proxy provider " + failoverProxyProviderClass;
if (LOG.isDebugEnabled()) {
LOG.debug(message, e);
}
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
} else {
throw new IOException(message, e);
}
}
}
/** Gets the configured Failover proxy provider's class */
private static <T> Class<FailoverProxyProvider<T>> getFailoverProxyProviderClass(
Configuration conf, URI nameNodeUri, Class<T> xface) throws IOException {
if (nameNodeUri == null) {
return null;
}
String host = nameNodeUri.getHost();
String configKey = DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "."
+ host;
try {
@SuppressWarnings("unchecked")
Class<FailoverProxyProvider<T>> ret = (Class<FailoverProxyProvider<T>>) conf
.getClass(configKey, null, FailoverProxyProvider.class);
if (ret != null) {
// If we found a proxy provider, then this URI should be a logical NN.
// Given that, it shouldn't have a non-default port number.
int port = nameNodeUri.getPort();
if (port > 0 && port != NameNode.DEFAULT_PORT) {
throw new IOException("Port " + port + " specified in URI "
+ nameNodeUri + " but host '" + host
+ "' is a logical (HA) namenode"
+ " and does not use port information.");
}
}
return ret;
} catch (RuntimeException e) {
if (e.getCause() instanceof ClassNotFoundException) {
throw new IOException("Could not load failover proxy provider class "
+ conf.get(configKey) + " which is configured for authority "
+ nameNodeUri, e);
} else {
throw e;
}
}
}
/**
* @return true if the given nameNodeUri appears to be a logical URI.
* This is the case if there is a failover proxy provider configured
@@ -263,60 +191,6 @@
return conf.get(configKey) != null;
}
/**
* Creates the namenode proxy with the passed Protocol.
* @param conf the configuration containing the required IPC
* properties, client failover configurations, etc.
* @param nameNodeUri the URI pointing either to a specific NameNode
* or to a logical nameservice.
* @param xface the IPC interface which should be created
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to
**/
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxy(
Configuration conf, URI nameNodeUri,
Class<T> xface) throws IOException {
Class<FailoverProxyProvider<T>> failoverProxyProviderClass =
HAUtil.getFailoverProxyProviderClass(conf, nameNodeUri, xface);
if (failoverProxyProviderClass == null) {
// Non-HA case
return createNonHAProxy(conf, nameNodeUri, xface);
} else {
// HA case
FailoverProxyProvider<T> failoverProxyProvider = HAUtil
.createFailoverProxyProvider(conf, failoverProxyProviderClass, xface,
nameNodeUri);
Conf config = new Conf(conf);
T proxy = (T) RetryProxy.create(xface, failoverProxyProvider, RetryPolicies
.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
config.maxFailoverAttempts, config.failoverSleepBaseMillis,
config.failoverSleepMaxMillis));
Text dtService = buildTokenServiceForLogicalUri(nameNodeUri);
return new ProxyAndInfo<T>(proxy, dtService);
}
}
@SuppressWarnings("unchecked")
private static <T> ProxyAndInfo<T> createNonHAProxy(
Configuration conf, URI nameNodeUri, Class<T> xface) throws IOException {
InetSocketAddress nnAddr = NameNode.getAddress(nameNodeUri);
Text dtService = SecurityUtil.buildTokenService(nnAddr);
if (xface == ClientProtocol.class) {
T proxy = (T)DFSUtil.createNamenode(nnAddr, conf);
return new ProxyAndInfo<T>(proxy, dtService);
} else if (xface == NamenodeProtocol.class) {
T proxy = (T) DFSUtil.createNNProxyWithNamenodeProtocol(
nnAddr, conf, UserGroupInformation.getCurrentUser());
return new ProxyAndInfo<T>(proxy, dtService);
} else {
throw new AssertionError("Unsupported proxy type: " + xface);
}
}
/**
* Parse the HDFS URI out of the provided token.
* @throws IOException if the token is invalid
@@ -383,27 +257,4 @@
LOG.debug("Mapped HA service delegation token for logical URI " +
haUri + " to namenode " + singleNNAddr);
}
/**
* Wrapper for a client proxy as well as its associated service ID.
* This is simply used as a tuple-like return type for
* {@link HAUtil#createProxy(Configuration, URI, Class)}.
*/
public static class ProxyAndInfo<PROXYTYPE> {
private final PROXYTYPE proxy;
private final Text dtService;
public ProxyAndInfo(PROXYTYPE proxy, Text dtService) {
this.proxy = proxy;
this.dtService = dtService;
}
public PROXYTYPE getProxy() {
return proxy;
}
public Text getDelegationTokenService() {
return dtService;
}
}
}


@@ -0,0 +1,332 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient.Conf;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolPB;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import com.google.common.base.Preconditions;
/**
* Create proxy objects to communicate with a remote NN. All remote access to an
* NN should be funneled through this class. Most of the time you'll want to use
* {@link NameNodeProxies#createProxy(Configuration, URI, Class)}, which will
* create either an HA- or non-HA-enabled client proxy as appropriate.
*/
public class NameNodeProxies {
private static final Log LOG = LogFactory.getLog(NameNodeProxies.class);
/**
* Wrapper for a client proxy as well as its associated service ID.
* This is simply used as a tuple-like return type for
* {@link #createProxy} and {@link #createNonHAProxy}.
*/
public static class ProxyAndInfo<PROXYTYPE> {
private final PROXYTYPE proxy;
private final Text dtService;
public ProxyAndInfo(PROXYTYPE proxy, Text dtService) {
this.proxy = proxy;
this.dtService = dtService;
}
public PROXYTYPE getProxy() {
return proxy;
}
public Text getDelegationTokenService() {
return dtService;
}
}
/**
* Creates the namenode proxy with the passed protocol. This will handle
* creation of either HA- or non-HA-enabled proxy objects, depending upon
* if the provided URI is a configured logical URI.
*
* @param conf the configuration containing the required IPC
* properties, client failover configurations, etc.
* @param nameNodeUri the URI pointing either to a specific NameNode
* or to a logical nameservice.
* @param xface the IPC interface which should be created
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to
* @throws IOException if there is an error creating the proxy
**/
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
URI nameNodeUri, Class<T> xface) throws IOException {
Class<FailoverProxyProvider<T>> failoverProxyProviderClass =
getFailoverProxyProviderClass(conf, nameNodeUri, xface);
if (failoverProxyProviderClass == null) {
// Non-HA case
return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
UserGroupInformation.getCurrentUser(), true);
} else {
// HA case
FailoverProxyProvider<T> failoverProxyProvider = NameNodeProxies
.createFailoverProxyProvider(conf, failoverProxyProviderClass, xface,
nameNodeUri);
Conf config = new Conf(conf);
T proxy = (T) RetryProxy.create(xface, failoverProxyProvider, RetryPolicies
.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
config.maxFailoverAttempts, config.failoverSleepBaseMillis,
config.failoverSleepMaxMillis));
Text dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
return new ProxyAndInfo<T>(proxy, dtService);
}
}
/**
* Creates an explicitly non-HA-enabled proxy object. Most of the time you
* don't want to use this, and should instead use {@link #createProxy}.
*
* @param conf the configuration object
* @param nnAddr address of the remote NN to connect to
* @param xface the IPC interface which should be created
* @param ugi the user who is making the calls on the proxy object
* @param withRetries certain interfaces have a non-standard retry policy
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to
* @throws IOException
*/
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createNonHAProxy(
Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
UserGroupInformation ugi, boolean withRetries) throws IOException {
Text dtService = SecurityUtil.buildTokenService(nnAddr);
T proxy;
if (xface == ClientProtocol.class) {
proxy = (T) createNNProxyWithClientProtocol(nnAddr, conf, ugi,
withRetries);
} else if (xface == JournalProtocol.class) {
proxy = (T) createNNProxyWithJournalProtocol(nnAddr, conf, ugi);
} else if (xface == NamenodeProtocol.class) {
proxy = (T) createNNProxyWithNamenodeProtocol(nnAddr, conf, ugi,
withRetries);
} else if (xface == GetUserMappingsProtocol.class) {
proxy = (T) createNNProxyWithGetUserMappingsProtocol(nnAddr, conf, ugi);
} else if (xface == RefreshUserMappingsProtocol.class) {
proxy = (T) createNNProxyWithRefreshUserMappingsProtocol(nnAddr, conf, ugi);
} else if (xface == RefreshAuthorizationPolicyProtocol.class) {
proxy = (T) createNNProxyWithRefreshAuthorizationPolicyProtocol(nnAddr,
conf, ugi);
} else {
String message = "Upsupported protocol found when creating the proxy " +
"conection to NameNode: " +
((xface != null) ? xface.getClass().getName() : xface);
LOG.error(message);
throw new IllegalStateException(message);
}
return new ProxyAndInfo<T>(proxy, dtService);
}
private static JournalProtocol createNNProxyWithJournalProtocol(
InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
throws IOException {
JournalProtocolPB proxy = (JournalProtocolPB) createNameNodeProxy(address,
conf, ugi, JournalProtocolPB.class);
return new JournalProtocolTranslatorPB(proxy);
}
private static RefreshAuthorizationPolicyProtocol
createNNProxyWithRefreshAuthorizationPolicyProtocol(InetSocketAddress address,
Configuration conf, UserGroupInformation ugi) throws IOException {
RefreshAuthorizationPolicyProtocolPB proxy = (RefreshAuthorizationPolicyProtocolPB)
createNameNodeProxy(address, conf, ugi, RefreshAuthorizationPolicyProtocolPB.class);
return new RefreshAuthorizationPolicyProtocolClientSideTranslatorPB(proxy);
}
private static RefreshUserMappingsProtocol
createNNProxyWithRefreshUserMappingsProtocol(InetSocketAddress address,
Configuration conf, UserGroupInformation ugi) throws IOException {
RefreshUserMappingsProtocolPB proxy = (RefreshUserMappingsProtocolPB)
createNameNodeProxy(address, conf, ugi, RefreshUserMappingsProtocolPB.class);
return new RefreshUserMappingsProtocolClientSideTranslatorPB(proxy);
}
private static GetUserMappingsProtocol createNNProxyWithGetUserMappingsProtocol(
InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
throws IOException {
GetUserMappingsProtocolPB proxy = (GetUserMappingsProtocolPB)
createNameNodeProxy(address, conf, ugi, GetUserMappingsProtocolPB.class);
return new GetUserMappingsProtocolClientSideTranslatorPB(proxy);
}
private static NamenodeProtocol createNNProxyWithNamenodeProtocol(
InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
boolean withRetries) throws IOException {
NamenodeProtocolPB proxy = (NamenodeProtocolPB) createNameNodeProxy(
address, conf, ugi, NamenodeProtocolPB.class);
if (withRetries) { // create the proxy with retries
RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
TimeUnit.MILLISECONDS);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
= new HashMap<Class<? extends Exception>, RetryPolicy>();
RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy,
exceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap
= new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("getBlocks", methodPolicy);
methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
proxy = (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
proxy, methodNameToPolicyMap);
}
return new NamenodeProtocolTranslatorPB(proxy);
}
private static ClientProtocol createNNProxyWithClientProtocol(
InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
boolean withRetries) throws IOException {
ClientNamenodeProtocolPB proxy = (ClientNamenodeProtocolPB) NameNodeProxies
.createNameNodeProxy(address, conf, ugi, ClientNamenodeProtocolPB.class);
if (withRetries) { // create the proxy with retries
RetryPolicy createPolicy = RetryPolicies
.retryUpToMaximumCountWithFixedSleep(5,
HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap
= new HashMap<Class<? extends Exception>, RetryPolicy>();
remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
createPolicy);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
= new HashMap<Class<? extends Exception>, RetryPolicy>();
exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
.retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
remoteExceptionToPolicyMap));
RetryPolicy methodPolicy = RetryPolicies.retryByException(
RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap
= new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("create", methodPolicy);
proxy = (ClientNamenodeProtocolPB) RetryProxy
.create(ClientNamenodeProtocolPB.class, proxy, methodNameToPolicyMap);
}
return new ClientNamenodeProtocolTranslatorPB(proxy);
}
@SuppressWarnings("unchecked")
private static Object createNameNodeProxy(InetSocketAddress address,
Configuration conf, UserGroupInformation ugi, Class xface)
throws IOException {
RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class);
Object proxy = RPC.getProxy(xface, RPC.getProtocolVersion(xface), address,
ugi, conf, NetUtils.getDefaultSocketFactory(conf));
return proxy;
}
/** Gets the configured Failover proxy provider's class */
private static <T> Class<FailoverProxyProvider<T>> getFailoverProxyProviderClass(
Configuration conf, URI nameNodeUri, Class<T> xface) throws IOException {
if (nameNodeUri == null) {
return null;
}
String host = nameNodeUri.getHost();
String configKey = DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "."
+ host;
try {
@SuppressWarnings("unchecked")
Class<FailoverProxyProvider<T>> ret = (Class<FailoverProxyProvider<T>>) conf
.getClass(configKey, null, FailoverProxyProvider.class);
if (ret != null) {
// If we found a proxy provider, then this URI should be a logical NN.
// Given that, it shouldn't have a non-default port number.
int port = nameNodeUri.getPort();
if (port > 0 && port != NameNode.DEFAULT_PORT) {
throw new IOException("Port " + port + " specified in URI "
+ nameNodeUri + " but host '" + host
+ "' is a logical (HA) namenode"
+ " and does not use port information.");
}
}
return ret;
} catch (RuntimeException e) {
if (e.getCause() instanceof ClassNotFoundException) {
throw new IOException("Could not load failover proxy provider class "
+ conf.get(configKey) + " which is configured for authority "
+ nameNodeUri, e);
} else {
throw e;
}
}
}
/** Creates the Failover proxy provider instance*/
@SuppressWarnings("unchecked")
private static <T> FailoverProxyProvider<T> createFailoverProxyProvider(
Configuration conf, Class<FailoverProxyProvider<T>> failoverProxyProviderClass,
Class<T> xface, URI nameNodeUri) throws IOException {
Preconditions.checkArgument(
xface.isAssignableFrom(NamenodeProtocols.class),
"Interface %s is not a NameNode protocol", xface);
try {
Constructor<FailoverProxyProvider<T>> ctor = failoverProxyProviderClass
.getConstructor(Configuration.class, URI.class, Class.class);
FailoverProxyProvider<?> provider = ctor.newInstance(conf, nameNodeUri,
xface);
return (FailoverProxyProvider<T>) provider;
} catch (Exception e) {
String message = "Couldn't create proxy provider " + failoverProxyProviderClass;
if (LOG.isDebugEnabled()) {
LOG.debug(message, e);
}
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
} else {
throw new IOException(message, e);
}
}
}
}
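For the explicitly non-HA path that BackupNode, SecondaryNameNode, and EditLogBackupOutputStream switch to below, a hedged usage sketch; the address is illustrative, and the proxy is released with RPC.stopProxy() as those callers now do:

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;

public class NonHAProxySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode service address.
    InetSocketAddress nnAddr = new InetSocketAddress("nn.example.com", 8020);
    NamenodeProtocol namenode = NameNodeProxies.createNonHAProxy(conf, nnAddr,
        NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
        true).getProxy(); // true = wrap with the NamenodeProtocol retry policy
    try {
      // ... use the proxy ...
    } finally {
      RPC.stopProxy(namenode); // stop the RPC client threads when done
    }
  }
}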


@@ -20,12 +20,10 @@ package org.apache.hadoop.hdfs.protocolPB;
import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -34,7 +32,6 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -59,15 +56,12 @@ import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -135,29 +129,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator {
final private ClientNamenodeProtocolPB rpcProxy;
public ClientNamenodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
Configuration conf, UserGroupInformation ugi) throws IOException {
this(getNamenodeRetryProxy(nameNodeAddr, conf, ugi));
}
public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy)
throws IOException {
rpcProxy = proxy;
}
private static ClientNamenodeProtocolPB getNamenodeRetryProxy(
InetSocketAddress nameNodeAddr, Configuration conf,
UserGroupInformation ugi) throws IOException {
RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
ProtobufRpcEngine.class);
ClientNamenodeProtocolPB proxy = RPC.getProxy(
ClientNamenodeProtocolPB.class, RPC
.getProtocolVersion(ClientNamenodeProtocolPB.class), nameNodeAddr,
ugi, conf, NetUtils.getSocketFactory(conf,
ClientNamenodeProtocolPB.class));
return DFSUtil.createNameNodeProxyWithRetries(proxy);
}
public void close() {
RPC.stopProxy(rpcProxy);
}


@@ -20,22 +20,15 @@ package org.apache.hadoop.hdfs.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import com.google.protobuf.RpcController;
@@ -47,16 +40,10 @@ public class GetUserMappingsProtocolClientSideTranslatorPB implements
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final GetUserMappingsProtocolPB rpcProxy;
public GetUserMappingsProtocolClientSideTranslatorPB(
InetSocketAddress nameNodeAddr, UserGroupInformation ugi,
Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, GetUserMappingsProtocolPB.class,
ProtobufRpcEngine.class);
rpcProxy = RPC.getProxy(GetUserMappingsProtocolPB.class,
RPC.getProtocolVersion(GetUserMappingsProtocolPB.class),
NameNode.getAddress(conf), ugi, conf,
NetUtils.getSocketFactory(conf, GetUserMappingsProtocol.class));
GetUserMappingsProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override


@@ -19,17 +19,14 @@ package org.apache.hadoop.hdfs.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
@@ -52,12 +49,9 @@ public class JournalProtocolTranslatorPB implements ProtocolMetaInterface,
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final JournalProtocolPB rpcProxy;
public JournalProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, JournalProtocolPB.class, ProtobufRpcEngine.class);
rpcProxy = RPC.getProxy(JournalProtocolPB.class,
RPC.getProtocolVersion(JournalProtocolPB.class), nameNodeAddr, conf);
public JournalProtocolTranslatorPB(JournalProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override


@@ -19,11 +19,9 @@ package org.apache.hadoop.hdfs.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
@@ -47,14 +45,11 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@@ -84,15 +79,6 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
VersionRequestProto.newBuilder().build();
final private NamenodeProtocolPB rpcProxy;
public NamenodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
Configuration conf, UserGroupInformation ugi) throws IOException {
RPC.setProtocolEngine(conf, NamenodeProtocolPB.class,
ProtobufRpcEngine.class);
rpcProxy = RPC.getProxy(NamenodeProtocolPB.class,
RPC.getProtocolVersion(NamenodeProtocolPB.class), nameNodeAddr, ugi,
conf, NetUtils.getSocketFactory(conf, NamenodeProtocolPB.class));
}
public NamenodeProtocolTranslatorPB(NamenodeProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;


@@ -20,21 +20,15 @@ package org.apache.hadoop.hdfs.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import com.google.protobuf.RpcController;
@@ -46,16 +40,10 @@ public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final RefreshAuthorizationPolicyProtocolPB rpcProxy;
public RefreshAuthorizationPolicyProtocolClientSideTranslatorPB(
InetSocketAddress nameNodeAddr, UserGroupInformation ugi,
Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, RefreshAuthorizationPolicyProtocolPB.class,
ProtobufRpcEngine.class);
rpcProxy = RPC.getProxy(RefreshAuthorizationPolicyProtocolPB.class,
RPC.getProtocolVersion(RefreshAuthorizationPolicyProtocolPB.class),
NameNode.getAddress(conf), ugi, conf,
NetUtils.getSocketFactory(conf, RefreshAuthorizationPolicyProtocol.class));
RefreshAuthorizationPolicyProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override


@@ -20,23 +20,17 @@ package org.apache.hadoop.hdfs.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@@ -47,16 +41,10 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final RefreshUserMappingsProtocolPB rpcProxy;
public RefreshUserMappingsProtocolClientSideTranslatorPB(
InetSocketAddress nameNodeAddr, UserGroupInformation ugi,
Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, RefreshUserMappingsProtocolPB.class,
ProtobufRpcEngine.class);
rpcProxy = RPC.getProxy(RefreshUserMappingsProtocolPB.class,
RPC.getProtocolVersion(RefreshUserMappingsProtocolPB.class),
NameNode.getAddress(conf), ugi, conf,
NetUtils.getSocketFactory(conf, RefreshUserMappingsProtocol.class));
RefreshUserMappingsProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override


@@ -31,7 +31,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -76,10 +76,10 @@ class NameNodeConnector {
URI nameNodeUri = NameNode.getUri(this.namenodeAddress);
this.namenode =
HAUtil.createProxy(conf, nameNodeUri, NamenodeProtocol.class)
NameNodeProxies.createProxy(conf, nameNodeUri, NamenodeProtocol.class)
.getProxy();
this.client =
HAUtil.createProxy(conf, nameNodeUri, ClientProtocol.class)
NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class)
.getProxy();
this.fs = FileSystem.get(nameNodeUri, conf);


@@ -26,13 +26,13 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.net.NetUtils;
@@ -71,7 +70,7 @@ public class BackupNode extends NameNode {
private static final String BN_SERVICE_RPC_ADDRESS_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY;
/** Name-node proxy */
NamenodeProtocolTranslatorPB namenode;
NamenodeProtocol namenode;
/** Name-node RPC address */
String nnRpcAddress;
/** Name-node HTTP address */
@@ -192,7 +191,7 @@ }
}
// Stop the RPC client
if (namenode != null) {
IOUtils.cleanup(LOG, namenode);
RPC.stopProxy(namenode);
}
namenode = null;
// Stop the checkpoint manager
@@ -285,8 +284,9 @@ private NamespaceInfo handshake(Configuration conf) throws IOException {
private NamespaceInfo handshake(Configuration conf) throws IOException {
// connect to name node
InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
this.namenode = new NamenodeProtocolTranslatorPB(nnAddress, conf,
UserGroupInformation.getCurrentUser());
this.namenode = NameNodeProxies.createNonHAProxy(conf, nnAddress,
NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
this.nnHttpAddress = NetUtils.getHostPortString(super.getHttpServerAddress(conf));
// get version and id info from the name-node


@@ -26,8 +26,8 @@ import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.ipc.RemoteException;
@@ -77,7 +77,8 @@ abstract class DfsServlet extends HttpServlet {
NameNodeHttpServer.getNameNodeAddressFromContext(context);
Configuration conf = new HdfsConfiguration(
NameNodeHttpServer.getConfFromContext(context));
return DFSUtil.createNamenode(nnAddr, conf);
return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
ClientProtocol.class).getProxy();
}
protected UserGroupInformation getUGI(HttpServletRequest request,


@@ -22,12 +22,14 @@ import java.net.InetSocketAddress;
import java.util.Arrays;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
/**
* An implementation of the abstract class {@link EditLogOutputStream},
@@ -40,7 +42,7 @@ import org.apache.hadoop.net.NetUtils;
class EditLogBackupOutputStream extends EditLogOutputStream {
static int DEFAULT_BUFFER_SIZE = 256;
private JournalProtocolTranslatorPB backupNode; // RPC proxy to backup node
private JournalProtocol backupNode; // RPC proxy to backup node
private NamenodeRegistration bnRegistration; // backup node registration
private NamenodeRegistration nnRegistration; // active node registration
private EditsDoubleBuffer doubleBuf;
@@ -55,8 +57,9 @@ class EditLogBackupOutputStream extends EditLogOutputStream {
InetSocketAddress bnAddress =
NetUtils.createSocketAddr(bnRegistration.getAddress());
try {
this.backupNode =
new JournalProtocolTranslatorPB(bnAddress, new HdfsConfiguration());
this.backupNode = NameNodeProxies.createNonHAProxy(new HdfsConfiguration(),
bnAddress, JournalProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
} catch(IOException e) {
Storage.LOG.error("Error connecting to: " + bnAddress, e);
throw e;
@@ -93,14 +96,14 @@
throw new IOException("BackupEditStream has " + size +
" records still to be flushed and cannot be closed.");
}
IOUtils.cleanup(Storage.LOG, backupNode); // stop the RPC threads
RPC.stopProxy(backupNode); // stop the RPC threads
doubleBuf.close();
doubleBuf = null;
}
@Override
public void abort() throws IOException {
IOUtils.cleanup(Storage.LOG, backupNode);
RPC.stopProxy(backupNode);
doubleBuf = null;
}


@@ -46,10 +46,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.JspHelper;
@@ -212,8 +212,9 @@ public class SecondaryNameNode implements Runnable {
nameNodeAddr = NameNode.getServiceAddress(conf, true);
this.conf = conf;
this.namenode = new NamenodeProtocolTranslatorPB(nameNodeAddr, conf,
UserGroupInformation.getCurrentUser());
this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr,
NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
// initialize checkpoint directories
fsName = getInfoServer();


@@ -32,8 +32,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
import org.apache.hadoop.ipc.RPC;
@@ -121,18 +120,8 @@ public class ConfiguredFailoverProxyProvider<T> implements
AddressRpcProxyPair current = proxies.get(currentProxyIndex);
if (current.namenode == null) {
try {
if (NamenodeProtocol.class.equals(xface)) {
current.namenode = DFSUtil.createNNProxyWithNamenodeProtocol(
current.address, conf, ugi, false);
} else if (ClientProtocol.class.equals(xface)) {
current.namenode = DFSUtil.createNNProxyWithClientProtocol(
current.address, conf, ugi, false);
} else {
throw new IllegalStateException(
"Upsupported protocol found when creating the proxy conection to NameNode. "
+ ((xface != null) ? xface.getClass().getName() : xface)
+ " is not supported by " + this.getClass().getName());
}
current.namenode = NameNodeProxies.createNonHAProxy(conf,
current.address, xface, ugi, false).getProxy();
} catch (IOException e) {
LOG.error("Failed to create RPC proxy to NameNode", e);
throw new RuntimeException(e);
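Which branch createProxy() takes is decided by getFailoverProxyProviderClass(), which looks up the provider class under a per-host key built from DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX. A hedged sketch of wiring that up programmatically, assuming the standard key prefix value, ConfiguredFailoverProxyProvider's usual package, and an illustrative nameservice name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
import org.apache.hadoop.io.retry.FailoverProxyProvider;

public class FailoverConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "my-nameservice" is hypothetical; the key prefix is assumed to be
    // "dfs.client.failover.proxy.provider" per DFSConfigKeys.
    conf.setClass("dfs.client.failover.proxy.provider.my-nameservice",
        ConfiguredFailoverProxyProvider.class, FailoverProxyProvider.class);
    // With this set, NameNodeProxies.createProxy(conf,
    // URI.create("hdfs://my-nameservice"), xface) takes the HA path; without
    // it, getFailoverProxyProviderClass() returns null and the non-HA path
    // (createNonHAProxy) is used.
  }
}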


@@ -38,19 +38,20 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
@@ -791,9 +792,9 @@ public class DFSAdmin extends FsShell {
conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""));
// Create the client
RefreshAuthorizationPolicyProtocolClientSideTranslatorPB refreshProtocol =
new RefreshAuthorizationPolicyProtocolClientSideTranslatorPB(
NameNode.getAddress(conf), getUGI(), conf);
RefreshAuthorizationPolicyProtocol refreshProtocol =
NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
RefreshAuthorizationPolicyProtocol.class).getProxy();
// Refresh the authorization policy in-effect
refreshProtocol.refreshServiceAcl();
@@ -817,9 +818,9 @@
conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""));
// Create the client
RefreshUserMappingsProtocolClientSideTranslatorPB refreshProtocol =
new RefreshUserMappingsProtocolClientSideTranslatorPB(
NameNode.getAddress(conf), getUGI(), conf);
RefreshUserMappingsProtocol refreshProtocol =
NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
RefreshUserMappingsProtocol.class).getProxy();
// Refresh the user-to-groups mappings
refreshProtocol.refreshUserToGroupsMappings();
@@ -844,9 +845,9 @@
conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""));
// Create the client
RefreshUserMappingsProtocolClientSideTranslatorPB refreshProtocol =
new RefreshUserMappingsProtocolClientSideTranslatorPB(
NameNode.getAddress(conf), getUGI(), conf);
RefreshUserMappingsProtocol refreshProtocol =
NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
RefreshUserMappingsProtocol.class).getProxy();
// Refresh the user-to-groups mappings
refreshProtocol.refreshSuperUserGroupsConfiguration();


@@ -21,8 +21,11 @@ import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.security.UserGroupInformation;
@@ -34,6 +37,7 @@ import org.apache.hadoop.util.ToolRunner;
* HDFS implementation of a tool for getting the groups which a given user
* belongs to.
*/
@InterfaceAudience.Private
public class GetGroups extends GetGroupsBase {
static{
@@ -41,11 +45,11 @@ }
}
GetGroups(Configuration conf) {
public GetGroups(Configuration conf) {
super(conf);
}
GetGroups(Configuration conf, PrintStream out) {
public GetGroups(Configuration conf, PrintStream out) {
super(conf, out);
}
@@ -57,9 +61,8 @@ public class GetGroups extends GetGroupsBase {
@Override
protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
return new GetUserMappingsProtocolClientSideTranslatorPB(
NameNode.getAddress(getConf()), UserGroupInformation.getCurrentUser(),
getConf());
return NameNodeProxies.createProxy(getConf(), FileSystem.getDefaultUri(getConf()),
GetUserMappingsProtocol.class).getProxy();
}
public static void main(String[] argv) throws Exception {


@@ -25,7 +25,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
@@ -34,8 +33,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import junit.framework.TestCase;
/**
* This class tests if block replacement request to data nodes work correctly.
@@ -97,8 +94,8 @@ public class TestGetBlocks extends TestCase {
// get RPC client to namenode
InetSocketAddress addr = new InetSocketAddress("localhost",
cluster.getNameNodePort());
NamenodeProtocol namenode = new NamenodeProtocolTranslatorPB(addr, CONF,
UserGroupInformation.getCurrentUser());
NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF,
NameNode.getUri(addr), NamenodeProtocol.class).getProxy();
// get blocks of size fileLen from dataNodes[0]
BlockWithLocations[] locs;
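The hunk above bridges a raw socket address into the same factory via NameNode.getUri(). A condensed sketch, assuming a reachable NameNode RPC endpoint (host and port here are illustrative):

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

public class NamenodeProtocolSketch {
  public static NamenodeProtocol connect(Configuration conf)
      throws IOException {
    // NameNode.getUri() wraps the address in an hdfs:// URI so the
    // request goes through NameNodeProxies like every other caller.
    InetSocketAddress addr = new InetSocketAddress("localhost", 8020);
    return NameNodeProxies.createProxy(conf, NameNode.getUri(addr),
        NamenodeProtocol.class).getProxy();
  }
}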

View File

@ -22,6 +22,7 @@ import java.net.InetSocketAddress;
import junit.framework.Assert;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
@ -31,8 +32,13 @@ import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@ -66,8 +72,9 @@ public class TestIsMethodSupported {
@Test
public void testNamenodeProtocol() throws IOException {
NamenodeProtocolTranslatorPB translator =
new NamenodeProtocolTranslatorPB(nnAddress, conf,
UserGroupInformation.getCurrentUser());
(NamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(conf,
nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
boolean exists = translator.isMethodSupported("rollEditLog");
Assert.assertTrue(exists);
exists = translator.isMethodSupported("bogusMethod");
@ -99,15 +106,17 @@ public class TestIsMethodSupported {
@Test
public void testClientNamenodeProtocol() throws IOException {
ClientNamenodeProtocolTranslatorPB translator =
new ClientNamenodeProtocolTranslatorPB(nnAddress, conf,
UserGroupInformation.getCurrentUser());
(ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
conf, nnAddress, ClientProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
Assert.assertTrue(translator.isMethodSupported("mkdirs"));
}
@Test
public void testJournalProtocol() throws IOException {
JournalProtocolTranslatorPB translator =
new JournalProtocolTranslatorPB(nnAddress, conf);
JournalProtocolTranslatorPB translator = (JournalProtocolTranslatorPB)
NameNodeProxies.createNonHAProxy(conf, nnAddress, JournalProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
// NameNode doesn't implement JournalProtocol
Assert.assertFalse(translator.isMethodSupported("startLogSegment"));
}
@ -130,24 +139,30 @@ public class TestIsMethodSupported {
@Test
public void testGetUserMappingsProtocol() throws IOException {
GetUserMappingsProtocolClientSideTranslatorPB translator =
new GetUserMappingsProtocolClientSideTranslatorPB(
nnAddress, UserGroupInformation.getCurrentUser(), conf);
(GetUserMappingsProtocolClientSideTranslatorPB)
NameNodeProxies.createNonHAProxy(conf, nnAddress,
GetUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
Assert.assertTrue(translator.isMethodSupported("getGroupsForUser"));
}
@Test
public void testRefreshAuthorizationPolicyProtocol() throws IOException {
RefreshAuthorizationPolicyProtocolClientSideTranslatorPB translator =
new RefreshAuthorizationPolicyProtocolClientSideTranslatorPB(
nnAddress, UserGroupInformation.getCurrentUser(), conf);
RefreshAuthorizationPolicyProtocolClientSideTranslatorPB translator =
(RefreshAuthorizationPolicyProtocolClientSideTranslatorPB)
NameNodeProxies.createNonHAProxy(conf, nnAddress,
RefreshAuthorizationPolicyProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
Assert.assertTrue(translator.isMethodSupported("refreshServiceAcl"));
}
@Test
public void testRefreshUserMappingsProtocol() throws IOException {
RefreshUserMappingsProtocolClientSideTranslatorPB translator =
new RefreshUserMappingsProtocolClientSideTranslatorPB(
nnAddress, UserGroupInformation.getCurrentUser(), conf);
(RefreshUserMappingsProtocolClientSideTranslatorPB)
NameNodeProxies.createNonHAProxy(conf, nnAddress,
RefreshUserMappingsProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
Assert.assertTrue(
translator.isMethodSupported("refreshUserToGroupsMappings"));
}
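These tests all share one shape: request a specific protocol through createNonHAProxy, then cast back to the concrete translator to call isMethodSupported(). A sketch of that shape, assuming the trailing boolean enables client-side retries (its meaning is inferred from these hunks):

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
import org.apache.hadoop.security.UserGroupInformation;

public class NonHAProxySketch {
  public static boolean supportsMkdirs(Configuration conf,
      InetSocketAddress nnAddress) throws IOException {
    // The tests rely on createNonHAProxy handing back the protobuf
    // translator for ClientProtocol requests, so the cast holds.
    ClientNamenodeProtocolTranslatorPB translator =
        (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
            conf, nnAddress, ClientProtocol.class,
            UserGroupInformation.getCurrentUser(), true).getProxy();
    return translator.isMethodSupported("mkdirs");
  }
}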

View File

@ -75,7 +75,8 @@ public class TestReplication extends TestCase {
private void checkFile(FileSystem fileSys, Path name, int repl)
throws IOException {
Configuration conf = fileSys.getConf();
ClientProtocol namenode = DFSUtil.createNamenode(conf);
ClientProtocol namenode = NameNodeProxies.createProxy(conf, fileSys.getUri(),
ClientProtocol.class).getProxy();
waitForBlockReplication(name.toString(), namenode,
Math.min(numDatanodes, repl), -1);
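The replacement above derives the proxy URI from the FileSystem under test rather than from raw configuration, which keeps the check working on both HA and non-HA clusters. A minimal sketch of the same idiom:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class ClientProtocolFromFsSketch {
  public static ClientProtocol clientFor(FileSystem fileSys)
      throws IOException {
    // fileSys.getUri() is a logical URI on an HA cluster, so the
    // returned proxy transparently follows NameNode failover.
    Configuration conf = fileSys.getConf();
    return NameNodeProxies.createProxy(conf, fileSys.getUri(),
        ClientProtocol.class).getProxy();
  }
}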

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -100,7 +101,8 @@ public class TestBalancer extends TestCase {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
try {
cluster.waitActive();
client = DFSUtil.createNamenode(conf);
client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
ClientProtocol.class).getProxy();
short replicationFactor = (short)(numNodes-1);
long fileLen = size/replicationFactor;
@ -194,7 +196,8 @@ public class TestBalancer extends TestCase {
.simulatedCapacities(capacities)
.build();
cluster.waitActive();
client = DFSUtil.createNamenode(conf);
client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
ClientProtocol.class).getProxy();
for(int i = 0; i < blocksDN.length; i++)
cluster.injectBlocks(i, Arrays.asList(blocksDN[i]));
@ -308,7 +311,8 @@ public class TestBalancer extends TestCase {
.build();
try {
cluster.waitActive();
client = DFSUtil.createNamenode(conf);
client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
ClientProtocol.class).getProxy();
long totalCapacity = sum(capacities);
@ -400,7 +404,8 @@ public class TestBalancer extends TestCase {
.build();
try {
cluster.waitActive();
client = DFSUtil.createNamenode(conf);
client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
ClientProtocol.class).getProxy();
long totalCapacity = sum(capacities);
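The same replacement appears four times in TestBalancer. A hypothetical helper, not part of this patch, that would consolidate it:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class BalancerClientHelper {
  // Hypothetical: consolidates the repeated client construction above.
  static ClientProtocol newClient(Configuration conf, MiniDFSCluster cluster)
      throws IOException {
    return NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
  }
}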

View File

@ -23,10 +23,12 @@ import java.net.InetSocketAddress;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@ -71,12 +73,13 @@ public class TestBalancerWithHANameNodes {
cluster = new MiniDFSCluster.Builder(conf).nnTopology(simpleHATopology)
.numDataNodes(capacities.length).racks(racks).simulatedCapacities(
capacities).build();
HATestUtil.setFailoverConfigurations(cluster, conf);
try {
cluster.waitActive();
cluster.transitionToActive(1);
Thread.sleep(500);
client = DFSUtil.createNamenode(cluster.getNameNode(1)
.getNameNodeAddress(), conf);
client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
ClientProtocol.class).getProxy();
long totalCapacity = TestBalancer.sum(capacities);
// fill up the cluster to be 30% full
long totalUsedSpace = totalCapacity * 3 / 10;
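Taken together, the two changes above form the HA-aware recipe: install the failover configuration first, then create the client from the logical default URI instead of a concrete NameNode address. A condensed sketch, assuming a running MiniDFSCluster with the simple HA topology:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;

public class HaBalancerClientSketch {
  public static ClientProtocol haClient(MiniDFSCluster cluster,
      Configuration conf) throws IOException {
    // Registers the logical nameservice, making fs.defaultFS an HA URI.
    HATestUtil.setFailoverConfigurations(cluster, conf);
    // The resulting proxy fails over with the cluster, so the caller no
    // longer cares which NameNode is currently active.
    return NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
        ClientProtocol.class).getProxy();
  }
}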

View File

@ -150,6 +150,11 @@ public abstract class HATestUtil {
return fs;
}
public static void setFailoverConfigurations(MiniDFSCluster cluster,
Configuration conf) {
setFailoverConfigurations(cluster, conf, getLogicalHostname(cluster));
}
/** Sets the required configurations for performing failover of the default namespace. */
public static void setFailoverConfigurations(MiniDFSCluster cluster,
Configuration conf, String logicalName) {

View File

@ -0,0 +1,57 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.tools.GetGroups;
import org.apache.hadoop.tools.GetGroupsTestBase;
import org.apache.hadoop.util.Tool;
import org.junit.After;
import org.junit.Before;
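/**
 * Tests that the GetGroups tool can resolve the NameNode through an HA
 * failover configuration rather than a single fixed address.
 */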
public class TestGetGroupsWithHA extends GetGroupsTestBase {
private MiniDFSCluster cluster;
@Before
public void setUpNameNode() throws IOException {
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(0).build();
HATestUtil.setFailoverConfigurations(cluster, conf);
}
@After
public void tearDownNameNode() {
if (cluster != null) {
cluster.shutdown();
}
}
@Override
protected Tool getTool(PrintStream o) {
return new GetGroups(conf, o);
}
}