HDFS-9022. Move NameNode.getAddress() and NameNode.getUri() to hadoop-hdfs-client. Contributed by Mingliang Liu.
parent 3f82f582e5
commit 9eee97508f
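The change is a pure relocation: NameNode.getAddress() and NameNode.getUri() move, with their bodies intact, into DFSUtilClient in the hadoop-hdfs-client module (renamed getNNAddress() and getNNUri()), so client-side code no longer depends on the server-side NameNode class. A minimal sketch of how client code resolves the NameNode RPC endpoint after this change; the host nn.example.com is a hypothetical placeholder, not part of the commit:

import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSUtilClient;

public class ResolveNameNode {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // fs.defaultFS drives the Configuration-based overload.
    FileSystem.setDefaultUri(conf, "hdfs://nn.example.com:8020");

    // Formerly NameNode.getAddress(conf); now lives in hadoop-hdfs-client.
    InetSocketAddress addr = DFSUtilClient.getNNAddress(conf);

    // Formerly NameNode.getUri(addr).
    URI uri = DFSUtilClient.getNNUri(addr);

    System.out.println(addr + " -> " + uri);
  }
}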
@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.BasicInetPeer;

@@ -33,6 +34,7 @@ import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;

@@ -587,4 +589,43 @@ public class DFSUtilClient {
       }
     }
   }
+
+  public static InetSocketAddress getNNAddress(String address) {
+    return NetUtils.createSocketAddr(address,
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+  }
+
+  public static InetSocketAddress getNNAddress(Configuration conf) {
+    URI filesystemURI = FileSystem.getDefaultUri(conf);
+    return getNNAddress(filesystemURI);
+  }
+
+  /**
+   * @return address of file system
+   */
+  public static InetSocketAddress getNNAddress(URI filesystemURI) {
+    String authority = filesystemURI.getAuthority();
+    if (authority == null) {
+      throw new IllegalArgumentException(String.format(
+          "Invalid URI for NameNode address (check %s): %s has no authority.",
+          FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
+    }
+    if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
+        filesystemURI.getScheme())) {
+      throw new IllegalArgumentException(String.format(
+          "Invalid URI for NameNode address (check %s): " +
+          "%s is not of scheme '%s'.", FileSystem.FS_DEFAULT_NAME_KEY,
+          filesystemURI.toString(), HdfsConstants.HDFS_URI_SCHEME));
+    }
+    return getNNAddress(authority);
+  }
+
+  public static URI getNNUri(InetSocketAddress namenode) {
+    int port = namenode.getPort();
+    String portString =
+        (port == HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) ?
+        "" : (":" + port);
+    return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+        + namenode.getHostName() + portString);
+  }
 }
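As the relocated methods show, getNNAddress(URI) rejects URIs that lack an authority or use a non-hdfs scheme, and getNNUri() elides the port when it equals the default RPC port. A small illustration of the port handling (not part of the commit; the host name is arbitrary, and the default port is assumed to be HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, i.e. 8020):

import java.net.InetSocketAddress;

import org.apache.hadoop.hdfs.DFSUtilClient;

public class NNUriDemo {
  public static void main(String[] args) {
    // A non-default port is preserved in the URI: prints hdfs://foo:555
    System.out.println(DFSUtilClient.getNNUri(
        new InetSocketAddress("foo", 555)));
    // The default RPC port is elided: prints hdfs://foo
    System.out.println(DFSUtilClient.getNNUri(
        new InetSocketAddress("foo", 8020)));
  }
}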
@@ -27,10 +27,10 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.mount.MountEntry;
 import org.apache.hadoop.mount.MountInterface;
 import org.apache.hadoop.mount.MountResponse;

@@ -90,7 +90,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
     UserGroupInformation.setConfiguration(config);
     SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
         NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
-    this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
+    this.dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
   }

   @Override
@@ -33,8 +33,8 @@ import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ShutdownHookManager;

@@ -173,7 +173,7 @@ class DFSClientCache {
     return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
       @Override
       public DFSClient run() throws IOException {
-        return new DFSClient(NameNode.getAddress(config), config);
+        return new DFSClient(DFSUtilClient.getNNAddress(config), config);
       }
     });
   }
@@ -28,6 +28,7 @@ import java.util.Arrays;
 import java.util.concurrent.ConcurrentNavigableMap;

 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;

@@ -35,7 +36,6 @@ import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;

@@ -480,7 +480,7 @@ public class TestWrites {
     try {
       cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
       cluster.waitActive();
-      client = new DFSClient(NameNode.getAddress(config), config);
+      client = new DFSClient(DFSUtilClient.getNNAddress(config), config);

       // Use emphral port in case tests are running in parallel
       config.setInt("nfs3.mountd.port", 0);

@@ -596,7 +596,8 @@ public class TestWrites {
     nfs3.startServiceInternal(false);
     nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

-    DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
+    DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config),
+        config);
     HdfsFileStatus status = dfsClient.getFileInfo("/");
     FileHandle rootHandle = new FileHandle(status.getFileId());
@@ -929,6 +929,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7995. Implement chmod in the HDFS Web UI.
     (Ravi Prakash and Haohui Mai via wheat9)

+    HDFS-9022. Move NameNode.getAddress() and NameNode.getUri() to
+    hadoop-hdfs-client. (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS

     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -149,7 +149,6 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.DataOutputBuffer;

@@ -250,17 +249,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       = new HashMap<Long, DFSOutputStream>();

   /**
-   * Same as this(NameNode.getAddress(conf), conf);
+   * Same as this(NameNode.getNNAddress(conf), conf);
    * @see #DFSClient(InetSocketAddress, Configuration)
    * @deprecated Deprecated at 0.21
    */
   @Deprecated
   public DFSClient(Configuration conf) throws IOException {
-    this(NameNode.getAddress(conf), conf);
+    this(DFSUtilClient.getNNAddress(conf), conf);
   }

   public DFSClient(InetSocketAddress address, Configuration conf) throws IOException {
-    this(NameNode.getUri(address), conf);
+    this(DFSUtilClient.getNNUri(address), conf);
   }

   /**
@@ -411,7 +411,7 @@ public class DFSUtil {
         NameNode.initializeGenericKeys(confForNn, nsId, nnId);
         String principal = SecurityUtil.getServerPrincipal(confForNn
             .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
-            NameNode.getAddress(confForNn).getHostName());
+            DFSUtilClient.getNNAddress(confForNn).getHostName());
         principals.add(principal);
       }
     } else {

@@ -419,7 +419,7 @@ public class DFSUtil {
       NameNode.initializeGenericKeys(confForNn, nsId, null);
       String principal = SecurityUtil.getServerPrincipal(confForNn
           .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
-          NameNode.getAddress(confForNn).getHostName());
+          DFSUtilClient.getNNAddress(confForNn).getHostName());
       principals.add(principal);
     }
   }

@@ -495,7 +495,8 @@ public class DFSUtil {
     // Use default address as fall back
     String defaultAddress;
     try {
-      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
+      defaultAddress = NetUtils.getHostPortString(
+          DFSUtilClient.getNNAddress(conf));
     } catch (IllegalArgumentException e) {
       defaultAddress = null;
     }

@@ -531,7 +532,8 @@ public class DFSUtil {
     // Use default address as fall back
     String defaultAddress;
     try {
-      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
+      defaultAddress = NetUtils.getHostPortString(
+          DFSUtilClient.getNNAddress(conf));
     } catch (IllegalArgumentException e) {
       defaultAddress = null;
     }
@@ -165,8 +165,8 @@ public class NameNodeProxies {

     if (failoverProxyProvider == null) {
       // Non-HA case
-      return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
-          UserGroupInformation.getCurrentUser(), true,
+      return createNonHAProxy(conf, DFSUtilClient.getNNAddress(nameNodeUri),
+          xface, UserGroupInformation.getCurrentUser(), true,
           fallbackToSimpleAuth);
     } else {
       // HA case

@@ -183,10 +183,10 @@ public class NameNodeProxies {
           HdfsConstants.HDFS_URI_SCHEME);
     } else {
       dtService = SecurityUtil.buildTokenService(
-          NameNode.getAddress(nameNodeUri));
+          DFSUtilClient.getNNAddress(nameNodeUri));
     }
     return new ProxyAndInfo<T>(proxy, dtService,
-        NameNode.getAddress(nameNodeUri));
+        DFSUtilClient.getNNAddress(nameNodeUri));
   }
 }

@@ -249,10 +249,10 @@ public class NameNodeProxies {
             HdfsConstants.HDFS_URI_SCHEME);
       } else {
         dtService = SecurityUtil.buildTokenService(
-            NameNode.getAddress(nameNodeUri));
+            DFSUtilClient.getNNAddress(nameNodeUri));
       }
       return new ProxyAndInfo<T>(proxy, dtService,
-          NameNode.getAddress(nameNodeUri));
+          DFSUtilClient.getNNAddress(nameNodeUri));
     } else {
       LOG.warn("Currently creating proxy using " +
           "LossyRetryInvocationHandler requires NN HA setup");
@@ -26,6 +26,7 @@ import javax.servlet.http.HttpServletRequest;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;

@@ -77,7 +78,7 @@ abstract class DfsServlet extends HttpServlet {
         NameNodeHttpServer.getNameNodeAddressFromContext(context);
     Configuration conf = new HdfsConfiguration(
         NameNodeHttpServer.getConfFromContext(context));
-    return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
+    return NameNodeProxies.createProxy(conf, DFSUtilClient.getNNUri(nnAddr),
         ClientProtocol.class).getProxy();
   }
@@ -32,6 +32,7 @@ import javax.servlet.http.HttpServletResponse;

 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

@@ -237,7 +238,7 @@ public class ImageServlet extends HttpServlet {

       validRequestors.add(SecurityUtil.getServerPrincipal(conf
           .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
-          NameNode.getAddress(conf).getHostName()));
+          DFSUtilClient.getNNAddress(conf).getHostName()));
       try {
         validRequestors.add(
             SecurityUtil.getServerPrincipal(conf

@@ -261,7 +262,7 @@ public class ImageServlet extends HttpServlet {
         for (Configuration otherNnConf : otherNnConfs) {
           validRequestors.add(SecurityUtil.getServerPrincipal(otherNnConf
               .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
-              NameNode.getAddress(otherNnConf).getHostName()));
+              DFSUtilClient.getNNAddress(otherNnConf).getHostName()));
         }
       }
@@ -445,10 +445,6 @@ public class NameNode implements NameNodeStatusMXBean {
     return clientNamenodeAddress;
   }

-  public static InetSocketAddress getAddress(String address) {
-    return NetUtils.createSocketAddr(address, DFS_NAMENODE_RPC_PORT_DEFAULT);
-  }
-
   /**
    * Set the configuration property for the service rpc address
    * to address

@@ -470,45 +466,18 @@ public class NameNode implements NameNodeStatusMXBean {
       boolean fallback) {
     String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
     if (addr == null || addr.isEmpty()) {
-      return fallback ? getAddress(conf) : null;
+      return fallback ? DFSUtilClient.getNNAddress(conf) : null;
     }
-    return getAddress(addr);
+    return DFSUtilClient.getNNAddress(addr);
   }

-  public static InetSocketAddress getAddress(Configuration conf) {
-    URI filesystemURI = FileSystem.getDefaultUri(conf);
-    return getAddress(filesystemURI);
-  }
-
-
+  @Deprecated
   /**
-   * @return address of file system
+   * @deprecated Use {@link DFSUtilClient#getNNUri(InetSocketAddress)} instead.
    */
-  public static InetSocketAddress getAddress(URI filesystemURI) {
-    String authority = filesystemURI.getAuthority();
-    if (authority == null) {
-      throw new IllegalArgumentException(String.format(
-          "Invalid URI for NameNode address (check %s): %s has no authority.",
-          FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
-    }
-    if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
-        filesystemURI.getScheme())) {
-      throw new IllegalArgumentException(String.format(
-          "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
-          FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
-          HdfsConstants.HDFS_URI_SCHEME));
-    }
-    return getAddress(authority);
-  }
-
   public static URI getUri(InetSocketAddress namenode) {
-    int port = namenode.getPort();
-    String portString = (port == DFS_NAMENODE_RPC_PORT_DEFAULT) ?
-      "" : (":" + port);
-    return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
-        + namenode.getHostName()+portString);
+    return DFSUtilClient.getNNUri(namenode);
   }

   //
   // Common NameNode methods implementation for the active name-node role.
   //
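Note the asymmetry in NameNode itself: the getAddress() overloads are deleted outright, while getUri() is kept as a deprecated one-line forwarder to DFSUtilClient.getNNUri(), so existing callers keep compiling against the old signature. A hypothetical downstream caller (not in the commit) would see only a deprecation warning:

import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class LegacyCaller {
  @SuppressWarnings("deprecation")
  static URI legacyUri() {
    // Still works through the deprecated forwarder, but new code should
    // call DFSUtilClient.getNNUri(...) directly.
    return NameNode.getUri(new InetSocketAddress("localhost", 8020));
  }
}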
@@ -529,7 +498,7 @@ public class NameNode implements NameNodeStatusMXBean {
   }

   protected InetSocketAddress getRpcServerAddress(Configuration conf) {
-    return getAddress(conf);
+    return DFSUtilClient.getNNAddress(conf);
   }

   /** Given a configuration get the bind host of the service rpc server

@@ -564,7 +533,7 @@ public class NameNode implements NameNodeStatusMXBean {

   protected void setRpcServerAddress(Configuration conf,
       InetSocketAddress rpcAddress) {
-    FileSystem.setDefaultUri(conf, getUri(rpcAddress));
+    FileSystem.setDefaultUri(conf, DFSUtilClient.getNNUri(rpcAddress));
   }

   protected InetSocketAddress getHttpServerAddress(Configuration conf) {

@@ -1012,7 +981,7 @@ public class NameNode implements NameNodeStatusMXBean {
     checkAllowFormat(conf);

     if (UserGroupInformation.isSecurityEnabled()) {
-      InetSocketAddress socAddr = getAddress(conf);
+      InetSocketAddress socAddr = DFSUtilClient.getNNAddress(conf);
       SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
           DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
     }

@@ -1115,7 +1084,7 @@ public class NameNode implements NameNodeStatusMXBean {
     }

     if (UserGroupInformation.isSecurityEnabled()) {
-      InetSocketAddress socAddr = getAddress(conf);
+      InetSocketAddress socAddr = DFSUtilClient.getNNAddress(conf);
       SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
           DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
     }
@@ -758,7 +758,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {

   private void copyBlocksToLostFound(String parent, HdfsFileStatus file,
       LocatedBlocks blocks) throws IOException {
-    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
+    final DFSClient dfs = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
     final String fullName = file.getFullName(parent);
     OutputStream fos = null;
     try {
@@ -38,6 +38,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

@@ -101,7 +102,7 @@ public class BootstrapStandby implements Tool, Configurable {
     parseConfAndFindOtherNN();
     NameNode.checkAllowFormat(conf);

-    InetSocketAddress myAddr = NameNode.getAddress(conf);
+    InetSocketAddress myAddr = DFSUtilClient.getNNAddress(conf);
     SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
         DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());
@@ -24,9 +24,9 @@ import java.net.URI;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;

@@ -91,7 +91,7 @@ public class IPFailoverProxyProvider<T> extends
     if (nnProxyInfo == null) {
       try {
         // Create a proxy that is not wrapped in RetryProxy
-        InetSocketAddress nnAddr = NameNode.getAddress(nameNodeUri);
+        InetSocketAddress nnAddr = DFSUtilClient.getNNAddress(nameNodeUri);
         nnProxyInfo = new ProxyInfo<T>(NameNodeProxies.createNonHAProxy(
             conf, nnAddr, xface, UserGroupInformation.getCurrentUser(),
             false).getProxy(), nnAddr.toString());
@@ -39,6 +39,7 @@ import org.apache.hadoop.ha.HealthMonitor;
 import org.apache.hadoop.ha.ZKFailoverController;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;

@@ -166,7 +167,7 @@ public class DFSZKFailoverController extends ZKFailoverController {

   @Override
   public void loginAsFCUser() throws IOException {
-    InetSocketAddress socAddr = NameNode.getAddress(conf);
+    InetSocketAddress socAddr = DFSUtilClient.getNNAddress(conf);
     SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
         DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
   }
@@ -29,9 +29,9 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.tools.GetGroupsBase;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.util.ToolRunner;

@@ -63,7 +63,7 @@ public class GetGroups extends GetGroupsBase {
   @Override
   protected InetSocketAddress getProtocolAddress(Configuration conf)
       throws IOException {
-    return NameNode.getAddress(conf);
+    return DFSUtilClient.getNNAddress(conf);
   }

   @Override
@@ -1797,8 +1797,8 @@ public class DFSTestUtil {
       URI nameNodeUri, UserGroupInformation ugi)
       throws IOException {
     return NameNodeProxies.createNonHAProxy(conf,
-        NameNode.getAddress(nameNodeUri), NamenodeProtocol.class, ugi, false).
-        getProxy();
+        DFSUtilClient.getNNAddress(nameNodeUri), NamenodeProtocol.class, ugi,
+        false).getProxy();
   }

   /**
@@ -300,7 +300,7 @@ public class TestDFSClientFailover {
         Class<T> xface) {
       try {
         this.proxy = NameNodeProxies.createNonHAProxy(conf,
-            NameNode.getAddress(uri), xface,
+            DFSUtilClient.getNNAddress(uri), xface,
             UserGroupInformation.getCurrentUser(), false).getProxy();
         this.xface = xface;
       } catch (IOException ioe) {
@@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;

@@ -53,7 +52,7 @@ public class TestDFSShellGenericOptions {
     }
   }

   private void testFsOption(String [] args, String namenode) {
     // prepare arguments to create a directory /data
     args[0] = "-fs";
     args[1] = namenode;

@@ -81,7 +80,7 @@ public class TestDFSShellGenericOptions {
       // prepare arguments to create a directory /data
       args[0] = "-conf";
       args[1] = siteFile.getPath();
       execute(args, namenode);
     } catch (FileNotFoundException e) {
       e.printStackTrace();
     } finally {

@@ -94,7 +93,7 @@ public class TestDFSShellGenericOptions {
     // prepare arguments to create a directory /data
     args[0] = "-D";
     args[1] = "fs.defaultFS="+namenode;
     execute(args, namenode);
   }

   private void execute(String [] args, String namenode) {

@@ -102,9 +101,9 @@ public class TestDFSShellGenericOptions {
     FileSystem fs=null;
     try {
       ToolRunner.run(shell, args);
-      fs = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
-          shell.getConf());
+      fs = FileSystem.get(DFSUtilClient.getNNUri(
+          DFSUtilClient.getNNAddress(namenode)), shell.getConf());
       assertTrue("Directory does not get created",
           fs.isDirectory(new Path("/data")));
       fs.delete(new Path("/data"), true);
     } catch (Exception e) {
@@ -34,13 +34,13 @@ public class TestDefaultNameNodePort {

   @Test
   public void testGetAddressFromString() throws Exception {
-    assertEquals(NameNode.getAddress("foo").getPort(),
+    assertEquals(DFSUtilClient.getNNAddress("foo").getPort(),
                  HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
-    assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
+    assertEquals(DFSUtilClient.getNNAddress("hdfs://foo/").getPort(),
                  HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
-    assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
+    assertEquals(DFSUtilClient.getNNAddress("hdfs://foo:555").getPort(),
                  555);
-    assertEquals(NameNode.getAddress("foo:555").getPort(),
+    assertEquals(DFSUtilClient.getNNAddress("foo:555").getPort(),
                  555);
   }

@@ -48,20 +48,20 @@ public class TestDefaultNameNodePort {
   public void testGetAddressFromConf() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://foo/");
-    assertEquals(NameNode.getAddress(conf).getPort(),
+    assertEquals(DFSUtilClient.getNNAddress(conf).getPort(),
                  HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
-    assertEquals(NameNode.getAddress(conf).getPort(), 555);
+    assertEquals(DFSUtilClient.getNNAddress(conf).getPort(), 555);
     FileSystem.setDefaultUri(conf, "foo");
-    assertEquals(NameNode.getAddress(conf).getPort(),
+    assertEquals(DFSUtilClient.getNNAddress(conf).getPort(),
                  HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
   }

   @Test
   public void testGetUri() {
-    assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
+    assertEquals(DFSUtilClient.getNNUri(new InetSocketAddress("foo", 555)),
                  URI.create("hdfs://foo:555"));
-    assertEquals(NameNode.getUri(new InetSocketAddress("foo",
+    assertEquals(DFSUtilClient.getNNUri(new InetSocketAddress("foo",
                  HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)),
                  URI.create("hdfs://foo"));
   }
@@ -35,12 +35,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;

@@ -73,7 +70,7 @@ public class TestFileStatus {
     cluster = new MiniDFSCluster.Builder(conf).build();
     fs = cluster.getFileSystem();
     fc = FileContext.getFileContext(cluster.getURI(0), conf);
-    dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
+    dfsClient = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
     file1 = new Path("filestatus.dat");
     DFSTestUtil.createFile(fs, file1, fileSize, fileSize, blockSize, (short) 1,
         seed);
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.ipc.RemoteException;

@@ -193,8 +192,8 @@ public class TestGetBlocks {
     DatanodeInfo[] dataNodes = null;
     boolean notWritten;
     do {
-      final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF),
-          CONF);
+      final DFSClient dfsclient = new DFSClient(
+          DFSUtilClient.getNNAddress(CONF), CONF);
       locatedBlocks = dfsclient.getNamenode()
           .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
       assertEquals(2, locatedBlocks.size());

@@ -216,7 +215,7 @@ public class TestGetBlocks {
     InetSocketAddress addr = new InetSocketAddress("localhost",
         cluster.getNameNodePort());
     NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF,
-        NameNode.getUri(addr), NamenodeProtocol.class).getProxy();
+        DFSUtilClient.getNNUri(addr), NamenodeProtocol.class).getProxy();

     // get blocks of size fileLen from dataNodes[0]
     BlockWithLocations[] locs;
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;

@@ -225,7 +224,7 @@ public class TestPersistBlocks {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       FileSystem fs = cluster.getFileSystem();
-      NameNode.getAddress(conf).getPort();
+      DFSUtilClient.getNNAddress(conf).getPort();
       // Creating a file with 4096 blockSize to write multiple blocks
       stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
       stream.write(DATA_BEFORE_RESTART);

@@ -274,7 +273,7 @@ public class TestPersistBlocks {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       FileSystem fs = cluster.getFileSystem();
-      NameNode.getAddress(conf).getPort();
+      DFSUtilClient.getNNAddress(conf).getPort();
       // Creating a file with 4096 blockSize to write multiple blocks
       stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
       stream.write(DATA_BEFORE_RESTART, 0, DATA_BEFORE_RESTART.length / 2);
@@ -34,13 +34,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.Builder;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.TestRollingUpgrade;
-import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

@@ -294,9 +294,9 @@ public class TestDataNodeRollingUpgrade {
     String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
     String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";

-    DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
-    DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);
-    DFSClient client3 = new DFSClient(NameNode.getAddress(conf), conf);
+    DFSClient client1 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
+    DFSClient client2 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
+    DFSClient client3 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);

     DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
     DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;

@@ -1497,7 +1498,7 @@ public class NNThroughputBenchmark implements Tool {
           UserGroupInformation.getCurrentUser());
       clientProto = dfs.getClient().getNamenode();
       dataNodeProto = new DatanodeProtocolClientSideTranslatorPB(
-          NameNode.getAddress(nnUri), config);
+          DFSUtilClient.getNNAddress(nnUri), config);
       refreshUserMappingsProto =
           DFSTestUtil.getRefreshUserMappingsProtocolProxy(config, nnUri);
       getBlockPoolId(dfs);
@@ -53,9 +53,9 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

@@ -978,7 +978,7 @@ public class TestINodeFile {
       long parentId = fsdir.getINode("/").getId();
       String testPath = "/.reserved/.inodes/" + dirId + "/..";

-      client = new DFSClient(NameNode.getAddress(conf), conf);
+      client = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
       HdfsFileStatus status = client.getFileInfo(testPath);
       assertTrue(parentId == status.getFileId());
@@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;

@@ -246,7 +247,7 @@ public class TestFailureToReadEdits {
     FileSystem fs0 = null;
     try {
       // Make sure that when the active restarts, it loads all the edits.
-      fs0 = FileSystem.get(NameNode.getUri(nn0.getNameNodeAddress()),
+      fs0 = FileSystem.get(DFSUtilClient.getNNUri(nn0.getNameNodeAddress()),
           conf);

       assertTrue(fs0.exists(new Path(TEST_DIR1)));