HDFS-5321. Clean up the HTTP-related configuration in HDFS. Contributed by Haohui Mai.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1574270 13f79535-47bb-0310-9956-ffa450edef68
commit e90687f90e (parent 53768eccf3)
@@ -329,8 +329,7 @@ public class HttpFSFileSystem extends FileSystem
    */
   @Override
   protected int getDefaultPort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
-        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
+    return DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
   }
 
   /**
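The pattern above repeats for every HTTP-backed FileSystem touched by this commit: getDefaultPort() stops consulting the configuration and returns the compile-time default. A minimal sketch of the observable difference, mirroring the behavior the deleted tests further below used to pin down (the 123 value is illustrative, not from the patch):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class DefaultPortSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setInt("dfs.http.port", 123); // honored before this commit, ignored after
        FileSystem fs = FileSystem.get(URI.create("hftp://localhost"), conf);
        // before: getCanonicalServiceName() was "127.0.0.1:123"
        // after:  the fixed default wins, so it is "127.0.0.1:50070"
        System.out.println(fs.getCanonicalServiceName());
      }
    }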
@@ -517,6 +517,8 @@ Release 2.4.0 - UNRELEASED
 
     HDFS-6046. add dfs.client.mmap.enabled (cmccabe)
 
+    HDFS-5321. Clean up the HTTP-related configuration in HDFS (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
@@ -123,7 +123,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.namenode.path.based.cache.block.map.allocation.percent";
   public static final float DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT = 0.25f;
 
-  public static final String DFS_NAMENODE_HTTP_PORT_KEY = "dfs.http.port";
   public static final int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
   public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
   public static final String DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTP_PORT_DEFAULT;
@@ -294,7 +293,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   //Following keys have no defaults
   public static final String DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";
-  public static final String DFS_NAMENODE_HTTPS_PORT_KEY = "dfs.https.port";
   public static final int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470;
   public static final String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
   public static final String DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTPS_PORT_DEFAULT;
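With DFS_NAMENODE_HTTP_PORT_KEY (dfs.http.port) and DFS_NAMENODE_HTTPS_PORT_KEY (dfs.https.port) deleted, the port defaults become plain constants, and the per-service address keys remain the supported way to move the NameNode web endpoints. A minimal sketch of that pattern, assuming the usual address-key parsing (the 8080 value is illustrative):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.net.NetUtils;

    public class HttpAddressSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Move the NameNode web UI via the address key, not a port key.
        conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:8080");
        InetSocketAddress addr = NetUtils.createSocketAddr(
            conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
                DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
        System.out.println(addr.getPort()); // 8080
      }
    }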
@@ -42,7 +42,6 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.SecureRandom;
 import java.text.SimpleDateFormat;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -730,46 +729,6 @@ public class DFSUtil {
     }
   }
 
-  /**
-   * Resolve an HDFS URL into real INetSocketAddress. It works like a DNS resolver
-   * when the URL points to an non-HA cluster. When the URL points to an HA
-   * cluster, the resolver further resolves the logical name (i.e., the authority
-   * in the URL) into real namenode addresses.
-   */
-  public static InetSocketAddress[] resolveWebHdfsUri(URI uri, Configuration conf)
-      throws IOException {
-    int defaultPort;
-    String scheme = uri.getScheme();
-    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
-      defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
-    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
-      defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
-    } else {
-      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
-    }
-
-    ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();
-
-    if (!HAUtil.isLogicalUri(conf, uri)) {
-      InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
-          defaultPort);
-      ret.add(addr);
-
-    } else {
-      Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
-          .getHaNnWebHdfsAddresses(conf, scheme);
-
-      for (Map<String, InetSocketAddress> addrs : addresses.values()) {
-        for (InetSocketAddress addr : addrs.values()) {
-          ret.add(addr);
-        }
-      }
-    }
-
-    InetSocketAddress[] r = new InetSocketAddress[ret.size()];
-    return ret.toArray(r);
-  }
-
   /**
    * Returns list of InetSocketAddress corresponding to backup node rpc
    * addresses from the configuration.
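This helper is not lost: it reappears below as the private WebHdfsFileSystem#resolveNNAddr(), where the explicit scheme switch is replaced by a call to the overridable getDefaultPort(). The java.util.ArrayList import moves along with it.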
@@ -123,8 +123,7 @@ public class HftpFileSystem extends FileSystem
 
   @Override
   protected int getDefaultPort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
-        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
+    return DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
   }
 
   /**
@@ -64,7 +64,6 @@ public class HsftpFileSystem extends HftpFileSystem {
 
   @Override
   protected int getDefaultPort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
-        DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
+    return DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
   }
 }
@@ -42,7 +42,6 @@ public class SWebHdfsFileSystem extends WebHdfsFileSystem {
 
   @Override
   protected int getDefaultPort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
-        DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
+    return DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
   }
 }
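Across HttpFSFileSystem, HftpFileSystem, HsftpFileSystem, SWebHdfsFileSystem, and WebHdfsFileSystem the patch converges on one pattern: each scheme states its default port by overriding getDefaultPort() rather than by reading a per-scheme port key. An illustrative skeleton only (names shortened; the real classes carry the full filesystem logic):

    // Shape of the getDefaultPort() hierarchy after this commit; 50070 and
    // 50470 are DFS_NAMENODE_HTTP_PORT_DEFAULT / DFS_NAMENODE_HTTPS_PORT_DEFAULT.
    class WebHdfsLike {
      protected int getDefaultPort() { return 50070; } // HTTP schemes
    }

    class SWebHdfsLike extends WebHdfsLike {
      @Override
      protected int getDefaultPort() { return 50470; } // HTTPS schemes
    }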
@@ -29,6 +29,7 @@ import java.net.MalformedURLException;
 import java.net.URI;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.StringTokenizer;
@@ -172,7 +173,7 @@ public class WebHdfsFileSystem extends FileSystem
 
     ugi = UserGroupInformation.getCurrentUser();
     this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
-    this.nnAddrs = DFSUtil.resolveWebHdfsUri(this.uri, conf);
+    this.nnAddrs = resolveNNAddr();
 
     boolean isHA = HAUtil.isLogicalUri(conf, this.uri);
     // In non-HA case, the code needs to call getCanonicalUri() in order to
@@ -237,8 +238,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   protected int getDefaultPort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
-        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
+    return DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
   }
 
   @Override
@@ -1082,4 +1082,36 @@ public class WebHdfsFileSystem extends FileSystem
     final Map<?, ?> m = run(op, p);
     return JsonUtil.toMD5MD5CRC32FileChecksum(m);
   }
+
+  /**
+   * Resolve an HDFS URL into real INetSocketAddress. It works like a DNS
+   * resolver when the URL points to an non-HA cluster. When the URL points to
+   * an HA cluster, the resolver further resolves the logical name (i.e., the
+   * authority in the URL) into real namenode addresses.
+   */
+  private InetSocketAddress[] resolveNNAddr() throws IOException {
+    Configuration conf = getConf();
+    final String scheme = uri.getScheme();
+
+    ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();
+
+    if (!HAUtil.isLogicalUri(conf, uri)) {
+      InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
+          getDefaultPort());
+      ret.add(addr);
+
+    } else {
+      Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
+          .getHaNnWebHdfsAddresses(conf, scheme);
+
+      for (Map<String, InetSocketAddress> addrs : addresses.values()) {
+        for (InetSocketAddress addr : addrs.values()) {
+          ret.add(addr);
+        }
+      }
+    }
+
+    InetSocketAddress[] r = new InetSocketAddress[ret.size()];
+    return ret.toArray(r);
+  }
 }
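For context, a sketch of a configuration that exercises the HA branch of resolveNNAddr() above. The key names follow the standard HDFS HA pattern and the hosts mirror the ones used by the deleted testResolve; treat it as an assumption-laden example, not part of the patch:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class HaWebHdfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        conf.set("dfs.nameservices", "ns1");
        conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
        conf.set("dfs.namenode.http-address.ns1.nn1", "ns1-nn1.example.com:50070");
        conf.set("dfs.namenode.http-address.ns1.nn2", "ns1-nn2.example.com:50070");
        // "webhdfs://ns1" is a logical URI: resolveNNAddr() collects the HTTP
        // address of every namenode in ns1 rather than a single host:port.
        FileSystem fs = FileSystem.get(URI.create("webhdfs://ns1"), conf);
        System.out.println(fs.getUri()); // webhdfs://ns1
      }
    }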
@@ -32,7 +32,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
@@ -579,25 +578,6 @@ public class TestDFSUtil {
     assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
   }
 
-  @Test
-  public void testResolve() throws IOException, URISyntaxException {
-    final String LOGICAL_HOST_NAME = "ns1";
-    final String NS1_NN1_HOST = "ns1-nn1.example.com";
-    final String NS1_NN2_HOST = "ns1-nn2.example.com";
-    final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
-    final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
-    final int DEFAULT_PORT = NameNode.DEFAULT_PORT;
-
-    Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
-    URI uri = new URI("webhdfs://ns1");
-    assertTrue(HAUtil.isLogicalUri(conf, uri));
-    InetSocketAddress[] addrs = DFSUtil.resolveWebHdfsUri(uri, conf);
-    assertArrayEquals(new InetSocketAddress[] {
-      new InetSocketAddress(NS1_NN1_HOST, DEFAULT_PORT),
-      new InetSocketAddress(NS1_NN2_HOST, DEFAULT_PORT),
-    }, addrs);
-  }
-
   private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
     HdfsConfiguration conf = new HdfsConfiguration();
 
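testResolve targeted the removed DFSUtil.resolveWebHdfsUri directly; with the resolver now a private detail of WebHdfsFileSystem there is no public entry point left to assert against. The createWebHDFSHAConfiguration helper stays behind for the remaining HA tests.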
@@ -310,23 +310,6 @@ public class TestHftpFileSystem {
         fs.getCanonicalServiceName());
   }
 
-  @Test
-  public void testHftpCustomDefaultPorts() throws IOException {
-    Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
-
-    URI uri = URI.create("hftp://localhost");
-    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
-
-    assertEquals(123, fs.getDefaultPort());
-
-    assertEquals(uri, fs.getUri());
-
-    // HFTP uses http to get the token so canonical service name should
-    // return the http port.
-    assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
-  }
-
   @Test
   public void testHftpCustomUriPortWithDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
@@ -343,12 +326,11 @@ public class TestHftpFileSystem {
   @Test
   public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
-
     URI uri = URI.create("hftp://localhost:789");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
-    assertEquals(123, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
+        fs.getDefaultPort());
 
     assertEquals(uri, fs.getUri());
     assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
@@ -383,20 +365,6 @@ public class TestHftpFileSystem {
         fs.getCanonicalServiceName());
   }
 
-  @Test
-  public void testHsftpCustomDefaultPorts() throws IOException {
-    Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
-
-    URI uri = URI.create("hsftp://localhost");
-    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
-
-    assertEquals(456, fs.getDefaultPort());
-
-    assertEquals(uri, fs.getUri());
-    assertEquals("127.0.0.1:456", fs.getCanonicalServiceName());
-  }
 
   @Test
   public void testHsftpCustomUriPortWithDefaultPorts() throws IOException {
@@ -414,13 +382,12 @@ public class TestHftpFileSystem {
   @Test
   public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
 
     URI uri = URI.create("hsftp://localhost:789");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
-    assertEquals(456, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getDefaultPort());
 
     assertEquals(uri, fs.getUri());
     assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
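In short, the two custom-default-port tests are deleted because their premise, overriding the defaults via dfs.http.port and dfs.https.port, no longer exists, while the custom-URI-port variants keep the explicit :789 and now assert the fixed defaults. A deployment that relied on the removed keys must put the port in the URI itself or in the dfs.namenode.http-address / dfs.namenode.https-address keys instead.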