HDFS-7684. The host:port settings of the daemons should be trimmed before use. Contributed by Anu Engineer.

(cherry picked from commit 9235e2fb4e94a86671e1494f293c973cf572281c)
Akira Ajisaka 2015-02-12 17:38:37 -08:00
parent df6bee5a75
commit 5696376e29
11 changed files with 223 additions and 18 deletions
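
The change below replaces Configuration.get with Configuration.getTrimmed in the daemon address lookups, so leading or trailing whitespace in a configured host:port value is stripped before NetUtils.createSocketAddr parses it. A minimal sketch of the difference (not part of this commit; the property value is illustrative):

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;

public class TrimmedAddressSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Illustrative value with stray whitespace, like the entries in the
    // hdfs-site.malformed.xml test resource added by this commit.
    conf.set("dfs.namenode.http-address", " 0.0.0.0:50070 ");

    String raw = conf.get("dfs.namenode.http-address");            // " 0.0.0.0:50070 "
    String trimmed = conf.getTrimmed("dfs.namenode.http-address"); // "0.0.0.0:50070"

    // The trimmed value parses cleanly into host and port; the raw value
    // may not, because the surrounding spaces end up in the host and port fields.
    InetSocketAddress addr = NetUtils.createSocketAddr(trimmed);
    System.out.println(addr.getHostName() + ":" + addr.getPort()); // typically prints 0.0.0.0:50070
  }
}

The new test added at the end of this commit reads hdfs-site.malformed.xml, which pads each address value with spaces, and verifies that the cluster still starts.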


@@ -326,6 +326,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7694. FSDataInputStream should support "unbuffer" (cmccabe)
+HDFS-7684. The host:port settings of the daemons should be trimmed before
+use. (Anu Engineer via aajisaka)
OPTIMIZATIONS
HDFS-7454. Reduce memory footprint for AclEntries in NameNode.


@@ -187,16 +187,16 @@ public class DatanodeManager {
networktopology = NetworkTopology.getInstance(conf);
this.defaultXferPort = NetUtils.createSocketAddr(
-conf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
+conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort();
this.defaultInfoPort = NetUtils.createSocketAddr(
-conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
+conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT)).getPort();
this.defaultInfoSecurePort = NetUtils.createSocketAddr(
-conf.get(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
+conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();
this.defaultIpcPort = NetUtils.createSocketAddr(
-conf.get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
+conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
try {
this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),


@@ -730,7 +730,7 @@ public class DataNode extends ReconfigurableBase
private void initIpcServer(Configuration conf) throws IOException {
InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
-conf.get(DFS_DATANODE_IPC_ADDRESS_KEY));
+conf.getTrimmed(DFS_DATANODE_IPC_ADDRESS_KEY));
// Add all the RPC protocols that the Datanode implements
RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
@@ -1373,7 +1373,7 @@ public class DataNode extends ReconfigurableBase
* Determine the http server's effective addr
*/
public static InetSocketAddress getInfoAddr(Configuration conf) {
-return NetUtils.createSocketAddr(conf.get(DFS_DATANODE_HTTP_ADDRESS_KEY,
+return NetUtils.createSocketAddr(conf.getTrimmed(DFS_DATANODE_HTTP_ADDRESS_KEY,
DFS_DATANODE_HTTP_ADDRESS_DEFAULT));
}
@@ -2877,7 +2877,7 @@ public class DataNode extends ReconfigurableBase
static InetSocketAddress getStreamingAddr(Configuration conf) {
return NetUtils.createSocketAddr(
-conf.get(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
+conf.getTrimmed(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
}
@Override // DataNodeMXBean


@@ -151,7 +151,7 @@ public class DatanodeHttpServer implements Closeable {
}
if (httpsServer != null) {
-InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
+InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.getTrimmed(
DFS_DATANODE_HTTPS_ADDRESS_KEY, DFS_DATANODE_HTTPS_ADDRESS_DEFAULT));
ChannelFuture f = httpsServer.bind(secInfoSocAddr);
f.syncUninterruptibly();


@@ -94,13 +94,13 @@ public class BackupNode extends NameNode {
/////////////////////////////////////////////////////
@Override // NameNode
protected InetSocketAddress getRpcServerAddress(Configuration conf) {
-String addr = conf.get(BN_ADDRESS_NAME_KEY, BN_ADDRESS_DEFAULT);
+String addr = conf.getTrimmed(BN_ADDRESS_NAME_KEY, BN_ADDRESS_DEFAULT);
return NetUtils.createSocketAddr(addr);
}
@Override
protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) {
-String addr = conf.get(BN_SERVICE_RPC_ADDRESS_KEY);
+String addr = conf.getTrimmed(BN_SERVICE_RPC_ADDRESS_KEY);
if (addr == null || addr.isEmpty()) {
return null;
}
@@ -122,7 +122,7 @@ public class BackupNode extends NameNode {
@Override // NameNode
protected InetSocketAddress getHttpServerAddress(Configuration conf) {
assert getNameNodeAddress() != null : "rpcAddress should be calculated first";
-String addr = conf.get(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT);
+String addr = conf.getTrimmed(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT);
return NetUtils.createSocketAddr(addr);
}


@@ -247,7 +247,7 @@ public class ImageServlet extends HttpServlet {
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+conf.getTrimmed(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
LOG.warn(msg);
}


@@ -449,7 +449,7 @@ public class NameNode implements NameNodeStatusMXBean {
*/
public static InetSocketAddress getServiceAddress(Configuration conf,
boolean fallback) {
-String addr = conf.get(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
if (addr == null || addr.isEmpty()) {
return fallback ? getAddress(conf) : null;
}
@@ -577,7 +577,7 @@ public class NameNode implements NameNodeStatusMXBean {
/** @return the NameNode HTTP address. */
public static InetSocketAddress getHttpAddress(Configuration conf) {
return NetUtils.createSocketAddr(
-conf.get(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
+conf.getTrimmed(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
}
protected void loadNamesystem(Configuration conf) throws IOException {


@@ -103,7 +103,7 @@ public class NameNodeHttpServer {
final String infoHost = bindAddress.getHostName();
final InetSocketAddress httpAddr = bindAddress;
-final String httpsAddrString = conf.get(
+final String httpsAddrString = conf.getTrimmed(
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
@@ -127,7 +127,7 @@ public class NameNodeHttpServer {
if (policy.isHttpsEnabled()) {
// assume same ssl port for all datanodes
-InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
+InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.getTrimmed(
DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":"
    + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,


@@ -200,7 +200,7 @@ public class SecondaryNameNode implements Runnable,
}
public static InetSocketAddress getHttpAddress(Configuration conf) {
-return NetUtils.createSocketAddr(conf.get(
+return NetUtils.createSocketAddr(conf.getTrimmed(
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
}
@@ -253,7 +253,7 @@ public class SecondaryNameNode implements Runnable,
final InetSocketAddress httpAddr = infoSocAddr;
-final String httpsAddrString = conf.get(
+final String httpsAddrString = conf.getTrimmed(
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);


@@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import static org.junit.Assert.assertNotEquals;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class TestMalformedURLs {
  private MiniDFSCluster cluster;
  Configuration config;

  @Before
  public void setUp() throws Exception {
    Configuration.addDefaultResource("hdfs-site.malformed.xml");
    config = new Configuration();
  }

  @Test
  public void testTryStartingCluster() throws Exception {
    // if we are able to start the cluster, it means
    // that we were able to read the configuration
    // correctly.
    assertNotEquals(config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY),
        config.getTrimmed(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));

    cluster = new MiniDFSCluster.Builder(config).build();
    cluster.waitActive();
  }

  @After
  public void tearDown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}


@@ -0,0 +1,143 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!--
This file creates URLs with spaces at the beginning and
end and makes sure that we read them correctly.
JIRA - HDFS-7684
-->
<configuration>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>0.0.0.0:50090 </value>
<description>
The secondary namenode http server address and port.
</description>
</property>
<property>
<name>dfs.namenode.secondary.https-address</name>
<value>0.0.0.0:50091 </value>
<description>
The secondary namenode HTTPS server address and port.
</description>
</property>
<property>
<name>dfs.datanode.address</name>
<value>0.0.0.0:50010 </value>
<description>
The datanode server address and port for data transfer.
</description>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:50075 </value>
<description>
The datanode http server address and port.
</description>
</property>
<property>
<name>dfs.datanode.ipc.address</name>
<value>0.0.0.0:50020 </value>
<description>
The datanode ipc server address and port.
</description>
</property>
<property>
<name>dfs.datanode.handler.count</name>
<value>10</value>
<description>The number of server threads for the datanode.</description>
</property>
<property>
<name>dfs.namenode.http-address</name>
<value>0.0.0.0:50070 </value>
<description>
The address and the base port where the dfs namenode web ui will listen on.
</description>
</property>
<property>
<name>dfs.datanode.https.address</name>
<value>0.0.0.0:50475 </value>
<description>The datanode secure http server address and port.</description>
</property>
<property>
<name>dfs.namenode.https-address</name>
<value>0.0.0.0:50470 </value>
<description>The namenode secure http server address and port.</description>
</property>
<property>
<name>dfs.namenode.backup.address</name>
<value>0.0.0.0:50100 </value>
<description>
The backup node server address and port.
If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>dfs.namenode.backup.http-address</name>
<value> 0.0.0.0:50105 </value>
<description>
The backup node http server address and port.
If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>dfs.journalnode.rpc-address</name>
<value>0.0.0.0:8485</value>
<description>
The JournalNode RPC server address and port.
</description>
</property>
<property>
<name>dfs.journalnode.http-address</name>
<value>0.0.0.0:8480</value>
<description>
The address and port the JournalNode HTTP server listens on.
If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>dfs.journalnode.https-address</name>
<value>0.0.0.0:8481</value>
<description>
The address and port the JournalNode HTTPS server listens on.
If the port is 0 then the server will start on a free port.
</description>
</property>
</configuration>