diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 429a6b20277..ef2a5b80fb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -326,6 +326,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7694. FSDataInputStream should support "unbuffer" (cmccabe)
 
+    HDFS-7684. The host:port settings of the daemons should be trimmed before
+    use. (Anu Engineer via aajisaka)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 5cb110b23b3..329fec38718 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -187,16 +187,16 @@ public class DatanodeManager {
     networktopology = NetworkTopology.getInstance(conf);
 
     this.defaultXferPort = NetUtils.createSocketAddr(
-        conf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
+        conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
             DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort();
     this.defaultInfoPort = NetUtils.createSocketAddr(
-        conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
+        conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT)).getPort();
     this.defaultInfoSecurePort = NetUtils.createSocketAddr(
-        conf.get(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
+        conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();
     this.defaultIpcPort = NetUtils.createSocketAddr(
-        conf.get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
+        conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
     try {
       this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index cfd33cd752f..fa0d74be0f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -730,7 +730,7 @@ public class DataNode extends ReconfigurableBase
   private void initIpcServer(Configuration conf) throws IOException {
     InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
-        conf.get(DFS_DATANODE_IPC_ADDRESS_KEY));
+        conf.getTrimmed(DFS_DATANODE_IPC_ADDRESS_KEY));
 
     // Add all the RPC protocols that the Datanode implements
     RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
@@ -1373,7 +1373,7 @@ public class DataNode extends ReconfigurableBase
    * Determine the http server's effective addr
    */
   public static InetSocketAddress getInfoAddr(Configuration conf) {
-    return NetUtils.createSocketAddr(conf.get(DFS_DATANODE_HTTP_ADDRESS_KEY,
+    return NetUtils.createSocketAddr(conf.getTrimmed(DFS_DATANODE_HTTP_ADDRESS_KEY,
         DFS_DATANODE_HTTP_ADDRESS_DEFAULT));
   }
 
@@ -2877,7 +2877,7 @@ public class DataNode extends ReconfigurableBase
   static InetSocketAddress getStreamingAddr(Configuration conf) {
     return NetUtils.createSocketAddr(
-        conf.get(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
+        conf.getTrimmed(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
   }
 
   @Override // DataNodeMXBean
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index 4ee82fb2631..b620ba6ac10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -151,7 +151,7 @@ public class DatanodeHttpServer implements Closeable {
     }
 
     if (httpsServer != null) {
-      InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
+      InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.getTrimmed(
           DFS_DATANODE_HTTPS_ADDRESS_KEY, DFS_DATANODE_HTTPS_ADDRESS_DEFAULT));
       ChannelFuture f = httpsServer.bind(secInfoSocAddr);
       f.syncUninterruptibly();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index 000806916fe..4efa6e12f45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -94,13 +94,13 @@ public class BackupNode extends NameNode {
   /////////////////////////////////////////////////////
   @Override // NameNode
   protected InetSocketAddress getRpcServerAddress(Configuration conf) {
-    String addr = conf.get(BN_ADDRESS_NAME_KEY, BN_ADDRESS_DEFAULT);
+    String addr = conf.getTrimmed(BN_ADDRESS_NAME_KEY, BN_ADDRESS_DEFAULT);
     return NetUtils.createSocketAddr(addr);
   }
 
   @Override
   protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) {
-    String addr = conf.get(BN_SERVICE_RPC_ADDRESS_KEY);
+    String addr = conf.getTrimmed(BN_SERVICE_RPC_ADDRESS_KEY);
     if (addr == null || addr.isEmpty()) {
       return null;
     }
@@ -122,7 +122,7 @@ public class BackupNode extends NameNode {
   @Override // NameNode
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
     assert getNameNodeAddress() != null : "rpcAddress should be calculated first";
-    String addr = conf.get(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT);
+    String addr = conf.getTrimmed(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT);
     return NetUtils.createSocketAddr(addr);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
index 702c8f14db1..c565eb521b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
@@ -247,7 +247,7 @@ public class ImageServlet extends HttpServlet {
           DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
           conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
           DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-          conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+          conf.getTrimmed(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
              DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
       LOG.warn(msg);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 5a05eaf02ce..2e7af9db50a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -449,7 +449,7 @@ public class NameNode implements NameNodeStatusMXBean {
    */
   public static InetSocketAddress getServiceAddress(Configuration conf,
       boolean fallback) {
-    String addr = conf.get(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+    String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
     if (addr == null || addr.isEmpty()) {
       return fallback ? getAddress(conf) : null;
     }
@@ -577,7 +577,7 @@ public class NameNode implements NameNodeStatusMXBean {
   /** @return the NameNode HTTP address. */
   public static InetSocketAddress getHttpAddress(Configuration conf) {
     return NetUtils.createSocketAddr(
-        conf.get(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
+        conf.getTrimmed(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
   }
 
   protected void loadNamesystem(Configuration conf) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 5efcd1cf086..9a46811d57f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -103,7 +103,7 @@ public class NameNodeHttpServer {
     final String infoHost = bindAddress.getHostName();
 
     final InetSocketAddress httpAddr = bindAddress;
-    final String httpsAddrString = conf.get(
+    final String httpsAddrString = conf.getTrimmed(
         DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
         DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
@@ -127,7 +127,7 @@ public class NameNodeHttpServer {
 
     if (policy.isHttpsEnabled()) {
       // assume same ssl port for all datanodes
-      InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
+      InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.getTrimmed(
           DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":"
               + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
       httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 10f1720dfb0..83e6426a30c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -200,7 +200,7 @@ public class SecondaryNameNode implements Runnable,
   }
 
   public static InetSocketAddress getHttpAddress(Configuration conf) {
-    return NetUtils.createSocketAddr(conf.get(
+    return NetUtils.createSocketAddr(conf.getTrimmed(
         DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
         DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
   }
 
@@ -253,7 +253,7 @@ public class SecondaryNameNode implements Runnable,
 
     final InetSocketAddress httpAddr = infoSocAddr;
 
-    final String httpsAddrString = conf.get(
+    final String httpsAddrString = conf.getTrimmed(
         DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
         DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java
new file mode 100644
index 00000000000..2515da12f0f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+
+import static org.junit.Assert.assertNotEquals;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+
+public class TestMalformedURLs {
+  private MiniDFSCluster cluster;
+  Configuration config;
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration.addDefaultResource("hdfs-site.malformed.xml");
+    config = new Configuration();
+  }
+
+  @Test
+  public void testTryStartingCluster() throws Exception {
+    // if we are able to start the cluster, it means
+    // that we were able to read the configuration
+    // correctly.
+
+    assertNotEquals(config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY),
+        config.getTrimmed(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
+    cluster = new MiniDFSCluster.Builder(config).build();
+    cluster.waitActive();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hdfs-site.malformed.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hdfs-site.malformed.xml
new file mode 100644
index 00000000000..fdf501711f2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hdfs-site.malformed.xml
@@ -0,0 +1,143 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!-- Values in this file deliberately carry trailing whitespace, so that
+     conf.get() and conf.getTrimmed() return different strings. -->
+<configuration>
+  <property>
+    <name>dfs.namenode.secondary.http-address</name>
+    <value>0.0.0.0:50090
+    </value>
+    <description>
+      The secondary namenode http server address and port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.secondary.https-address</name>
+    <value>0.0.0.0:50091
+    </value>
+    <description>
+      The secondary namenode HTTPS server address and port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010
+    </value>
+    <description>
+      The datanode server address and port for data transfer.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075
+    </value>
+    <description>
+      The datanode http server address and port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:50020
+    </value>
+    <description>
+      The datanode ipc server address and port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.handler.count</name>
+    <value>10
+    </value>
+    <description>The number of server threads for the datanode.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.http-address</name>
+    <value>0.0.0.0:50070
+    </value>
+    <description>
+      The address and the base port where the dfs namenode web ui will listen on.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.https.address</name>
+    <value>0.0.0.0:50475
+    </value>
+    <description>The datanode secure http server address and port.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.https-address</name>
+    <value>0.0.0.0:50470
+    </value>
+    <description>The namenode secure http server address and port.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.backup.address</name>
+    <value>0.0.0.0:50100
+    </value>
+    <description>
+      The backup node server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.backup.http-address</name>
+    <value>0.0.0.0:50105
+    </value>
+    <description>
+      The backup node http server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.rpc-address</name>
+    <value>0.0.0.0:8485
+    </value>
+    <description>
+      The JournalNode RPC server address and port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.http-address</name>
+    <value>0.0.0.0:8480
+    </value>
+    <description>
+      The address and port the JournalNode HTTP server listens on.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.https-address</name>
+    <value>0.0.0.0:8481
+    </value>
+    <description>
+      The address and port the JournalNode HTTPS server listens on.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+</configuration>
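
Note (illustrative, not part of the patch): every hunk above makes the same swap, because Configuration.getTrimmed() strips leading and trailing whitespace from the stored value before it reaches NetUtils.createSocketAddr(), while plain get() returns the value verbatim. A minimal sketch of the difference follows; the class name TrimmedAddrSketch is hypothetical, and the inline value stands in for an entry from hdfs-site.malformed.xml.

    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.net.NetUtils;

    public class TrimmedAddrSketch {
      public static void main(String[] args) {
        // Simulate a host:port pasted into hdfs-site.xml with a trailing
        // newline, like the values in hdfs-site.malformed.xml above.
        Configuration conf = new Configuration(false);
        conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:50070\n");

        // get() preserves the whitespace; getTrimmed() removes it. This is
        // exactly the difference TestMalformedURLs asserts with assertNotEquals.
        String raw = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
        String trimmed = conf.getTrimmed(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
        System.out.println(raw.equals(trimmed)); // false: raw keeps the '\n'

        // The patched call sites hand the trimmed string to the parser.
        InetSocketAddress addr = NetUtils.createSocketAddr(trimmed);
        System.out.println(addr.getPort());      // 50070
      }
    }

The same reasoning applies to each get() -> getTrimmed() change in the daemons: addresses copied from documentation or spreadsheets often pick up stray spaces or newlines, and trimming once at read time is cheaper than requiring every site file to be hand-cleaned.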