diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c356776b2e5..22624309828 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -110,6 +110,9 @@ Release 2.1.1-beta - UNRELEASED
HDFS-3245. Add metrics and web UI for cluster version summary. (Ravi
Prakash via kihwal)
+ HDFS-5128. Allow multiple net interfaces to be used with HA namenode RPC
+ server. (kihwal)
+
OPTIMIZATIONS
BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c11783da0bc..66aa1ac7ff6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -104,7 +104,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
public static final String DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTP_PORT_DEFAULT;
public static final String DFS_NAMENODE_RPC_ADDRESS_KEY = "dfs.namenode.rpc-address";
+ public static final String DFS_NAMENODE_RPC_BIND_HOST_KEY = "dfs.namenode.rpc-bind-host";
public static final String DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.servicerpc-address";
+ public static final String DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY = "dfs.namenode.servicerpc-bind-host";
public static final String DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
public static final long DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;
public static final String DFS_NAMENODE_SAFEMODE_EXTENSION_KEY = "dfs.namenode.safemode.extension";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 37ae7f210ae..3b93b0fb64b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -168,12 +168,14 @@ public class NameNode implements NameNodeStatusMXBean {
*/
public static final String[] NAMENODE_SPECIFIC_KEYS = {
DFS_NAMENODE_RPC_ADDRESS_KEY,
+ DFS_NAMENODE_RPC_BIND_HOST_KEY,
DFS_NAMENODE_NAME_DIR_KEY,
DFS_NAMENODE_EDITS_DIR_KEY,
DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
DFS_NAMENODE_CHECKPOINT_DIR_KEY,
DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+ DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY,
DFS_NAMENODE_HTTP_ADDRESS_KEY,
DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
@@ -389,6 +391,28 @@ public class NameNode implements NameNodeStatusMXBean {
return getAddress(conf);
}
+ /** Given a configuration, return the bind host of the service RPC server.
+ * Returns null if the bind host is not configured.
+ */
+ protected String getServiceRpcServerBindHost(Configuration conf) {
+ String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
+ if (addr == null || addr.isEmpty()) {
+ return null;
+ }
+ return addr;
+ }
+
+ /** Given a configuration, return the bind host of the client RPC server.
+ * Returns null if the bind host is not configured.
+ */
+ protected String getRpcServerBindHost(Configuration conf) {
+ String addr = conf.getTrimmed(DFS_NAMENODE_RPC_BIND_HOST_KEY);
+ if (addr == null || addr.isEmpty()) {
+ return null;
+ }
+ return addr;
+ }
+
/**
* Modifies the configuration passed to contain the service rpc address setting
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index b4f56a981f2..e95cbdd97bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -137,6 +137,7 @@ import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTrans
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.util.VersionUtil;
+import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
/**
@@ -219,6 +220,13 @@ class NameNodeRpcServer implements NamenodeProtocols {
InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
if (serviceRpcAddr != null) {
+ String bindHost = nn.getServiceRpcServerBindHost(conf);
+ if (bindHost == null) {
+ bindHost = serviceRpcAddr.getHostName();
+ }
+ LOG.info("Service RPC server is binding to " + bindHost + ":" +
+ serviceRpcAddr.getPort());
+
int serviceHandlerCount =
conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
@@ -226,7 +234,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
.setProtocol(
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
.setInstance(clientNNPbService)
- .setBindAddress(serviceRpcAddr.getHostName())
+ .setBindAddress(bindHost)
.setPort(serviceRpcAddr.getPort()).setNumHandlers(serviceHandlerCount)
.setVerbose(false)
.setSecretManager(namesystem.getDelegationTokenSecretManager())
@@ -246,17 +254,26 @@ class NameNodeRpcServer implements NamenodeProtocols {
DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class,
getUserMappingService, serviceRpcServer);
- serviceRPCAddress = serviceRpcServer.getListenerAddress();
+ // Update the address with the correct port
+ InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress();
+ serviceRPCAddress = new InetSocketAddress(
+ serviceRpcAddr.getHostName(), listenAddr.getPort());
nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
} else {
serviceRpcServer = null;
serviceRPCAddress = null;
}
InetSocketAddress rpcAddr = nn.getRpcServerAddress(conf);
+ String bindHost = nn.getRpcServerBindHost(conf);
+ if (bindHost == null) {
+ bindHost = rpcAddr.getHostName();
+ }
+ LOG.info("RPC server is binding to " + bindHost + ":" + rpcAddr.getPort());
+
this.clientRpcServer = new RPC.Builder(conf)
.setProtocol(
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
- .setInstance(clientNNPbService).setBindAddress(rpcAddr.getHostName())
+ .setInstance(clientNNPbService).setBindAddress(bindHost)
.setPort(rpcAddr.getPort()).setNumHandlers(handlerCount)
.setVerbose(false)
.setSecretManager(namesystem.getDelegationTokenSecretManager()).build();
@@ -286,7 +303,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
// The rpc-server port can be ephemeral... ensure we have the correct info
- clientRpcAddress = clientRpcServer.getListenerAddress();
+ InetSocketAddress listenAddr = clientRpcServer.getListenerAddress();
+ clientRpcAddress = new InetSocketAddress(
+ rpcAddr.getHostName(), listenAddr.getPort());
nn.setRpcServerAddress(conf, clientRpcAddress);
minimumDataNodeVersion = conf.get(
@@ -310,6 +329,12 @@ class NameNodeRpcServer implements NamenodeProtocols {
NSQuotaExceededException.class,
DSQuotaExceededException.class);
}
+
+ /** Allow access to the client RPC server for testing */
+ @VisibleForTesting
+ RPC.Server getClientRpcServer() {
+ return clientRpcServer;
+ }
/**
* Start client and service RPC servers.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 06eca701264..f25245e36f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -51,6 +51,18 @@
+
+ dfs.namenode.rpc-bind-host
+
+
+ The actual address the server will bind to. If this optional address is
+ set, the RPC server will bind to this address and the port specified in
+ dfs.namenode.rpc-address for the RPC server. It can also be specified
+ per name node or name service for HA/Federation. This is most useful for
+ making the name node listen on all interfaces by setting it to 0.0.0.0.
+
+
+
dfs.namenode.servicerpc-address
@@ -64,6 +76,18 @@
+
+ dfs.namenode.servicerpc-bind-host
+
+
+ The actual address the server will bind to. If this optional address is
+ set, the service RPC server will bind to this address and the port
+ specified in dfs.namenode.servicerpc-address. It can also be specified
+ per name node or name service for HA/Federation. This is most useful for
+ making the name node listen on all interfaces by setting it to 0.0.0.0.
+
+
+
dfs.namenode.secondary.http-address
0.0.0.0:50090
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
new file mode 100644
index 00000000000..ada93e84f0e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Test that the NameNode RPC server can be made to bind to an address
+ * other than the one in dfs.namenode.rpc-address by setting the optional
+ * dfs.namenode.rpc-bind-host property, e.g. to 0.0.0.0 so the server
+ * listens on all interfaces.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+
+import org.junit.Test;
+
+public class TestNameNodeRpcServer {
+
+ @Test
+ public void testNamenodeRpcBindAny() throws IOException {
+ Configuration conf = new HdfsConfiguration();
+
+ // The name node in MiniDFSCluster only binds to 127.0.0.1.
+ // We can set the bind address to 0.0.0.0 to make it listen
+ // to all interfaces.
+ conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
+ MiniDFSCluster cluster = null;
+
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).build();
+ cluster.waitActive();
+ assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc())
+ .getClientRpcServer().getListenerAddress().getHostName());
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ // Reset the config
+ conf.unset(DFS_NAMENODE_RPC_BIND_HOST_KEY);
+ }
+ }
+}
+