From 87a6db45b70a1a07165e0773c4452d1327258bfa Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Mon, 9 Dec 2013 21:58:47 +0000
Subject: [PATCH] HDFS-5629. Support HTTPS in JournalNode and SecondaryNameNode. Contributed by Haohui Mai.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1549692 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |  3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java    |  5 ++
 .../java/org/apache/hadoop/hdfs/DFSUtil.java     | 65 ++++++++++++++
 .../qjournal/client/IPCLoggerChannel.java        | 64 +++++++++++---
 .../hdfs/qjournal/server/JournalNode.java        |  9 +-
 .../server/JournalNodeHttpServer.java            | 63 +++++---------
 .../qjournal/server/JournalNodeRpcServer.java    |  4 +
 .../hadoop/hdfs/server/datanode/DataNode.java    |  7 +-
 .../server/namenode/NameNodeHttpServer.java      | 58 ++-----------
 .../server/namenode/SecondaryNameNode.java       | 85 +++++++++---------
 .../src/main/proto/QJournalProtocol.proto        |  4 +
 .../hdfs/qjournal/MiniJournalCluster.java        | 86 +++++++++++--------
 .../hdfs/qjournal/server/TestJournalNode.java    |  6 +-
 13 files changed, 268 insertions(+), 191 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d79dc5495ed..9c80fbf04cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -239,6 +239,9 @@ Trunk (Unreleased)
     HDFS-5554. Flatten INodeFile hierarchy: Replace INodeFileWithSnapshot
     with FileWithSnapshotFeature. (jing9 via szetszwo)
 
+    HDFS-5629. Support HTTPS in JournalNode and SecondaryNameNode.
+    (Haohui Mai via jing9)
+
   OPTIMIZATIONS
 
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 6caef9389c3..ab170058808 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -130,6 +130,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
   public static final String  DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address";
   public static final String  DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
+  public static final String  DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY = "dfs.namenode.secondary.https-address";
+  public static final String  DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:50091";
   public static final String  DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period";
   public static final long    DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT = 60;
   public static final String  DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";
@@ -504,6 +506,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_JOURNALNODE_HTTP_ADDRESS_KEY = "dfs.journalnode.http-address";
   public static final int     DFS_JOURNALNODE_HTTP_PORT_DEFAULT = 8480;
   public static final String  DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_HTTP_PORT_DEFAULT;
+  public static final String  DFS_JOURNALNODE_HTTPS_ADDRESS_KEY = "dfs.journalnode.https-address";
+  public static final int     DFS_JOURNALNODE_HTTPS_PORT_DEFAULT = 8481;
+  public static final String  DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_HTTPS_PORT_DEFAULT;
 
   public static final String  DFS_JOURNALNODE_KEYTAB_FILE_KEY = "dfs.journalnode.keytab.file";
   public static final String  DFS_JOURNALNODE_USER_NAME_KEY = "dfs.journalnode.kerberos.principal";
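For orientation only (this note and the snippet below are not part of the patch): the new keys give the SecondaryNameNode and the JournalNode HTTPS bind addresses of their own, alongside the existing HTTP ones. A minimal sketch of wiring them up programmatically might look like the following; the dfs.http.policy key and the placeholder host/port values are assumptions about a typical Hadoop 2.x setup, not something this commit mandates.

import org.apache.hadoop.conf.Configuration;

public class HttpsAddressConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Which endpoints the HDFS daemons expose: HTTP_ONLY, HTTPS_ONLY, or
    // HTTP_AND_HTTPS (dfs.http.policy is assumed to exist in this release).
    conf.set("dfs.http.policy", "HTTPS_ONLY");
    // New keys from this patch: HTTPS bind addresses for the JournalNode and
    // the SecondaryNameNode. The host/port values here are just placeholders.
    conf.set("dfs.journalnode.https-address", "0.0.0.0:8481");
    conf.set("dfs.namenode.secondary.https-address", "0.0.0.0:50091");
    System.out.println(conf.get("dfs.journalnode.https-address"));
  }
}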
+ LOG.info("Starting web server as: " + + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey), + httpAddr.getHostName())); + + if (policy.isHttpEnabled()) { + if (httpAddr.getPort() == 0) { + builder.setFindPort(true); + } + + URI uri = URI.create("http://" + NetUtils.getHostPortString(httpAddr)); + builder.addEndpoint(uri); + LOG.info("Starting Web-server for " + name + " at: " + uri); + } + + if (policy.isHttpsEnabled() && httpsAddr != null) { + Configuration sslConf = loadSslConfiguration(conf); + loadSslConfToHttpServerBuilder(builder, sslConf); + + if (httpsAddr.getPort() == 0) { + builder.setFindPort(true); + } + + URI uri = URI.create("https://" + NetUtils.getHostPortString(httpsAddr)); + builder.addEndpoint(uri); + LOG.info("Starting Web-server for " + name + " at: " + uri); + } + return builder; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java index 323a5599d68..b5ea5ce69e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.net.MalformedURLException; +import java.net.URI; import java.net.URL; import java.security.PrivilegedExceptionAction; import java.util.concurrent.Callable; @@ -84,8 +85,9 @@ public class IPCLoggerChannel implements AsyncLogger { private final String journalId; private final NamespaceInfo nsInfo; - private int httpPort = -1; - + + private URL httpServerURL; + private final IPCLoggerChannelMetrics metrics; /** @@ -241,13 +243,12 @@ protected ExecutorService createExecutor() { public URL buildURLToFetchLogs(long segmentTxId) { Preconditions.checkArgument(segmentTxId > 0, "Invalid segment: %s", segmentTxId); - Preconditions.checkState(httpPort != -1, - "HTTP port not set yet"); + Preconditions.checkState(hasHttpServerEndPoint(), "No HTTP/HTTPS endpoint"); try { String path = GetJournalEditServlet.buildPath( journalId, segmentTxId, nsInfo); - return new URL("http", addr.getHostName(), httpPort, path.toString()); + return new URL(httpServerURL, path); } catch (MalformedURLException e) { // should never get here. throw new RuntimeException(e); @@ -313,7 +314,7 @@ public ListenableFuture getJournalState() { public GetJournalStateResponseProto call() throws IOException { GetJournalStateResponseProto ret = getProxy().getJournalState(journalId); - httpPort = ret.getHttpPort(); + constructHttpServerURI(ret); return ret; } }); @@ -528,7 +529,7 @@ public RemoteEditLogManifest call() throws IOException { journalId, fromTxnId, forReading, inProgressOk); // Update the http port, since we need this to build URLs to any of the // returned logs. - httpPort = ret.getHttpPort(); + constructHttpServerURI(ret); return PBHelper.convert(ret.getManifest()); } }); @@ -540,10 +541,12 @@ public ListenableFuture prepareRecovery( return executor.submit(new Callable() { @Override public PrepareRecoveryResponseProto call() throws IOException { - if (httpPort < 0) { - // If the HTTP port hasn't been set yet, force an RPC call so we know - // what the HTTP port should be. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 323a5599d68..b5ea5ce69e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
+import java.net.URI;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.concurrent.Callable;
@@ -84,8 +85,9 @@ public class IPCLoggerChannel implements AsyncLogger {
   private final String journalId;
   private final NamespaceInfo nsInfo;
-  private int httpPort = -1;
-
+
+  private URL httpServerURL;
+
   private final IPCLoggerChannelMetrics metrics;
 
   /**
@@ -241,13 +243,12 @@ protected ExecutorService createExecutor() {
   public URL buildURLToFetchLogs(long segmentTxId) {
     Preconditions.checkArgument(segmentTxId > 0,
         "Invalid segment: %s", segmentTxId);
-    Preconditions.checkState(httpPort != -1,
-        "HTTP port not set yet");
+    Preconditions.checkState(hasHttpServerEndPoint(), "No HTTP/HTTPS endpoint");
     try {
       String path = GetJournalEditServlet.buildPath(
           journalId, segmentTxId, nsInfo);
-      return new URL("http", addr.getHostName(), httpPort, path.toString());
+      return new URL(httpServerURL, path);
     } catch (MalformedURLException e) {
       // should never get here.
       throw new RuntimeException(e);
@@ -313,7 +314,7 @@ public ListenableFuture<GetJournalStateResponseProto> getJournalState() {
       public GetJournalStateResponseProto call() throws IOException {
         GetJournalStateResponseProto ret =
             getProxy().getJournalState(journalId);
-        httpPort = ret.getHttpPort();
+        constructHttpServerURI(ret);
         return ret;
       }
     });
@@ -528,7 +529,7 @@ public RemoteEditLogManifest call() throws IOException {
             journalId, fromTxnId, forReading, inProgressOk);
         // Update the http port, since we need this to build URLs to any of the
         // returned logs.
-        httpPort = ret.getHttpPort();
+        constructHttpServerURI(ret);
         return PBHelper.convert(ret.getManifest());
       }
     });
@@ -540,10 +541,12 @@ public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery(
     return executor.submit(new Callable<PrepareRecoveryResponseProto>() {
       @Override
       public PrepareRecoveryResponseProto call() throws IOException {
-        if (httpPort < 0) {
-          // If the HTTP port hasn't been set yet, force an RPC call so we know
-          // what the HTTP port should be.
-          httpPort = getProxy().getJournalState(journalId).getHttpPort();
+        if (!hasHttpServerEndPoint()) {
+          // If we have not yet learned the HTTP/HTTPS endpoint, force an RPC
+          // call so we know what it should be.
+          GetJournalStateResponseProto ret = getProxy().getJournalState(
+              journalId);
+          constructHttpServerURI(ret);
         }
         return getProxy().prepareRecovery(createReqInfo(), segmentTxId);
       }
@@ -594,4 +597,43 @@ public synchronized long getLagTimeMillis() {
         Math.max(lastCommitNanos - lastAckNanos, 0),
         TimeUnit.NANOSECONDS);
   }
+
+  private void constructHttpServerURI(GetEditLogManifestResponseProto ret) {
+    if (ret.hasFromURL()) {
+      URI uri = URI.create(ret.getFromURL());
+      httpServerURL = getHttpServerURI(uri.getScheme(), uri.getPort());
+    } else {
+      httpServerURL = getHttpServerURI("http", ret.getHttpPort());
+    }
+  }
+
+  private void constructHttpServerURI(GetJournalStateResponseProto ret) {
+    if (ret.hasFromURL()) {
+      URI uri = URI.create(ret.getFromURL());
+      httpServerURL = getHttpServerURI(uri.getScheme(), uri.getPort());
+    } else {
+      httpServerURL = getHttpServerURI("http", ret.getHttpPort());
+    }
+  }
+
+  /**
+   * Construct the URL of the http server based on the response.
+   *
+   * The fromURL field in the response specifies the endpoint of the http
+   * server. However, the address might not be accurate since the server can
+   * bind to multiple interfaces. Here the client plugs in the address specified
+   * in the configuration and generates the URI.
+   */
+  private URL getHttpServerURI(String scheme, int port) {
+    try {
+      return new URL(scheme, addr.getHostName(), port, "");
+    } catch (MalformedURLException e) {
+      // Unreachable
+      throw new RuntimeException(e);
+    }
+  }
+
+  private boolean hasHttpServerEndPoint() {
+    return httpServerURL != null;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index 34d5db004f6..c43edb9d84c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -64,7 +64,7 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   private JournalNodeHttpServer httpServer;
   private Map<String, Journal> journalsById = Maps.newHashMap();
   private ObjectName journalNodeInfoBeanName;
-
+  private String httpServerURI;
   private File localDir;
 
   static {
@@ -140,6 +140,8 @@ public void start() throws IOException {
     httpServer = new JournalNodeHttpServer(conf, this);
     httpServer.start();
 
+    httpServerURI = httpServer.getServerURI().toString();
+
     rpcServer = new JournalNodeRpcServer(conf, this);
     rpcServer.start();
   }
@@ -155,11 +157,14 @@ public InetSocketAddress getBoundIpcAddress() {
     return rpcServer.getAddress();
   }
 
-
+  @Deprecated
   public InetSocketAddress getBoundHttpAddress() {
     return httpServer.getAddress();
   }
 
+  public String getHttpServerURI() {
+    return httpServerURI;
+  }
 
   /**
    * Stop the daemon with the given status code
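The URL handling above deserves a tiny illustration. The sketch below is illustrative only: it shows the intended behavior of constructHttpServerURI/getHttpServerURI, where the client keeps the scheme and port the JournalNode advertises but substitutes the hostname it was configured with, because the server may have bound to a wildcard address. The hostnames and the servlet path are made up.

import java.net.URI;
import java.net.URL;

public class FetchUrlSketch {
  public static void main(String[] args) throws Exception {
    // Suppose the configured JournalNode address is jn1.example.com:8485 and
    // the node advertised fromURL "https://0.0.0.0:8481" (wildcard bind).
    URI advertised = URI.create("https://0.0.0.0:8481");
    // Keep the advertised scheme and port, but trust the configured hostname.
    URL base = new URL(advertised.getScheme(), "jn1.example.com",
        advertised.getPort(), "");
    // Segment paths (here a made-up servlet path) resolve against that base.
    URL fetch = new URL(base, "/getJournal?jid=myjournal&segmentTxId=1");
    System.out.println(fetch); // https://jn1.example.com:8481/getJournal?...
  }
}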
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
index b5537120d26..4a7b95cca09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
@@ -17,19 +17,12 @@
  */
 package org.apache.hadoop.hdfs.qjournal.server;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY;
-
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 
 import javax.servlet.ServletContext;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -37,22 +30,15 @@
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  * Encapsulates the HTTP server started by the Journal Service.
  */
 @InterfaceAudience.Private
 public class JournalNodeHttpServer {
-  public static final Log LOG = LogFactory.getLog(
-      JournalNodeHttpServer.class);
-
   public static final String JN_ATTRIBUTE_KEY = "localjournal";
 
   private HttpServer httpServer;
-  private int infoPort;
   private JournalNode localJournalNode;
 
   private final Configuration conf;
@@ -63,40 +49,24 @@ public class JournalNodeHttpServer {
   }
 
   void start() throws IOException {
-    final InetSocketAddress bindAddr = getAddress(conf);
+    final InetSocketAddress httpAddr = getAddress(conf);
 
-    // initialize the webserver for uploading/downloading files.
-    LOG.info("Starting web server as: "+ SecurityUtil.getServerPrincipal(conf
-        .get(DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY),
-        bindAddr.getHostName()));
+    final String httpsAddrString = conf.get(
+        DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
+        DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
+    InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
-    int tmpInfoPort = bindAddr.getPort();
-    URI httpEndpoint;
-    try {
-      httpEndpoint = new URI("http://" + NetUtils.getHostPortString(bindAddr));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
+    HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+        httpAddr, httpsAddr, "journal",
+        DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
+        DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);
 
-    httpServer = new HttpServer.Builder().setName("journal")
-        .addEndpoint(httpEndpoint)
-        .setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
-            new AccessControlList(conf.get(DFS_ADMIN, " ")))
-        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
-        .setUsernameConfKey(
-            DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY)
-        .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
-            DFS_JOURNALNODE_KEYTAB_FILE_KEY)).build();
+    httpServer = builder.build();
     httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
     httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
     httpServer.addInternalServlet("getJournal", "/getJournal",
         GetJournalEditServlet.class, true);
     httpServer.start();
-
-    // The web-server port can be ephemeral... ensure we have the correct info
-    infoPort = httpServer.getConnectorAddress(0).getPort();
-
-    LOG.info("Journal Web-server up at: " + bindAddr + ":" + infoPort);
   }
 
   void stop() throws IOException {
@@ -112,12 +82,25 @@ void stop() throws IOException {
   /**
    * Return the actual address bound to by the running server.
    */
+  @Deprecated
   public InetSocketAddress getAddress() {
     InetSocketAddress addr = httpServer.getConnectorAddress(0);
     assert addr.getPort() != 0;
     return addr;
   }
 
+  /**
+   * Return the URI that locates the HTTP server.
+   */
+  URI getServerURI() {
+    // getHttpClientScheme() only returns https for HTTPS_ONLY policy. This
+    // matches the behavior that the first connector is a HTTPS connector only
+    // for HTTPS_ONLY policy.
+    InetSocketAddress addr = httpServer.getConnectorAddress(0);
+    return URI.create(DFSUtil.getHttpClientScheme(conf) + "://"
+        + NetUtils.getHostPortString(addr));
+  }
+
   private static InetSocketAddress getAddress(Configuration conf) {
     String addr = conf.get(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
         DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
index 79bd333ad34..c30e16aaf97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
@@ -115,6 +115,7 @@ public boolean isFormatted(String journalId) throws IOException {
     return jn.getOrCreateJournal(journalId).isFormatted();
   }
 
+  @SuppressWarnings("deprecation")
   @Override
   public GetJournalStateResponseProto getJournalState(String journalId)
       throws IOException {
@@ -122,6 +123,7 @@ public GetJournalStateResponseProto getJournalState(String journalId)
     return GetJournalStateResponseProto.newBuilder()
         .setLastPromisedEpoch(epoch)
         .setHttpPort(jn.getBoundHttpAddress().getPort())
+        .setFromURL(jn.getHttpServerURI())
         .build();
   }
 
@@ -173,6 +175,7 @@ public void purgeLogsOlderThan(RequestInfo reqInfo, long minTxIdToKeep)
         .purgeLogsOlderThan(reqInfo, minTxIdToKeep);
   }
 
+  @SuppressWarnings("deprecation")
   @Override
   public GetEditLogManifestResponseProto getEditLogManifest(String jid,
       long sinceTxId, boolean forReading, boolean inProgressOk)
@@ -184,6 +187,7 @@ public GetEditLogManifestResponseProto getEditLogManifest(String jid,
     return GetEditLogManifestResponseProto.newBuilder()
         .setManifest(PBHelper.convert(manifest))
         .setHttpPort(jn.getBoundHttpAddress().getPort())
+        .setFromURL(jn.getHttpServerURI())
         .build();
   }
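As a quick gloss on the comment in getServerURI() above (again an illustrative sketch rather than patch code): the advertised scheme follows the cluster's HTTP policy, and only an HTTPS_ONLY cluster switches the client-facing scheme to https, so http-only clients keep working under HTTP_AND_HTTPS. The class name below is invented.

import org.apache.hadoop.http.HttpConfig;

public class ClientSchemeSketch {
  // Only an HTTPS_ONLY cluster advertises https; with HTTP_AND_HTTPS the
  // first connector stays http, so older http-only clients keep working.
  static String clientScheme(HttpConfig.Policy policy) {
    return policy == HttpConfig.Policy.HTTPS_ONLY ? "https" : "http";
  }

  public static void main(String[] args) {
    System.out.println(clientScheme(HttpConfig.Policy.HTTP_AND_HTTPS)); // http
    System.out.println(clientScheme(HttpConfig.Policy.HTTPS_ONLY));     // https
  }
}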
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 201e003a359..9639f178982 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -339,12 +339,7 @@ private void startInfoServer(Configuration conf) throws IOException {
     InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
         DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
-    Configuration sslConf = new Configuration(false);
-    sslConf.addResource(conf.get(
-        DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-        DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
-    sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf.getBoolean(
-        DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
+    Configuration sslConf = DFSUtil.loadSslConfiguration(conf);
     DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
 
     int port = secInfoSocAddr.getPort();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index a9d89dd5452..6baf4c48683 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -17,13 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.net.URI;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -45,7 +41,6 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AccessControlList;
 
 /**
  * Encapsulates the HTTP server started by the NameNode.
@@ -102,51 +97,16 @@ void start() throws IOException {
     HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
     final String infoHost = bindAddress.getHostName();
 
-    HttpServer.Builder builder = new HttpServer.Builder()
-        .setName("hdfs")
-        .setConf(conf)
-        .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
-        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
-        .setUsernameConfKey(
-            DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
-        .setKeytabConfKey(
-            DFSUtil.getSpnegoKeytabKey(conf,
-                DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
+    final InetSocketAddress httpAddr = bindAddress;
+    final String httpsAddrString = conf.get(
+        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
+    InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
-    if (policy.isHttpEnabled()) {
-      int port = bindAddress.getPort();
-      if (port == 0) {
-        builder.setFindPort(true);
-      }
-      builder.addEndpoint(URI.create("http://" + infoHost + ":" + port));
-    }
-
-    if (policy.isHttpsEnabled()) {
-      final String httpsAddrString = conf.get(
-          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
-          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
-      InetSocketAddress addr = NetUtils.createSocketAddr(httpsAddrString);
-
-      Configuration sslConf = new Configuration(false);
-
-      sslConf.addResource(conf.get(
-          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
-
-      sslConf.addResource(conf.get(
-          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
-      sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf.getBoolean(
-          DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
-      DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
-
-      if (addr.getPort() == 0) {
-        builder.setFindPort(true);
-      }
-
-      builder.addEndpoint(URI.create("https://"
-          + NetUtils.getHostPortString(addr)));
-    }
+    HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+        httpAddr, httpsAddr, "hdfs",
+        DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
+        DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
 
     httpServer = builder.build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 69c43a2ec13..6d136e87558 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -17,19 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.net.MalformedURLException;
 import java.net.URI;
 import java.net.URL;
 import java.security.PrivilegedAction;
@@ -71,6 +64,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.ipc.RemoteException;
@@ -79,7 +73,6 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -121,8 +114,7 @@ public class SecondaryNameNode implements Runnable {
   private InetSocketAddress nameNodeAddr;
   private volatile boolean shouldRun;
   private HttpServer infoServer;
-  private int infoPort;
-  private String infoBindAddress;
+  private URL imageListenURL;
 
   private Collection<URI> checkpointDirs;
   private List<URI> checkpointEditsDirs;
@@ -210,8 +202,8 @@ public SecondaryNameNode(Configuration conf,
   public static InetSocketAddress getHttpAddress(Configuration conf) {
     return NetUtils.createSocketAddr(conf.get(
-        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
+        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
   }
 
   /**
@@ -221,17 +213,19 @@ public static InetSocketAddress getHttpAddress(Configuration conf) {
   private void initialize(final Configuration conf,
       CommandLineOpts commandLineOpts) throws IOException {
     final InetSocketAddress infoSocAddr = getHttpAddress(conf);
-    infoBindAddress = infoSocAddr.getHostName();
+    final String infoBindAddress = infoSocAddr.getHostName();
     UserGroupInformation.setConfiguration(conf);
     if (UserGroupInformation.isSecurityEnabled()) {
-      SecurityUtil.login(conf, DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
-          DFS_SECONDARY_NAMENODE_USER_NAME_KEY, infoBindAddress);
+      SecurityUtil.login(conf,
+          DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
+          DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY, infoBindAddress);
     }
     // initiate Java VM metrics
     DefaultMetricsSystem.initialize("SecondaryNameNode");
     JvmMetrics.create("SecondaryNameNode",
-        conf.get(DFS_METRICS_SESSION_ID_KEY), DefaultMetricsSystem.instance());
-
+        conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
+        DefaultMetricsSystem.instance());
+
     // Create connection to the namenode.
     shouldRun = true;
     nameNodeAddr = NameNode.getServiceAddress(conf, true);
@@ -256,19 +250,19 @@ private void initialize(final Configuration conf,
     // Initialize other scheduling parameters from the configuration
     checkpointConf = new CheckpointConf(conf);
 
-    // initialize the webserver for uploading files.
-    int tmpInfoPort = infoSocAddr.getPort();
-    URI httpEndpoint = URI.create("http://" + NetUtils.getHostPortString(infoSocAddr));
+    final InetSocketAddress httpAddr = infoSocAddr;
 
-    infoServer = new HttpServer.Builder().setName("secondary")
-        .addEndpoint(httpEndpoint)
-        .setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
-            new AccessControlList(conf.get(DFS_ADMIN, " ")))
-        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
-        .setUsernameConfKey(
-            DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
-        .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
-            DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)).build();
+    final String httpsAddrString = conf.get(
+        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
+    InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
+
+    HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+        httpAddr, httpsAddr, "secondary",
+        DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
+        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
+
+    infoServer = builder.build();
 
     infoServer.setAttribute("secondary.name.node", this);
     infoServer.setAttribute("name.system.image", checkpointImage);
@@ -278,14 +272,25 @@ private void initialize(final Configuration conf,
     infoServer.start();
     LOG.info("Web server init done");
+    imageListenURL = new URL(DFSUtil.getHttpClientScheme(conf) + "://"
+        + NetUtils.getHostPortString(infoServer.getConnectorAddress(0)));
-    // The web-server port can be ephemeral... ensure we have the correct info
-    infoPort = infoServer.getConnectorAddress(0).getPort();
+    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
+    int connIdx = 0;
+    if (policy.isHttpEnabled()) {
+      InetSocketAddress httpAddress = infoServer.getConnectorAddress(connIdx++);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpAddress));
+    }
 
-    conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort);
-    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
-    LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs " +
-        "(" + checkpointConf.getPeriod() / 60 + " min)");
+    if (policy.isHttpsEnabled()) {
+      InetSocketAddress httpsAddress = infoServer.getConnectorAddress(connIdx);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpsAddress));
+    }
+
+    LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs " +
+        "(" + checkpointConf.getPeriod() / 60 + " min)");
     LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
   }
 
@@ -487,15 +492,7 @@ private URL getInfoServer() throws IOException {
    * for image transfers
    */
   private URL getImageListenAddress() {
-    StringBuilder sb = new StringBuilder()
-        .append(DFSUtil.getHttpClientScheme(conf)).append("://")
-        .append(infoBindAddress).append(":").append(infoPort);
-    try {
-      return new URL(sb.toString());
-    } catch (MalformedURLException e) {
-      // Unreachable
-      throw new RuntimeException(e);
-    }
+    return imageListenURL;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
index a9e8017e96f..8ee8ca5558b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
@@ -142,7 +142,9 @@ message GetJournalStateRequestProto {
 message GetJournalStateResponseProto {
   required uint64 lastPromisedEpoch = 1;
+  // Deprecated by fromURL
   required uint32 httpPort = 2;
+  optional string fromURL = 3;
 }
 
 /**
@@ -182,7 +184,9 @@ message GetEditLogManifestRequestProto {
 message GetEditLogManifestResponseProto {
   required RemoteEditLogManifestProto manifest = 1;
+  // Deprecated by fromURL
   required uint32 httpPort = 2;
+  optional string fromURL = 3;
   // TODO: we should add nsinfo somewhere
   // to verify that it matches up with our expectation
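A short, hypothetical sketch of how a caller is expected to consume the extended protobuf response, mirroring the IPCLoggerChannel fallback above: new JournalNodes populate fromURL, while the deprecated httpPort keeps responses from older servers usable. The class and helper names here are invented for illustration.

import java.net.URI;
import java.net.URL;

import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;

public class FromUrlFallbackSketch {
  // Prefer the new fromURL field; fall back to the deprecated httpPort when
  // talking to a JournalNode that predates this change.
  static URL endpointFor(GetJournalStateResponseProto ret, String configuredHost)
      throws Exception {
    if (ret.hasFromURL()) {
      URI uri = URI.create(ret.getFromURL());
      return new URL(uri.getScheme(), configuredHost, uri.getPort(), "");
    }
    return new URL("http", configuredHost, ret.getHttpPort(), "");
  }
}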
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index ee1c320a7c6..fab83b46357 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
+import org.apache.hadoop.net.NetUtils;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
@@ -66,11 +67,21 @@ public MiniJournalCluster build() throws IOException {
     }
   }
 
+  private static final class JNInfo {
+    private JournalNode node;
+    private InetSocketAddress ipcAddr;
+    private String httpServerURI;
+
+    private JNInfo(JournalNode node) {
+      this.node = node;
+      this.ipcAddr = node.getBoundIpcAddress();
+      this.httpServerURI = node.getHttpServerURI();
+    }
+  }
+
   private static final Log LOG = LogFactory.getLog(MiniJournalCluster.class);
   private File baseDir;
-  private JournalNode nodes[];
-  private InetSocketAddress ipcAddrs[];
-  private InetSocketAddress httpAddrs[];
+  private JNInfo nodes[];
 
   private MiniJournalCluster(Builder b) throws IOException {
     LOG.info("Starting MiniJournalCluster with " +
@@ -81,22 +92,19 @@ private MiniJournalCluster(Builder b) throws IOException {
     } else {
       this.baseDir = new File(MiniDFSCluster.getBaseDirectory());
     }
-
-    nodes = new JournalNode[b.numJournalNodes];
-    ipcAddrs = new InetSocketAddress[b.numJournalNodes];
-    httpAddrs = new InetSocketAddress[b.numJournalNodes];
+
+    nodes = new JNInfo[b.numJournalNodes];
+
     for (int i = 0; i < b.numJournalNodes; i++) {
       if (b.format) {
         File dir = getStorageDir(i);
         LOG.debug("Fully deleting JN directory " + dir);
         FileUtil.fullyDelete(dir);
       }
-      nodes[i] = new JournalNode();
-      nodes[i].setConf(createConfForNode(b, i));
-      nodes[i].start();
-
-      ipcAddrs[i] = nodes[i].getBoundIpcAddress();
-      httpAddrs[i] = nodes[i].getBoundHttpAddress();
+      JournalNode jn = new JournalNode();
+      jn.setConf(createConfForNode(b, i));
+      jn.start();
+      nodes[i] = new JNInfo(jn);
     }
   }
 
@@ -106,8 +114,8 @@ private MiniJournalCluster(Builder b) throws IOException {
    */
   public URI getQuorumJournalURI(String jid) {
     List<String> addrs = Lists.newArrayList();
-    for (InetSocketAddress addr : ipcAddrs) {
-      addrs.add("127.0.0.1:" + addr.getPort());
+    for (JNInfo info : nodes) {
+      addrs.add("127.0.0.1:" + info.ipcAddr.getPort());
     }
     String addrsVal = Joiner.on(";").join(addrs);
     LOG.debug("Setting logger addresses to: " + addrsVal);
@@ -122,8 +130,8 @@ public URI getQuorumJournalURI(String jid) {
    * Start the JournalNodes in the cluster.
    */
   public void start() throws IOException {
-    for (JournalNode jn : nodes) {
-      jn.start();
+    for (JNInfo info : nodes) {
+      info.node.start();
     }
   }
 
@@ -133,12 +141,12 @@ public void start() throws IOException {
    */
   public void shutdown() throws IOException {
     boolean failed = false;
-    for (JournalNode jn : nodes) {
+    for (JNInfo info : nodes) {
       try {
-        jn.stopAndJoin(0);
+        info.node.stopAndJoin(0);
       } catch (Exception e) {
         failed = true;
-        LOG.warn("Unable to stop journal node " + jn, e);
+        LOG.warn("Unable to stop journal node " + info.node, e);
       }
     }
     if (failed) {
@@ -150,8 +158,8 @@ private Configuration createConfForNode(Builder b, int idx) {
     Configuration conf = new Configuration(b.conf);
     File logDir = getStorageDir(idx);
     conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, logDir.toString());
-    conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "0.0.0.0:0");
-    conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+    conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "localhost:0");
+    conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "localhost:0");
     return conf;
   }
 
@@ -164,23 +172,33 @@ public File getCurrentDir(int idx, String jid) {
   }
 
   public JournalNode getJournalNode(int i) {
-    return nodes[i];
+    return nodes[i].node;
   }
 
   public void restartJournalNode(int i) throws InterruptedException, IOException {
-    Configuration conf = new Configuration(nodes[i].getConf());
-    if (nodes[i].isStarted()) {
-      nodes[i].stopAndJoin(0);
+    JNInfo info = nodes[i];
+    JournalNode jn = info.node;
+    Configuration conf = new Configuration(jn.getConf());
+    if (jn.isStarted()) {
+      jn.stopAndJoin(0);
     }
 
-    conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "127.0.0.1:" +
-        ipcAddrs[i].getPort());
-    conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "127.0.0.1:" +
-        httpAddrs[i].getPort());
-
-    nodes[i] = new JournalNode();
-    nodes[i].setConf(conf);
-    nodes[i].start();
+    conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY,
+        NetUtils.getHostPortString(info.ipcAddr));
+
+    final String uri = info.httpServerURI;
+    if (uri.startsWith("http://")) {
+      conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
+          uri.substring(("http://".length())));
+    } else if (info.httpServerURI.startsWith("https://")) {
+      conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
+          uri.substring(("https://".length())));
+    }
+
+    JournalNode newJN = new JournalNode();
+    newJN.setConf(conf);
+    newJN.start();
+    info.node = newJN;
   }
 
   public int getQuorumSize() {
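Before the TestJournalNode change below, a hypothetical test sketch (assuming the usual hadoop-hdfs test dependencies; not part of the patch) showing how the new scheme-aware URI composes with MiniJournalCluster:

import static org.junit.Assert.assertTrue;

import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
import org.junit.Test;

public class HttpServerUriSketchTest {
  @Test
  public void testJmxOverHttpServerUri() throws Exception {
    MiniJournalCluster cluster = new MiniJournalCluster.Builder(
        new Configuration()).numJournalNodes(1).build();
    try {
      // Scheme-aware root ("http://..." or "https://..." per dfs.http.policy).
      String urlRoot = cluster.getJournalNode(0).getHttpServerURI();
      String jmx = DFSTestUtil.urlGet(new URL(urlRoot + "/jmx"));
      assertTrue(jmx.contains("beans"));
    } finally {
      cluster.shutdown();
    }
  }
}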
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
index 79ca6ca72bb..e594f6e306f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
@@ -25,7 +25,6 @@
 import java.io.File;
 import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
 import java.net.URL;
 import java.util.concurrent.ExecutionException;
 
@@ -163,10 +162,7 @@ public void testReturnsSegmentInfoAtEpochTransition() throws Exception {
 
   @Test(timeout=100000)
   public void testHttpServer() throws Exception {
-    InetSocketAddress addr = jn.getBoundHttpAddress();
-    assertTrue(addr.getPort() > 0);
-
-    String urlRoot = "http://localhost:" + addr.getPort();
+    String urlRoot = jn.getHttpServerURI();
 
     // Check default servlets.
     String pageContents = DFSTestUtil.urlGet(new URL(urlRoot + "/jmx"));