HDFS-5629. Merge change r1549692 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1551723 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2013-12-17 21:31:12 +00:00
parent 246e9cb136
commit 7a3ac36ede
13 changed files with 268 additions and 191 deletions

View File

@@ -171,6 +171,9 @@ Release 2.4.0 - UNRELEASED
HDFS-5312. Generate HTTP / HTTPS URL in DFSUtil#getInfoServer() based on the
configured http policy. (Haohui Mai via jing9)
HDFS-5629. Support HTTPS in JournalNode and SecondaryNameNode.
(Haohui Mai via jing9)
OPTIMIZATIONS
HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)

View File

@@ -124,6 +124,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address";
public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
public static final String DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY = "dfs.namenode.secondary.https-address";
public static final String DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:50091";
public static final String DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period";
public static final long DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT = 60;
public static final String DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";
@@ -484,6 +486,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_JOURNALNODE_HTTP_ADDRESS_KEY = "dfs.journalnode.http-address";
public static final int DFS_JOURNALNODE_HTTP_PORT_DEFAULT = 8480;
public static final String DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_HTTP_PORT_DEFAULT;
public static final String DFS_JOURNALNODE_HTTPS_ADDRESS_KEY = "dfs.journalnode.https-address";
public static final int DFS_JOURNALNODE_HTTPS_PORT_DEFAULT = 8481;
public static final String DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_HTTPS_PORT_DEFAULT;
public static final String DFS_JOURNALNODE_KEYTAB_FILE_KEY = "dfs.journalnode.keytab.file";
public static final String DFS_JOURNALNODE_USER_NAME_KEY = "dfs.journalnode.kerberos.principal";
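For orientation, these new keys resolve to bind addresses exactly like the existing HTTP keys do. A minimal sketch (mirroring the JournalNode call site later in this patch; the standalone class is illustrative only):

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.net.NetUtils;

public class JnHttpsAddrSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Falls back to "0.0.0.0:8481" when dfs.journalnode.https-address is unset.
    String httpsAddrString = conf.get(
        DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
        DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
    InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
    System.out.println(httpsAddr); // e.g. 0.0.0.0:8481 by default
  }
}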

View File

@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
@@ -89,6 +90,7 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
@@ -1507,4 +1509,67 @@ public class DFSUtil {
sslConf.get("ssl.server.truststore.password"),
sslConf.get("ssl.server.truststore.type", "jks"));
}
/**
* Load HTTPS-related configuration.
*/
public static Configuration loadSslConfiguration(Configuration conf) {
Configuration sslConf = new Configuration(false);
sslConf.addResource(conf.get(
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
boolean requireClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, requireClientAuth);
return sslConf;
}
/**
* Return an HttpServer.Builder that the JournalNode / NameNode / Secondary
* NameNode can use to initialize their HTTP / HTTPS server.
*/
public static HttpServer.Builder httpServerTemplateForNNAndJN(
Configuration conf, final InetSocketAddress httpAddr,
final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
String spnegoKeytabFileKey) throws IOException {
HttpConfig.Policy policy = getHttpPolicy(conf);
HttpServer.Builder builder = new HttpServer.Builder().setName(name)
.setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
.setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
.setUsernameConfKey(spnegoUserNameKey)
.setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));
// initialize the webserver for uploading/downloading files.
LOG.info("Starting web server as: "
+ SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
httpAddr.getHostName()));
if (policy.isHttpEnabled()) {
if (httpAddr.getPort() == 0) {
builder.setFindPort(true);
}
URI uri = URI.create("http://" + NetUtils.getHostPortString(httpAddr));
builder.addEndpoint(uri);
LOG.info("Starting Web-server for " + name + " at: " + uri);
}
if (policy.isHttpsEnabled() && httpsAddr != null) {
Configuration sslConf = loadSslConfiguration(conf);
loadSslConfToHttpServerBuilder(builder, sslConf);
if (httpsAddr.getPort() == 0) {
builder.setFindPort(true);
}
URI uri = URI.create("https://" + NetUtils.getHostPortString(httpsAddr));
builder.addEndpoint(uri);
LOG.info("Starting Web-server for " + name + " at: " + uri);
}
return builder;
}
}
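As a rough usage sketch of the new template (the daemon name and SPNEGO/keytab keys below are the JournalNode's, matching the call site further down; error handling elided):

HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
    httpAddr, httpsAddr, "journal",
    DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
    DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);
// Endpoints for http:// and/or https:// were already added according to the
// configured dfs.http.policy, so build() yields a server with the right
// connectors in place.
HttpServer httpServer = builder.build();
httpServer.start();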

View File

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.qjournal.client;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.Callable;
@@ -84,8 +85,9 @@ public class IPCLoggerChannel implements AsyncLogger {
private final String journalId;
private final NamespaceInfo nsInfo;
private int httpPort = -1;
private URL httpServerURL;
private final IPCLoggerChannelMetrics metrics;
/**
@@ -242,13 +244,12 @@ public class IPCLoggerChannel implements AsyncLogger {
public URL buildURLToFetchLogs(long segmentTxId) {
Preconditions.checkArgument(segmentTxId > 0,
"Invalid segment: %s", segmentTxId);
Preconditions.checkState(httpPort != -1,
"HTTP port not set yet");
Preconditions.checkState(hasHttpServerEndPoint(), "No HTTP/HTTPS endpoint");
try {
String path = GetJournalEditServlet.buildPath(
journalId, segmentTxId, nsInfo);
return new URL("http", addr.getHostName(), httpPort, path.toString());
return new URL(httpServerURL, path);
} catch (MalformedURLException e) {
// should never get here.
throw new RuntimeException(e);
@@ -314,7 +315,7 @@
public GetJournalStateResponseProto call() throws IOException {
GetJournalStateResponseProto ret =
getProxy().getJournalState(journalId);
httpPort = ret.getHttpPort();
constructHttpServerURI(ret);
return ret;
}
});
@@ -528,7 +529,7 @@
journalId, fromTxnId, inProgressOk);
// Update the http port, since we need this to build URLs to any of the
// returned logs.
httpPort = ret.getHttpPort();
constructHttpServerURI(ret);
return PBHelper.convert(ret.getManifest());
}
});
@@ -540,10 +541,12 @@
return executor.submit(new Callable<PrepareRecoveryResponseProto>() {
@Override
public PrepareRecoveryResponseProto call() throws IOException {
if (httpPort < 0) {
// If the HTTP port hasn't been set yet, force an RPC call so we know
// what the HTTP port should be.
httpPort = getProxy().getJournalState(journalId).getHttpPort();
if (!hasHttpServerEndPoint()) {
// The server endpoint hasn't been cached yet, so force an RPC call to
// find out what it should be.
GetJournalStateResponseProto ret = getProxy().getJournalState(
journalId);
constructHttpServerURI(ret);
}
return getProxy().prepareRecovery(createReqInfo(), segmentTxId);
}
@@ -594,4 +597,43 @@
Math.max(lastCommitNanos - lastAckNanos, 0),
TimeUnit.NANOSECONDS);
}
private void constructHttpServerURI(GetEditLogManifestResponseProto ret) {
if (ret.hasFromURL()) {
URI uri = URI.create(ret.getFromURL());
httpServerURL = getHttpServerURI(uri.getScheme(), uri.getPort());
} else {
httpServerURL = getHttpServerURI("http", ret.getHttpPort());;
}
}
private void constructHttpServerURI(GetJournalStateResponseProto ret) {
if (ret.hasFromURL()) {
URI uri = URI.create(ret.getFromURL());
httpServerURL = getHttpServerURI(uri.getScheme(), uri.getPort());
} else {
httpServerURL = getHttpServerURI("http", ret.getHttpPort());;
}
}
/**
* Construct the HTTP server URI based on the response.
*
* The fromURL field in the response specifies the endpoint of the http
* server. However, the address might not be accurate since the server can
* bind to multiple interfaces. Here the client plugs in the address specified
* in the configuration and generates the URI.
*/
private URL getHttpServerURI(String scheme, int port) {
try {
return new URL(scheme, addr.getHostName(), port, "");
} catch (MalformedURLException e) {
// Unreachable
throw new RuntimeException(e);
}
}
private boolean hasHttpServerEndPoint() {
return httpServerURL != null;
}
}
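To make the host substitution concrete: a server can bind to a wildcard or to multiple interfaces, so the host in the advertised fromURL is not trusted; only its scheme and port are kept, and the host the channel was configured with is plugged back in. A worked example with a hypothetical host, checked exceptions elided:

// New JournalNode advertising fromURL = "https://0.0.0.0:8481", channel
// configured with addr = jn1.example.com:
URI advertised = URI.create("https://0.0.0.0:8481");
URL viaFromURL = new URL(advertised.getScheme(), "jn1.example.com",
    advertised.getPort(), "");              // https://jn1.example.com:8481
// Pre-HDFS-5629 JournalNode returning only httpPort = 8480:
URL viaHttpPort = new URL("http", "jn1.example.com", 8480, "");
                                            // http://jn1.example.com:8480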

View File

@@ -64,7 +64,7 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
private JournalNodeHttpServer httpServer;
private Map<String, Journal> journalsById = Maps.newHashMap();
private ObjectName journalNodeInfoBeanName;
private String httpServerURI;
private File localDir;
static {
@@ -140,6 +140,8 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
httpServer = new JournalNodeHttpServer(conf, this);
httpServer.start();
httpServerURI = httpServer.getServerURI().toString();
rpcServer = new JournalNodeRpcServer(conf, this);
rpcServer.start();
}
@@ -155,11 +157,14 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
return rpcServer.getAddress();
}
@Deprecated
public InetSocketAddress getBoundHttpAddress() {
return httpServer.getAddress();
}
public String getHttpServerURI() {
return httpServerURI;
}
/**
* Stop the daemon with the given status code

View File

@@ -17,19 +17,12 @@
*/
package org.apache.hadoop.hdfs.qjournal.server;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import javax.servlet.ServletContext;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -37,22 +30,15 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
/**
* Encapsulates the HTTP server started by the Journal Service.
*/
@InterfaceAudience.Private
public class JournalNodeHttpServer {
public static final Log LOG = LogFactory.getLog(
JournalNodeHttpServer.class);
public static final String JN_ATTRIBUTE_KEY = "localjournal";
private HttpServer httpServer;
private int infoPort;
private JournalNode localJournalNode;
private final Configuration conf;
@@ -63,40 +49,24 @@ public class JournalNodeHttpServer {
}
void start() throws IOException {
final InetSocketAddress bindAddr = getAddress(conf);
final InetSocketAddress httpAddr = getAddress(conf);
// initialize the webserver for uploading/downloading files.
LOG.info("Starting web server as: "+ SecurityUtil.getServerPrincipal(conf
.get(DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY),
bindAddr.getHostName()));
final String httpsAddrString = conf.get(
DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
int tmpInfoPort = bindAddr.getPort();
URI httpEndpoint;
try {
httpEndpoint = new URI("http://" + NetUtils.getHostPortString(bindAddr));
} catch (URISyntaxException e) {
throw new IOException(e);
}
HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
httpAddr, httpsAddr, "journal",
DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);
httpServer = new HttpServer.Builder().setName("journal")
.addEndpoint(httpEndpoint)
.setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
new AccessControlList(conf.get(DFS_ADMIN, " ")))
.setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
.setUsernameConfKey(
DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY)
.setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
DFS_JOURNALNODE_KEYTAB_FILE_KEY)).build();
httpServer = builder.build();
httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
httpServer.addInternalServlet("getJournal", "/getJournal",
GetJournalEditServlet.class, true);
httpServer.start();
// The web-server port can be ephemeral... ensure we have the correct info
infoPort = httpServer.getConnectorAddress(0).getPort();
LOG.info("Journal Web-server up at: " + bindAddr + ":" + infoPort);
}
void stop() throws IOException {
@@ -112,12 +82,25 @@
/**
* Return the actual address bound to by the running server.
*/
@Deprecated
public InetSocketAddress getAddress() {
InetSocketAddress addr = httpServer.getConnectorAddress(0);
assert addr.getPort() != 0;
return addr;
}
/**
* Return the URI that locates the HTTP server.
*/
URI getServerURI() {
// getHttpClientScheme() only returns https for the HTTPS_ONLY policy. This
// matches the behavior that the first connector is an HTTPS connector only
// under the HTTPS_ONLY policy.
InetSocketAddress addr = httpServer.getConnectorAddress(0);
return URI.create(DFSUtil.getHttpClientScheme(conf) + "://"
+ NetUtils.getHostPortString(addr));
}
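Spelling out the assumption in that comment (a hedged summary derived from it, not an exhaustive spec of HttpServer):

// policy            connector 0       DFSUtil.getHttpClientScheme(conf)
// HTTP_ONLY      -> HTTP connector -> "http"
// HTTP_AND_HTTPS -> HTTP connector -> "http"
// HTTPS_ONLY     -> HTTPS connector -> "https"
// In each row the scheme agrees with connector 0, so the URI built above
// always points at a live endpoint.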
private static InetSocketAddress getAddress(Configuration conf) {
String addr = conf.get(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT);

View File

@@ -115,6 +115,7 @@ class JournalNodeRpcServer implements QJournalProtocol {
return jn.getOrCreateJournal(journalId).isFormatted();
}
@SuppressWarnings("deprecation")
@Override
public GetJournalStateResponseProto getJournalState(String journalId)
throws IOException {
@@ -122,6 +123,7 @@
return GetJournalStateResponseProto.newBuilder()
.setLastPromisedEpoch(epoch)
.setHttpPort(jn.getBoundHttpAddress().getPort())
.setFromURL(jn.getHttpServerURI())
.build();
}
@@ -173,6 +175,7 @@
.purgeLogsOlderThan(reqInfo, minTxIdToKeep);
}
@SuppressWarnings("deprecation")
@Override
public GetEditLogManifestResponseProto getEditLogManifest(String jid,
long sinceTxId, boolean inProgressOk)
@@ -184,6 +187,7 @@
return GetEditLogManifestResponseProto.newBuilder()
.setManifest(PBHelper.convert(manifest))
.setHttpPort(jn.getBoundHttpAddress().getPort())
.setFromURL(jn.getHttpServerURI())
.build();
}

View File

@@ -336,12 +336,7 @@ public class DataNode extends Configured
InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
Configuration sslConf = new Configuration(false);
sslConf.addResource(conf.get(
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf.getBoolean(
DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
Configuration sslConf = DFSUtil.loadSslConfiguration(conf);
DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
int port = secInfoSocAddr.getPort();

View File

@@ -17,13 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
@@ -45,7 +41,6 @@ import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
/**
* Encapsulates the HTTP server started by the NameNode.
@@ -101,51 +96,16 @@ public class NameNodeHttpServer {
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
final String infoHost = bindAddress.getHostName();
HttpServer.Builder builder = new HttpServer.Builder()
.setName("hdfs")
.setConf(conf)
.setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
.setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
.setUsernameConfKey(
DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
.setKeytabConfKey(
DFSUtil.getSpnegoKeytabKey(conf,
DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
final InetSocketAddress httpAddr = bindAddress;
final String httpsAddrString = conf.get(
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
if (policy.isHttpEnabled()) {
int port = bindAddress.getPort();
if (port == 0) {
builder.setFindPort(true);
}
builder.addEndpoint(URI.create("http://" + infoHost + ":" + port));
}
if (policy.isHttpsEnabled()) {
final String httpsAddrString = conf.get(
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
InetSocketAddress addr = NetUtils.createSocketAddr(httpsAddrString);
Configuration sslConf = new Configuration(false);
sslConf.addResource(conf.get(
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf.getBoolean(
DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
if (addr.getPort() == 0) {
builder.setFindPort(true);
}
builder.addEndpoint(URI.create("https://"
+ NetUtils.getHostPortString(addr)));
}
HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
httpAddr, httpsAddr, "hdfs",
DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
httpServer = builder.build();

View File

@@ -17,19 +17,12 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY;
import static org.apache.hadoop.util.ExitUtil.terminate;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedAction;
@@ -71,6 +64,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StorageP
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.ipc.RemoteException;
@@ -79,7 +73,6 @@ import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
@@ -121,8 +114,7 @@ public class SecondaryNameNode implements Runnable {
private InetSocketAddress nameNodeAddr;
private volatile boolean shouldRun;
private HttpServer infoServer;
private int infoPort;
private String infoBindAddress;
private URL imageListenURL;
private Collection<URI> checkpointDirs;
private List<URI> checkpointEditsDirs;
@@ -210,8 +202,8 @@
public static InetSocketAddress getHttpAddress(Configuration conf) {
return NetUtils.createSocketAddr(conf.get(
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
}
/**
@@ -221,17 +213,19 @@
private void initialize(final Configuration conf,
CommandLineOpts commandLineOpts) throws IOException {
final InetSocketAddress infoSocAddr = getHttpAddress(conf);
infoBindAddress = infoSocAddr.getHostName();
final String infoBindAddress = infoSocAddr.getHostName();
UserGroupInformation.setConfiguration(conf);
if (UserGroupInformation.isSecurityEnabled()) {
SecurityUtil.login(conf, DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
DFS_SECONDARY_NAMENODE_USER_NAME_KEY, infoBindAddress);
SecurityUtil.login(conf,
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY, infoBindAddress);
}
// initiate Java VM metrics
DefaultMetricsSystem.initialize("SecondaryNameNode");
JvmMetrics.create("SecondaryNameNode",
conf.get(DFS_METRICS_SESSION_ID_KEY), DefaultMetricsSystem.instance());
conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
DefaultMetricsSystem.instance());
// Create connection to the namenode.
shouldRun = true;
nameNodeAddr = NameNode.getServiceAddress(conf, true);
@@ -256,19 +250,19 @@
// Initialize other scheduling parameters from the configuration
checkpointConf = new CheckpointConf(conf);
// initialize the webserver for uploading files.
int tmpInfoPort = infoSocAddr.getPort();
URI httpEndpoint = URI.create("http://" + NetUtils.getHostPortString(infoSocAddr));
final InetSocketAddress httpAddr = infoSocAddr;
infoServer = new HttpServer.Builder().setName("secondary")
.addEndpoint(httpEndpoint)
.setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
new AccessControlList(conf.get(DFS_ADMIN, " ")))
.setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
.setUsernameConfKey(
DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
.setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)).build();
final String httpsAddrString = conf.get(
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
httpAddr, httpsAddr, "secondary",
DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
infoServer = builder.build();
infoServer.setAttribute("secondary.name.node", this);
infoServer.setAttribute("name.system.image", checkpointImage);
@@ -278,14 +272,25 @@
infoServer.start();
LOG.info("Web server init done");
imageListenURL = new URL(DFSUtil.getHttpClientScheme(conf) + "://"
+ NetUtils.getHostPortString(infoServer.getConnectorAddress(0)));
// The web-server port can be ephemeral... ensure we have the correct info
infoPort = infoServer.getConnectorAddress(0).getPort();
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
int connIdx = 0;
if (policy.isHttpEnabled()) {
InetSocketAddress httpAddress = infoServer.getConnectorAddress(connIdx++);
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
NetUtils.getHostPortString(httpAddress));
}
conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort);
LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs " +
"(" + checkpointConf.getPeriod() / 60 + " min)");
if (policy.isHttpsEnabled()) {
InetSocketAddress httpsAddress = infoServer.getConnectorAddress(connIdx);
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
NetUtils.getHostPortString(httpsAddress));
}
LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs "
+ "(" + checkpointConf.getPeriod() / 60 + " min)");
LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
}
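The write-back into conf matters when the configured port is 0: the server then binds an ephemeral port, and later lookups must observe the real one. An illustrative check, assuming an HTTP-enabled policy:

// With dfs.namenode.secondary.http-address set to "0.0.0.0:0", the
// write-back above replaces the 0 with the actual bound port, so this
// resolves to a non-zero port once initialize() has completed.
InetSocketAddress resolved = SecondaryNameNode.getHttpAddress(conf);
assert resolved.getPort() != 0;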
@@ -487,15 +492,7 @@
* for image transfers
*/
private URL getImageListenAddress() {
StringBuilder sb = new StringBuilder()
.append(DFSUtil.getHttpClientScheme(conf)).append("://")
.append(infoBindAddress).append(":").append(infoPort);
try {
return new URL(sb.toString());
} catch (MalformedURLException e) {
// Unreachable
throw new RuntimeException(e);
}
return imageListenURL;
}
/**

View File

@@ -142,7 +142,9 @@ message GetJournalStateRequestProto {
message GetJournalStateResponseProto {
required uint64 lastPromisedEpoch = 1;
// Deprecated by fromURL
required uint32 httpPort = 2;
optional string fromURL = 3;
}
/**
@@ -182,7 +184,9 @@ message GetEditLogManifestRequestProto {
message GetEditLogManifestResponseProto {
required RemoteEditLogManifestProto manifest = 1;
// Deprecated by fromURL
required uint32 httpPort = 2;
optional string fromURL = 3;
// TODO: we should add nsinfo somewhere
// to verify that it matches up with our expectation
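Keeping httpPort required while adding fromURL as optional preserves wire compatibility in both directions: an old client never reads the new field, and an old server never sets it. A hedged sketch of the reader side (it mirrors constructHttpServerURI in IPCLoggerChannel above):

String scheme;
int port;
if (ret.hasFromURL()) {     // new server: honor the advertised scheme
  URI uri = URI.create(ret.getFromURL());
  scheme = uri.getScheme();
  port = uri.getPort();
} else {                    // pre-HDFS-5629 server: plain HTTP only
  scheme = "http";
  port = ret.getHttpPort();
}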

View File

@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
import org.apache.hadoop.net.NetUtils;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
@@ -66,11 +67,21 @@ public class MiniJournalCluster {
}
}
private static final class JNInfo {
private JournalNode node;
private InetSocketAddress ipcAddr;
private String httpServerURI;
private JNInfo(JournalNode node) {
this.node = node;
this.ipcAddr = node.getBoundIpcAddress();
this.httpServerURI = node.getHttpServerURI();
}
}
private static final Log LOG = LogFactory.getLog(MiniJournalCluster.class);
private File baseDir;
private JournalNode nodes[];
private InetSocketAddress ipcAddrs[];
private InetSocketAddress httpAddrs[];
private JNInfo nodes[];
private MiniJournalCluster(Builder b) throws IOException {
LOG.info("Starting MiniJournalCluster with " +
@@ -81,22 +92,19 @@
} else {
this.baseDir = new File(MiniDFSCluster.getBaseDirectory());
}
nodes = new JournalNode[b.numJournalNodes];
ipcAddrs = new InetSocketAddress[b.numJournalNodes];
httpAddrs = new InetSocketAddress[b.numJournalNodes];
nodes = new JNInfo[b.numJournalNodes];
for (int i = 0; i < b.numJournalNodes; i++) {
if (b.format) {
File dir = getStorageDir(i);
LOG.debug("Fully deleting JN directory " + dir);
FileUtil.fullyDelete(dir);
}
nodes[i] = new JournalNode();
nodes[i].setConf(createConfForNode(b, i));
nodes[i].start();
ipcAddrs[i] = nodes[i].getBoundIpcAddress();
httpAddrs[i] = nodes[i].getBoundHttpAddress();
JournalNode jn = new JournalNode();
jn.setConf(createConfForNode(b, i));
jn.start();
nodes[i] = new JNInfo(jn);
}
}
@@ -106,8 +114,8 @@
*/
public URI getQuorumJournalURI(String jid) {
List<String> addrs = Lists.newArrayList();
for (InetSocketAddress addr : ipcAddrs) {
addrs.add("127.0.0.1:" + addr.getPort());
for (JNInfo info : nodes) {
addrs.add("127.0.0.1:" + info.ipcAddr.getPort());
}
String addrsVal = Joiner.on(";").join(addrs);
LOG.debug("Setting logger addresses to: " + addrsVal);
@@ -122,8 +130,8 @@
* Start the JournalNodes in the cluster.
*/
public void start() throws IOException {
for (JournalNode jn : nodes) {
jn.start();
for (JNInfo info : nodes) {
info.node.start();
}
}
@@ -133,12 +141,12 @@
*/
public void shutdown() throws IOException {
boolean failed = false;
for (JournalNode jn : nodes) {
for (JNInfo info : nodes) {
try {
jn.stopAndJoin(0);
info.node.stopAndJoin(0);
} catch (Exception e) {
failed = true;
LOG.warn("Unable to stop journal node " + jn, e);
LOG.warn("Unable to stop journal node " + info.node, e);
}
}
if (failed) {
@@ -150,8 +158,8 @@
Configuration conf = new Configuration(b.conf);
File logDir = getStorageDir(idx);
conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, logDir.toString());
conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "localhost:0");
return conf;
}
@@ -164,23 +172,33 @@
}
public JournalNode getJournalNode(int i) {
return nodes[i];
return nodes[i].node;
}
public void restartJournalNode(int i) throws InterruptedException, IOException {
Configuration conf = new Configuration(nodes[i].getConf());
if (nodes[i].isStarted()) {
nodes[i].stopAndJoin(0);
JNInfo info = nodes[i];
JournalNode jn = info.node;
Configuration conf = new Configuration(jn.getConf());
if (jn.isStarted()) {
jn.stopAndJoin(0);
}
conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "127.0.0.1:" +
ipcAddrs[i].getPort());
conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "127.0.0.1:" +
httpAddrs[i].getPort());
nodes[i] = new JournalNode();
nodes[i].setConf(conf);
nodes[i].start();
conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY,
NetUtils.getHostPortString(info.ipcAddr));
final String uri = info.httpServerURI;
if (uri.startsWith("http://")) {
conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
uri.substring(("http://".length())));
} else if (info.httpServerURI.startsWith("https://")) {
conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
uri.substring(("https://".length())));
}
JournalNode newJN = new JournalNode();
newJN.setConf(conf);
newJN.start();
info.node = newJN;
}
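The prefix stripping above relies on getHttpServerURI() always returning a URI with an explicit scheme. An equivalent sketch using java.net.URI, shown only as an illustrative alternative to the string surgery the patch actually uses:

URI uri = URI.create(info.httpServerURI);
String hostPort = uri.getHost() + ":" + uri.getPort();
if ("http".equals(uri.getScheme())) {
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, hostPort);
} else if ("https".equals(uri.getScheme())) {
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, hostPort);
}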
public int getQuorumSize() {

View File

@@ -25,7 +25,6 @@ import static org.junit.Assert.fail;
import java.io.File;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URL;
import java.util.concurrent.ExecutionException;
@@ -163,10 +162,7 @@
@Test(timeout=100000)
public void testHttpServer() throws Exception {
InetSocketAddress addr = jn.getBoundHttpAddress();
assertTrue(addr.getPort() > 0);
String urlRoot = "http://localhost:" + addr.getPort();
String urlRoot = jn.getHttpServerURI();
// Check default servlets.
String pageContents = DFSTestUtil.urlGet(new URL(urlRoot + "/jmx"));