HDFS-5536. Merge change r1547925 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1551715 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2013-12-17 21:18:38 +00:00
parent c143486c17
commit b19b529f38
21 changed files with 447 additions and 236 deletions

View File

@@ -31,15 +31,25 @@ public class HttpConfig {
private static Policy policy;
public enum Policy {
HTTP_ONLY,
HTTPS_ONLY;
HTTPS_ONLY,
HTTP_AND_HTTPS;
public static Policy fromString(String value) {
if (value.equalsIgnoreCase(CommonConfigurationKeysPublic
.HTTP_POLICY_HTTPS_ONLY)) {
if (HTTPS_ONLY.name().equalsIgnoreCase(value)) {
return HTTPS_ONLY;
} else if (HTTP_AND_HTTPS.name().equalsIgnoreCase(value)) {
return HTTP_AND_HTTPS;
}
return HTTP_ONLY;
}
public boolean isHttpEnabled() {
return this == HTTP_ONLY || this == HTTP_AND_HTTPS;
}
public boolean isHttpsEnabled() {
return this == HTTPS_ONLY || this == HTTP_AND_HTTPS;
}
}
static {
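
Aside (not part of the patch): a minimal sketch of how the extended enum behaves, per the code above. fromString() matches case-insensitively and falls back to HTTP_ONLY for unrecognized values.

HttpConfig.Policy p = HttpConfig.Policy.fromString("https_only");
assert p == HttpConfig.Policy.HTTPS_ONLY;
assert p.isHttpsEnabled() && !p.isHttpEnabled();
// Unrecognized strings fall back to HTTP_ONLY rather than failing:
assert HttpConfig.Policy.fromString("bogus") == HttpConfig.Policy.HTTP_ONLY;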

View File

@@ -1126,9 +1126,7 @@
<name>hadoop.ssl.enabled</name>
<value>false</value>
<description>
Whether to use SSL for the HTTP endpoints. If set to true, the
NameNode, DataNode, ResourceManager, NodeManager, HistoryServer and
MapReduceAppMaster web UIs will be served over HTTPS instead of HTTP.
Deprecated. Use dfs.http.policy and yarn.http.policy instead.
</description>
</property>
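
Aside: with hadoop.ssl.enabled deprecated, the same intent is expressed through the per-service policy keys. A hedged migration sketch (values illustrative, assuming yarn.http.policy accepts the same values):

<property>
  <name>dfs.http.policy</name>
  <value>HTTPS_ONLY</value>
</property>
<property>
  <name>yarn.http.policy</name>
  <value>HTTPS_ONLY</value>
</property>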

View File

@@ -754,6 +754,10 @@ KVNO Timestamp Principal
| | | Enable HDFS block access tokens for secure operations. |
*-------------------------+-------------------------+------------------------+
| <<<dfs.https.enable>>> | <true> | |
| | | This value is deprecated. Use dfs.http.policy instead. |
*-------------------------+-------------------------+------------------------+
| <<<dfs.http.policy>>> | <HTTP_ONLY> or <HTTPS_ONLY> or <HTTP_AND_HTTPS> | |
| | | HTTPS_ONLY turns off http access |
*-------------------------+-------------------------+------------------------+
| <<<dfs.namenode.https-address>>> | <nn_host_fqdn:50470> | |
*-------------------------+-------------------------+------------------------+

View File

@@ -165,6 +165,9 @@ Release 2.4.0 - UNRELEASED
HDFS-5545. Allow specifying endpoints for listeners in HttpServer. (Haohui
Mai via jing9)
HDFS-5536. Implement HTTP policy for Namenode and DataNode. (Haohui Mai via
jing9)
OPTIMIZATIONS
HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)

View File

@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
import org.apache.hadoop.http.HttpConfig;
/**
* This class contains constants for configuration keys used
@@ -340,6 +341,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean DFS_SUPPORT_APPEND_DEFAULT = true;
public static final String DFS_HTTPS_ENABLE_KEY = "dfs.https.enable";
public static final boolean DFS_HTTPS_ENABLE_DEFAULT = false;
public static final String DFS_HTTP_POLICY_KEY = "dfs.http.policy";
public static final String DFS_HTTP_POLICY_DEFAULT = HttpConfig.Policy.HTTP_ONLY.name();
public static final String DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY = "dfs.default.chunk.view.size";
public static final int DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT = 32*1024;
public static final String DFS_DATANODE_HTTPS_ADDRESS_KEY = "dfs.datanode.https.address";

View File

@@ -18,6 +18,8 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
@@ -64,6 +66,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
@@ -78,6 +81,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
@@ -1409,12 +1413,58 @@ public class DFSUtil {
defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
}
public static HttpServer.Builder loadSslConfToHttpServerBuilder(
HttpServer.Builder builder, Configuration sslConf) {
/**
* Get http policy. Http Policy is chosen as follows:
* <ol>
* <li>If hadoop.ssl.enabled is set, http endpoints are not started. Only
* https endpoints are started on configured https ports</li>
* <li>This configuration is overridden by the dfs.https.enable configuration, if
* it is set to true. In that case, both http and https endpoints are started.</li>
* <li>All the above configurations are overridden by dfs.http.policy
* configuration. With this configuration you can set http-only, https-only
* and http-and-https endpoints.</li>
* </ol>
* See hdfs-default.xml documentation for more details on each of the above
* configuration settings.
*/
public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
String httpPolicy = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY,
DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT);
HttpConfig.Policy policy = HttpConfig.Policy.fromString(httpPolicy);
if (policy == HttpConfig.Policy.HTTP_ONLY) {
boolean httpsEnabled = conf.getBoolean(
DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);
boolean hadoopSslEnabled = conf.getBoolean(
CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);
if (hadoopSslEnabled) {
LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
+ " is deprecated. Please use "
+ DFSConfigKeys.DFS_HTTP_POLICY_KEY + ".");
policy = HttpConfig.Policy.HTTPS_ONLY;
} else if (httpsEnabled) {
LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
+ " is deprecated. Please use "
+ DFSConfigKeys.DFS_HTTP_POLICY_KEY + ".");
policy = HttpConfig.Policy.HTTP_AND_HTTPS;
}
}
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
return policy;
}
public static HttpServer.Builder loadSslConfToHttpServerBuilder(HttpServer.Builder builder,
Configuration sslConf) {
return builder
.needsClientAuth(
sslConf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
sslConf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
.keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
.keyStore(sslConf.get("ssl.server.keystore.location"),
sslConf.get("ssl.server.keystore.password"),
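
Aside: an illustrative snippet (not part of the patch) making the precedence concrete. Per getHttpPolicy above, the deprecated keys are consulted only when the configured policy is the default HTTP_ONLY.

Configuration conf = new Configuration();
conf.setBoolean("hadoop.ssl.enabled", true); // legacy key; alone, would imply HTTPS_ONLY
conf.set("dfs.http.policy", "HTTP_AND_HTTPS"); // explicit policy
// The explicit dfs.http.policy wins, so the result is HTTP_AND_HTTPS:
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);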

View File

@@ -19,7 +19,6 @@
package org.apache.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.DeprecationDelta;
import org.apache.hadoop.classification.InterfaceAudience;

View File

@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
@@ -66,6 +65,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlo
import org.apache.hadoop.hdfs.server.protocol.*;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.ReadaheadPool;
@@ -181,9 +181,11 @@ public class DataNode extends Configured
private DNConf dnConf;
private volatile boolean heartbeatsDisabledForTests = false;
private DataStorage storage = null;
private HttpServer infoServer = null;
private int infoPort;
private int infoSecurePort;
DataNodeMetrics metrics;
private InetSocketAddress streamingAddr;
@@ -285,7 +287,7 @@
* explicitly configured in the given config, then it is determined
* via the DNS class.
*
* @param config
* @param config configuration
* @return the hostname (NB: may not be a FQDN)
* @throws UnknownHostException if the dfs.datanode.dns.interface
* option is used and the hostname can not be determined
@@ -303,40 +305,54 @@ public class DataNode extends Configured
return name;
}
/**
* @see DFSUtil#getHttpPolicy(org.apache.hadoop.conf.Configuration)
* for information related to the different configuration options and
* how the HTTP policy is decided.
*/
private void startInfoServer(Configuration conf) throws IOException {
// create a servlet to serve full-file content
HttpServer.Builder builder = new HttpServer.Builder().setName("datanode")
.setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
String infoHost = infoSocAddr.getHostName();
int tmpInfoPort = infoSocAddr.getPort();
HttpServer.Builder builder = new HttpServer.Builder().setName("datanode")
.addEndpoint(URI.create("http://" + NetUtils.getHostPortString(infoSocAddr)))
.setFindPort(tmpInfoPort == 0).setConf(conf)
.setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
if (policy.isHttpEnabled()) {
if (secureResources == null) {
int port = infoSocAddr.getPort();
builder.addEndpoint(URI.create("http://" + infoHost + ":" + port));
if (port == 0) {
builder.setFindPort(true);
}
} else {
// The http socket is created externally using JSVC, so we add it in
// directly.
builder.setConnector(secureResources.getListener());
}
}
if (policy.isHttpsEnabled()) {
InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
builder.addEndpoint(URI.create("https://"
+ NetUtils.getHostPortString(secInfoSocAddr)));
Configuration sslConf = new Configuration(false);
sslConf.setBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf
.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
sslConf.addResource(conf.get(
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf.getBoolean(
DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
if(LOG.isDebugEnabled()) {
LOG.debug("Datanode listening for SSL on " + secInfoSocAddr);
int port = secInfoSocAddr.getPort();
if (port == 0) {
builder.setFindPort(true);
}
infoSecurePort = secInfoSocAddr.getPort();
builder.addEndpoint(URI.create("https://" + infoHost + ":" + port));
}
this.infoServer = (secureResources == null) ? builder.build() :
builder.setConnector(secureResources.getListener()).build();
this.infoServer = builder.build();
this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
this.infoServer.addInternalServlet(null, "/getFileChecksum/*",
FileChecksumServlets.GetServlet.class);
@@ -352,7 +368,15 @@ public class DataNode extends Configured
WebHdfsFileSystem.PATH_PREFIX + "/*");
}
this.infoServer.start();
this.infoPort = infoServer.getConnectorAddress(0).getPort();
int connIdx = 0;
if (policy.isHttpEnabled()) {
infoPort = infoServer.getConnectorAddress(connIdx++).getPort();
}
if (policy.isHttpsEnabled()) {
infoSecurePort = infoServer.getConnectorAddress(connIdx).getPort();
}
}
private void startPlugins(Configuration conf) {

View File

@@ -16,27 +16,20 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.nio.channels.ServerSocketChannel;
import java.security.GeneralSecurityException;
import org.apache.commons.daemon.Daemon;
import org.apache.commons.daemon.DaemonContext;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.security.SslSocketConnector;
import javax.net.ssl.SSLServerSocketFactory;
import com.google.common.annotations.VisibleForTesting;
@@ -65,7 +58,6 @@ public class SecureDataNodeStarter implements Daemon {
private String [] args;
private SecureResources resources;
private SSLFactory sslFactory;
@Override
public void init(DaemonContext context) throws Exception {
@@ -74,9 +66,7 @@ public class SecureDataNodeStarter implements Daemon {
// Stash command-line arguments for regular datanode
args = context.getArguments();
sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
resources = getSecureResources(sslFactory, conf);
resources = getSecureResources(conf);
}
@Override
@@ -85,18 +75,23 @@ public class SecureDataNodeStarter implements Daemon {
DataNode.secureMain(args, resources);
}
@Override public void destroy() {
sslFactory.destroy();
}
@Override public void destroy() {}
@Override public void stop() throws Exception { /* Nothing to do */ }
/**
* Acquire privileged resources (i.e., the privileged ports) for the data
* node. The privileged resources consist of the port of the RPC server and
* the port of the HTTP (not HTTPS) server.
*/
@VisibleForTesting
public static SecureResources getSecureResources(final SSLFactory sslFactory,
Configuration conf) throws Exception {
public static SecureResources getSecureResources(Configuration conf)
throws Exception {
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
// Obtain secure port for data streaming to datanode
InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
int socketWriteTimeout = conf.getInt(
DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
HdfsServerConstants.WRITE_TIMEOUT);
ServerSocket ss = (socketWriteTimeout > 0) ?
@@ -105,29 +100,20 @@ public class SecureDataNodeStarter implements Daemon {
// Check that we got the port we need
if (ss.getLocalPort() != streamingAddr.getPort()) {
throw new RuntimeException("Unable to bind on specified streaming port in secure " +
"context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
throw new RuntimeException(
"Unable to bind on specified streaming port in secure "
+ "context. Needed " + streamingAddr.getPort() + ", got "
+ ss.getLocalPort());
}
// Obtain secure listener for web server
Connector listener;
if (HttpConfig.isSecure()) {
try {
sslFactory.init();
} catch (GeneralSecurityException ex) {
throw new IOException(ex);
}
SslSocketConnector sslListener = new SslSocketConnector() {
@Override
protected SSLServerSocketFactory createFactory() throws Exception {
return sslFactory.createSSLServerSocketFactory();
}
};
listener = sslListener;
} else {
System.err.println("Opened streaming server at " + streamingAddr);
// Bind a port for the web server. The code intends to bind the HTTP server
// to a privileged port only, as the client can authenticate the server
// through certificates when communicating over SSL.
Connector listener = null;
if (policy.isHttpEnabled()) {
listener = HttpServer.createDefaultChannelConnector();
}
InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
listener.setHost(infoSocAddr.getHostName());
listener.setPort(infoSocAddr.getPort());
@@ -144,8 +130,9 @@
UserGroupInformation.isSecurityEnabled()) {
throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
}
System.err.println("Opened streaming server at " + streamingAddr);
System.err.println("Opened info server at " + infoSocAddr);
}
return new SecureResources(ss, listener);
}
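
Aside: a hedged sketch of the resulting contract, assuming the listener setup above runs only when HTTP is enabled. Under HTTPS_ONLY no privileged web connector is created, because clients can authenticate the server through its certificate rather than a privileged port.

Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "HTTPS_ONLY");
SecureResources res = SecureDataNodeStarter.getSecureResources(conf);
// res.getListener() is null here; only the privileged streaming socket was
// acquired, and the HTTPS endpoint is bound later by the DataNode itself.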

View File

@@ -123,11 +123,6 @@ public class BackupNode extends NameNode {
return NetUtils.createSocketAddr(addr);
}
@Override // NameNode
protected void setHttpServerAddress(Configuration conf){
conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(getHttpAddress()));
}
@Override // NameNode
protected void loadNamesystem(Configuration conf) throws IOException {
conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,
@@ -163,6 +158,10 @@
registerWith(nsInfo);
// Checkpoint daemon should start after the rpc server started
runCheckpointDaemon(conf);
InetSocketAddress addr = getHttpAddress();
if (addr != null) {
conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(addr));
}
}
@Override

View File

@@ -17,6 +17,10 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
@@ -432,18 +436,12 @@ public class NameNode implements NameNodeStatusMXBean {
return getHttpAddress(conf);
}
/** @return the NameNode HTTP address set in the conf. */
/** @return the NameNode HTTP address. */
public static InetSocketAddress getHttpAddress(Configuration conf) {
return NetUtils.createSocketAddr(
conf.get(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
}
protected void setHttpServerAddress(Configuration conf) {
String hostPort = NetUtils.getHostPortString(getHttpAddress());
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, hostPort);
LOG.info("Web-server up at: " + hostPort);
}
protected void loadNamesystem(Configuration conf) throws IOException {
this.namesystem = FSNamesystem.loadFromDisk(conf);
}
@@ -492,7 +490,6 @@ public class NameNode implements NameNodeStatusMXBean {
if (NamenodeRole.NAMENODE == role) {
startHttpServer(conf);
validateConfigurationSettingsOrAbort(conf);
}
loadNamesystem(conf);
@@ -500,8 +497,6 @@ public class NameNode implements NameNodeStatusMXBean {
if (NamenodeRole.NAMENODE == role) {
httpServer.setNameNodeAddress(getNameNodeAddress());
httpServer.setFSImage(getFSImage());
} else {
validateConfigurationSettingsOrAbort(conf);
}
pauseMonitor = new JvmPauseMonitor(conf);
@@ -519,45 +514,6 @@ public class NameNode implements NameNodeStatusMXBean {
return new NameNodeRpcServer(conf, this);
}
/**
* Verifies that the final Configuration Settings look ok for the NameNode to
* properly start up
* Things to check for include:
* - HTTP Server Port does not equal the RPC Server Port
* @param conf
* @throws IOException
*/
protected void validateConfigurationSettings(final Configuration conf)
throws IOException {
// check to make sure the web port and rpc port do not match
if(getHttpServerAddress(conf).getPort()
== getRpcServerAddress(conf).getPort()) {
String errMsg = "dfs.namenode.rpc-address " +
"("+ getRpcServerAddress(conf) + ") and " +
"dfs.namenode.http-address ("+ getHttpServerAddress(conf) + ") " +
"configuration keys are bound to the same port, unable to start " +
"NameNode. Port: " + getRpcServerAddress(conf).getPort();
throw new IOException(errMsg);
}
}
/**
* Validate NameNode configuration. Log a fatal error and abort if
* configuration is invalid.
*
* @param conf Configuration to validate
* @throws IOException thrown if conf is invalid
*/
private void validateConfigurationSettingsOrAbort(Configuration conf)
throws IOException {
try {
validateConfigurationSettings(conf);
} catch (IOException e) {
LOG.fatal(e.toString());
throw e;
}
}
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
namesystem.startCommonServices(conf, haContext);
@@ -636,7 +592,6 @@ public class NameNode implements NameNodeStatusMXBean {
httpServer = new NameNodeHttpServer(conf, this, getHttpServerAddress(conf));
httpServer.start();
httpServer.setStartupProgress(startupProgress);
setHttpServerAddress(conf);
}
private void stopHttpServer() {
@@ -658,7 +613,7 @@
* <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li>
* <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
* upgrade and create a snapshot of the current file system state</li>
* <li>{@link StartupOption#RECOVERY RECOVERY} - recover name node
* <li>{@link StartupOption#RECOVER RECOVERY} - recover name node
* metadata</li>
* <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the
* cluster back to the previous state</li>

View File

@@ -18,6 +18,8 @@
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -38,6 +40,7 @@ import org.apache.hadoop.hdfs.web.AuthFilter;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
@@ -62,52 +65,18 @@ public class NameNodeHttpServer {
protected static final String NAMENODE_ATTRIBUTE_KEY = "name.node";
public static final String STARTUP_PROGRESS_ATTRIBUTE_KEY = "startup.progress";
public NameNodeHttpServer(
Configuration conf,
NameNode nn,
NameNodeHttpServer(Configuration conf, NameNode nn,
InetSocketAddress bindAddress) {
this.conf = conf;
this.nn = nn;
this.bindAddress = bindAddress;
}
void start() throws IOException {
final String infoHost = bindAddress.getHostName();
int infoPort = bindAddress.getPort();
HttpServer.Builder builder = new HttpServer.Builder().setName("hdfs")
.addEndpoint(URI.create(("http://" + NetUtils.getHostPortString(bindAddress))))
.setFindPort(infoPort == 0).setConf(conf).setACL(
new AccessControlList(conf.get(DFS_ADMIN, " ")))
.setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
.setUsernameConfKey(
DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
.setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
if (certSSL) {
httpsAddress = NetUtils.createSocketAddr(conf.get(
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
builder.addEndpoint(URI.create("https://"
+ NetUtils.getHostPortString(httpsAddress)));
Configuration sslConf = new Configuration(false);
sslConf.setBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf
.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
sslConf.addResource(conf.get(
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
}
httpServer = builder.build();
private void initWebHdfs(Configuration conf) throws IOException {
if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) {
// set user pattern based on configuration file
UserParam.setUserPattern(conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
// add SPNEGO authentication filter for webhdfs
//add SPNEGO authentication filter for webhdfs
final String name = "SPNEGO";
final String classname = AuthFilter.class.getName();
final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
@@ -121,19 +90,92 @@ NameNodeHttpServer {
NamenodeWebHdfsMethods.class.getPackage().getName()
+ ";" + Param.class.getPackage().getName(), pathSpec);
}
}
/**
* @see DFSUtil#getHttpPolicy(org.apache.hadoop.conf.Configuration)
* for information related to the different configuration options and
* how the HTTP policy is decided.
*/
void start() throws IOException {
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
final String infoHost = bindAddress.getHostName();
HttpServer.Builder builder = new HttpServer.Builder()
.setName("hdfs")
.setConf(conf)
.setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
.setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
.setUsernameConfKey(
DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
.setKeytabConfKey(
DFSUtil.getSpnegoKeytabKey(conf,
DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
if (policy.isHttpEnabled()) {
int port = bindAddress.getPort();
if (port == 0) {
builder.setFindPort(true);
}
builder.addEndpoint(URI.create("http://" + infoHost + ":" + port));
}
if (policy.isHttpsEnabled()) {
final String httpsAddrString = conf.get(
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
InetSocketAddress addr = NetUtils.createSocketAddr(httpsAddrString);
Configuration sslConf = new Configuration(false);
sslConf.addResource(conf.get(
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf.getBoolean(
DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
if (addr.getPort() == 0) {
builder.setFindPort(true);
}
builder.addEndpoint(URI.create("https://"
+ NetUtils.getHostPortString(addr)));
}
httpServer = builder.build();
if (policy.isHttpsEnabled()) {
// assume same ssl port for all datanodes
InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":"
+ DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
datanodeSslPort.getPort());
}
initWebHdfs(conf);
httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
setupServlets(httpServer, conf);
httpServer.start();
httpAddress = httpServer.getConnectorAddress(0);
if (certSSL) {
httpsAddress = httpServer.getConnectorAddress(1);
// assume same ssl port for all datanodes
InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, datanodeSslPort
.getPort());
int connIdx = 0;
if (policy.isHttpEnabled()) {
httpAddress = httpServer.getConnectorAddress(connIdx++);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
NetUtils.getHostPortString(httpAddress));
}
if (policy.isHttpsEnabled()) {
httpsAddress = httpServer.getConnectorAddress(connIdx);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
NetUtils.getHostPortString(httpsAddress));
}
}
@@ -169,18 +211,17 @@ NameNodeHttpServer {
return params;
}
public void stop() throws Exception {
void stop() throws Exception {
if (httpServer != null) {
httpServer.stop();
}
}
public InetSocketAddress getHttpAddress() {
InetSocketAddress getHttpAddress() {
return httpAddress;
}
public InetSocketAddress getHttpsAddress() {
InetSocketAddress getHttpsAddress() {
return httpsAddress;
}
@@ -189,7 +230,7 @@ NameNodeHttpServer {
*
* @param fsImage FSImage to set
*/
public void setFSImage(FSImage fsImage) {
void setFSImage(FSImage fsImage) {
httpServer.setAttribute(FSIMAGE_ATTRIBUTE_KEY, fsImage);
}
@@ -198,7 +239,7 @@ NameNodeHttpServer {
*
* @param nameNodeAddress InetSocketAddress to set
*/
public void setNameNodeAddress(InetSocketAddress nameNodeAddress) {
void setNameNodeAddress(InetSocketAddress nameNodeAddress) {
httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
NetUtils.getConnectAddress(nameNodeAddress));
}
@@ -208,7 +249,7 @@ NameNodeHttpServer {
*
* @param prog StartupProgress to set
*/
public void setStartupProgress(StartupProgress prog) {
void setStartupProgress(StartupProgress prog) {
httpServer.setAttribute(STARTUP_PROGRESS_ATTRIBUTE_KEY, prog);
}
@@ -238,7 +279,7 @@ NameNodeHttpServer {
ContentSummaryServlet.class, false);
}
public static FSImage getFsImageFromContext(ServletContext context) {
static FSImage getFsImageFromContext(ServletContext context) {
return (FSImage)context.getAttribute(FSIMAGE_ATTRIBUTE_KEY);
}
@@ -246,7 +287,7 @@ NameNodeHttpServer {
return (NameNode)context.getAttribute(NAMENODE_ATTRIBUTE_KEY);
}
public static Configuration getConfFromContext(ServletContext context) {
static Configuration getConfFromContext(ServletContext context) {
return (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
}
@@ -262,7 +303,7 @@ NameNodeHttpServer {
* @param context ServletContext to get
* @return StartupProgress associated with context
*/
public static StartupProgress getStartupProgressFromContext(
static StartupProgress getStartupProgressFromContext(
ServletContext context) {
return (StartupProgress)context.getAttribute(STARTUP_PROGRESS_ATTRIBUTE_KEY);
}
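
Aside: one consequence of the connector bookkeeping above, sketched with hypothetical values. start() writes the bound addresses back into the Configuration, which is how ephemeral "host:0" ports become discoverable.

conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
NameNodeHttpServer server = new NameNodeHttpServer(conf, nn, bindAddress);
server.start();
// The OS-chosen port is now visible to other components:
String bound = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);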

View File

@@ -30,7 +30,6 @@ import java.io.FilenameFilter;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
@@ -257,12 +256,7 @@ public class SecondaryNameNode implements Runnable {
// initialize the webserver for uploading files.
int tmpInfoPort = infoSocAddr.getPort();
URI httpEndpoint;
try {
httpEndpoint = new URI("http://" + NetUtils.getHostPortString(infoSocAddr));
} catch (URISyntaxException e) {
throw new IOException(e);
}
URI httpEndpoint = URI.create("http://" + NetUtils.getHostPortString(infoSocAddr));
infoServer = new HttpServer.Builder().setName("secondary")
.addEndpoint(httpEndpoint)
@@ -273,6 +267,7 @@
DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
.setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)).build();
infoServer.setAttribute("secondary.name.node", this);
infoServer.setAttribute("name.system.image", checkpointImage);
infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);

View File

@@ -245,8 +245,12 @@ public class NamenodeWebHdfsMethods {
+ Param.toSortedString("&", parameters);
final String uripath = WebHdfsFileSystem.PATH_PREFIX + path;
final URI uri = new URI("http", null, dn.getHostName(), dn.getInfoPort(),
uripath, query, null);
final String scheme = request.getScheme();
int port = "http".equals(scheme) ? dn.getInfoPort() : dn
.getInfoSecurePort();
final URI uri = new URI(scheme, null, dn.getHostName(), port, uripath,
query, null);
if (LOG.isTraceEnabled()) {
LOG.trace("redirectURI=" + uri);
}

View File

@@ -137,7 +137,20 @@
<property>
<name>dfs.https.enable</name>
<value>false</value>
<description>
Deprecated. Use "dfs.http.policy" instead.
</description>
</property>
<property>
<name>dfs.http.policy</name>
<value>HTTP_ONLY</value>
<description>Decides whether HTTPS (SSL) is supported on HDFS.
This configures the HTTP endpoint for HDFS daemons.
The following values are supported:
- HTTP_ONLY : Service is provided only on http
- HTTPS_ONLY : Service is provided only on https
- HTTP_AND_HTTPS : Service is provided both on http and https
</description>
</property>
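
Aside: for example, a cluster serving web UIs only over TLS might set the following (illustrative; the keystore itself is wired up via the ssl-server resource referenced elsewhere in this patch):

<property>
  <name>dfs.http.policy</name>
  <value>HTTPS_ONLY</value>
</property>
<property>
  <name>dfs.namenode.https-address</name>
  <value>0.0.0.0:50470</value>
</property>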

View File

@@ -33,6 +33,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HOSTS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
@@ -901,12 +902,17 @@ public class MiniDFSCluster {
// After the NN has started, set back the bound ports into
// the conf
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId),
nn.getNameNodeAddressHostPortString());
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId), NetUtils
.getHostPortString(nn.getHttpAddress()));
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
nameserviceId, nnId), nn.getNameNodeAddressHostPortString());
if (nn.getHttpAddress() != null) {
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY,
nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpAddress()));
}
if (nn.getHttpsAddress() != null) {
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTPS_ADDRESS_KEY,
nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpsAddress()));
}
DFSUtil.setGenericConf(conf, nameserviceId, nnId,
DFS_NAMENODE_HTTP_ADDRESS_KEY);
nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId,
@@ -1182,9 +1188,8 @@ public class MiniDFSCluster {
SecureResources secureResources = null;
if (UserGroupInformation.isSecurityEnabled()) {
SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, dnConf);
try {
secureResources = SecureDataNodeStarter.getSecureResources(sslFactory, dnConf);
secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
} catch (Exception ex) {
ex.printStackTrace();
}

View File

@@ -158,9 +158,8 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
SecureResources secureResources = null;
if (UserGroupInformation.isSecurityEnabled()) {
SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, dnConf);
try {
secureResources = SecureDataNodeStarter.getSecureResources(sslFactory, dnConf);
secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
} catch (Exception ex) {
ex.printStackTrace();
}

View File

@@ -0,0 +1,127 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLConnection;
import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpConfig.Policy;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
@RunWith(value = Parameterized.class)
public class TestNameNodeHttpServer {
private static final String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + TestNameNodeHttpServer.class.getSimpleName();
private static String keystoresDir;
private static String sslConfDir;
private static Configuration conf;
private static URLConnectionFactory connectionFactory;
@Parameters
public static Collection<Object[]> policy() {
Object[][] params = new Object[][] { { HttpConfig.Policy.HTTP_ONLY },
{ HttpConfig.Policy.HTTPS_ONLY }, { HttpConfig.Policy.HTTP_AND_HTTPS } };
return Arrays.asList(params);
}
private final HttpConfig.Policy policy;
public TestNameNodeHttpServer(Policy policy) {
super();
this.policy = policy;
}
@BeforeClass
public static void setUp() throws Exception {
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
conf = new Configuration();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestNameNodeHttpServer.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
}
@AfterClass
public static void tearDown() throws Exception {
FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
}
@Test
public void testHttpPolicy() throws Exception {
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
NameNodeHttpServer server = null;
try {
server = new NameNodeHttpServer(conf, null, addr);
server.start();
Assert.assertTrue(implies(policy.isHttpEnabled(),
canAccess("http", server.getHttpAddress())));
Assert.assertTrue(implies(!policy.isHttpEnabled(),
server.getHttpAddress() == null));
Assert.assertTrue(implies(policy.isHttpsEnabled(),
canAccess("https", server.getHttpsAddress())));
Assert.assertTrue(implies(!policy.isHttpsEnabled(),
server.getHttpsAddress() == null));
} finally {
if (server != null) {
server.stop();
}
}
}
private static boolean canAccess(String scheme, InetSocketAddress addr) {
if (addr == null)
return false;
try {
URL url = new URL(scheme + "://" + NetUtils.getHostPortString(addr));
URLConnection conn = connectionFactory.openConnection(url);
conn.connect();
conn.getContent();
} catch (Exception e) {
return false;
}
return true;
}
private static boolean implies(boolean a, boolean b) {
return !a || b;
}
}

View File

@@ -22,6 +22,7 @@ import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.net.BindException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -50,7 +51,7 @@ public class TestValidateConfigurationSettings {
* an exception
* is thrown when trying to re-use the same port
*/
@Test
@Test(expected = BindException.class)
public void testThatMatchingRPCandHttpPortsThrowException()
throws IOException {
@@ -63,14 +64,7 @@
FileSystem.setDefaultUri(conf, "hdfs://localhost:9000");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:9000");
DFSTestUtil.formatNameNode(conf);
try {
NameNode nameNode = new NameNode(conf);
fail("Should have throw the exception since the ports match");
} catch (IOException e) {
// verify we're getting the right IOException
assertTrue(e.toString().contains("dfs.namenode.rpc-address ("));
System.out.println("Got expected exception: " + e.toString());
}
new NameNode(conf);
}
/**

View File

@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -49,7 +50,7 @@ public class TestHttpsFileSystem {
public static void setUp() throws Exception {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
File base = new File(BASEDIR);