diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index b52f2d3d08d..43d1f72242e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -170,7 +170,7 @@ public class HttpFSServer {
throws IOException, FileSystemAccessException {
String hadoopUser = getEffectiveUser(user, doAs);
FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
- Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getDefaultConfiguration();
+ Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getFileSystemConfiguration();
return fsAccess.execute(hadoopUser, conf, executor);
}
@@ -194,7 +194,7 @@ public class HttpFSServer {
private FileSystem createFileSystem(Principal user, String doAs) throws IOException, FileSystemAccessException {
String hadoopUser = getEffectiveUser(user, doAs);
FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
- Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getDefaultConfiguration();
+ Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getFileSystemConfiguration();
FileSystem fs = fsAccess.createFileSystem(hadoopUser, conf);
FileSystemReleaseFilter.setFileSystem(fs);
return fs;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
index 9e1609ed6b6..fec8aa0805b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.lib.server.ServerException;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.servlet.ServerWebApp;
@@ -29,8 +30,9 @@ import java.io.IOException;
/**
* Bootstrap class that manages the initialization and destruction of the
- * HttpFSServer server, it is a javax.servlet.ServletContextListener
- * implementation that is wired in HttpFSServer's WAR WEB-INF/web.xml.
+ * HttpFSServer server, it is a javax.servlet.ServletContextListener
+ * implementation that is wired in HttpFSServer's WAR
+ * WEB-INF/web.xml.
*
* It provides acces to the server context via the singleton {@link #get}.
*
@@ -38,7 +40,8 @@ import java.io.IOException;
* with httpfs.
*/
public class HttpFSServerWebApp extends ServerWebApp {
- private static final Logger LOG = LoggerFactory.getLogger(HttpFSServerWebApp.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HttpFSServerWebApp.class);
/**
* Server name and prefix for all configuration properties.
@@ -67,8 +70,8 @@ public class HttpFSServerWebApp extends ServerWebApp {
/**
* Constructor used for testing purposes.
*/
- protected HttpFSServerWebApp(String homeDir, String configDir, String logDir, String tempDir,
- Configuration config) {
+ protected HttpFSServerWebApp(String homeDir, String configDir, String logDir,
+ String tempDir, Configuration config) {
super(NAME, homeDir, configDir, logDir, tempDir, config);
}
@@ -80,9 +83,11 @@ public class HttpFSServerWebApp extends ServerWebApp {
}
/**
- * Initializes the HttpFSServer server, loads configuration and required services.
+ * Initializes the HttpFSServer server, loads configuration and required
+ * services.
*
- * @throws ServerException thrown if HttpFSServer server could not be initialized.
+ * @throws ServerException thrown if HttpFSServer server could not be
+ * initialized.
*/
@Override
public void init() throws ServerException {
@@ -93,7 +98,8 @@ public class HttpFSServerWebApp extends ServerWebApp {
SERVER = this;
adminGroup = getConfig().get(getPrefixedName(CONF_ADMIN_GROUP), "admin");
LOG.info("Connects to Namenode [{}]",
- get().get(FileSystemAccess.class).getDefaultConfiguration().get("fs.default.name"));
+ get().get(FileSystemAccess.class).getFileSystemConfiguration().
+ get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
}
/**
@@ -106,7 +112,8 @@ public class HttpFSServerWebApp extends ServerWebApp {
}
/**
- * Returns HttpFSServer server singleton, configuration and services are accessible through it.
+ * Returns HttpFSServer server singleton, configuration and services are
+ * accessible through it.
*
* @return the HttpFSServer server singleton.
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java
index 7984761d547..5d8ce9e6e2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java
@@ -37,6 +37,6 @@ public interface FileSystemAccess {
public void releaseFileSystem(FileSystem fs) throws IOException;
- public Configuration getDefaultConfiguration();
+ public Configuration getFileSystemConfiguration();
}
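
As a rough usage sketch (not part of the patch: the listAs method name, the user string and the path are illustrative, and imports/enclosing class are omitted), a caller such as HttpFSServer is now expected to obtain the Hadoop configuration from the service itself before running an operation:

  // Illustrative only: get the service-managed Hadoop configuration and
  // run a FileSystem operation as the given user with it.
  private FileStatus[] listAs(String hadoopUser, final Path path)
      throws IOException, FileSystemAccessException {
    FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
    Configuration conf = fsAccess.getFileSystemConfiguration();
    return fsAccess.execute(hadoopUser, conf,
        new FileSystemAccess.FileSystemExecutor<FileStatus[]>() {
          @Override
          public FileStatus[] execute(FileSystem fs) throws IOException {
            return fs.listStatus(path);
          }
        });
  }
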
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java
index 8a0ba3caa0a..42fc8ff1bdc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java
@@ -26,12 +26,14 @@ public class FileSystemAccessException extends XException {
H01("Service property [{0}] not defined"),
H02("Kerberos initialization failed, {0}"),
H03("FileSystemExecutor error, {0}"),
- H04("JobClientExecutor error, {0}"),
+ H04("Invalid configuration, it has not be created by the FileSystemAccessService"),
H05("[{0}] validation failed, {1}"),
H06("Property [{0}] not defined in configuration object"),
H07("[{0}] not healthy, {1}"),
- H08(""),
- H09("Invalid FileSystemAccess security mode [{0}]");
+ H08("{0}"),
+ H09("Invalid FileSystemAccess security mode [{0}]"),
+ H10("Hadoop config directory not found [{0}]"),
+ H11("Could not load Hadoop config files, {0}");
private String template;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
index f1a9ac055dd..eb31b060843 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
@@ -19,7 +19,9 @@
package org.apache.hadoop.lib.service.hadoop;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.FileSystemAccess;
@@ -32,6 +34,7 @@ import org.apache.hadoop.util.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
@@ -54,9 +57,11 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
public static final String NAME_NODE_WHITELIST = "name.node.whitelist";
- private static final String HADOOP_CONF_PREFIX = "conf:";
+ public static final String HADOOP_CONF_DIR = "config.dir";
- private static final String NAME_NODE_PROPERTY = "fs.default.name";
+ private static final String[] HADOOP_CONF_FILES = {"core-site.xml", "hdfs-site.xml"};
+
+ private static final String FILE_SYSTEM_SERVICE_CREATED = "FileSystemAccessService.created";
public FileSystemAccessService() {
super(PREFIX);
@@ -102,26 +107,40 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
throw new ServiceException(FileSystemAccessException.ERROR.H09, security);
}
- serviceHadoopConf = new Configuration(false);
- for (Map.Entry entry : getServiceConfig()) {
- String name = (String) entry.getKey();
- if (name.startsWith(HADOOP_CONF_PREFIX)) {
- name = name.substring(HADOOP_CONF_PREFIX.length());
- String value = (String) entry.getValue();
- serviceHadoopConf.set(name, value);
-
- }
+ String hadoopConfDirProp = getServiceConfig().get(HADOOP_CONF_DIR, getServer().getConfigDir());
+ File hadoopConfDir = new File(hadoopConfDirProp).getAbsoluteFile();
+ if (hadoopConfDir == null) {
+ hadoopConfDir = new File(getServer().getConfigDir()).getAbsoluteFile();
+ }
+ if (!hadoopConfDir.exists()) {
+ throw new ServiceException(FileSystemAccessException.ERROR.H10, hadoopConfDir);
+ }
+ try {
+ serviceHadoopConf = loadHadoopConf(hadoopConfDir);
+ } catch (IOException ex) {
+ throw new ServiceException(FileSystemAccessException.ERROR.H11, ex.toString(), ex);
}
- setRequiredServiceHadoopConf(serviceHadoopConf);
- LOG.debug("FileSystemAccess default configuration:");
+ LOG.debug("FileSystemAccess FileSystem configuration:");
for (Map.Entry entry : serviceHadoopConf) {
LOG.debug(" {} = {}", entry.getKey(), entry.getValue());
}
+ setRequiredServiceHadoopConf(serviceHadoopConf);
nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST));
}
+ private Configuration loadHadoopConf(File dir) throws IOException {
+ Configuration hadoopConf = new Configuration(false);
+ for (String file : HADOOP_CONF_FILES) {
+ File f = new File(dir, file);
+ if (f.exists()) {
+ hadoopConf.addResource(new Path(f.getAbsolutePath()));
+ }
+ }
+ return hadoopConf;
+ }
+
@Override
public void postInit() throws ServiceException {
super.postInit();
@@ -166,17 +185,6 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
conf.set("fs.hdfs.impl.disable.cache", "true");
}
- protected Configuration createHadoopConf(Configuration conf) {
- Configuration hadoopConf = new Configuration();
- ConfigurationUtils.copy(serviceHadoopConf, hadoopConf);
- ConfigurationUtils.copy(conf, hadoopConf);
- return hadoopConf;
- }
-
- protected Configuration createNameNodeConf(Configuration conf) {
- return createHadoopConf(conf);
- }
-
protected FileSystem createFileSystem(Configuration namenodeConf) throws IOException {
return FileSystem.get(namenodeConf);
}
@@ -202,16 +210,22 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
Check.notEmpty(user, "user");
Check.notNull(conf, "conf");
Check.notNull(executor, "executor");
- if (conf.get(NAME_NODE_PROPERTY) == null || conf.getTrimmed(NAME_NODE_PROPERTY).length() == 0) {
- throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06, NAME_NODE_PROPERTY);
+ if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
+ throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
+ }
+ if (conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) == null ||
+ conf.getTrimmed(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY).length() == 0) {
+ throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06,
+ CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
}
try {
- validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
+ validateNamenode(
+ new URI(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)).
+ getAuthority());
UserGroupInformation ugi = getUGI(user);
return ugi.doAs(new PrivilegedExceptionAction<T>() {
public T run() throws Exception {
- Configuration namenodeConf = createNameNodeConf(conf);
- FileSystem fs = createFileSystem(namenodeConf);
+ FileSystem fs = createFileSystem(conf);
Instrumentation instrumentation = getServer().get(Instrumentation.class);
Instrumentation.Cron cron = instrumentation.createCron();
try {
@@ -236,13 +250,16 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
throws IOException, FileSystemAccessException {
Check.notEmpty(user, "user");
Check.notNull(conf, "conf");
+ if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
+ throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
+ }
try {
- validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
+ validateNamenode(
+ new URI(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)).getAuthority());
UserGroupInformation ugi = getUGI(user);
return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
public FileSystem run() throws Exception {
- Configuration namenodeConf = createNameNodeConf(conf);
- return createFileSystem(namenodeConf);
+ return createFileSystem(conf);
}
});
} catch (IOException ex) {
@@ -267,11 +284,11 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
closeFileSystem(fs);
}
-
@Override
- public Configuration getDefaultConfiguration() {
- Configuration conf = new Configuration(false);
+ public Configuration getFileSystemConfiguration() {
+ Configuration conf = new Configuration(true);
ConfigurationUtils.copy(serviceHadoopConf, conf);
+ conf.setBoolean(FILE_SYSTEM_SERVICE_CREATED, true);
return conf;
}
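
A small sketch of the resulting contract (illustrative only: the contractExample name, the "someuser" string and the server variable are not from the patch). Only configurations produced by getFileSystemConfiguration() carry the FileSystemAccessService.created marker, so hand-built configurations are now rejected with error H04:

  // Sketch of the calling contract introduced by this patch.
  void contractExample(Server server) throws Exception {
    FileSystemAccess fsAccess = server.get(FileSystemAccess.class);

    Configuration good = fsAccess.getFileSystemConfiguration(); // marker set, site files loaded
    FileSystem fs = fsAccess.createFileSystem("someuser", good); // accepted
    fsAccess.releaseFileSystem(fs);

    Configuration bad = new Configuration(false);                // no marker
    fsAccess.createFileSystem("someuser", bad);                  // throws FileSystemAccessException H04
  }
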
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
index c58c925663e..e96042ef133 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
@@ -153,29 +153,6 @@
-
-
-  <property>
-    <name>namenode.hostname</name>
-    <value>localhost</value>
-    <description>
-      The HDFS Namenode host the httpfs server connects to perform file
-      system operations.
-
-      This property is only used to resolve other properties within this
-      configuration file.
-    </description>
-  </property>
-
-  <property>
-    <name>httpfs.hadoop.conf:fs.default.name</name>
-    <value>hdfs://${namenode.hostname}:8020</value>
-    <description>
-      The HDFS Namenode URI the httpfs server connects to perform file
-      system operations.
-    </description>
-  </property>
-
@@ -206,12 +183,4 @@
-  <property>
-    <name>httpfs.hadoop.conf:dfs.namenode.kerberos.principal</name>
-    <value>hdfs/${namenode.hostname}@${kerberos.realm}</value>
-    <description>
-      The HDFS Namenode Kerberos principal.
-    </description>
-  </property>
-
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm
index 26891721b8d..fe5ad30608e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm
@@ -37,13 +37,13 @@ Hadoop HDFS over HTTP ${project.version} - Server Setup
* Configure HttpFS
- Edit the <<<httpfs-site.xml>>> file and
- set the <<<httpfs.fsAccess.conf:fs.default.name>>> property to the HDFS
- Namenode URI. For example:
+ By default, HttpFS assumes that Hadoop configuration files
+ (<<<core-site.xml & hdfs-site.xml>>>) are in the HttpFS
+ configuration directory.
-+---+
-httpfs.fsAccess.conf:fs.default.name=hdfs://localhost:8021
-+---+
+ If this is not the case, add to the <<<httpfs-site.xml>>> file the
+ <<<httpfs.hadoop.config.dir>>> property set to the location
+ of the Hadoop configuration directory.
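
For illustration (not part of the patch), a minimal httpfs-site.xml entry for the httpfs.hadoop.config.dir property described above could look like this, assuming the Hadoop site files live under /etc/hadoop/conf (an example path):

+---+
  <property>
    <name>httpfs.hadoop.config.dir</name>
    <value>/etc/hadoop/conf</value>
  </property>
+---+
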
* Configure Hadoop
@@ -53,11 +53,11 @@ httpfs.fsAccess.conf:fs.default.name=hdfs://localhost:8021
+---+
...
-    <name>fsAccess.proxyuser.#HTTPFSUSER#.hosts</name>
+    <name>hadoop.proxyuser.#HTTPFSUSER#.hosts</name>
    <value>httpfs-host.foo.com</value>
-    <name>fsAccess.proxyuser.#HTTPFSUSER#.groups</name>
+    <name>hadoop.proxyuser.#HTTPFSUSER#.groups</name>
    <value>*</value>
...
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
index 579498713f5..4837352fc3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
@@ -70,16 +71,24 @@ public class TestHttpFSFileSystem extends HFSTestCase {
w.write("secret");
w.close();
- String fsDefaultName = TestHdfsHelper.getHdfsConf().get("fs.default.name");
+ //HDFS configuration
+ String fsDefaultName = TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
Configuration conf = new Configuration(false);
- conf.set("httpfs.hadoop.conf:fs.default.name", fsDefaultName);
- conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups", HadoopUsersConfTestHelper
- .getHadoopProxyUserGroups());
- conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts", HadoopUsersConfTestHelper
- .getHadoopProxyUserHosts());
+ conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+ File hdfsSite = new File(new File(homeDir, "conf"), "hdfs-site.xml");
+ OutputStream os = new FileOutputStream(hdfsSite);
+ conf.writeXml(os);
+ os.close();
+
+ //HTTPFS configuration
+ conf = new Configuration(false);
+ conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
+ HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
+ conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
+ HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
- File hoopSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
- OutputStream os = new FileOutputStream(hoopSite);
+ File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
+ os = new FileOutputStream(httpfsSite);
conf.writeXml(os);
os.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index d397fa35a51..ff525e643a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -20,10 +20,12 @@ package org.apache.hadoop.fs.http.server;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.lib.service.security.DummyGroupMapping;
+import org.apache.hadoop.lib.server.Service;
+import org.apache.hadoop.lib.server.ServiceException;
+import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
import org.apache.hadoop.test.TestDir;
@@ -40,12 +42,15 @@ import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
+import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.URL;
import java.text.MessageFormat;
+import java.util.Arrays;
+import java.util.List;
public class TestHttpFSServer extends HFSTestCase {
@@ -54,12 +59,48 @@ public class TestHttpFSServer extends HFSTestCase {
@TestJetty
public void server() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Configuration hoopConf = new Configuration(false);
- HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, hoopConf);
+
+ Configuration httpfsConf = new Configuration(false);
+ HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.init();
server.destroy();
}
+ public static class MockGroups implements Service,Groups {
+
+ @Override
+ public void init(org.apache.hadoop.lib.server.Server server) throws ServiceException {
+ }
+
+ @Override
+ public void postInit() throws ServiceException {
+ }
+
+ @Override
+ public void destroy() {
+ }
+
+ @Override
+ public Class[] getServiceDependencies() {
+ return new Class[0];
+ }
+
+ @Override
+ public Class getInterface() {
+ return Groups.class;
+ }
+
+ @Override
+ public void serverStatusChange(org.apache.hadoop.lib.server.Server.Status oldStatus,
+ org.apache.hadoop.lib.server.Server.Status newStatus) throws ServiceException {
+ }
+
+ @Override
+ public List<String> getGroups(String user) throws IOException {
+ return Arrays.asList(HadoopUsersConfTestHelper.getHadoopUserGroups(user));
+ }
+
+ }
private void createHttpFSServer() throws Exception {
File homeDir = TestDirHelper.getTestDir();
Assert.assertTrue(new File(homeDir, "conf").mkdir());
@@ -72,13 +113,29 @@ public class TestHttpFSServer extends HFSTestCase {
w.write("secret");
w.close();
- String fsDefaultName = TestHdfsHelper.getHdfsConf().get("fs.default.name");
+ //HDFS configuration
+ File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
+ hadoopConfDir.mkdirs();
+ String fsDefaultName = TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
Configuration conf = new Configuration(false);
- conf.set("httpfs.hadoop.conf:fs.default.name", fsDefaultName);
- conf.set("httpfs.groups." + CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, DummyGroupMapping.class.getName());
+ conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+ File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
+ OutputStream os = new FileOutputStream(hdfsSite);
+ conf.writeXml(os);
+ os.close();
+
+ //HTTPFS configuration
+ conf = new Configuration(false);
+ conf.set("httpfs.services.ext", MockGroups.class.getName());
+ conf.set("httpfs.admin.group", HadoopUsersConfTestHelper.
+ getHadoopUserGroups(HadoopUsersConfTestHelper.getHadoopUsers()[0])[0]);
+ conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
+ HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
+ conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
+ HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
- File hoopSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
- OutputStream os = new FileOutputStream(hoopSite);
+ File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
+ os = new FileOutputStream(httpfsSite);
conf.writeXml(os);
os.close();
@@ -103,7 +160,8 @@ public class TestHttpFSServer extends HFSTestCase {
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
url = new URL(TestJettyHelper.getJettyURL(),
- MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "root"));
+ MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
+ HadoopUsersConfTestHelper.getHadoopUsers()[0]));
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
@@ -112,7 +170,8 @@ public class TestHttpFSServer extends HFSTestCase {
Assert.assertTrue(line.contains("\"counters\":{"));
url = new URL(TestJettyHelper.getJettyURL(),
- MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation", "root"));
+ MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation",
+ HadoopUsersConfTestHelper.getHadoopUsers()[0]));
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
index 84ff45a1658..b8689c9d6e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.lib.service.hadoop;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.lib.server.Server;
@@ -34,13 +35,32 @@ import org.apache.hadoop.test.TestException;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestHdfsHelper;
import org.apache.hadoop.util.StringUtils;
+import org.junit.Before;
import org.junit.Test;
+import java.io.File;
+import java.io.FileOutputStream;
import java.io.IOException;
+import java.io.OutputStream;
import java.util.Arrays;
public class TestFileSystemAccessService extends HFSTestCase {
+ private void createHadoopConf(Configuration hadoopConf) throws Exception {
+ String dir = TestDirHelper.getTestDir().getAbsolutePath();
+ File hdfsSite = new File(dir, "hdfs-site.xml");
+ OutputStream os = new FileOutputStream(hdfsSite);
+ hadoopConf.writeXml(os);
+ os.close();
+ }
+
+ @Before
+ public void createHadoopConf() throws Exception {
+ Configuration hadoopConf = new Configuration(false);
+ hadoopConf.set("foo", "FOO");
+ createHadoopConf(hadoopConf);
+ }
+
@Test
@TestDir
public void simpleSecurity() throws Exception {
@@ -124,7 +144,7 @@ public class TestFileSystemAccessService extends HFSTestCase {
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
- conf.set("server.hadoop.conf:foo", "FOO");
+
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
@@ -132,6 +152,32 @@ public class TestFileSystemAccessService extends HFSTestCase {
server.destroy();
}
+ @Test
+ @TestDir
+ public void serviceHadoopConfCustomDir() throws Exception {
+ String dir = TestDirHelper.getTestDir().getAbsolutePath();
+ String hadoopConfDir = new File(dir, "confx").getAbsolutePath();
+ new File(hadoopConfDir).mkdirs();
+ String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
+ FileSystemAccessService.class.getName()));
+ Configuration conf = new Configuration(false);
+ conf.set("server.services", services);
+ conf.set("server.hadoop.config.dir", hadoopConfDir);
+
+ File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
+ OutputStream os = new FileOutputStream(hdfsSite);
+ Configuration hadoopConf = new Configuration(false);
+ hadoopConf.set("foo", "BAR");
+ hadoopConf.writeXml(os);
+ os.close();
+
+ Server server = new Server("server", dir, dir, dir, dir, conf);
+ server.init();
+ FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
+ Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
+ server.destroy();
+ }
+
@Test
@TestDir
public void inWhitelists() throws Exception {
@@ -188,12 +234,17 @@ public class TestFileSystemAccessService extends HFSTestCase {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
+
+ Configuration hadoopConf = new Configuration(false);
+ hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
+ createHadoopConf(hadoopConf);
+
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccess hadoop = server.get(FileSystemAccess.class);
- FileSystem fs = hadoop.createFileSystem("u", TestHdfsHelper.getHdfsConf());
+ FileSystem fs = hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
Assert.assertNotNull(fs);
fs.mkdirs(new Path("/tmp/foo"));
hadoop.releaseFileSystem(fs);
@@ -214,6 +265,11 @@ public class TestFileSystemAccessService extends HFSTestCase {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
+
+ Configuration hadoopConf = new Configuration(false);
+ hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
+ createHadoopConf(hadoopConf);
+
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
@@ -222,7 +278,7 @@ public class TestFileSystemAccessService extends HFSTestCase {
final FileSystem fsa[] = new FileSystem[1];
- hadoop.execute("u", TestHdfsHelper.getHdfsConf(), new FileSystemAccess.FileSystemExecutor<Void>() {
+ hadoop.execute("u", hadoop.getFileSystemConfiguration(), new FileSystemAccess.FileSystemExecutor<Void>() {
@Override
public Void execute(FileSystem fs) throws IOException {
fs.mkdirs(new Path("/tmp/foo"));
@@ -248,14 +304,18 @@ public class TestFileSystemAccessService extends HFSTestCase {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
+ Configuration hadoopConf = new Configuration(false);
+ hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
+ createHadoopConf(hadoopConf);
+
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccess fsAccess = server.get(FileSystemAccess.class);
- Configuration hdfsConf = TestHdfsHelper.getHdfsConf();
- hdfsConf.set("fs.default.name", "");
+ Configuration hdfsConf = fsAccess.getFileSystemConfiguration();
+ hdfsConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "");
fsAccess.execute("u", hdfsConf, new FileSystemAccess.FileSystemExecutor<Void>() {
@Override
public Void execute(FileSystem fs) throws IOException {
@@ -271,6 +331,11 @@ public class TestFileSystemAccessService extends HFSTestCase {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
+
+ Configuration hadoopConf = new Configuration(false);
+ hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
+ createHadoopConf(hadoopConf);
+
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
@@ -279,7 +344,7 @@ public class TestFileSystemAccessService extends HFSTestCase {
final FileSystem fsa[] = new FileSystem[1];
try {
- hadoop.execute("u", TestHdfsHelper.getHdfsConf(), new FileSystemAccess.FileSystemExecutor<Void>() {
+ hadoop.execute("u", hadoop.getFileSystemConfiguration(), new FileSystemAccess.FileSystemExecutor<Void>() {
@Override
public Void execute(FileSystem fs) throws IOException {
fsa[0] = fs;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java
index 398a8853dd9..f27d0efaae9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java
@@ -145,7 +145,12 @@ public class HadoopUsersConfTestHelper {
*/
public static String[] getHadoopUserGroups(String user) {
if (getHadoopUsers() == DEFAULT_USERS) {
- return DEFAULT_USERS_GROUP;
+ for (String defaultUser : DEFAULT_USERS) {
+ if (defaultUser.equals(user)) {
+ return DEFAULT_USERS_GROUP;
+ }
+ }
+ return new String[0];
} else {
String groups = System.getProperty(HADOOP_USER_PREFIX + user);
return (groups != null) ? groups.split(",") : new String[0];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 37b10538ac6..0eeac32201e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -257,6 +257,8 @@ Release 2.0.0 - UNRELEASED
HDFS-3294. Fix code indentation in NamenodeWebHdfsMethods and
DatanodeWebHdfsMethods. (szetszwo)
+ HDFS-3263. HttpFS should read HDFS config from Hadoop site.xml files (tucu)
+
OPTIMIZATIONS
HDFS-2477. Optimize computing the diff between a block report and the