diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
index 6dd1f658947..7d29b8f44ca 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
@@ -66,8 +66,7 @@ public class ConfigUtil {
    */
   public static void addLink(final Configuration conf, final String src,
       final URI target) {
-    addLink( conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
-        src, target);
+    addLink(conf, getDefaultMountTableName(conf), src, target);
   }
 
   /**
@@ -88,8 +87,7 @@ public class ConfigUtil {
    * @param target
    */
   public static void addLinkMergeSlash(Configuration conf, final URI target) {
-    addLinkMergeSlash(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
-        target);
+    addLinkMergeSlash(conf, getDefaultMountTableName(conf), target);
   }
 
   /**
@@ -110,8 +108,7 @@ public class ConfigUtil {
    * @param target
    */
   public static void addLinkFallback(Configuration conf, final URI target) {
-    addLinkFallback(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
-        target);
+    addLinkFallback(conf, getDefaultMountTableName(conf), target);
   }
 
   /**
@@ -132,7 +129,7 @@ public class ConfigUtil {
    * @param targets
    */
   public static void addLinkMerge(Configuration conf, final URI[] targets) {
-    addLinkMerge(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, targets);
+    addLinkMerge(conf, getDefaultMountTableName(conf), targets);
   }
 
   /**
@@ -166,8 +163,7 @@ public class ConfigUtil {
 
   public static void addLinkNfly(final Configuration conf, final String src,
       final URI ... targets) {
-    addLinkNfly(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, src, null,
-        targets);
+    addLinkNfly(conf, getDefaultMountTableName(conf), src, null, targets);
   }
 
   /**
@@ -177,8 +173,7 @@ public class ConfigUtil {
    */
   public static void setHomeDirConf(final Configuration conf,
       final String homedir) {
-    setHomeDirConf( conf,
-        Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, homedir);
+    setHomeDirConf(conf, getDefaultMountTableName(conf), homedir);
   }
 
   /**
@@ -202,7 +197,7 @@ public class ConfigUtil {
    * @return home dir value, null if variable is not in conf
    */
   public static String getHomeDirValue(final Configuration conf) {
-    return getHomeDirValue(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE);
+    return getHomeDirValue(conf, getDefaultMountTableName(conf));
   }
 
   /**
@@ -216,4 +211,18 @@ public class ConfigUtil {
     return conf.get(getConfigViewFsPrefix(mountTableName) + "." +
         Constants.CONFIG_VIEWFS_HOMEDIR);
   }
+
+  /**
+   * Get the name of the default mount table to use. If
+   * {@link Constants#CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY} is specified,
+   * its value is returned. Otherwise,
+   * {@link Constants#CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE} is returned.
+   *
+   * @param conf Configuration to use.
+   * @return the name of the default mount table to use.
+   */
+  public static String getDefaultMountTableName(final Configuration conf) {
+    return conf.get(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY,
+        Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE);
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index f454f63084c..28ebf73cf55 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -41,12 +41,18 @@
    * then the hadoop default value (/user) is used.
    */
   public static final String CONFIG_VIEWFS_HOMEDIR = "homedir";
-  
+
+  /**
+   * Config key to specify the name of the default mount table.
+   */
+  String CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY =
+      "fs.viewfs.mounttable.default.name.key";
+
   /**
    * Config variable name for the default mount table.
    */
   public static final String CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE = "default";
-  
+
   /**
    * Config variable full prefix for the default mount table.
    */
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index d1e5d3a4e5f..3d709b13bfc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -465,7 +465,7 @@ abstract class InodeTree<T> {
       FileAlreadyExistsException, IOException {
     String mountTableName = viewName;
     if (mountTableName == null) {
-      mountTableName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE;
+      mountTableName = ConfigUtil.getDefaultMountTableName(config);
     }
     homedirPrefix = ConfigUtil.getHomeDirValue(config, mountTableName);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java
index 2e498f2c0a0..fd5de72ed71 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java
@@ -48,10 +48,9 @@
     fcTarget = FileContext.getLocalFSFileContext();
     super.setUp();   // this sets up conf (and fcView which we replace)
 
-    // Now create a viewfs using a mount table called "default"
-    // hence viewfs://default/
+    // Now create a viewfs using a mount table named {MOUNT_TABLE_NAME}
     schemeWithAuthority =
-        new URI(FsConstants.VIEWFS_SCHEME, "default", "/", null, null);
+        new URI(FsConstants.VIEWFS_SCHEME, MOUNT_TABLE_NAME, "/", null, null);
     fcView = FileContext.getFileContext(schemeWithAuthority, conf);
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
index 90722aab2f8..21b0c159e2a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
@@ -97,6 +97,8 @@ import org.junit.Test;
  *
  */
 abstract public class ViewFsBaseTest {
+  protected static final String MOUNT_TABLE_NAME = "mycluster";
+
   FileContext fcView; // the view file system - the mounts are here
   FileContext fcTarget; // the target file system - the mount will point here
   Path targetTestRoot;
@@ -130,6 +132,9 @@ abstract public class ViewFsBaseTest {
 
     // Set up the defaultMT in the config with our mount point links
     conf = new Configuration();
+    conf.set(
+        Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY,
+        MOUNT_TABLE_NAME);
     ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
     ConfigUtil.addLink(conf, "/user",
         new Path(targetTestRoot,"user").toUri());
@@ -1011,9 +1016,8 @@
     userUgi.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws Exception {
-        String clusterName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE;
-        URI viewFsUri =
-            new URI(FsConstants.VIEWFS_SCHEME, clusterName, "/", null, null);
+        URI viewFsUri = new URI(
+            FsConstants.VIEWFS_SCHEME, MOUNT_TABLE_NAME, "/", null, null);
         FileSystem vfs = FileSystem.get(viewFsUri, conf);
         LambdaTestUtils.intercept(IOException.class,
             "There is no primary group for UGI", () -> vfs
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java
index efced73943e..b2d7416aa76 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java
@@ -153,7 +153,7 @@
     String prefix = new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(".")
         .append((mountTable == null
-            ? Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE
+            ? ConfigUtil.getDefaultMountTableName(conf)
             : mountTable))
         .append(".").toString();
     out.writeBytes("");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md
index e65c5458676..feb0ba27183 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md
@@ -150,6 +150,39 @@ DFSAdmin commands with View File System Overload Scheme
 
 Please refer to the [HDFSCommands Guide](./HDFSCommands.html#dfsadmin_with_ViewFsOverloadScheme)
 
+Accessing paths without authority
+---------------------------------
+
+Accessing paths like `hdfs:///foo/bar`, `hdfs:/foo/bar` or `viewfs:/foo/bar`, where the authority (cluster name or hostname) of the path is not specified, is very common.
+This is especially true when the same code is expected to run on multiple clusters with different names or HDFS Namenodes.
+
+When `ViewFileSystemOverloadScheme` is used (as described above), and if (a) the scheme of the path being accessed is different from the scheme specified in `fs.defaultFS`
+and (b) the path doesn't have an authority specified, accessing the path can result in an error like `Empty Mount table in config for viewfs://default/`.
+For example, when the following configuration is used but a path like `viewfs:/foo/bar` or `viewfs:///foo/bar` is accessed, such an error arises.
+```xml
+<property>
+  <name>fs.hdfs.impl</name>
+  <value>org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme</value>
+</property>
+
+<property>
+  <name>fs.defaultFS</name>
+  <value>hdfs://cluster/</value>
+</property>
+```
+
+#### Solution
+To avoid the above problem, the configuration `fs.viewfs.mounttable.default.name.key` has to be set to the name of the cluster, i.e., the following should be added to `core-site.xml`:
+```xml
+<property>
+  <name>fs.viewfs.mounttable.default.name.key</name>
+  <value>cluster</value>
+</property>
+```
+The string `cluster` in this configuration should match the name of the authority in the value of `fs.defaultFS`. Further, the configuration should have a mount table
+configured correctly as in the above examples, i.e., the configurations `fs.viewfs.mounttable.*cluster*.link.<mountLinkPath>` should be set (note that the same string
+`cluster` is used in these configurations).
+
 Appendix: A Mount Table Configuration with XInclude
 ---------------------------------------------------
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index b8bed1df84a..b3836956c79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -299,14 +299,16 @@
         new URI(uri2.getScheme(), uri2.getAuthority(), "/", null, null)
     };
 
+    String clusterName = "mycluster";
     final Configuration testConf = new Configuration(conf);
+    testConf.set(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY,
+        clusterName);
     testConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
 
     final String testString = "Hello Nfly!";
     final Path nflyRoot = new Path("/nflyroot");
 
-    ConfigUtil.addLinkNfly(testConf,
-        Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
+    ConfigUtil.addLinkNfly(testConf, clusterName,
         nflyRoot.toString(),
         "minReplication=2," + repairKey + "=true",
         testUris);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
index 417a80c60a7..6e101a67960 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
@@ -17,15 +17,14 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -47,6 +46,9 @@
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import static org.junit.Assert.*;
+
+
 /**
  * Tests ViewFileSystemOverloadScheme with configured mount links.
  */
@@ -236,6 +238,64 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
     }
   }
 
+  /**
+   * Create mount links as follows
+   * hdfs://localhost:xxx/HDFSUser --> hdfs://localhost:xxx/HDFSUser/
+   * hdfs://localhost:xxx/local --> file://TEST_ROOT_DIR/root/
+   * Check that "viewfs:/" paths without authority can work when the
+   * default mount table name is set correctly.
+   */
+  @Test
+  public void testAccessViewFsPathWithoutAuthority() throws Exception {
+    final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER);
+    addMountLinks(defaultFSURI.getAuthority(),
+        new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER },
+        new String[] {hdfsTargetPath.toUri().toString(),
+            localTargetDir.toURI().toString() },
+        conf);
+
+    // /HDFSUser/test
+    Path hdfsDir = new Path(HDFS_USER_FOLDER, "test");
+    // /local/test
+    Path localDir = new Path(LOCAL_FOLDER, "test");
+    FileStatus[] expectedStatus;
+
+    try (FileSystem fs = FileSystem.get(conf)) {
+      fs.mkdirs(hdfsDir); // /HDFSUser/test
+      fs.mkdirs(localDir); // /local/test
+      expectedStatus = fs.listStatus(new Path("/"));
+    }
+
+    // check for viewfs path without authority
+    Path viewFsRootPath = new Path("viewfs:/");
+    try {
+      viewFsRootPath.getFileSystem(conf);
+      Assert.fail(
+          "Mount table with authority default should not be initialized");
+    } catch (IOException e) {
+      assertTrue(e.getMessage().contains(
+          "Empty Mount table in config for viewfs://default/"));
+    }
+
+    // set the name of the default mount table here and
+    // subsequent calls should succeed.
+    conf.set(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY,
+        defaultFSURI.getAuthority());
+
+    try (FileSystem fs = viewFsRootPath.getFileSystem(conf)) {
+      FileStatus[] status = fs.listStatus(viewFsRootPath);
+      // compare only the final components of the paths as
+      // full paths have different schemes (hdfs:/ vs. viewfs:/).
+      List<String> expectedPaths = Arrays.stream(expectedStatus)
+          .map(s -> s.getPath().getName()).sorted()
+          .collect(Collectors.toList());
+      List<String> paths = Arrays.stream(status)
+          .map(s -> s.getPath().getName()).sorted()
+          .collect(Collectors.toList());
+      assertEquals(expectedPaths, paths);
+    }
+  }
+
   /**
    * Create mount links as follows hdfs://localhost:xxx/HDFSUser -->
    * hdfs://localhost:xxx/HDFSUser/ hdfs://localhost:xxx/local -->