From f9832031c2c1b1cb521a187df55a3e11baf66b66 Mon Sep 17 00:00:00 2001
From: Abhishek Das
Date: Tue, 13 Jul 2021 12:47:43 -0700
Subject: [PATCH] HADOOP-17028. ViewFS should initialize mounted target
 filesystems lazily. Contributed by Abhishek Das (#2260)

(cherry picked from commit 1dd03cc4b573270dc960117c3b6c74bb78215caa)
---
 .../apache/hadoop/fs/viewfs/InodeTree.java    | 56 ++++++++----
 .../hadoop/fs/viewfs/ViewFileSystem.java      | 85 ++++++++++++++-----
 .../org/apache/hadoop/fs/viewfs/ViewFs.java   | 38 +++++++--
 .../hadoop/fs/viewfs/TestViewFsConfig.java    |  3 +-
 .../fs/viewfs/ViewFileSystemBaseTest.java     | 44 ++++++++++
 .../fs/viewfs/TestViewFileSystemHdfs.java     | 73 ++++++++++++++++
 ...ileSystemOverloadSchemeWithHdfsScheme.java |  6 +-
 .../hadoop/fs/viewfs/TestViewFsHdfs.java      | 78 +++++++++++++++
 ...wFileSystemOverloadSchemeWithDFSAdmin.java |  6 +-
 9 files changed, 339 insertions(+), 50 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 422e7337b57..65ca2f80959 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import java.util.function.Function;
 import com.google.common.base.Preconditions;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -243,7 +244,10 @@ abstract class InodeTree<T> {
    */
   static class INodeLink<T> extends INode<T> {
     final URI[] targetDirLinkList;
-    final T targetFileSystem; // file system object created from the link.
+    private T targetFileSystem; // file system object created from the link.
+    // Function to initialize the file system; only applicable to simple links.
+    private Function<URI, T> fileSystemInitMethod;
+    private final Object lock = new Object();
 
     /**
      * Construct a mergeLink or nfly.
@@ -259,11 +263,13 @@ abstract class InodeTree<T> {
      * Construct a simple link (i.e. not a mergeLink).
      */
     INodeLink(final String pathToNode, final UserGroupInformation aUgi,
-        final T targetFs, final URI aTargetDirLink) {
+        Function<URI, T> createFileSystemMethod,
+        final URI aTargetDirLink) {
       super(pathToNode, aUgi);
-      targetFileSystem = targetFs;
+      targetFileSystem = null;
       targetDirLinkList = new URI[1];
       targetDirLinkList[0] = aTargetDirLink;
+      this.fileSystemInitMethod = createFileSystemMethod;
     }
 
     /**
@@ -284,7 +290,30 @@ abstract class InodeTree<T> {
       return false;
     }
 
-    public T getTargetFileSystem() {
+    /**
+     * Get the instance of FileSystem to use, creating one if needed.
+     * @return an initialized instance of T
+     * @throws IOException if the target file system cannot be created
+     */
+    public T getTargetFileSystem() throws IOException {
+      if (targetFileSystem != null) {
+        return targetFileSystem;
+      }
+      // For links that are neither NFLY nor MERGE, the FileSystem is
+      // initialized when the corresponding mount path is first accessed.
+      if (targetDirLinkList.length == 1) {
+        synchronized (lock) {
+          if (targetFileSystem != null) {
+            return targetFileSystem;
+          }
+          targetFileSystem = fileSystemInitMethod.apply(targetDirLinkList[0]);
+          if (targetFileSystem == null) {
+            throw new IOException(
+                "Could not initialize target File System for URI: " +
+                targetDirLinkList[0]);
+          }
+        }
+      }
       return targetFileSystem;
     }
   }
@@ -345,7 +374,7 @@ abstract class InodeTree<T> {
       switch (linkType) {
       case SINGLE:
         newLink = new INodeLink<T>(fullPath, aUgi,
-            getTargetFileSystem(new URI(target)), new URI(target));
+            initAndGetTargetFs(), new URI(target));
         break;
       case SINGLE_FALLBACK:
       case MERGE_SLASH:
@@ -371,8 +400,7 @@ abstract class InodeTree<T> {
    * 3 abstract methods.
    * @throws IOException
    */
-  protected abstract T getTargetFileSystem(URI uri)
-      throws UnsupportedFileSystemException, URISyntaxException, IOException;
+  protected abstract Function<URI, T> initAndGetTargetFs();
 
   protected abstract T getTargetFileSystem(INodeDir<T> dir)
       throws URISyntaxException, IOException;
@@ -568,7 +596,7 @@ abstract class InodeTree<T> {
     if (isMergeSlashConfigured) {
       Preconditions.checkNotNull(mergeSlashTarget);
       root = new INodeLink<T>(mountTableName, ugi,
-          getTargetFileSystem(new URI(mergeSlashTarget)),
+          initAndGetTargetFs(),
           new URI(mergeSlashTarget));
       mountPoints.add(new MountPoint<T>("/", (INodeLink<T>) root));
       rootFallbackLink = null;
@@ -586,8 +614,7 @@ abstract class InodeTree<T> {
               + "not allowed.");
         }
         fallbackLink = new INodeLink<T>(mountTableName, ugi,
-            getTargetFileSystem(new URI(le.getTarget())),
-            new URI(le.getTarget()));
+            initAndGetTargetFs(), new URI(le.getTarget()));
       } else {
         createLink(le.getSrc(), le.getTarget(), le.getLinkType(),
             le.getSettings(), le.getUgi(), le.getConfig());
@@ -607,9 +634,8 @@ abstract class InodeTree<T> {
           new StringBuilder("Empty mount table detected for ").append(theUri)
               .append(" and considering itself as a linkFallback.");
       FileSystem.LOG.info(msg.toString());
-      rootFallbackLink =
-          new INodeLink<T>(mountTableName, ugi, getTargetFileSystem(theUri),
-              theUri);
+      rootFallbackLink = new INodeLink<T>(mountTableName, ugi,
+          initAndGetTargetFs(), theUri);
       getRootDir().addFallbackLink(rootFallbackLink);
     }
   }
@@ -650,10 +676,10 @@ abstract class InodeTree<T> {
    * @param p - input path
    * @param resolveLastComponent
    * @return ResolveResult which allows further resolution of the remaining path
-   * @throws FileNotFoundException
+   * @throws IOException
    */
  ResolveResult<T> resolve(final String p, final boolean resolveLastComponent)
-      throws FileNotFoundException {
+      throws IOException {
    String[] path = breakIntoPathComponents(p);
    if (path.length <= 1) { // special case for when path is "/"
      T targetFs = root.isInternalDir() ?
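The heart of the InodeTree change is the guarded, on-demand creation inside getTargetFileSystem(). For reference, here is a minimal, self-contained sketch of that double-checked locking idiom; the class and method names are illustrative, not part of the patch, and the field is marked volatile here for safe publication (the patch relies on its surrounding synchronization instead).

    import java.io.IOException;
    import java.net.URI;
    import java.util.function.Function;

    /** Illustrative sketch of the lazy-initialization idiom in INodeLink. */
    class LazyTarget<T> {
      // volatile: a fully constructed value becomes visible to all threads.
      private volatile T target;
      private final Object lock = new Object();
      private final URI targetUri;
      private final Function<URI, T> initMethod;

      LazyTarget(URI targetUri, Function<URI, T> initMethod) {
        this.targetUri = targetUri;
        this.initMethod = initMethod;
      }

      T get() throws IOException {
        T t = target;
        if (t != null) {
          return t;             // fast path, no lock taken
        }
        synchronized (lock) {
          if (target == null) { // re-check under the lock
            target = initMethod.apply(targetUri);
            if (target == null) {
              throw new IOException(
                  "Could not initialize target for " + targetUri);
            }
          }
          return target;
        }
      }
    }

In the patch the slow path is additionally guarded by targetDirLinkList.length == 1, so NFLY and merge links, whose targets the other constructor still builds eagerly, never take it.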
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 4f172c2e2fa..a8cadc89f96 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -26,10 +26,12 @@ import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT;
 import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
 
+import java.util.function.Function;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -281,7 +283,7 @@ public class ViewFileSystem extends FileSystem {
     enableInnerCache = config.getBoolean(CONFIG_VIEWFS_ENABLE_INNER_CACHE,
         CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT);
     FsGetter fsGetter = fsGetter();
-    final InnerCache innerCache = new InnerCache(fsGetter);
+    cache = new InnerCache(fsGetter);
     // Now build client side view (i.e. client side mount table) from config.
     final String authority = theUri.getAuthority();
     String tableName = authority;
@@ -297,15 +299,32 @@ public class ViewFileSystem extends FileSystem {
     fsState = new InodeTree<FileSystem>(conf, tableName, myUri,
         initingUriAsFallbackOnNoMounts) {
       @Override
-      protected FileSystem getTargetFileSystem(final URI uri)
-          throws URISyntaxException, IOException {
-        FileSystem fs;
-        if (enableInnerCache) {
-          fs = innerCache.get(uri, config);
-        } else {
-          fs = fsGetter.get(uri, config);
+      protected Function<URI, FileSystem> initAndGetTargetFs() {
+        return new Function<URI, FileSystem>() {
+          @Override
+          public FileSystem apply(final URI uri) {
+            FileSystem fs;
+            try {
+              fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+                @Override
+                public FileSystem run() throws IOException {
+                  if (enableInnerCache) {
+                    synchronized (cache) {
+                      return cache.get(uri, config);
+                    }
+                  } else {
+                    return fsGetter().get(uri, config);
+                  }
+                }
+              });
+              return new ChRootedFileSystem(fs, uri);
+            } catch (IOException | InterruptedException ex) {
+              LOG.error("Could not initialize the underlying FileSystem "
+                  + "object. Exception: " + ex.toString());
+            }
+            return null;
           }
-        return new ChRootedFileSystem(fs, uri);
+        };
       }
 
       @Override
@@ -329,13 +348,6 @@ public class ViewFileSystem extends FileSystem {
     } catch (URISyntaxException e) {
       throw new IOException("URISyntax exception: " + theUri);
     }
-
-    if (enableInnerCache) {
-      // All fs instances are created and cached on startup. The cache is
-      // readonly after the initialize() so the concurrent access of the cache
-      // is safe.
-      cache = innerCache.unmodifiableCache();
-    }
   }
 
   /**
@@ -367,7 +379,7 @@ public class ViewFileSystem extends FileSystem {
   @Override
   public Path resolvePath(final Path f) throws IOException {
     final InodeTree.ResolveResult<FileSystem> res;
-      res = fsState.resolve(getUriPath(f), true);
+    res = fsState.resolve(getUriPath(f), true);
     if (res.isInternalDir()) {
       return f;
     }
@@ -853,10 +865,35 @@ public class ViewFileSystem extends FileSystem {
   public void setVerifyChecksum(final boolean verifyChecksum) {
     List<InodeTree.MountPoint<FileSystem>> mountPoints =
         fsState.getMountPoints();
+    Map<String, FileSystem> fsMap = initializeMountedFileSystems(mountPoints);
     for (InodeTree.MountPoint<FileSystem> mount : mountPoints) {
-      mount.target.targetFileSystem.setVerifyChecksum(verifyChecksum);
+      fsMap.get(mount.src).setVerifyChecksum(verifyChecksum);
     }
   }
+
+  /**
+   * Initialize the target filesystem for all mount points.
+   * @param mountPoints The mount points
+   * @return Mapping of mount point and the initialized target filesystems
+   * @throws RuntimeException when a target file system cannot be initialized
+   */
+  private Map<String, FileSystem> initializeMountedFileSystems(
+      List<InodeTree.MountPoint<FileSystem>> mountPoints) {
+    FileSystem fs = null;
+    Map<String, FileSystem> fsMap = new HashMap<>(mountPoints.size());
+    for (InodeTree.MountPoint<FileSystem> mount : mountPoints) {
+      try {
+        fs = mount.target.getTargetFileSystem();
+        fsMap.put(mount.src, fs);
+      } catch (IOException ex) {
+        String errMsg = "Not able to initialize FileSystem for mount path "
+            + mount.src + " with exception " + ex;
+        LOG.error(errMsg);
+        throw new RuntimeException(errMsg, ex);
+      }
+    }
+    return fsMap;
+  }
 
   @Override
   public long getDefaultBlockSize() {
@@ -881,6 +918,9 @@ public class ViewFileSystem extends FileSystem {
       return res.targetFileSystem.getDefaultBlockSize(res.remainingPath);
     } catch (FileNotFoundException e) {
       throw new NotInMountpointException(f, "getDefaultBlockSize");
+    } catch (IOException e) {
+      throw new RuntimeException("Not able to initialize fs in"
+          + " getDefaultBlockSize for path " + f + " with exception", e);
     }
   }
 
@@ -892,6 +932,9 @@ public class ViewFileSystem extends FileSystem {
       return res.targetFileSystem.getDefaultReplication(res.remainingPath);
     } catch (FileNotFoundException e) {
       throw new NotInMountpointException(f, "getDefaultReplication");
+    } catch (IOException e) {
+      throw new RuntimeException("Not able to initialize fs in"
+          + " getDefaultReplication for path " + f + " with exception", e);
     }
   }
 
@@ -924,8 +967,9 @@ public class ViewFileSystem extends FileSystem {
   public void setWriteChecksum(final boolean writeChecksum) {
     List<InodeTree.MountPoint<FileSystem>> mountPoints =
         fsState.getMountPoints();
+    Map<String, FileSystem> fsMap = initializeMountedFileSystems(mountPoints);
     for (InodeTree.MountPoint<FileSystem> mount : mountPoints) {
-      mount.target.targetFileSystem.setWriteChecksum(writeChecksum);
+      fsMap.get(mount.src).setWriteChecksum(writeChecksum);
     }
   }
 
@@ -933,9 +977,10 @@ public class ViewFileSystem extends FileSystem {
   public FileSystem[] getChildFileSystems() {
     List<InodeTree.MountPoint<FileSystem>> mountPoints =
         fsState.getMountPoints();
+    Map<String, FileSystem> fsMap = initializeMountedFileSystems(mountPoints);
     Set<FileSystem> children = new HashSet<FileSystem>();
     for (InodeTree.MountPoint<FileSystem> mountPoint : mountPoints) {
-      FileSystem targetFs = mountPoint.target.targetFileSystem;
+      FileSystem targetFs = fsMap.get(mountPoint.src);
       children.addAll(Arrays.asList(targetFs.getChildFileSystems()));
     }
     return children.toArray(new FileSystem[]{});
   }
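One detail of the ViewFileSystem change deserves emphasis: the Function returned by initAndGetTargetFs() closes over the UGI captured when the ViewFileSystem was initialized, so a target file system created lazily on some later call still comes up under that original user. A condensed, self-contained sketch of the pattern follows; the class and method names are invented for illustration and are not part of the patch.

    import java.io.IOException;
    import java.net.URI;
    import java.security.PrivilegedExceptionAction;
    import java.util.function.Function;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.UserGroupInformation;

    /** Illustrative sketch: a factory that replays the creator's UGI. */
    final class UgiBoundFsFactory {
      static Function<URI, FileSystem> create(final UserGroupInformation ugi,
          final Configuration conf) {
        return new Function<URI, FileSystem>() {
          @Override
          public FileSystem apply(final URI uri) {
            try {
              // doAs re-establishes the creating user's security context,
              // so deferred initialization behaves like the old eager path.
              return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
                @Override
                public FileSystem run() throws IOException {
                  return FileSystem.get(uri, conf);
                }
              });
            } catch (IOException | InterruptedException e) {
              return null; // the caller converts null into an IOException
            }
          }
        };
      }
    }

Returning null on failure and letting the caller raise the IOException mirrors the contract INodeLink.getTargetFileSystem() enforces above.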
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
index 8168a6a06a8..d4fd397aaba 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
@@ -21,10 +21,12 @@ import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT;
 import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
 
+import java.util.function.Function;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.HashSet;
@@ -237,15 +239,32 @@ public class ViewFs extends AbstractFileSystem {
         initingUriAsFallbackOnNoMounts) {
       @Override
-      protected AbstractFileSystem getTargetFileSystem(final URI uri)
-          throws URISyntaxException, UnsupportedFileSystemException {
-        String pathString = uri.getPath();
-        if (pathString.isEmpty()) {
-          pathString = "/";
+      protected Function<URI, AbstractFileSystem> initAndGetTargetFs() {
+        return new Function<URI, AbstractFileSystem>() {
+          @Override
+          public AbstractFileSystem apply(final URI uri) {
+            AbstractFileSystem fs;
+            try {
+              fs = ugi.doAs(
+                  new PrivilegedExceptionAction<AbstractFileSystem>() {
+                    @Override
+                    public AbstractFileSystem run() throws IOException {
+                      return AbstractFileSystem.createFileSystem(uri, config);
+                    }
+                  });
+              String pathString = uri.getPath();
+              if (pathString.isEmpty()) {
+                pathString = "/";
+              }
+              return new ChRootedFs(fs, new Path(pathString));
+            } catch (IOException | URISyntaxException |
+                InterruptedException ex) {
+              LOG.error("Could not initialize underlying FileSystem object"
+                  + " for uri " + uri + " with exception: " + ex.toString());
+            }
+            return null;
           }
-        return new ChRootedFs(
-            AbstractFileSystem.createFileSystem(uri, config),
-            new Path(pathString));
+        };
       }
 
       @Override
@@ -682,7 +701,8 @@ public class ViewFs extends AbstractFileSystem {
     List<Token<?>> result = new ArrayList<Token<?>>(initialListSize);
     for ( int i = 0; i < mountPoints.size(); ++i ) {
       List<Token<?>> tokens =
-          mountPoints.get(i).target.targetFileSystem.getDelegationTokens(renewer);
+          mountPoints.get(i).target.getTargetFileSystem()
+              .getDelegationTokens(renewer);
       if (tokens != null) {
         result.addAll(tokens);
       }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java
index 56f5b2d997d..7c318654ecf 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import java.util.function.Function;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -42,7 +43,7 @@ public class TestViewFsConfig {
     new InodeTree<Foo>(conf, null, null, false) {
 
       @Override
-      protected Foo getTargetFileSystem(final URI uri) {
+      protected Function<URI, Foo> initAndGetTargetFs() {
         return null;
       }
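The ViewFs factory differs from its ViewFileSystem counterpart in two small ways: it builds targets through AbstractFileSystem.createFileSystem(), and it normalizes an empty target path to "/" before chrooting. A stripped-down sketch of just that logic, assuming it lives in the org.apache.hadoop.fs.viewfs package (ChRootedFs is package-private there) and omitting the doAs wrapper shown earlier:

    package org.apache.hadoop.fs.viewfs;

    import java.io.IOException;
    import java.net.URI;
    import java.net.URISyntaxException;
    import java.util.function.Function;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.AbstractFileSystem;
    import org.apache.hadoop.fs.Path;

    /** Illustrative sketch of the ViewFs target factory, not from the patch. */
    final class AbstractFsFactorySketch {
      static Function<URI, AbstractFileSystem> create(final Configuration conf) {
        return new Function<URI, AbstractFileSystem>() {
          @Override
          public AbstractFileSystem apply(final URI uri) {
            try {
              AbstractFileSystem fs =
                  AbstractFileSystem.createFileSystem(uri, conf);
              // A target like hdfs://nn1 has an empty path; chroot it at "/".
              String p = uri.getPath().isEmpty() ? "/" : uri.getPath();
              return new ChRootedFs(fs, new Path(p));
            } catch (IOException | URISyntaxException e) {
              return null; // InodeTree turns null into an IOException
            }
          }
        };
      }
    }

TestViewFsConfig, just above, shows the other half of the new contract: InodeTree subclasses now override initAndGetTargetFs() instead of the removed getTargetFileSystem(URI).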
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index 898530465dc..af7f4155f82 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -66,6 +66,7 @@ import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import static org.apache.hadoop.fs.FileSystemTestHelper.*;
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE;
 import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
 
 import org.junit.After;
@@ -1354,6 +1355,8 @@ abstract public class ViewFileSystemBaseTest {
     final int cacheSize = TestFileUtil.getCacheSize();
     ViewFileSystem viewFs = (ViewFileSystem) FileSystem
         .get(new URI("viewfs://" + clusterName + "/"), config);
+    viewFs.resolvePath(
+        new Path(String.format("viewfs://%s/%s", clusterName, "/user")));
     assertEquals(cacheSize + 1, TestFileUtil.getCacheSize());
     viewFs.close();
     assertEquals(cacheSize, TestFileUtil.getCacheSize());
@@ -1430,4 +1433,45 @@ abstract public class ViewFileSystemBaseTest {
           summaryAfter.getLength());
     }
   }
+
+  @Test
+  public void testTargetFileSystemLazyInitialization() throws Exception {
+    final String clusterName = "cluster" + new Random().nextInt();
+    Configuration config = new Configuration(conf);
+    config.setBoolean(CONFIG_VIEWFS_ENABLE_INNER_CACHE, false);
+    config.setClass("fs.mockfs.impl",
+        TestChRootedFileSystem.MockFileSystem.class, FileSystem.class);
+    ConfigUtil.addLink(config, clusterName, "/user",
+        URI.create("mockfs://mockauth1/mockpath"));
+    ConfigUtil.addLink(config, clusterName,
+        "/mock", URI.create("mockfs://mockauth/mockpath"));
+
+    final int cacheSize = TestFileUtil.getCacheSize();
+    ViewFileSystem viewFs = (ViewFileSystem) FileSystem
+        .get(new URI("viewfs://" + clusterName + "/"), config);
+
+    // No inner file system instance has been initialized yet, so the cache
+    // size stays the same. The cache is disabled for the viewfs scheme, so
+    // the viewfs:// instance itself does not enter the cache either.
+    assertEquals(cacheSize, TestFileUtil.getCacheSize());
+
+    // This resolvePath initializes the file system backing the
+    // mount table entry for "/user".
+    viewFs.resolvePath(
+        new Path(String.format("viewfs://%s/%s", clusterName, "/user")));
+
+    // Cache size will increase by 1.
+    assertEquals(cacheSize + 1, TestFileUtil.getCacheSize());
+    // This resolvePath initializes the file system backing the
+    // mount table entry for "/mock".
+    viewFs.resolvePath(new Path(String.format("viewfs://%s/%s", clusterName,
+        "/mock")));
+    // One more file system instance is initialized.
+    assertEquals(cacheSize + 2, TestFileUtil.getCacheSize());
+    viewFs.close();
+    // The initialized FileSystem instances are not removed from the global
+    // cache on close, as the viewfs inner cache is disabled.
+    assertEquals(cacheSize + 2, TestFileUtil.getCacheSize());
+  }
 }
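The base test pins down the observable contract: creating the viewfs client contacts no targets, and each first access to a mount point materializes exactly one child file system. A hypothetical application-level snippet along the same lines (the mount table name and target URI are invented, and the inner-cache key is assumed to match the constant the test imports):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.viewfs.ConfigUtil;

    public class LazyViewFsDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same switch the test flips; key assumed from Constants.
        conf.setBoolean("fs.viewfs.enable.inner.cache", false);
        ConfigUtil.addLink(conf, "demo", "/user",
            URI.create("hdfs://nn1/user"));

        // Cheap now: no target namenode is contacted at this point.
        FileSystem viewFs =
            FileSystem.get(URI.create("viewfs://demo/"), conf);

        // First touch of /user initializes exactly one target FileSystem.
        viewFs.resolvePath(new Path("viewfs://demo/user"));
      }
    }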
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index b3836956c79..fcb52577d99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -21,8 +21,11 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
 
 import javax.security.auth.login.LoginException;
 import org.apache.hadoop.conf.Configuration;
@@ -39,6 +42,7 @@ import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -46,6 +50,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
@@ -406,4 +411,72 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
       }
     }
   }
+
+  @Test
+  public void testTargetFileSystemLazyInitializationWithUgi() throws Exception {
+    final Map<String, FileSystem> map = new HashMap<>();
+    final Path user1Path = new Path("/data/user1");
+
+    // Scenario - 1: Create FileSystem with the current user context.
+    // Both mkdir and delete should be successful.
+    FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+    fs.mkdirs(user1Path);
+    fs.delete(user1Path, false);
+
+    // Scenario - 2: Create FileSystem with a different user context.
+    final UserGroupInformation userUgi = UserGroupInformation
+        .createUserForTesting("user1@HADOOP.COM", new String[]{"hadoop"});
+    userUgi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws IOException {
+        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+        String doAsUserName = ugi.getUserName();
+        assertEquals(doAsUserName, "user1@HADOOP.COM");
+
+        FileSystem viewFS = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+        map.put("user1", viewFS);
+        return null;
+      }
+    });
+
+    // Try creating a directory with the FileSystem created by a different
+    // ugi. Though we run the mkdir with the current user context, the
+    // target filesystem is instantiated with the ugi under which the
+    // FileSystem was created.
+    try {
+      FileSystem otherfs = map.get("user1");
+      otherfs.mkdirs(user1Path);
+      fail("This mkdir should fail");
+    } catch (AccessControlException ex) {
+      // Expected: the FileSystem was created with the ugi of user1, so the
+      // first access to /data/user1 initializes the target file system
+      // under that ugi, which has no write permission yet.
+    }
+
+    // Change the ownership of /data so that user1 can create a directory.
+    fsTarget.setOwner(new Path(targetTestRoot, "data"),
+        "user1", "test2");
+    // Set the permission of the target so that user1 can write under it.
+    fsTarget.setPermission(new Path(targetTestRoot, "data"),
+        new FsPermission("775"));
+
+    userUgi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws IOException {
+        FileSystem viewFS = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+        map.put("user1", viewFS);
+        return null;
+      }
+    });
+
+    // Although we are running with the current user context, which does not
+    // have write permission, the mkdir succeeds because the FileSystem uses
+    // the ugi of user1, which does.
+    FileSystem otherfs = map.get("user1");
+    otherfs.mkdirs(user1Path);
+    String owner = otherfs.getFileStatus(user1Path).getOwner();
+    assertEquals("The owner did not match", owner, userUgi.getShortUserName());
+    otherfs.delete(user1Path, false);
+  }
 }
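The HDFS test above establishes the security property that makes lazy initialization safe: the UGI travels with the FileSystem object, not with the thread that eventually triggers initialization. In application terms, a hedged sketch with an invented user and path:

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsConstants;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    public class UgiTravelsWithFs {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        UserGroupInformation user1 = UserGroupInformation
            .createUserForTesting("user1@EXAMPLE.COM", new String[]{"hadoop"});

        // The viewfs handle is created under user1's context...
        FileSystem asUser1 = user1.doAs(
            (PrivilegedExceptionAction<FileSystem>) () ->
                FileSystem.get(FsConstants.VIEWFS_URI, conf));

        // ...so even on the main thread, the lazily created target comes up
        // as user1 and this mkdir is checked against user1's permissions.
        asUser1.mkdirs(new Path("/data/user1"));
      }
    }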
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
index 8b7eb88404a..403a424e148 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -178,7 +177,8 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
      */
     addMountLinks(defaultFSURI.getAuthority(), new String[] {userFolder },
         new String[] {nonExistTargetPath.toUri().toString() }, conf);
-    FileSystem.get(conf);
+    FileSystem fs = FileSystem.get(conf);
+    fs.resolvePath(new Path(userFolder));
     Assert.fail("Expected to fail with non existent link");
   }
 
@@ -361,7 +361,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
    * Unset fs.viewfs.overload.scheme.target.hdfs.impl property.
    * So, OverloadScheme target fs initialization will fail.
    */
-  @Test(expected = UnsupportedFileSystemException.class, timeout = 30000)
+  @Test(expected = IOException.class, timeout = 30000)
   public void testInvalidOverloadSchemeTargetFS() throws Exception {
     final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER);
     String mountTableIfSet = conf.get(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH);
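Both edits in this test class follow from the same shift: a broken mount target no longer fails FileSystem.get(), so the test must touch the mount point to provoke the error, and the expected type widens to IOException because the lazy path wraps a failed factory result. A sketch of what callers should now expect, assuming a viewfs mount table with a broken /user target is already configured:

    import java.io.IOException;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class LazyFailureDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes a mount table with an unusable /user target is configured.
        FileSystem fs = FileSystem.get(URI.create("viewfs://demo/"), conf);
        try {
          fs.resolvePath(new Path("/user")); // first touch initializes target
        } catch (IOException e) {
          // e.g. "Could not initialize target File System for URI: ..."
          System.err.println("target init failed lazily: " + e);
        }
      }
    }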
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
index fda667251b8..540883dd2e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
@@ -21,18 +21,28 @@ package org.apache.hadoop.fs.viewfs;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.Map;
 
 import javax.security.auth.login.LoginException;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 public class TestViewFsHdfs extends ViewFsBaseTest {
 
@@ -85,5 +95,73 @@ public class TestViewFsHdfs extends ViewFsBaseTest {
   int getExpectedDelegationTokenCount() {
     return 8;
   }
+
+  @Test
+  public void testTargetFileSystemLazyInitialization() throws Exception {
+    final Map<String, FileContext> map = new HashMap<>();
+    final Path user1Path = new Path("/data/user1");
+
+    // Scenario - 1: Create FileContext with the current user context.
+    // Both mkdir and delete should be successful.
+    FileContext fs = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
+    fs.mkdir(user1Path, FileContext.DEFAULT_PERM, false);
+    fs.delete(user1Path, false);
+
+    // Scenario - 2: Create FileContext with a different user context.
+    final UserGroupInformation userUgi = UserGroupInformation
+        .createUserForTesting("user1@HADOOP.COM", new String[]{"hadoop"});
+    userUgi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws IOException {
+        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+        String doAsUserName = ugi.getUserName();
+        assertEquals(doAsUserName, "user1@HADOOP.COM");
+
+        FileContext viewFS = FileContext.getFileContext(
+            FsConstants.VIEWFS_URI, conf);
+        map.put("user1", viewFS);
+        return null;
+      }
+    });
+
+    // Try creating a directory with the file context created by a different
+    // ugi. Though we run the mkdir with the current user context, the
+    // target filesystem is instantiated with the ugi under which the
+    // file context was created.
+    try {
+      FileContext otherfs = map.get("user1");
+      otherfs.mkdir(user1Path, FileContext.DEFAULT_PERM, false);
+      fail("This mkdir should fail");
+    } catch (AccessControlException ex) {
+      // Expected: the FileContext was created with the ugi of user1, so the
+      // first access to /data/user1 initializes the target file system
+      // under that ugi, which has no write permission yet.
+    }
+
+    // Change the ownership of /data so that user1 can create a directory.
+    fcView.setOwner(new Path("/data"), "user1", "test2");
+    // Set the permission of the target so that user1 can write under it.
+    fcView.setPermission(new Path("/data"), new FsPermission("775"));
+
+    userUgi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws IOException {
+        FileContext viewFS = FileContext.getFileContext(
+            FsConstants.VIEWFS_URI, conf);
+        map.put("user1", viewFS);
+        return null;
+      }
+    });
+
+    // Although we are running with the current user context, which does not
+    // have write permission, the mkdir succeeds because the FileContext uses
+    // the ugi of user1, which does.
+    FileContext otherfs = map.get("user1");
+    otherfs.mkdir(user1Path, FileContext.DEFAULT_PERM, false);
+    String owner = otherfs.getFileStatus(user1Path).getOwner();
+    assertEquals("The owner did not match", owner, userUgi.getShortUserName());
+    otherfs.delete(user1Path, false);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java
index 39df141a37b..2bc46e55ae5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java
@@ -198,13 +198,15 @@ public class TestViewFileSystemOverloadSchemeWithDFSAdmin {
    */
   @Test
   public void testSafeModeWithWrongFS() throws Exception {
+    String wrongFsUri = "hdfs://nonExistent";
     final Path hdfsTargetPath =
-        new Path("hdfs://nonExistent" + HDFS_USER_FOLDER);
+        new Path(wrongFsUri + HDFS_USER_FOLDER);
     addMountLinks(defaultFSURI.getHost(), new String[] {HDFS_USER_FOLDER},
         new String[] {hdfsTargetPath.toUri().toString()}, conf);
     final DFSAdmin dfsAdmin = new DFSAdmin(conf);
     redirectStream();
-    int ret = ToolRunner.run(dfsAdmin, new String[] {"-safemode", "enter" });
+    int ret = ToolRunner.run(dfsAdmin,
+        new String[] {"-fs", wrongFsUri, "-safemode", "enter" });
     assertEquals(-1, ret);
     assertErrMsg("safemode: java.net.UnknownHostException: nonExistent", 0);
   }
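The dfsadmin change is the same story seen from the command line: constructing the client over a mount table with an unresolvable target no longer fails up front, so the test passes -fs to aim the command directly at the bad authority and still provoke the UnknownHostException. A hedged, self-contained illustration, not taken from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class SafeModeWrongFsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Point dfsadmin at the unresolvable namenode explicitly; with lazy
        // initialization, merely building the viewfs client would not fail.
        int ret = ToolRunner.run(new DFSAdmin(conf),
            new String[] {"-fs", "hdfs://nonExistent", "-safemode", "enter"});
        System.out.println("exit=" + ret); // -1; UnknownHostException on stderr
      }
    }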