HADOOP-17028. ViewFS should initialize mounted target filesystems lazily. Contributed by Abhishek Das (#2260)
commit 1dd03cc4b5 (parent ea90c5117d)
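The core of the change is visible in the first file below: INodeTree.INodeLink no longer receives a ready-made target filesystem in its constructor, but creates one on first access, guarded by double-checked locking on a private lock object. The sketch below distills that idiom into a standalone class; LazyLink and its member names are illustrative, not the patch's exact code, and the volatile qualifier (which safe double-checked locking needs for publication) is this sketch's addition, while the patch itself relies on its synchronized block.

import java.io.IOException;
import java.net.URI;
import java.util.function.Function;

// Standalone sketch of the lazy-initialization idiom used by INodeLink below.
class LazyLink<T> {
  private final URI target;
  private final Function<URI, T> initMethod; // factory supplied at mount-table build time
  private final Object lock = new Object();
  private volatile T targetFileSystem;       // created on first access

  LazyLink(URI target, Function<URI, T> initMethod) {
    this.target = target;
    this.initMethod = initMethod;
  }

  T get() throws IOException {
    T fs = targetFileSystem;
    if (fs != null) {
      return fs;            // fast path: already initialized
    }
    synchronized (lock) {   // slow path: initialize exactly once
      if (targetFileSystem == null) {
        targetFileSystem = initMethod.apply(target);
        if (targetFileSystem == null) {
          throw new IOException(
              "Could not initialize target File System for URI: " + target);
        }
      }
      return targetFileSystem;
    }
  }
}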
InodeTree.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import java.util.function.Function;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;

@@ -257,7 +258,10 @@ abstract class InodeTree<T> {
    */
   static class INodeLink<T> extends INode<T> {
     final URI[] targetDirLinkList;
-    final T targetFileSystem; // file system object created from the link.
+    private T targetFileSystem; // file system object created from the link.
+    // Function to initialize file system. Only applicable for simple links.
+    private Function<URI, T> fileSystemInitMethod;
+    private final Object lock = new Object();
 
     /**
      * Construct a mergeLink or nfly.

@@ -273,11 +277,13 @@ abstract class InodeTree<T> {
      * Construct a simple link (i.e. not a mergeLink).
      */
     INodeLink(final String pathToNode, final UserGroupInformation aUgi,
-        final T targetFs, final URI aTargetDirLink) {
+        Function<URI, T> createFileSystemMethod,
+        final URI aTargetDirLink) {
       super(pathToNode, aUgi);
-      targetFileSystem = targetFs;
+      targetFileSystem = null;
       targetDirLinkList = new URI[1];
       targetDirLinkList[0] = aTargetDirLink;
+      this.fileSystemInitMethod = createFileSystemMethod;
     }

@@ -298,7 +304,30 @@ abstract class InodeTree<T> {
       return false;
     }
 
-    public T getTargetFileSystem() {
+    /**
+     * Get the instance of FileSystem to use, creating one if needed.
+     * @return an initialized instance of T
+     * @throws IOException
+     */
+    public T getTargetFileSystem() throws IOException {
+      if (targetFileSystem != null) {
+        return targetFileSystem;
+      }
+      // For non-NFLY and non-MERGE links, we initialize the FileSystem when
+      // the corresponding mount path is accessed.
+      if (targetDirLinkList.length == 1) {
+        synchronized (lock) {
+          if (targetFileSystem != null) {
+            return targetFileSystem;
+          }
+          targetFileSystem = fileSystemInitMethod.apply(targetDirLinkList[0]);
+          if (targetFileSystem == null) {
+            throw new IOException(
+                "Could not initialize target File System for URI: " +
+                targetDirLinkList[0]);
+          }
+        }
+      }
       return targetFileSystem;
     }
   }

@@ -359,7 +388,7 @@ abstract class InodeTree<T> {
     switch (linkType) {
     case SINGLE:
       newLink = new INodeLink<T>(fullPath, aUgi,
-          getTargetFileSystem(new URI(target)), new URI(target));
+          initAndGetTargetFs(), new URI(target));
       break;
     case SINGLE_FALLBACK:
     case MERGE_SLASH:

@@ -385,8 +414,7 @@ abstract class InodeTree<T> {
    * 3 abstract methods.
    * @throws IOException
    */
-  protected abstract T getTargetFileSystem(URI uri)
-      throws UnsupportedFileSystemException, URISyntaxException, IOException;
+  protected abstract Function<URI, T> initAndGetTargetFs();
 
   protected abstract T getTargetFileSystem(INodeDir<T> dir)
       throws URISyntaxException, IOException;

@@ -589,7 +617,7 @@ abstract class InodeTree<T> {
     if (isMergeSlashConfigured) {
       Preconditions.checkNotNull(mergeSlashTarget);
       root = new INodeLink<T>(mountTableName, ugi,
-          getTargetFileSystem(new URI(mergeSlashTarget)),
+          initAndGetTargetFs(),
           new URI(mergeSlashTarget));
       mountPoints.add(new MountPoint<T>("/", (INodeLink<T>) root));
       rootFallbackLink = null;

@@ -608,8 +636,7 @@ abstract class InodeTree<T> {
               + "not allowed.");
         }
         fallbackLink = new INodeLink<T>(mountTableName, ugi,
-            getTargetFileSystem(new URI(le.getTarget())),
-            new URI(le.getTarget()));
+            initAndGetTargetFs(), new URI(le.getTarget()));
         continue;
       case REGEX:
         addRegexMountEntry(le);

@@ -633,9 +660,8 @@ abstract class InodeTree<T> {
       FileSystem.LOG
           .info("Empty mount table detected for {} and considering itself "
              + "as a linkFallback.", theUri);
-      rootFallbackLink =
-          new INodeLink<T>(mountTableName, ugi, getTargetFileSystem(theUri),
-              theUri);
+      rootFallbackLink = new INodeLink<T>(mountTableName, ugi,
+          initAndGetTargetFs(), theUri);
       getRootDir().addFallbackLink(rootFallbackLink);
     }
   }

@@ -733,10 +759,10 @@ abstract class InodeTree<T> {
    * @param p - input path
    * @param resolveLastComponent
    * @return ResolveResult which allows further resolution of the remaining path
-   * @throws FileNotFoundException
+   * @throws IOException
    */
  ResolveResult<T> resolve(final String p, final boolean resolveLastComponent)
-      throws FileNotFoundException {
+      throws IOException {
    ResolveResult<T> resolveResult = null;
    String[] path = breakIntoPathComponents(p);
    if (path.length <= 1) { // special case for when path is "/"

@@ -880,19 +906,20 @@ abstract class InodeTree<T> {
       ResultKind resultKind, String resolvedPathStr,
       String targetOfResolvedPathStr, Path remainingPath) {
     try {
-      T targetFs = getTargetFileSystem(
-          new URI(targetOfResolvedPathStr));
-      return new ResolveResult<T>(resultKind, targetFs, resolvedPathStr,
-          remainingPath, true);
-    } catch (IOException ex) {
-      LOGGER.error(String.format(
-          "Got Exception while build resolve result."
-              + " ResultKind:%s, resolvedPathStr:%s,"
-              + " targetOfResolvedPathStr:%s, remainingPath:%s,"
-              + " will return null.",
-          resultKind, resolvedPathStr, targetOfResolvedPathStr, remainingPath),
-          ex);
-      return null;
+      T targetFs = initAndGetTargetFs()
+          .apply(new URI(targetOfResolvedPathStr));
+      if (targetFs == null) {
+        LOGGER.error(String.format(
+            "Not able to initialize target file system."
+                + " ResultKind:%s, resolvedPathStr:%s,"
+                + " targetOfResolvedPathStr:%s, remainingPath:%s,"
+                + " will return null.",
+            resultKind, resolvedPathStr, targetOfResolvedPathStr,
+            remainingPath));
+        return null;
+      }
+      return new ResolveResult<T>(resultKind, targetFs, resolvedPathStr,
+          remainingPath, true);
     } catch (URISyntaxException uex) {
       LOGGER.error(String.format(
           "Got Exception while build resolve result."
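The abstract hook changes shape as well: instead of getTargetFileSystem(URI) returning a ready filesystem, subclasses now implement initAndGetTargetFs() returning a Function<URI, T>. The factory is built eagerly, so it can capture context such as the creating user's UGI, while the expensive connection work runs only when the function is applied. A minimal standalone sketch of that hand-off, with hypothetical names (callerContext stands in for the captured UGI):

import java.net.URI;
import java.util.function.Function;

public class FactoryHandOff {
  static Function<URI, String> initAndGetTarget(String callerContext) {
    // Cheap: only captures context here; no connection is made yet.
    return uri -> "connected to " + uri + " as " + callerContext;
  }

  public static void main(String[] args) {
    Function<URI, String> factory = initAndGetTarget("user1"); // eager, cheap
    // ...much later, on first access to the mount point:
    System.out.println(factory.apply(URI.create("hdfs://nn1/data")));
  }
}

The ViewFileSystem hunks below show the real implementation: the returned Function wraps the filesystem creation in ugi.doAs(...), so lazy initialization still runs as the user who built the mount table.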
ViewFileSystem.java

@@ -26,10 +26,12 @@ import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS;
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT;
 import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
 
+import java.util.function.Function;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;

@@ -302,7 +304,7 @@ public class ViewFileSystem extends FileSystem {
     enableInnerCache = config.getBoolean(CONFIG_VIEWFS_ENABLE_INNER_CACHE,
         CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT);
     FsGetter fsGetter = fsGetter();
-    final InnerCache innerCache = new InnerCache(fsGetter);
+    cache = new InnerCache(fsGetter);
     // Now build client side view (i.e. client side mount table) from config.
     final String authority = theUri.getAuthority();
     String tableName = authority;

@@ -318,15 +320,32 @@ public class ViewFileSystem extends FileSystem {
     fsState = new InodeTree<FileSystem>(conf, tableName, myUri,
         initingUriAsFallbackOnNoMounts) {
       @Override
-      protected FileSystem getTargetFileSystem(final URI uri)
-          throws URISyntaxException, IOException {
-        FileSystem fs;
-        if (enableInnerCache) {
-          fs = innerCache.get(uri, config);
-        } else {
-          fs = fsGetter.get(uri, config);
-        }
-        return new ChRootedFileSystem(fs, uri);
+      protected Function<URI, FileSystem> initAndGetTargetFs() {
+        return new Function<URI, FileSystem>() {
+          @Override
+          public FileSystem apply(final URI uri) {
+            FileSystem fs;
+            try {
+              fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+                @Override
+                public FileSystem run() throws IOException {
+                  if (enableInnerCache) {
+                    synchronized (cache) {
+                      return cache.get(uri, config);
+                    }
+                  } else {
+                    return fsGetter().get(uri, config);
+                  }
+                }
+              });
+              return new ChRootedFileSystem(fs, uri);
+            } catch (IOException | InterruptedException ex) {
+              LOG.error("Could not initialize the underlying FileSystem "
+                  + "object. Exception: " + ex.toString());
+            }
+            return null;
+          }
+        };
       }
 
       @Override

@@ -350,13 +369,6 @@ public class ViewFileSystem extends FileSystem {
     } catch (URISyntaxException e) {
       throw new IOException("URISyntax exception: " + theUri);
     }
-
-    if (enableInnerCache) {
-      // All fs instances are created and cached on startup. The cache is
-      // readonly after the initialize() so the concurrent access of the cache
-      // is safe.
-      cache = innerCache;
-    }
   }
 
   /**

@@ -908,11 +920,36 @@ public class ViewFileSystem extends FileSystem {
   public void setVerifyChecksum(final boolean verifyChecksum) {
     List<InodeTree.MountPoint<FileSystem>> mountPoints =
         fsState.getMountPoints();
+    Map<String, FileSystem> fsMap = initializeMountedFileSystems(mountPoints);
     for (InodeTree.MountPoint<FileSystem> mount : mountPoints) {
-      mount.target.targetFileSystem.setVerifyChecksum(verifyChecksum);
+      fsMap.get(mount.src).setVerifyChecksum(verifyChecksum);
     }
   }
 
+  /**
+   * Initialize the target filesystem for all mount points.
+   * @param mountPoints The mount points
+   * @return Mapping of mount point and the initialized target filesystems
+   * @throws RuntimeException when the target file system cannot be initialized
+   */
+  private Map<String, FileSystem> initializeMountedFileSystems(
+      List<InodeTree.MountPoint<FileSystem>> mountPoints) {
+    FileSystem fs = null;
+    Map<String, FileSystem> fsMap = new HashMap<>(mountPoints.size());
+    for (InodeTree.MountPoint<FileSystem> mount : mountPoints) {
+      try {
+        fs = mount.target.getTargetFileSystem();
+        fsMap.put(mount.src, fs);
+      } catch (IOException ex) {
+        String errMsg = "Not able to initialize FileSystem for mount path " +
+            mount.src + " with exception " + ex;
+        LOG.error(errMsg);
+        throw new RuntimeException(errMsg, ex);
+      }
+    }
+    return fsMap;
+  }
+
   @Override
   public long getDefaultBlockSize() {
     throw new NotInMountpointException("getDefaultBlockSize");

@@ -936,6 +973,9 @@ public class ViewFileSystem extends FileSystem {
       return res.targetFileSystem.getDefaultBlockSize(res.remainingPath);
     } catch (FileNotFoundException e) {
       throw new NotInMountpointException(f, "getDefaultBlockSize");
+    } catch (IOException e) {
+      throw new RuntimeException("Not able to initialize fs in "
+          + "getDefaultBlockSize for path " + f + " with exception", e);
     }
   }

@@ -947,6 +987,9 @@ public class ViewFileSystem extends FileSystem {
       return res.targetFileSystem.getDefaultReplication(res.remainingPath);
     } catch (FileNotFoundException e) {
       throw new NotInMountpointException(f, "getDefaultReplication");
+    } catch (IOException e) {
+      throw new RuntimeException("Not able to initialize fs in "
+          + "getDefaultReplication for path " + f + " with exception", e);
     }
   }

@@ -979,8 +1022,9 @@ public class ViewFileSystem extends FileSystem {
   public void setWriteChecksum(final boolean writeChecksum) {
     List<InodeTree.MountPoint<FileSystem>> mountPoints =
         fsState.getMountPoints();
+    Map<String, FileSystem> fsMap = initializeMountedFileSystems(mountPoints);
     for (InodeTree.MountPoint<FileSystem> mount : mountPoints) {
-      mount.target.targetFileSystem.setWriteChecksum(writeChecksum);
+      fsMap.get(mount.src).setWriteChecksum(writeChecksum);
     }
   }

@@ -988,17 +1032,24 @@ public class ViewFileSystem extends FileSystem {
   public FileSystem[] getChildFileSystems() {
     List<InodeTree.MountPoint<FileSystem>> mountPoints =
         fsState.getMountPoints();
+    Map<String, FileSystem> fsMap = initializeMountedFileSystems(mountPoints);
     Set<FileSystem> children = new HashSet<FileSystem>();
     for (InodeTree.MountPoint<FileSystem> mountPoint : mountPoints) {
-      FileSystem targetFs = mountPoint.target.targetFileSystem;
+      FileSystem targetFs = fsMap.get(mountPoint.src);
       children.addAll(Arrays.asList(targetFs.getChildFileSystems()));
     }
 
-    if (fsState.isRootInternalDir() && fsState.getRootFallbackLink() != null) {
-      children.addAll(Arrays.asList(
-          fsState.getRootFallbackLink().targetFileSystem
-              .getChildFileSystems()));
+    try {
+      if (fsState.isRootInternalDir() &&
+          fsState.getRootFallbackLink() != null) {
+        children.addAll(Arrays.asList(
+            fsState.getRootFallbackLink().getTargetFileSystem()
+                .getChildFileSystems()));
+      }
+    } catch (IOException ex) {
+      LOG.error("Could not add child filesystems for source path "
+          + fsState.getRootFallbackLink().fullPath + " with exception " + ex);
     }
     return children.toArray(new FileSystem[]{});
   }
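One consequence of the change above: operations that must touch every mount point (setVerifyChecksum, setWriteChecksum, getChildFileSystems) can no longer assume the targets already exist, so they first force initialization through the new initializeMountedFileSystems() helper. A standalone sketch of that pattern, reusing the hypothetical LazyLink type from the first sketch:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class EagerInit {
  // Force-initialize every lazy link before a mount-wide operation,
  // mirroring initializeMountedFileSystems(): one failing mount aborts
  // the whole operation with a RuntimeException.
  static <T> Map<String, T> initializeAll(Map<String, LazyLink<T>> mounts) {
    Map<String, T> initialized = new HashMap<>();
    for (Map.Entry<String, LazyLink<T>> e : mounts.entrySet()) {
      try {
        initialized.put(e.getKey(), e.getValue().get());
      } catch (IOException ex) {
        throw new RuntimeException(
            "Not able to initialize FileSystem for mount path " + e.getKey(), ex);
      }
    }
    return initialized;
  }
}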
ViewFileSystemOverloadScheme.java

@@ -348,7 +348,7 @@ public class ViewFileSystemOverloadScheme extends ViewFileSystem {
     FileSystem fs = res.isInternalDir() ?
         (fsState.getRootFallbackLink() != null ?
             ((ChRootedFileSystem) fsState
-                .getRootFallbackLink().targetFileSystem).getMyFs() :
+                .getRootFallbackLink().getTargetFileSystem()).getMyFs() :
             fsGetter().get(path.toUri(), conf)) :
         ((ChRootedFileSystem) res.targetFileSystem).getMyFs();
     return new MountPathInfo<FileSystem>(res.remainingPath, res.resolvedPath,

@@ -390,8 +390,13 @@ public class ViewFileSystemOverloadScheme extends ViewFileSystem {
     if (fsState.getRootFallbackLink() == null) {
       return null;
     }
-    return ((ChRootedFileSystem) fsState.getRootFallbackLink().targetFileSystem)
-        .getMyFs();
+    try {
+      return ((ChRootedFileSystem) fsState.getRootFallbackLink()
+          .getTargetFileSystem()).getMyFs();
+    } catch (IOException ex) {
+      LOG.error("Could not get fallback filesystem ");
+    }
+    return null;
   }
 
   @Override
ViewFs.java

@@ -21,10 +21,12 @@ import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS;
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT;
 import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
 
+import java.util.function.Function;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.HashSet;

@@ -237,15 +239,32 @@ public class ViewFs extends AbstractFileSystem {
         initingUriAsFallbackOnNoMounts) {
 
       @Override
-      protected AbstractFileSystem getTargetFileSystem(final URI uri)
-          throws URISyntaxException, UnsupportedFileSystemException {
-        String pathString = uri.getPath();
-        if (pathString.isEmpty()) {
-          pathString = "/";
-        }
-        return new ChRootedFs(
-            AbstractFileSystem.createFileSystem(uri, config),
-            new Path(pathString));
+      protected Function<URI, AbstractFileSystem> initAndGetTargetFs() {
+        return new Function<URI, AbstractFileSystem>() {
+          @Override
+          public AbstractFileSystem apply(final URI uri) {
+            AbstractFileSystem fs;
+            try {
+              fs = ugi.doAs(
+                  new PrivilegedExceptionAction<AbstractFileSystem>() {
+                    @Override
+                    public AbstractFileSystem run() throws IOException {
+                      return AbstractFileSystem.createFileSystem(uri, config);
+                    }
+                  });
+              String pathString = uri.getPath();
+              if (pathString.isEmpty()) {
+                pathString = "/";
+              }
+              return new ChRootedFs(fs, new Path(pathString));
+            } catch (IOException | URISyntaxException |
+                InterruptedException ex) {
+              LOG.error("Could not initialize underlying FileSystem object"
+                  + " for uri " + uri + " with exception: " + ex.toString());
+            }
+            return null;
+          }
+        };
       }
 
       @Override

@@ -719,7 +738,8 @@ public class ViewFs extends AbstractFileSystem {
     List<Token<?>> result = new ArrayList<Token<?>>(initialListSize);
     for ( int i = 0; i < mountPoints.size(); ++i ) {
       List<Token<?>> tokens =
-          mountPoints.get(i).target.targetFileSystem.getDelegationTokens(renewer);
+          mountPoints.get(i).target.getTargetFileSystem()
+              .getDelegationTokens(renewer);
       if (tokens != null) {
         result.addAll(tokens);
       }
TestRegexMountPoint.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import java.util.function.Function;
 import java.io.IOException;
 import java.net.URI;
 import java.util.Map;

@@ -63,10 +64,15 @@ public class TestRegexMountPoint {
     inodeTree = new InodeTree<TestRegexMountPointFileSystem>(conf,
         TestRegexMountPoint.class.getName(), null, false) {
       @Override
-      protected TestRegexMountPointFileSystem getTargetFileSystem(
-          final URI uri) {
-        return new TestRegexMountPointFileSystem(uri);
+      protected Function<URI, TestRegexMountPointFileSystem>
+          initAndGetTargetFs() {
+        return new Function<URI, TestRegexMountPointFileSystem>() {
+          @Override
+          public TestRegexMountPointFileSystem apply(URI uri) {
+            return new TestRegexMountPointFileSystem(uri);
+          }
+        };
       }
 
       @Override
       protected TestRegexMountPointFileSystem getTargetFileSystem(
TestViewFsConfig.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import java.util.function.Function;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;

@@ -42,7 +43,7 @@ public class TestViewFsConfig {
     new InodeTree<Foo>(conf, null, null, false) {
 
       @Override
-      protected Foo getTargetFileSystem(final URI uri) {
+      protected Function<URI, Foo> initAndGetTargetFs() {
         return null;
       }
 
ViewFileSystemBaseTest.java

@@ -67,6 +67,7 @@ import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import static org.apache.hadoop.fs.FileSystemTestHelper.*;
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE;
 import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
 
 import org.junit.After;

@@ -1352,6 +1353,8 @@ abstract public class ViewFileSystemBaseTest {
     final int cacheSize = TestFileUtil.getCacheSize();
     ViewFileSystem viewFs = (ViewFileSystem) FileSystem
         .get(new URI("viewfs://" + clusterName + "/"), config);
+    viewFs.resolvePath(
+        new Path(String.format("viewfs://%s/%s", clusterName, "/user")));
     assertEquals(cacheSize + 1, TestFileUtil.getCacheSize());
     viewFs.close();
     assertEquals(cacheSize, TestFileUtil.getCacheSize());

@@ -1428,4 +1431,45 @@ abstract public class ViewFileSystemBaseTest {
         summaryAfter.getLength());
     }
   }
+
+  @Test
+  public void testTargetFileSystemLazyInitialization() throws Exception {
+    final String clusterName = "cluster" + new Random().nextInt();
+    Configuration config = new Configuration(conf);
+    config.setBoolean(CONFIG_VIEWFS_ENABLE_INNER_CACHE, false);
+    config.setClass("fs.mockfs.impl",
+        TestChRootedFileSystem.MockFileSystem.class, FileSystem.class);
+    ConfigUtil.addLink(config, clusterName, "/user",
+        URI.create("mockfs://mockauth1/mockpath"));
+    ConfigUtil.addLink(config, clusterName,
+        "/mock", URI.create("mockfs://mockauth/mockpath"));
+
+    final int cacheSize = TestFileUtil.getCacheSize();
+    ViewFileSystem viewFs = (ViewFileSystem) FileSystem
+        .get(new URI("viewfs://" + clusterName + "/"), config);
+
+    // As no inner file system instance has been initialized, the cache size
+    // will remain the same. The cache is disabled for the viewfs scheme, so
+    // the viewfs:// instance won't go in the cache even after initialization.
+    assertEquals(cacheSize, TestFileUtil.getCacheSize());
+
+    // This resolvePath will initialize the file system corresponding
+    // to the mount table entry of the path "/user".
+    viewFs.resolvePath(
+        new Path(String.format("viewfs://%s/%s", clusterName, "/user")));
+
+    // Cache size will increase by 1.
+    assertEquals(cacheSize + 1, TestFileUtil.getCacheSize());
+    // This resolvePath will initialize the file system corresponding
+    // to the mount table entry of the path "/mock".
+    viewFs.resolvePath(new Path(String.format("viewfs://%s/%s", clusterName,
+        "/mock")));
+    // One more file system instance will get initialized.
+    assertEquals(cacheSize + 2, TestFileUtil.getCacheSize());
+    viewFs.close();
+    // Initialized FileSystem instances will not be removed from the cache,
+    // as the viewfs inner cache is disabled.
+    assertEquals(cacheSize + 2, TestFileUtil.getCacheSize());
+  }
 }
TestViewFileSystemHdfs.java

@@ -21,8 +21,11 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.conf.Configuration;

@@ -39,6 +42,7 @@ import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;

@@ -46,6 +50,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;

@@ -406,4 +411,72 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
       }
     }
   }
+
+  @Test
+  public void testTargetFileSystemLazyInitializationWithUgi() throws Exception {
+    final Map<String, FileSystem> map = new HashMap<>();
+    final Path user1Path = new Path("/data/user1");
+
+    // Scenario - 1: Create FileSystem with the current user context.
+    // Both mkdir and delete should be successful.
+    FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+    fs.mkdirs(user1Path);
+    fs.delete(user1Path, false);
+
+    // Scenario - 2: Create FileSystem with a different user context.
+    final UserGroupInformation userUgi = UserGroupInformation
+        .createUserForTesting("user1@HADOOP.COM", new String[]{"hadoop"});
+    userUgi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws IOException {
+        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+        String doAsUserName = ugi.getUserName();
+        assertEquals(doAsUserName, "user1@HADOOP.COM");
+
+        FileSystem viewFS = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+        map.put("user1", viewFS);
+        return null;
+      }
+    });
+
+    // Try creating a directory with the FileSystem created by a different ugi.
+    // Though we are running the mkdir with the current user context, the
+    // target filesystem will be instantiated by the ugi with which the
+    // FileSystem was created.
+    try {
+      FileSystem otherfs = map.get("user1");
+      otherfs.mkdirs(user1Path);
+      fail("This mkdir should fail");
+    } catch (AccessControlException ex) {
+      // Exception is expected as the FileSystem was created with the ugi of
+      // user1, so when we access the /user/user1 path for the first time,
+      // the corresponding file system is initialized and it executes the
+      // task with the ugi with which the FileSystem was created.
+    }
+
+    // Change the permission of /data path so that user1 can create a directory
+    fsTarget.setOwner(new Path(targetTestRoot, "data"),
+        "user1", "test2");
+    // set permission of target to allow rename to target
+    fsTarget.setPermission(new Path(targetTestRoot, "data"),
+        new FsPermission("775"));
+
+    userUgi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws IOException {
+        FileSystem viewFS = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+        map.put("user1", viewFS);
+        return null;
+      }
+    });
+
+    // Although we are running with the current user context, and the current
+    // user context does not have write permission, we are able to create the
+    // directory because it uses the ugi of user1, which has write permission.
+    FileSystem otherfs = map.get("user1");
+    otherfs.mkdirs(user1Path);
+    String owner = otherfs.getFileStatus(user1Path).getOwner();
+    assertEquals("The owner did not match ", owner, userUgi.getShortUserName());
+    otherfs.delete(user1Path, false);
+  }
 }
TestViewFileSystemOverloadSchemeWithHdfsScheme.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.LambdaTestUtils;

@@ -206,7 +205,8 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
         new String[] {nonExistTargetPath.toUri().toString()}, conf);
     if (expectFsInitFailure) {
       LambdaTestUtils.intercept(IOException.class, () -> {
-        FileSystem.get(conf);
+        FileSystem fs = FileSystem.get(conf);
+        fs.resolvePath(new Path(userFolder));
       });
     } else {
       try (FileSystem fs = FileSystem.get(conf)) {

@@ -397,7 +397,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
    * Unset fs.viewfs.overload.scheme.target.hdfs.impl property.
    * So, OverloadScheme target fs initialization will fail.
    */
-  @Test(expected = UnsupportedFileSystemException.class, timeout = 30000)
+  @Test(expected = IOException.class, timeout = 30000)
   public void testInvalidOverloadSchemeTargetFS() throws Exception {
     final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER);
     String mountTableIfSet = conf.get(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH);
TestViewFsHdfs.java

@@ -21,18 +21,28 @@ package org.apache.hadoop.fs.viewfs;
 import java.io.IOException;
 import java.net.URISyntaxException;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.Map;
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 public class TestViewFsHdfs extends ViewFsBaseTest {
 

@@ -86,4 +96,72 @@ public class TestViewFsHdfs extends ViewFsBaseTest {
     return 8;
   }
 
+  @Test
+  public void testTargetFileSystemLazyInitialization() throws Exception {
+    final Map<String, FileContext> map = new HashMap<>();
+    final Path user1Path = new Path("/data/user1");
+
+    // Scenario - 1: Create FileContext with the current user context.
+    // Both mkdir and delete should be successful.
+    FileContext fs = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
+    fs.mkdir(user1Path, FileContext.DEFAULT_PERM, false);
+    fs.delete(user1Path, false);
+
+    // Scenario - 2: Create FileContext with a different user context.
+    final UserGroupInformation userUgi = UserGroupInformation
+        .createUserForTesting("user1@HADOOP.COM", new String[]{"hadoop"});
+    userUgi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws IOException {
+        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+        String doAsUserName = ugi.getUserName();
+        assertEquals(doAsUserName, "user1@HADOOP.COM");
+
+        FileContext viewFS = FileContext.getFileContext(
+            FsConstants.VIEWFS_URI, conf);
+        map.put("user1", viewFS);
+        return null;
+      }
+    });
+
+    // Try creating a directory with the file context created by a different
+    // ugi. Though we are running the mkdir with the current user context, the
+    // target filesystem will be instantiated by the ugi with which the
+    // file context was created.
+    try {
+      FileContext otherfs = map.get("user1");
+      otherfs.mkdir(user1Path, FileContext.DEFAULT_PERM, false);
+      fail("This mkdir should fail");
+    } catch (AccessControlException ex) {
+      // Exception is expected as the FileContext was created with the ugi of
+      // user1, so when we access the /user/user1 path for the first time,
+      // the corresponding file system is initialized and it executes the
+      // task with the ugi with which the FileContext was created.
+    }
+
+    // Change the permission of /data path so that user1 can create a directory
+    fcView.setOwner(new Path("/data"), "user1", "test2");
+    // set permission of target to allow rename to target
+    fcView.setPermission(new Path("/data"), new FsPermission("775"));
+
+    userUgi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws IOException {
+        FileContext viewFS = FileContext.getFileContext(
+            FsConstants.VIEWFS_URI, conf);
+        map.put("user1", viewFS);
+        return null;
+      }
+    });
+
+    // Although we are running with the current user context, and the current
+    // user context does not have write permission, we are able to create the
+    // directory because it uses the ugi of user1, which has write permission.
+    FileContext otherfs = map.get("user1");
+    otherfs.mkdir(user1Path, FileContext.DEFAULT_PERM, false);
+    String owner = otherfs.getFileStatus(user1Path).getOwner();
+    assertEquals("The owner did not match ", owner, userUgi.getShortUserName());
+    otherfs.delete(user1Path, false);
+  }
+
 }
TestViewFileSystemOverloadSchemeWithDFSAdmin.java

@@ -198,13 +198,15 @@ public class TestViewFileSystemOverloadSchemeWithDFSAdmin {
    */
   @Test
   public void testSafeModeWithWrongFS() throws Exception {
+    String wrongFsUri = "hdfs://nonExistent";
     final Path hdfsTargetPath =
-        new Path("hdfs://nonExistent" + HDFS_USER_FOLDER);
+        new Path(wrongFsUri + HDFS_USER_FOLDER);
     addMountLinks(defaultFSURI.getHost(), new String[] {HDFS_USER_FOLDER},
         new String[] {hdfsTargetPath.toUri().toString()}, conf);
     final DFSAdmin dfsAdmin = new DFSAdmin(conf);
     redirectStream();
-    int ret = ToolRunner.run(dfsAdmin, new String[] {"-safemode", "enter" });
+    int ret = ToolRunner.run(dfsAdmin,
+        new String[] {"-fs", wrongFsUri, "-safemode", "enter" });
     assertEquals(-1, ret);
     assertErrMsg("safemode: java.net.UnknownHostException: nonExistent", 0);
   }