HADOOP-8957 AbstractFileSystem#IsValidName should be overridden for embedded file systems like ViewFs (Chris Nauroth via Sanjay)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1420965 13f79535-47bb-0310-9956-ffa450edef68
parent 618fa6cd59
commit cd80628ec4
@@ -298,6 +298,9 @@ Trunk (Unreleased)
     HADOOP-9131. Turn off TestLocalFileSystem#testListStatusWithColons on
     Windows. (Chris Nauroth via suresh)
 
+    HADOOP-8957 AbstractFileSystem#IsValidName should be overridden for
+    embedded file systems like ViewFs (Chris Nauroth via Sanjay Radia)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -85,14 +85,20 @@ public abstract class AbstractFileSystem {
   }
 
   /**
-   * Prohibits names which contain a ".", "..", ":" or "/"
+   * Returns true if the specified string is considered valid in the path part
+   * of a URI by this file system. The default implementation enforces the rules
+   * of HDFS, but subclasses may override this method to implement specific
+   * validation rules for specific file systems.
+   *
+   * @param src String source filename to check, path part of the URI
+   * @return boolean true if the specified string is considered valid
    */
-  private static boolean isValidName(String src) {
-    // Check for ".." "." ":" "/"
+  public boolean isValidName(String src) {
+    // Prohibit ".." "." and anything containing ":"
    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
    while(tokens.hasMoreTokens()) {
      String element = tokens.nextToken();
      if (element.equals("..") ||
          element.equals(".") ||
          (element.indexOf(":") >= 0)) {
        return false;
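For context, here is a standalone sketch (not part of the commit) of how the default validation above treats a few sample paths; it copies the tokenizer logic shown in the hunk rather than depending on the Hadoop classes.

```java
import java.util.StringTokenizer;

public class IsValidNameSketch {
  // Mirrors the default AbstractFileSystem#isValidName logic shown above:
  // reject any path element that is ".", "..", or contains ":".
  static boolean isValidName(String src) {
    StringTokenizer tokens = new StringTokenizer(src, "/");
    while (tokens.hasMoreTokens()) {
      String element = tokens.nextToken();
      if (element.equals("..") || element.equals(".")
          || element.indexOf(":") >= 0) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(isValidName("/user/alice/data")); // true
    System.out.println(isValidName("/foo/../bar"));      // false: ".." element
    System.out.println(isValidName("/foo:bar"));         // false: ":" in element
  }
}
```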
@@ -278,4 +278,9 @@ public abstract class FilterFs extends AbstractFileSystem {
   public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
     return myFs.getDelegationTokens(renewer);
   }
+
+  @Override
+  public boolean isValidName(String src) {
+    return myFs.isValidName(src);
+  }
 }
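The pass-through override matters because without it a FilterFs wrapper would fall back to the base-class default rules instead of asking the wrapped file system. A minimal sketch of that effect (plain Java, no Hadoop dependency; class names are illustrative only):

```java
class BaseFs {
  public boolean isValidName(String src) {
    return !src.contains(":"); // stand-in for the HDFS-style default above
  }
}

class PermissiveFs extends BaseFs {
  @Override
  public boolean isValidName(String src) {
    return true; // e.g. a local fs that defers validation to the OS
  }
}

class WrapperFs extends BaseFs {
  private final BaseFs myFs;
  WrapperFs(BaseFs myFs) { this.myFs = myFs; }

  @Override
  public boolean isValidName(String src) {
    return myFs.isValidName(src); // mirrors the FilterFs change above
  }
}

class FilterFsDemo {
  public static void main(String[] args) {
    BaseFs wrapped = new WrapperFs(new PermissiveFs());
    // Prints "true": the wrapper delegates instead of applying the base default.
    System.out.println(wrapped.isValidName("a:b"));
  }
}
```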
@@ -159,6 +159,14 @@ public class RawLocalFs extends DelegateToFileSystem {
     }
   }
 
+  @Override
+  public boolean isValidName(String src) {
+    // Different local file systems have different validation rules. Skip
+    // validation here and just let the OS handle it. This is consistent with
+    // RawLocalFileSystem.
+    return true;
+  }
+
   @Override
   public Path getLinkTarget(Path f) throws IOException {
     /* We should never get here. Valid local links are resolved transparently
@@ -84,6 +84,11 @@ class ChRootedFs extends AbstractFileSystem {
           + path.toUri().getPath());
   }
 
+  @Override
+  public boolean isValidName(String src) {
+    return myFs.isValidName(fullPath(new Path(src)).toUri().toString());
+  }
+
   public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
       throws URISyntaxException {
     super(fs.getUri(), fs.getUri().getScheme(),

@@ -103,7 +108,7 @@ class ChRootedFs extends AbstractFileSystem {
     // scheme:/// and scheme://authority/
     myUri = new URI(myFs.getUri().toString() +
         (myFs.getUri().getAuthority() == null ? "" : Path.SEPARATOR) +
-        chRootPathPart.toString().substring(1));
+        chRootPathPart.toUri().getPath().substring(1));
     super.checkPath(theRoot);
   }
 
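ChRootedFs validates a name by prepending its chroot prefix and delegating to the underlying file system, which is what the tests added later in this commit verify (isValidName("/test") is checked against "/chroot/test" on the base fs). A rough sketch of that path handling (the helper and field names below are assumptions for illustration, not the real ChRootedFs internals):

```java
import java.util.function.Predicate;

class ChrootSketch {
  private final String chRootPathPart;                 // e.g. "/chroot"
  private final Predicate<String> baseFsValidator;     // stands in for myFs.isValidName

  ChrootSketch(String chRootPathPart, Predicate<String> baseFsValidator) {
    this.chRootPathPart = chRootPathPart;
    this.baseFsValidator = baseFsValidator;
  }

  // Roughly what fullPath(new Path(src)) yields for an absolute src.
  String fullPath(String src) {
    return chRootPathPart + src;
  }

  public boolean isValidName(String src) {
    // Delegate to the underlying fs with the chroot prefix applied.
    return baseFsValidator.test(fullPath(src));
  }

  public static void main(String[] args) {
    ChrootSketch fs = new ChrootSketch("/chroot", p -> !p.contains(":"));
    System.out.println(fs.fullPath("/test"));     // /chroot/test
    System.out.println(fs.isValidName("/test"));  // true, via the base validator
  }
}
```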
@@ -62,6 +62,9 @@ import org.apache.hadoop.util.Time;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */
 public class ViewFileSystem extends FileSystem {
+
+  private static final Path ROOT_PATH = new Path(Path.SEPARATOR);
+
   static AccessControlException readOnlyMountTable(final String operation,
       final String p) {
     return new AccessControlException(

@@ -96,23 +99,6 @@ public class ViewFileSystem extends FileSystem {
   InodeTree<FileSystem> fsState;  // the fs state; ie the mount table
   Path homeDir = null;
 
-  /**
-   * Prohibits names which contain a ".", "..", ":" or "/"
-   */
-  private static boolean isValidName(final String src) {
-    // Check for ".." "." ":" "/"
-    final StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
-    while(tokens.hasMoreTokens()) {
-      String element = tokens.nextToken();
-      if (element.equals("..") ||
-          element.equals(".") ||
-          (element.indexOf(":") >= 0)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
   /**
    * Make the path Absolute and get the path-part of a pathname.
    * Checks that URI matches this file system

@@ -124,10 +110,6 @@ public class ViewFileSystem extends FileSystem {
   private String getUriPath(final Path p) {
     checkPath(p);
     String s = makeAbsolute(p).toUri().getPath();
-    if (!isValidName(s)) {
-      throw new InvalidPathException("Path part " + s + " from URI" + p
-          + " is not a valid filename.");
-    }
     return s;
   }

@@ -672,7 +654,7 @@ public class ViewFileSystem extends FileSystem {
         PERMISSION_RRR, ugi.getUserName(), ugi.getGroupNames()[0],

         new Path(theInternalDir.fullPath).makeQualified(
-            myUri, null));
+            myUri, ROOT_PATH));
   }

@@ -597,6 +597,12 @@ public class ViewFs extends AbstractFileSystem {
       return result;
     }
 
+  @Override
+  public boolean isValidName(String src) {
+    // Prefix validated at mount time and rest of path validated by mount target.
+    return true;
+  }
+
 
   /*
@@ -25,6 +25,7 @@ import java.util.EnumSet;
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestHelper;

@@ -36,6 +37,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestChRootedFs {
   FileContext fc; // The ChRoootedFs

@@ -307,4 +309,21 @@ public class TestChRootedFs {
     fc.getDefaultFileSystem().resolvePath(new Path("/nonExisting"));
   }
 
+  @Test
+  public void testIsValidNameValidInBaseFs() throws Exception {
+    AbstractFileSystem baseFs = Mockito.spy(fc.getDefaultFileSystem());
+    ChRootedFs chRootedFs = new ChRootedFs(baseFs, new Path("/chroot"));
+    Mockito.doReturn(true).when(baseFs).isValidName(Mockito.anyString());
+    Assert.assertTrue(chRootedFs.isValidName("/test"));
+    Mockito.verify(baseFs).isValidName("/chroot/test");
+  }
+
+  @Test
+  public void testIsValidNameInvalidInBaseFs() throws Exception {
+    AbstractFileSystem baseFs = Mockito.spy(fc.getDefaultFileSystem());
+    ChRootedFs chRootedFs = new ChRootedFs(baseFs, new Path("/chroot"));
+    Mockito.doReturn(false).when(baseFs).isValidName(Mockito.anyString());
+    Assert.assertFalse(chRootedFs.isValidName("/test"));
+    Mockito.verify(baseFs).isValidName("/chroot/test");
+  }
 }
@@ -274,6 +274,10 @@ Trunk (Unreleased)
     HDFS-4269. Datanode rejects all datanode registrations from localhost
     in single-node developer setup on Windows. (Chris Nauroth via suresh)
 
+    HADOOP-8957 HDFS tests for AbstractFileSystem#IsValidName should be overridden for
+    embedded file systems like ViewFs (Chris Nauroth via Sanjay Radia)
+
+
 Release 2.0.3-alpha - Unreleased
 
   INCOMPATIBLE CHANGES
@@ -256,6 +256,21 @@ public class TestHDFSFileContextMainOperations extends
     Assert.assertTrue(fs.exists(dst1));   // ensure rename dst exists
   }
 
+  @Test
+  public void testIsValidNameInvalidNames() {
+    String[] invalidNames = {
+      "/foo/../bar",
+      "/foo/./bar",
+      "/foo/:/bar",
+      "/foo:bar"
+    };
+
+    for (String invalidName: invalidNames) {
+      Assert.assertFalse(invalidName + " is not valid",
+        fc.getDefaultFileSystem().isValidName(invalidName));
+    }
+  }
+
   private void oldRename(Path src, Path dst, boolean renameSucceeds,
       boolean exception) throws Exception {
     DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
@@ -624,8 +624,11 @@ public class TestDFSUtil {
   @Test
   public void testIsValidName() {
     assertFalse(DFSUtil.isValidName("/foo/../bar"));
     assertFalse(DFSUtil.isValidName("/foo/./bar"));
     assertFalse(DFSUtil.isValidName("/foo//bar"));
     assertTrue(DFSUtil.isValidName("/"));
     assertTrue(DFSUtil.isValidName("/bar/"));
+    assertFalse(DFSUtil.isValidName("/foo/:/bar"));
+    assertFalse(DFSUtil.isValidName("/foo:bar"));
   }
 }