HDFS-15338. listOpenFiles() should throw InvalidPathException in case of invalid paths. Contributed by Jinglun.

Ayush Saxena 2020-05-11 16:48:34 +05:30
parent 282427f6d1
commit 61a4cd5539
4 changed files with 30 additions and 2 deletions

DistributedFileSystem.java

@@ -3549,7 +3549,8 @@ public class DistributedFileSystem extends FileSystem
   public RemoteIterator<OpenFileEntry> listOpenFiles(
       EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
-    return dfs.listOpenFiles(openFilesTypes, path);
+    Path absF = fixRelativePart(new Path(path));
+    return dfs.listOpenFiles(openFilesTypes, getPathName(absF));
   }
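This client-side change normalizes the filter path before it ever reaches the NameNode: fixRelativePart() resolves relative paths against the working directory, and getPathName() verifies the path belongs to this filesystem (the "Wrong FS" IllegalArgumentException exercised by the new client-side test below) and strips it down to the plain path component. A minimal sketch of the underlying Path behavior, runnable without a cluster; the printed values follow from Path's URI parsing:

import org.apache.hadoop.fs.Path;

public class PathNormalizationSketch {
  public static void main(String[] args) {
    // Relative paths have no leading "/"; fixRelativePart() would resolve
    // them against the client's working directory before validation.
    Path relative = new Path("open-files");
    System.out.println(relative.isAbsolute());          // false

    // A fully qualified URI on another cluster keeps its scheme and
    // authority; getPathName() rejects such a path with "Wrong FS" when the
    // authority does not match the DistributedFileSystem it is called on.
    Path foreign = new Path("hdfs://non-cluster/base");
    System.out.println(foreign.toUri().getScheme());    // hdfs
    System.out.println(foreign.toUri().getAuthority()); // non-cluster
    System.out.println(foreign.toUri().getPath());      // /base
  }
}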

FSNamesystem.java

@@ -1904,6 +1904,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   BatchedListEntries<OpenFileEntry> listOpenFiles(long prevId,
       EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
+    INode.checkAbsolutePath(path);
     final String operationName = "listOpenFiles";
     checkSuperuserPrivilege();
     checkOperation(OperationCategory.READ);
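On the server side, INode.checkAbsolutePath(path) now runs at the top of the handler, so a non-absolute filter string is rejected even before the superuser check. For context, a hedged sketch of how an administrator reaches this code path; HdfsAdmin.listOpenFiles(EnumSet, String) is the public entry point in Hadoop 3.x, but treat the exact signature and the NameNode URI here as assumptions for illustration:

import java.net.URI;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

public class ListOpenFilesExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical cluster address; requires superuser privileges to run.
    HdfsAdmin admin =
        new HdfsAdmin(new URI("hdfs://namenode:8020"), new Configuration());
    // "/base" is absolute, so it passes INode.checkAbsolutePath() on the
    // NameNode; a filter like "base" would fail with AssertionError.
    RemoteIterator<OpenFileEntry> it =
        admin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "/base");
    while (it.hasNext()) {
      System.out.println(it.next().getFilePath());
    }
  }
}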

INode.java

@@ -806,7 +806,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
     return path != null && path.startsWith(Path.SEPARATOR);
   }
 
-  private static void checkAbsolutePath(final String path) {
+  static void checkAbsolutePath(final String path) {
     if (!isValidAbsolutePath(path)) {
       throw new AssertionError("Absolute path required, but got '"
           + path + "'");
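The only change in INode is widening checkAbsolutePath() from private to package-private so FSNamesystem can call it; its semantics are untouched. They are simple enough to reproduce standalone, as in the sketch below, which reimplements both methods purely for illustration (Path.SEPARATOR is "/"):

public class CheckAbsolutePathSketch {
  private static final String SEPARATOR = "/"; // Path.SEPARATOR in Hadoop

  static boolean isValidAbsolutePath(final String path) {
    return path != null && path.startsWith(SEPARATOR);
  }

  static void checkAbsolutePath(final String path) {
    if (!isValidAbsolutePath(path)) {
      throw new AssertionError("Absolute path required, but got '"
          + path + "'");
    }
  }

  public static void main(String[] args) {
    checkAbsolutePath("/base");                 // passes: starts with "/"
    try {
      checkAbsolutePath("hdfs://cluster/base"); // fails: a URI, not a path
    } catch (AssertionError expected) {
      System.out.println(expected.getMessage());
    }
  }
}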

TestListOpenFiles.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -295,4 +296,29 @@ public class TestListOpenFiles {
       verifyOpenFiles(openFiles, OpenFilesIterator.FILTER_PATH_DEFAULT);
     }
   }
+
+  @Test
+  public void testListOpenFilesWithInvalidPathServerSide() throws Exception {
+    HashMap<Path, FSDataOutputStream> openFiles = new HashMap<>();
+    openFiles.putAll(
+        DFSTestUtil.createOpenFiles(fs, new Path("/base"), "open-1", 1));
+    verifyOpenFiles(openFiles, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+        "/base");
+    intercept(AssertionError.class, "Absolute path required",
+        "Expect InvalidPathException", () -> verifyOpenFiles(new HashMap<>(),
+            EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "hdfs://cluster/base"));
+    while(openFiles.size() > 0) {
+      DFSTestUtil.closeOpenFiles(openFiles, 1);
+      verifyOpenFiles(openFiles);
+    }
+  }
+
+  @Test
+  public void testListOpenFilesWithInvalidPathClientSide() throws Exception {
+    intercept(IllegalArgumentException.class, "Wrong FS",
+        "Expect IllegalArgumentException", () -> fs
+        .listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+            "hdfs://non-cluster/"));
+    fs.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "/path");
+  }
 }
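Taken together, the two tests pin down the new behavior at both layers. A condensed sketch of what a client now sees, assuming fs.defaultFS points at a running cluster; the "/invalid:name" case rests on the assumption that DFSUtilClient.isValidName() rejects ':' in path components, which is what produces the InvalidPathException named in the commit title:

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

public class InvalidPathBehaviorSketch {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS is an HDFS URI, e.g. hdfs://cluster.
    DistributedFileSystem fs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    EnumSet<OpenFilesType> all = EnumSet.of(OpenFilesType.ALL_OPEN_FILES);

    fs.listOpenFiles(all, "/path");    // accepted: absolute path on this FS
    fs.listOpenFiles(all, "relative"); // accepted: fixRelativePart() resolves it

    // The following would now fail fast on the client:
    // fs.listOpenFiles(all, "hdfs://non-cluster/"); // IllegalArgumentException: Wrong FS
    // fs.listOpenFiles(all, "/invalid:name");       // InvalidPathException (assumed)
  }
}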