HDFS-15540. Directories protected from delete can still be moved to the trash. Contributed by Stephen O'Donnell.
Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
commit 2ffe00fc46
parent ca8e7a7725
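Background: the protected-directories feature (the fs.protected.directories key) blocks delete() on a non-empty protected directory, but a shell delete with trash enabled is carried out as a rename into the user's trash directory, so that check was bypassed. A minimal reproduction sketch follows; it is not part of the patch, and the class name, the /data/protected path and the MiniDFSCluster setup are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ProtectedDirTrashRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set("fs.protected.directories", "/data/protected"); // mark the directory protected
    conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);  // enable trash
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      Path protectedDir = new Path("/data/protected");
      fs.mkdirs(protectedDir);
      fs.create(new Path(protectedDir, "file1")).close();    // non-empty, so delete() is rejected

      // Before this commit the trash move (a rename) still succeeded;
      // with the fix it fails with AccessControlException, like delete() does.
      boolean moved = Trash.moveToAppropriateTrash(fs, protectedDir, conf);
      System.out.println("moved to trash: " + moved);
    } finally {
      cluster.shutdown();
    }
  }
}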
@@ -263,6 +263,11 @@ class FSDirRenameOp {
       throws IOException {
     final INodesInPath srcIIP = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
     final INodesInPath dstIIP = fsd.resolvePath(pc, dst, DirOp.CREATE_LINK);
+
+    if(fsd.isNonEmptyDirectory(srcIIP)) {
+      DFSUtil.checkProtectedDescendants(fsd, srcIIP);
+    }
+
     if (fsd.isPermissionEnabled()) {
       boolean renameToTrash = false;
       if (null != options &&
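The guard lands in FSDirRenameOp because a trash move ultimately is a rename on the NameNode, and only non-empty source directories are checked, mirroring the delete-side protection. A simplified sketch of what a trash move amounts to on the client side; it assumes only that the default trash policy ends in FileSystem.rename(), and the helper name and details are illustrative, not the real TrashPolicyDefault code:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class TrashMoveSketch {
  // Illustrative only: the real TrashPolicyDefault also handles checkpoints,
  // name collisions and encryption zones. The point is that the final step is
  // a plain rename, which is why FSDirRenameOp needs the protected-dir check.
  static boolean moveToTrashSketch(FileSystem fs, Path srcPath) throws IOException {
    Path trashCurrent = new Path(fs.getTrashRoot(srcPath), "Current");
    Path target = Path.mergePaths(trashCurrent, srcPath);
    fs.mkdirs(target.getParent());
    // With this commit the NameNode rejects this rename with
    // AccessControlException when srcPath is, or contains, a non-empty
    // protected directory.
    return fs.rename(srcPath, target);
  }
}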
@@ -26,6 +26,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
@@ -36,6 +38,7 @@ import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.*;
 
@@ -284,6 +287,31 @@ public class TestProtectedDirectories {
     }
   }
 
+  @Test
+  public void testMoveToTrash() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
+      Configuration conf = new HdfsConfiguration();
+      conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);
+      MiniDFSCluster cluster = setupTestCase(
+          conf, testMatrixEntry.getProtectedPaths(),
+          testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) {
+          assertThat(
+              testMatrixEntry + ": Testing whether " + path +
+                  " can be moved to trash",
+              moveToTrash(fs, path, conf),
+              is(testMatrixEntry.canPathBeDeleted(path)));
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /*
    * Verify that protected directories could not be renamed.
    */
@@ -339,6 +367,33 @@ public class TestProtectedDirectories {
     }
   }
 
+  @Test
+  public void testMoveProtectedSubDirsToTrash() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry :
+        createTestMatrixForProtectSubDirs()) {
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true);
+      conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);
+      MiniDFSCluster cluster = setupTestCase(
+          conf, testMatrixEntry.getProtectedPaths(),
+          testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path srcPath : testMatrixEntry.getAllPathsToBeDeleted()) {
+          assertThat(
+              testMatrixEntry + ": Testing whether "
+                  + srcPath + " can be moved to trash",
+              moveToTrash(fs, srcPath, conf),
+              is(testMatrixEntry.canPathBeRenamed(srcPath)));
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testDeleteProtectSubDirs() throws Throwable {
     for (TestMatrixEntry testMatrixEntry :
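testMoveProtectedSubDirsToTrash covers the variant where DFS_PROTECTED_SUBDIRECTORIES_ENABLE is set, so children of a protected directory also refuse to be renamed into the trash. A small configuration sketch of the setup this relies on; the subdirectories key string is an assumption mapped from the constant the test imports, and the protected path is invented:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

final class SubDirProtectionConf {
  static Configuration withSubDirProtection() {
    Configuration conf = new HdfsConfiguration();
    conf.set("fs.protected.directories", "/data/protected");      // illustrative path
    conf.setBoolean("dfs.protected.subdirectories.enable", true); // assumed key string
    conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);       // enable trash
    return conf;
  }
}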
@@ -465,6 +520,21 @@ public class TestProtectedDirectories {
     }
   }
 
+  private boolean moveToTrash(FileSystem fs, Path path, Configuration conf) {
+    try {
+      return Trash.moveToAppropriateTrash(fs, path, conf);
+    } catch (FileNotFoundException fnf) {
+      // fs.delete(...) does not throw an exception if the file does not exist.
+      // The deletePath method in this class will therefore return true if
+      // there is an attempt to delete a file which does not exist. Therefore
+      // catching this exception and returning true to keep it consistent and
+      // allow tests to work with the same test matrix.
+      return true;
+    } catch (IOException ace) {
+      return false;
+    }
+  }
+
   /**
    * Return true if the path was successfully renamed. False if it
    * failed with AccessControlException. Any other exceptions are