From f8b2d77207bb69785694d52f5c0b24f0bead3271 Mon Sep 17 00:00:00 2001
From: Xiaoyu Yao
Date: Mon, 17 Oct 2016 08:22:31 -0700
Subject: [PATCH] HDFS-10922. Adding additional unit tests for Trash (II).
 Contributed by Weiwei Yang.

(cherry picked from commit f1802d0be05ecc0b3248690b6f9efedbc7784112)
---
 .../org/apache/hadoop/hdfs/DFSTestUtil.java   |  40 +++++
 .../apache/hadoop/hdfs/TestDFSPermission.java |  23 +--
 .../org/apache/hadoop/hdfs/TestHDFSTrash.java | 147 +++++++++++++++++-
 3 files changed, 187 insertions(+), 23 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 5a2f524b4eb..67e9e549bd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -162,6 +162,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -1930,4 +1931,43 @@ public class DFSTestUtil {
     lastBlock.setNumBytes(len);
     return lastBlock;
   }
+
+  /**
+   * Close the current file system and create a new instance as the given
+   * {@link UserGroupInformation}.
+   */
+  public static FileSystem login(final FileSystem fs,
+      final Configuration conf, final UserGroupInformation ugi)
+      throws IOException, InterruptedException {
+    if (fs != null) {
+      fs.close();
+    }
+    return DFSTestUtil.getFileSystemAs(ugi, conf);
+  }
+
+  /**
+   * Verify that the given {@link FileStatus} has the expected owner, group
+   * and permission; throw an {@link AssertionError} if any value differs.
+   */
+  public static void verifyFilePermission(FileStatus stat, String owner,
+      String group, FsAction u, FsAction g, FsAction o) {
+    if (stat != null) {
+      if (!Strings.isNullOrEmpty(owner)) {
+        assertEquals(owner, stat.getOwner());
+      }
+      if (!Strings.isNullOrEmpty(group)) {
+        assertEquals(group, stat.getGroup());
+      }
+      FsPermission permission = stat.getPermission();
+      if (u != null) {
+        assertEquals(u, permission.getUserAction());
+      }
+      if (g != null) {
+        assertEquals(g, permission.getGroupAction());
+      }
+      if (o != null) {
+        assertEquals(o, permission.getOtherAction());
+      }
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index e6524f3b780..0fe304d9c58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
@@ -287,7 +288,7 @@ public class TestDFSPermission {
     fs.setPermission(new Path("/"),
         FsPermission.createImmutable((short)0777));
   }
-  
+
   /* check if the ownership of a file/directory is set correctly */
   @Test
   public void testOwnership() throws Exception {
@@ -324,7 +325,7 @@ public class TestDFSPermission {
     setOwner(FILE_DIR_PATH, USER1.getShortUserName(), GROUP3_NAME, false);
 
     // case 3: user1 changes FILE_DIR_PATH's owner to be user2
-    login(USER1);
+    fs = DFSTestUtil.login(fs, conf, USER1);
     setOwner(FILE_DIR_PATH, USER2.getShortUserName(), null, true);
 
     // case 4: user1 changes FILE_DIR_PATH's group to be group1 which it belongs
@@ -336,14 +337,14 @@ public class TestDFSPermission {
     setOwner(FILE_DIR_PATH, null, GROUP3_NAME, true);
 
     // case 6: user2 (non-owner) changes FILE_DIR_PATH's group to be group3
-    login(USER2);
+    fs = DFSTestUtil.login(fs, conf, USER2);
     setOwner(FILE_DIR_PATH, null, GROUP3_NAME, true);
 
     // case 7: user2 (non-owner) changes FILE_DIR_PATH's user to be user2
     setOwner(FILE_DIR_PATH, USER2.getShortUserName(), null, true);
 
     // delete the file/directory
-    login(SUPERUSER);
+    fs = DFSTestUtil.login(fs, conf, SUPERUSER);
     fs.delete(FILE_DIR_PATH, true);
   }
 
@@ -585,7 +586,7 @@ public class TestDFSPermission {
       short[] filePermission, Path[] parentDirs, Path[] files, Path[] dirs)
       throws Exception {
     boolean[] isDirEmpty = new boolean[NUM_TEST_PERMISSIONS];
-    login(SUPERUSER);
+    fs = DFSTestUtil.login(fs, conf, SUPERUSER);
     for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
       create(OpType.CREATE, files[i]);
       create(OpType.MKDIRS, dirs[i]);
@@ -601,7 +602,7 @@ public class TestDFSPermission {
       isDirEmpty[i] = (fs.listStatus(dirs[i]).length == 0);
     }
 
-    login(ugi);
+    fs = DFSTestUtil.login(fs, conf, ugi);
     for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
       testCreateMkdirs(ugi, new Path(parentDirs[i], FILE_DIR_NAME),
           ancestorPermission[i], parentPermission[i]);
@@ -1156,16 +1157,6 @@ public class TestDFSPermission {
     ddpv.verifyPermission(ugi);
   }
 
-  /* log into dfs as the given user */
-  private void login(UserGroupInformation ugi) throws IOException,
-      InterruptedException {
-    if (fs != null) {
-      fs.close();
-    }
-
-    fs = DFSTestUtil.getFileSystemAs(ugi, conf);
-  }
-
   /* test non-existent file */
   private void checkNonExistentFile() {
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
index ad4d600f51d..787000bc751 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
@@ -17,27 +17,79 @@
  */
 package org.apache.hadoop.hdfs;
 
-import java.io.IOException;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
+import java.io.IOException;
+import java.util.UUID;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.TestTrash;
-
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * Test trash using HDFS
  */
 public class TestHDFSTrash {
+
+  public static final Log LOG = LogFactory.getLog(TestHDFSTrash.class);
+
   private static MiniDFSCluster cluster = null;
+  private static FileSystem fs;
+  private static Configuration conf = new HdfsConfiguration();
+
+  private final static Path TEST_ROOT = new Path("/TestHDFSTrash-ROOT");
+  private final static Path TRASH_ROOT = new Path("/TestHDFSTrash-TRASH");
+
+  final private static String GROUP1_NAME = "group1";
+  final private static String GROUP2_NAME = "group2";
+  final private static String GROUP3_NAME = "group3";
+  final private static String USER1_NAME = "user1";
+  final private static String USER2_NAME = "user2";
+
+  private static UserGroupInformation superUser;
+  private static UserGroupInformation user1;
+  private static UserGroupInformation user2;
 
   @BeforeClass
   public static void setUp() throws Exception {
-    Configuration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    fs = FileSystem.get(conf);
+
+    superUser = UserGroupInformation.getCurrentUser();
+    user1 = UserGroupInformation.createUserForTesting(USER1_NAME,
+        new String[] {GROUP1_NAME, GROUP2_NAME});
+    user2 = UserGroupInformation.createUserForTesting(USER2_NAME,
+        new String[] {GROUP2_NAME, GROUP3_NAME});
+
+    // Init test and trash root dirs in HDFS
+    fs.mkdirs(TEST_ROOT);
+    fs.setPermission(TEST_ROOT, new FsPermission((short) 0777));
+    DFSTestUtil.verifyFilePermission(
+        fs.getFileStatus(TEST_ROOT),
+        superUser.getShortUserName(),
+        null, FsAction.ALL, FsAction.ALL, FsAction.ALL);
+
+    fs.mkdirs(TRASH_ROOT);
+    fs.setPermission(TRASH_ROOT, new FsPermission((short) 0777));
+    DFSTestUtil.verifyFilePermission(
+        fs.getFileStatus(TRASH_ROOT),
+        superUser.getShortUserName(),
+        null, FsAction.ALL, FsAction.ALL, FsAction.ALL);
   }
 
   @AfterClass
@@ -52,9 +104,90 @@ public class TestHDFSTrash {
 
   @Test
   public void testNonDefaultFS() throws IOException {
-    FileSystem fs = cluster.getFileSystem();
-    Configuration conf = fs.getConf();
-    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
-    TestTrash.trashNonDefaultFS(conf);
+    FileSystem fileSystem = cluster.getFileSystem();
+    Configuration config = fileSystem.getConf();
+    config.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        fileSystem.getUri().toString());
+    TestTrash.trashNonDefaultFS(config);
+  }
+
+  @Test
+  public void testHDFSTrashPermission() throws IOException {
+    FileSystem fileSystem = cluster.getFileSystem();
+    Configuration config = fileSystem.getConf();
+    config.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "0.2");
+    TestTrash.verifyTrashPermission(fileSystem, config);
+  }
+
+  @Test
+  public void testMoveEmptyDirToTrash() throws IOException {
+    FileSystem fileSystem = cluster.getFileSystem();
+    Configuration config = fileSystem.getConf();
+    config.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "1");
+    TestTrash.verifyMoveEmptyDirToTrash(fileSystem, config);
+  }
+
+  @Test
+  public void testDeleteTrash() throws Exception {
+    Configuration testConf = new Configuration(conf);
+    testConf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");
+
+    Path user1Tmp = new Path(TEST_ROOT, "test-del-u1");
+    Path user2Tmp = new Path(TEST_ROOT, "test-del-u2");
+
+    // login as user1, move something to trash
+    // verify user1 can remove its own trash dir
+    fs = DFSTestUtil.login(fs, testConf, user1);
+    fs.mkdirs(user1Tmp);
+    Trash u1Trash = getPerUserTrash(user1, fs, testConf);
+    Path u1t = u1Trash.getCurrentTrashDir(user1Tmp);
+    assertTrue(String.format("Failed to move %s to trash", user1Tmp),
+        u1Trash.moveToTrash(user1Tmp));
+    assertTrue(
+        String.format(
+            "%s should be allowed to remove its own trash directory %s",
+            user1.getUserName(), u1t),
+        fs.delete(u1t, true));
+    assertFalse(fs.exists(u1t));
+
+    // login as user2, move something to trash
+    fs = DFSTestUtil.login(fs, testConf, user2);
+    fs.mkdirs(user2Tmp);
+    Trash u2Trash = getPerUserTrash(user2, fs, testConf);
+    u2Trash.moveToTrash(user2Tmp);
+    Path u2t = u2Trash.getCurrentTrashDir(user2Tmp);
+
+    try {
+      // user1 should not be able to remove user2's trash dir
+      fs = DFSTestUtil.login(fs, testConf, user1);
+      fs.delete(u2t, true);
+      fail(String.format("%s should not be able to remove %s's trash directory",
+          USER1_NAME, USER2_NAME));
+    } catch (AccessControlException e) {
+      assertTrue(e instanceof AccessControlException);
+      assertTrue("Permission denied messages must carry the username",
+          e.getMessage().contains(USER1_NAME));
+    }
+  }
+
+  /**
+   * Return a {@link Trash} instance using the given configuration.
+   * The trash root directory is set to a unique directory under
+   * {@link #TRASH_ROOT}. Use this method to isolate trash
+   * directories for different users.
+   */
+  private Trash getPerUserTrash(UserGroupInformation ugi,
+      FileSystem fileSystem, Configuration config) throws IOException {
+    // generate a unique path per instance
+    UUID trashId = UUID.randomUUID();
+    StringBuffer sb = new StringBuffer()
+        .append(ugi.getUserName())
+        .append("-")
+        .append(trashId.toString());
+    Path userTrashRoot = new Path(TRASH_ROOT, sb.toString());
+    FileSystem spyUserFs = Mockito.spy(fileSystem);
+    Mockito.when(spyUserFs.getTrashRoot(Mockito.any(Path.class)))
+        .thenReturn(userTrashRoot);
+    return new Trash(spyUserFs, config);
+  }
 }
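-- 

Usage sketch (illustrative only, not part of the commit above): the snippet
below shows how a test might drive the two helpers this patch adds to
DFSTestUtil: login() to reopen the FileSystem as another user, and
verifyFilePermission() to assert ownership and mode in one call, the same way
testDeleteTrash() and setUp() use them. The class name TrashHelperSketch, the
/sketch-tmp path, and the assumption of the default 022 umask are hypothetical;
everything else uses APIs that appear in the patch.

// A minimal sketch, assuming a formatted MiniDFSCluster is already running
// and "user1" was created with UserGroupInformation.createUserForTesting().
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class TrashHelperSketch {
  static void sketch(FileSystem fs, Configuration conf,
      UserGroupInformation user1) throws Exception {
    // Enable trash (interval in minutes); with 0, moveToTrash() is a no-op.
    conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");

    // Reopen the file system as user1; the helper closes the old instance.
    fs = DFSTestUtil.login(fs, conf, user1);

    // Hypothetical test path; user1 becomes its owner after mkdirs().
    Path tmp = new Path("/sketch-tmp");
    fs.mkdirs(tmp);

    // One call checks owner, group and all three permission triads. Group is
    // passed as null to skip that check; rwxr-xr-x assumes the default 022
    // umask.
    DFSTestUtil.verifyFilePermission(fs.getFileStatus(tmp),
        user1.getShortUserName(), null,
        FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);

    // Move the directory to trash; Trash resolves user1's trash root.
    new Trash(fs, conf).moveToTrash(tmp);
  }
}

A sketch like this would land in the shared /user/<name>/.Trash location;
getPerUserTrash() in the patch avoids that by having a Mockito spy redirect
getTrashRoot() to a UUID-suffixed directory under TRASH_ROOT, so repeated or
concurrent test runs never collide in the same trash root.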