HADOOP-16032. DistCp should clear the sub-directory ACL before applying the new ACL on it.

Contributed by Ranith Sardar.

(cherry picked from commit 546c5d70ef)
Ranith Sardar 2019-02-07 21:49:18 +00:00 committed by Steve Loughran
parent 55dde827e6
commit c5eca3f7ee
2 changed files with 88 additions and 1 deletion

DistCpUtils.java

@@ -211,6 +211,7 @@ public class DistCpUtils {
      List<AclEntry> srcAcl = srcFileStatus.getAclEntries();
      List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus);
      if (!srcAcl.equals(targetAcl)) {
        targetFS.removeAcl(path);
        targetFS.setAcl(path, srcAcl);
      }
      // setAcl doesn't preserve sticky bit, so also call setPermission if needed.
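The fix is the new targetFS.removeAcl(path) call: preserve() now clears the target's existing ACL before applying the source ACL. This matters because, in HDFS, setAcl() with a spec that contains no default entries leaves any default entries already on the target untouched, so stale default ACLs on destination sub-directories could otherwise survive the copy. A minimal, self-contained sketch of the same idea against the public FileSystem ACL API (the class and method names here are illustrative, not the DistCp code itself):

  import java.io.IOException;
  import java.util.List;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.permission.AclEntry;

  public class AclSyncSketch {
    /** Mirror the source ACL onto the target, clearing stale target entries first. */
    public static void syncAcl(FileSystem srcFS, Path src,
        FileSystem targetFS, Path target) throws IOException {
      List<AclEntry> srcAcl = srcFS.getAclStatus(src).getEntries();
      List<AclEntry> targetAcl = targetFS.getAclStatus(target).getEntries();
      if (!srcAcl.equals(targetAcl)) {
        // Clear first: otherwise default entries that exist only on the target
        // (e.g. on a sub-directory) can survive a setAcl() call whose spec
        // carries no default entries.
        targetFS.removeAcl(target);
        targetFS.setAcl(target, srcAcl);
      }
    }
  }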

TestDistCpUtils.java

@@ -25,7 +25,9 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.tools.ECAdmin;
@@ -39,12 +41,26 @@ import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.io.OutputStream;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;
import java.util.Stack;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
import static org.apache.hadoop.fs.permission.AclEntryType.USER;
import static org.apache.hadoop.fs.permission.FsAction.ALL;
import static org.apache.hadoop.fs.permission.FsAction.EXECUTE;
import static org.apache.hadoop.fs.permission.FsAction.READ;
import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
import static org.apache.hadoop.fs.permission.FsAction.READ_WRITE;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -60,6 +76,7 @@ public class TestDistCpUtils {
  @BeforeClass
  public static void create() throws IOException {
    config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(2)
        .format(true)
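A note on the added configuration line: the NameNode rejects ACL modification calls unless ACL support is switched on, and at the time of this commit dfs.namenode.acls.enabled was off by default, so the test cluster has to enable it before start-up:

    // Without this flag the setAcl()/removeAcl() calls in the ACL tests below
    // would be rejected by the NameNode with an AclException.
    config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);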
@@ -181,6 +198,75 @@ public class TestDistCpUtils {
    Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
  }

  @Test
  public void testPreserveAclsforDefaultACL() throws IOException {
    FileSystem fs = FileSystem.get(config);

    EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.ACL,
        FileAttribute.PERMISSION, FileAttribute.XATTR, FileAttribute.GROUP,
        FileAttribute.USER, FileAttribute.REPLICATION, FileAttribute.TIMES);

    Path dest = new Path("/tmpdest");
    Path src = new Path("/testsrc");
    fs.mkdirs(src);
    fs.mkdirs(dest);

    // Source carries a DEFAULT-scoped entry; destination starts with a
    // different, ACCESS-only ACL.
    List<AclEntry> acls = Lists.newArrayList(
        aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
        aclEntry(ACCESS, USER, READ_WRITE), aclEntry(ACCESS, GROUP, READ),
        aclEntry(ACCESS, OTHER, READ), aclEntry(ACCESS, USER, "bar", ALL));
    final List<AclEntry> acls1 = Lists.newArrayList(aclEntry(ACCESS, USER, ALL),
        aclEntry(ACCESS, USER, "user1", ALL),
        aclEntry(ACCESS, GROUP, READ_EXECUTE),
        aclEntry(ACCESS, OTHER, EXECUTE));

    fs.setPermission(src, fullPerm);
    fs.setOwner(src, "somebody", "somebody-group");
    fs.setTimes(src, 0, 0);
    fs.setReplication(src, (short) 1);
    fs.setAcl(src, acls);

    fs.setPermission(dest, noPerm);
    fs.setOwner(dest, "nobody", "nobody-group");
    fs.setTimes(dest, 100, 100);
    fs.setReplication(dest, (short) 2);
    fs.setAcl(dest, acls1);

    List<AclEntry> en1 = fs.getAclStatus(src).getEntries();
    List<AclEntry> dd2 = fs.getAclStatus(dest).getEntries();
    Assert.assertNotEquals(en1, dd2);

    CopyListingFileStatus srcStatus = new CopyListingFileStatus(
        fs.getFileStatus(src));
    en1 = srcStatus.getAclEntries();

    DistCpUtils.preserve(fs, dest, srcStatus, attributes, false);

    CopyListingFileStatus dstStatus = new CopyListingFileStatus(
        fs.getFileStatus(dest));
    dd2 = dstStatus.getAclEntries();
    en1 = srcStatus.getAclEntries();

    // FileStatus.equals only compares path field, must explicitly compare all
    // fields
    Assert.assertEquals("getPermission", srcStatus.getPermission(),
        dstStatus.getPermission());
    Assert.assertEquals("Owner", srcStatus.getOwner(), dstStatus.getOwner());
    Assert.assertEquals("Group", srcStatus.getGroup(), dstStatus.getGroup());
    Assert.assertEquals("AccessTime", srcStatus.getAccessTime(),
        dstStatus.getAccessTime());
    Assert.assertEquals("ModificationTime", srcStatus.getModificationTime(),
        dstStatus.getModificationTime());
    Assert.assertEquals("Replication", srcStatus.getReplication(),
        dstStatus.getReplication());
    Assert.assertArrayEquals(en1.toArray(), dd2.toArray());
  }
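The aclEntry(...) calls above come from the HDFS test helper AclTestHelpers; for readers outside the HDFS test tree, a roughly equivalent way to build the same entry with the public API is sketched below (the class and method names are illustrative):

  import org.apache.hadoop.fs.permission.AclEntry;
  import org.apache.hadoop.fs.permission.AclEntryScope;
  import org.apache.hadoop.fs.permission.AclEntryType;
  import org.apache.hadoop.fs.permission.FsAction;

  public class AclEntrySketch {
    // Equivalent of aclEntry(DEFAULT, USER, "foo", READ_EXECUTE) in the test above.
    static AclEntry defaultUserFoo() {
      return new AclEntry.Builder()
          .setScope(AclEntryScope.DEFAULT)      // DEFAULT vs ACCESS scope
          .setType(AclEntryType.USER)           // USER / GROUP / OTHER / MASK
          .setName("foo")                       // named entry; omit the name for owner/group/other
          .setPermission(FsAction.READ_EXECUTE)
          .build();
    }
  }

The test then asserts that after DistCpUtils.preserve() with FileAttribute.ACL the destination reports the same entries as the source, including the DEFAULT-scoped one, which is what the removeAcl()-before-setAcl() change above guarantees.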

  @Test
  public void testPreserveNothingOnDirectory() throws IOException {
    FileSystem fs = FileSystem.get(config);