HDFS-5615. NameNode: implement handling of ACLs in combination with sticky bit. Contributed by Chris Nauroth.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4685@1560131 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Haohui Mai 2014-01-21 18:33:02 +00:00
parent 9df33876ff
commit 2af05fb1cf
2 changed files with 248 additions and 140 deletions

View File

@ -39,6 +39,9 @@ HDFS-4685 (Unreleased)
HDFS-5613. NameNode: implement handling of ACLs in combination with HDFS-5613. NameNode: implement handling of ACLs in combination with
symlinks. (Chris Nauroth via wheat9) symlinks. (Chris Nauroth via wheat9)
HDFS-5615. NameNode: implement handling of ACLs in combination with sticky
bit. (Chris Nauroth via wheat9)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES

View File

@ -17,15 +17,21 @@
*/ */
package org.apache.hadoop.fs.permission; package org.apache.hadoop.fs.permission;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail; import static org.junit.Assert.fail;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
@ -33,8 +39,12 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
public class TestStickyBit { public class TestStickyBit {
@ -44,55 +54,87 @@ public class TestStickyBit {
static UserGroupInformation user2 = static UserGroupInformation user2 =
UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"}); UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"});
private static MiniDFSCluster cluster;
private static Configuration conf;
private static FileSystem hdfs;
private static FileSystem hdfsAsUser1;
private static FileSystem hdfsAsUser2;
@BeforeClass
public static void init() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
initCluster(true);
}
private static void initCluster(boolean format) throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
.build();
hdfs = cluster.getFileSystem();
assertTrue(hdfs instanceof DistributedFileSystem);
hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
}
@Before
public void setup() throws Exception {
if (hdfs != null) {
for (FileStatus stat: hdfs.listStatus(new Path("/"))) {
hdfs.delete(stat.getPath(), true);
}
}
}
@AfterClass
public static void shutdown() throws Exception {
IOUtils.cleanup(null, hdfs, hdfsAsUser1, hdfsAsUser2);
if (cluster != null) {
cluster.shutdown();
}
}
/** /**
* Ensure that even if a file is in a directory with the sticky bit on, * Ensure that even if a file is in a directory with the sticky bit on,
* another user can write to that file (assuming correct permissions). * another user can write to that file (assuming correct permissions).
*/ */
private void confirmCanAppend(Configuration conf, FileSystem hdfs, private void confirmCanAppend(Configuration conf, Path p) throws Exception {
Path baseDir) throws IOException, InterruptedException {
// Create a tmp directory with wide-open permissions and sticky bit
Path p = new Path(baseDir, "tmp");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
// Write a file to the new tmp directory as a regular user // Write a file to the new tmp directory as a regular user
hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
Path file = new Path(p, "foo"); Path file = new Path(p, "foo");
writeFile(hdfs, file); writeFile(hdfsAsUser1, file);
hdfs.setPermission(file, new FsPermission((short) 0777)); hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));
// Log onto cluster as another user and attempt to append to file // Log onto cluster as another user and attempt to append to file
hdfs = DFSTestUtil.getFileSystemAs(user2, conf);
Path file2 = new Path(p, "foo"); Path file2 = new Path(p, "foo");
FSDataOutputStream h = hdfs.append(file2); FSDataOutputStream h = null;
h.write("Some more data".getBytes()); try {
h.close(); h = hdfsAsUser2.append(file2);
h.write("Some more data".getBytes());
h.close();
h = null;
} finally {
IOUtils.cleanup(null, h);
}
} }
/** /**
* Test that one user can't delete another user's file when the sticky bit is * Test that one user can't delete another user's file when the sticky bit is
* set. * set.
*/ */
private void confirmDeletingFiles(Configuration conf, FileSystem hdfs, private void confirmDeletingFiles(Configuration conf, Path p)
Path baseDir) throws IOException, InterruptedException { throws Exception {
Path p = new Path(baseDir, "contemporary");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
// Write a file to the new temp directory as a regular user // Write a file to the new temp directory as a regular user
hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
Path file = new Path(p, "foo"); Path file = new Path(p, "foo");
writeFile(hdfs, file); writeFile(hdfsAsUser1, file);
// Make sure the correct user is the owner // Make sure the correct user is the owner
assertEquals(user1.getShortUserName(), hdfs.getFileStatus(file).getOwner()); assertEquals(user1.getShortUserName(),
hdfsAsUser1.getFileStatus(file).getOwner());
// Log onto cluster as another user and attempt to delete the file // Log onto cluster as another user and attempt to delete the file
FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user2, conf);
try { try {
hdfs2.delete(file, false); hdfsAsUser2.delete(file, false);
fail("Shouldn't be able to delete someone else's file with SB on"); fail("Shouldn't be able to delete someone else's file with SB on");
} catch (IOException ioe) { } catch (IOException ioe) {
assertTrue(ioe instanceof AccessControlException); assertTrue(ioe instanceof AccessControlException);
@ -105,13 +147,8 @@ public class TestStickyBit {
* on, the new directory does not automatically get a sticky bit, as is * on, the new directory does not automatically get a sticky bit, as is
* standard Unix behavior * standard Unix behavior
*/ */
private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path baseDir) private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path p)
throws IOException { throws IOException {
Path p = new Path(baseDir, "scissorsisters");
// Turn on its sticky bit
hdfs.mkdirs(p, new FsPermission((short) 01666));
// Create a subdirectory within it // Create a subdirectory within it
Path p2 = new Path(p, "bar"); Path p2 = new Path(p, "bar");
hdfs.mkdirs(p2); hdfs.mkdirs(p2);
@ -123,23 +160,19 @@ public class TestStickyBit {
/** /**
* Test basic ability to get and set sticky bits on files and directories. * Test basic ability to get and set sticky bits on files and directories.
*/ */
private void confirmSettingAndGetting(FileSystem hdfs, Path baseDir) private void confirmSettingAndGetting(FileSystem hdfs, Path p, Path baseDir)
throws IOException { throws IOException {
Path p1 = new Path(baseDir, "roguetraders");
hdfs.mkdirs(p1);
// Initially sticky bit should not be set // Initially sticky bit should not be set
assertFalse(hdfs.getFileStatus(p1).getPermission().getStickyBit()); assertFalse(hdfs.getFileStatus(p).getPermission().getStickyBit());
// Same permission, but with sticky bit on // Same permission, but with sticky bit on
short withSB; short withSB;
withSB = (short) (hdfs.getFileStatus(p1).getPermission().toShort() | 01000); withSB = (short) (hdfs.getFileStatus(p).getPermission().toShort() | 01000);
assertTrue((new FsPermission(withSB)).getStickyBit()); assertTrue((new FsPermission(withSB)).getStickyBit());
hdfs.setPermission(p1, new FsPermission(withSB)); hdfs.setPermission(p, new FsPermission(withSB));
assertTrue(hdfs.getFileStatus(p1).getPermission().getStickyBit()); assertTrue(hdfs.getFileStatus(p).getPermission().getStickyBit());
// Write a file to the fs, try to set its sticky bit // Write a file to the fs, try to set its sticky bit
Path f = new Path(baseDir, "somefile"); Path f = new Path(baseDir, "somefile");
@ -154,37 +187,78 @@ public class TestStickyBit {
} }
@Test @Test
public void testGeneralSBBehavior() throws IOException, InterruptedException { public void testGeneralSBBehavior() throws Exception {
MiniDFSCluster cluster = null; Path baseDir = new Path("/mcgann");
try { hdfs.mkdirs(baseDir);
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem hdfs = cluster.getFileSystem(); // Create a tmp directory with wide-open permissions and sticky bit
Path p = new Path(baseDir, "tmp");
assertTrue(hdfs instanceof DistributedFileSystem); hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
Path baseDir = new Path("/mcgann"); confirmCanAppend(conf, p);
hdfs.mkdirs(baseDir);
confirmCanAppend(conf, hdfs, baseDir);
baseDir = new Path("/eccleston"); baseDir = new Path("/eccleston");
hdfs.mkdirs(baseDir); hdfs.mkdirs(baseDir);
confirmSettingAndGetting(hdfs, baseDir); p = new Path(baseDir, "roguetraders");
baseDir = new Path("/tennant"); hdfs.mkdirs(p);
hdfs.mkdirs(baseDir); confirmSettingAndGetting(hdfs, p, baseDir);
confirmDeletingFiles(conf, hdfs, baseDir);
baseDir = new Path("/smith"); baseDir = new Path("/tennant");
hdfs.mkdirs(baseDir); hdfs.mkdirs(baseDir);
confirmStickyBitDoesntPropagate(hdfs, baseDir); p = new Path(baseDir, "contemporary");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
confirmDeletingFiles(conf, p);
} finally { baseDir = new Path("/smith");
if (cluster != null) hdfs.mkdirs(baseDir);
cluster.shutdown(); p = new Path(baseDir, "scissorsisters");
}
// Turn on its sticky bit
hdfs.mkdirs(p, new FsPermission((short) 01666));
confirmStickyBitDoesntPropagate(hdfs, baseDir);
}
@Test
public void testAclGeneralSBBehavior() throws Exception {
Path baseDir = new Path("/mcgann");
hdfs.mkdirs(baseDir);
// Create a tmp directory with wide-open permissions and sticky bit
Path p = new Path(baseDir, "tmp");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
applyAcl(p);
confirmCanAppend(conf, p);
baseDir = new Path("/eccleston");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "roguetraders");
hdfs.mkdirs(p);
applyAcl(p);
confirmSettingAndGetting(hdfs, p, baseDir);
baseDir = new Path("/tennant");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "contemporary");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
applyAcl(p);
confirmDeletingFiles(conf, p);
baseDir = new Path("/smith");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "scissorsisters");
// Turn on its sticky bit
hdfs.mkdirs(p, new FsPermission((short) 01666));
applyAcl(p);
confirmStickyBitDoesntPropagate(hdfs, p);
} }
/** /**
@ -192,46 +266,42 @@ public class TestStickyBit {
* bit is set. * bit is set.
*/ */
@Test @Test
public void testMovingFiles() throws IOException, InterruptedException { public void testMovingFiles() throws Exception {
MiniDFSCluster cluster = null; testMovingFiles(false);
}
@Test
public void testAclMovingFiles() throws Exception {
testMovingFiles(true);
}
private void testMovingFiles(boolean useAcl) throws Exception {
// Create a tmp directory with wide-open permissions and sticky bit
Path tmpPath = new Path("/tmp");
Path tmpPath2 = new Path("/tmp2");
hdfs.mkdirs(tmpPath);
hdfs.mkdirs(tmpPath2);
hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
if (useAcl) {
applyAcl(tmpPath);
}
hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
if (useAcl) {
applyAcl(tmpPath2);
}
// Write a file to the new tmp directory as a regular user
Path file = new Path(tmpPath, "foo");
writeFile(hdfsAsUser1, file);
// Log onto cluster as another user and attempt to move the file
try { try {
// Set up cluster for testing hdfsAsUser2.rename(file, new Path(tmpPath2, "renamed"));
Configuration conf = new HdfsConfiguration(); fail("Shouldn't be able to rename someone else's file with SB on");
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true); } catch (IOException ioe) {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); assertTrue(ioe instanceof AccessControlException);
FileSystem hdfs = cluster.getFileSystem(); assertTrue(ioe.getMessage().contains("sticky bit"));
assertTrue(hdfs instanceof DistributedFileSystem);
// Create a tmp directory with wide-open permissions and sticky bit
Path tmpPath = new Path("/tmp");
Path tmpPath2 = new Path("/tmp2");
hdfs.mkdirs(tmpPath);
hdfs.mkdirs(tmpPath2);
hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
// Write a file to the new tmp directory as a regular user
Path file = new Path(tmpPath, "foo");
FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user1, conf);
writeFile(hdfs2, file);
// Log onto cluster as another user and attempt to move the file
FileSystem hdfs3 = DFSTestUtil.getFileSystemAs(user2, conf);
try {
hdfs3.rename(file, new Path(tmpPath2, "renamed"));
fail("Shouldn't be able to rename someone else's file with SB on");
} catch (IOException ioe) {
assertTrue(ioe instanceof AccessControlException);
assertTrue(ioe.getMessage().contains("sticky bit"));
}
} finally {
if (cluster != null)
cluster.shutdown();
} }
} }
@ -241,56 +311,91 @@ public class TestStickyBit {
* re-start. * re-start.
*/ */
@Test @Test
public void testStickyBitPersistence() throws IOException { public void testStickyBitPersistence() throws Exception {
MiniDFSCluster cluster = null; // A tale of three directories...
try { Path sbSet = new Path("/Housemartins");
Configuration conf = new HdfsConfiguration(); Path sbNotSpecified = new Path("/INXS");
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true); Path sbSetOff = new Path("/Easyworld");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem hdfs = cluster.getFileSystem();
assertTrue(hdfs instanceof DistributedFileSystem); for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
hdfs.mkdirs(p);
// A tale of three directories... // Two directories had there sticky bits set explicitly...
Path sbSet = new Path("/Housemartins"); hdfs.setPermission(sbSet, new FsPermission((short) 01777));
Path sbNotSpecified = new Path("/INXS"); hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
Path sbSetOff = new Path("/Easyworld");
for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff }) shutdown();
hdfs.mkdirs(p);
// Two directories had their sticky bits set explicitly... hdfs.setPermission(sbSet, new FsPermission((short) 01777));
hdfs.setPermission(sbSet, new FsPermission((short) 01777)); initCluster(false);
hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
cluster.shutdown(); assertTrue(hdfs.exists(sbSet));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
// Start file system up again assertTrue(hdfs.exists(sbNotSpecified));
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build(); assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
hdfs = cluster.getFileSystem(); .getStickyBit());
assertTrue(hdfs.exists(sbSet)); assertTrue(hdfs.exists(sbSetOff));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit()); assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
}
assertTrue(hdfs.exists(sbNotSpecified)); @Test
assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission() public void testAclStickyBitPersistence() throws Exception {
.getStickyBit()); // A tale of three directories...
Path sbSet = new Path("/Housemartins");
Path sbNotSpecified = new Path("/INXS");
Path sbSetOff = new Path("/Easyworld");
assertTrue(hdfs.exists(sbSetOff)); for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit()); hdfs.mkdirs(p);
} finally { // Two directories had there sticky bits set explicitly...
if (cluster != null) hdfs.setPermission(sbSet, new FsPermission((short) 01777));
cluster.shutdown(); applyAcl(sbSet);
} hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
applyAcl(sbSetOff);
shutdown();
// Start file system up again
initCluster(false);
assertTrue(hdfs.exists(sbSet));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
assertTrue(hdfs.exists(sbNotSpecified));
assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
.getStickyBit());
assertTrue(hdfs.exists(sbSetOff));
assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
} }
/*** /***
* Write a quick file to the specified file system at specified path * Write a quick file to the specified file system at specified path
*/ */
static private void writeFile(FileSystem hdfs, Path p) throws IOException { static private void writeFile(FileSystem hdfs, Path p) throws IOException {
FSDataOutputStream o = hdfs.create(p); FSDataOutputStream o = null;
o.write("some file contents".getBytes()); try {
o.close(); o = hdfs.create(p);
o.write("some file contents".getBytes());
o.close();
o = null;
} finally {
IOUtils.cleanup(null, o);
}
}
/**
* Applies an ACL (both access and default) to the given path.
*
* @param p Path to set
* @throws IOException if an ACL could not be modified
*/
private static void applyAcl(Path p) throws IOException {
hdfs.modifyAclEntries(p, Arrays.asList(
aclEntry(ACCESS, USER, user2.getShortUserName(), ALL),
aclEntry(DEFAULT, USER, user2.getShortUserName(), ALL)));
} }
} }