HADOOP-9042. Add a test for umask in FileSystemContractBaseTest. Contributed by Colin McCabe

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1411882 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Eli Collins 2012-11-20 21:20:07 +00:00
parent 4688f839d2
commit 5c7d4bd9f1
3 changed files with 29 additions and 2 deletions

View File

@ -91,6 +91,9 @@ Release 2.0.3-alpha - Unreleased
HADOOP-8926. hadoop.util.PureJavaCrc32 cache hit-ratio is low for static
data (Gopal V via bobby)
HADOOP-9042. Add a test for umask in FileSystemContractBaseTest.
(Colin McCabe via eli)
BUG FIXES
HADOOP-8795. BASH tab completion doesn't look in PATH, assumes path to

View File

@ -23,11 +23,13 @@ import java.io.IOException;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
/** /**
* <p> * <p>
@ -43,7 +45,7 @@ import org.apache.hadoop.fs.Path;
* </p> * </p>
*/ */
public abstract class FileSystemContractBaseTest extends TestCase { public abstract class FileSystemContractBaseTest extends TestCase {
protected final static String TEST_UMASK = "062";
protected FileSystem fs; protected FileSystem fs;
protected byte[] data = new byte[getBlockSize() * 2]; // two blocks of data protected byte[] data = new byte[getBlockSize() * 2]; // two blocks of data
{ {
@ -152,6 +154,25 @@ public abstract class FileSystemContractBaseTest extends TestCase {
} }
/**
 * Verifies that the filesystem honors the configured umask when creating
 * directories: with umask {@code TEST_UMASK} ("062") and a requested mode of
 * 0777, the created directory's permission must be 0777 &amp; ~062 = 0715
 * (group read/write and other write bits cleared).
 */
public void testMkdirsWithUmask() throws Exception {
if (fs.getScheme().equals("s3") || fs.getScheme().equals("s3n")) {
// skip permission tests for S3FileSystem until HDFS-1333 is fixed.
return;
}
Configuration conf = fs.getConf();
// Remember the current umask so it can be restored after the test.
// NOTE(review): oldUmask may be null if the key was never set — confirm
// that Configuration.set accepts a null value in the finally block below.
String oldUmask = conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY);
try {
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, TEST_UMASK);
final Path dir = new Path("/test/newDir");
// Request full 0777 permissions; the umask (062) should mask them
// down to 0715, which is asserted below.
assertTrue(fs.mkdirs(dir, new FsPermission((short)0777)));
FileStatus status = fs.getFileStatus(dir);
assertTrue(status.isDirectory());
assertEquals((short)0715, status.getPermission().toShort());
} finally {
// Restore the original umask even if an assertion above failed, so
// other tests sharing this Configuration are not affected.
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, oldUmask);
}
}
public void testGetFileStatusThrowsExceptionForNonExistentFile() public void testGetFileStatusThrowsExceptionForNonExistentFile()
throws Exception { throws Exception {
try { try {

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystemContractBaseTest; import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
@ -33,6 +34,8 @@ public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
@Override @Override
protected void setUp() throws Exception { protected void setUp() throws Exception {
Configuration conf = new HdfsConfiguration(); Configuration conf = new HdfsConfiguration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
FileSystemContractBaseTest.TEST_UMASK);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
fs = cluster.getFileSystem(); fs = cluster.getFileSystem();
defaultWorkingDirectory = "/user/" + defaultWorkingDirectory = "/user/" +