Merge r1569890 through r1571813 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1571814 13f79535-47bb-0310-9956-ffa450edef68
commit abc9a6dad5
@@ -333,6 +333,8 @@ Trunk (Unreleased)
 
+    HADOOP-10354. TestWebHDFS fails after merge of HDFS-4685 to trunk. (cnauroth)
+
     HADOOP-10361. Correct alignment in CLI output for ACLs. (cnauroth)
 
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -171,7 +171,7 @@ class AclCommands extends FsCommand {
       FsAction entryPerm = entry.getPermission();
       FsAction effectivePerm = entryPerm.and(maskPerm);
       if (entryPerm != effectivePerm) {
-        out.println(String.format("%-31s #effective:%s", entry,
+        out.println(String.format("%s\t#effective:%s", entry,
           effectivePerm.SYMBOL));
       } else {
         out.println(entry);
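For context, getfacl derives an entry's effective permission by intersecting the entry with the ACL mask. A minimal sketch of that computation (the demo class is illustrative, but FsAction.and and FsAction.SYMBOL are the actual APIs used in the hunk above):

    import org.apache.hadoop.fs.permission.FsAction;

    public class EffectivePermDemo {
      public static void main(String[] args) {
        FsAction entryPerm = FsAction.ALL;          // e.g. user:charlie:rwx
        FsAction maskPerm = FsAction.READ_EXECUTE;  // e.g. mask::r-x
        // Effective permission = entry AND mask.
        FsAction effectivePerm = entryPerm.and(maskPerm);
        System.out.println(effectivePerm.SYMBOL);   // prints r-x
      }
    }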
@@ -67,8 +67,7 @@ class Ls extends FsCommand {
   protected static final SimpleDateFormat dateFormat =
     new SimpleDateFormat("yyyy-MM-dd HH:mm");
 
-  protected int maxPerm = 9, maxRepl = 3, maxLen = 10, maxOwner = 0,
-      maxGroup = 0;
+  protected int maxRepl = 3, maxLen = 10, maxOwner = 0, maxGroup = 0;
   protected String lineFormat;
   protected boolean dirRecurse;
 
@@ -117,7 +116,7 @@ class Ls extends FsCommand {
     FileStatus stat = item.stat;
     String line = String.format(lineFormat,
         (stat.isDirectory() ? "d" : "-"),
-        stat.getPermission() + (hasAcl(item) ? "+" : ""),
+        stat.getPermission() + (hasAcl(item) ? "+" : " "),
         (stat.isFile() ? stat.getReplication() : "-"),
         stat.getOwner(),
         stat.getGroup(),
@@ -135,7 +134,6 @@ class Ls extends FsCommand {
   private void adjustColumnWidths(PathData items[]) {
     for (PathData item : items) {
       FileStatus stat = item.stat;
-      maxPerm = maxLength(maxPerm, stat.getPermission());
       maxRepl = maxLength(maxRepl, stat.getReplication());
       maxLen = maxLength(maxLen, stat.getLen());
       maxOwner = maxLength(maxOwner, stat.getOwner());
@@ -143,7 +141,7 @@ class Ls extends FsCommand {
     }
 
     StringBuilder fmt = new StringBuilder();
-    fmt.append("%s%-" + maxPerm + "s "); // permission string
+    fmt.append("%s%s"); // permission string
     fmt.append("%" + maxRepl + "s ");
     // Do not use '%-0s' as a formatting conversion, since it will throw a
     // MissingFormatWidthException if it is used in String.format().
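The comment in the hunk names the constraint behind the rewrite: the '-' (left-justify) flag in String.format requires an explicit positive width, so with maxPerm gone a computed width of zero would be illegal. A small illustration (not from the patch):

    public class FormatWidthDemo {
      public static void main(String[] args) {
        // A fixed width pads the column:
        System.out.println(String.format("%-10s|", "rwxr-xr-x"));
        // String.format("%-0s|", "x");  // throws java.util.MissingFormatWidthException
        // The patched code avoids widths entirely: the permission string is
        // followed by "+" when the inode has an ACL and " " otherwise, so
        // rows stay aligned without a computed column width.
        System.out.println("rwxr-xr-x" + "+" + "  3 alice");
        System.out.println("rwxr-xr-x" + " " + "  3 alice");
      }
    }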
@@ -712,6 +712,11 @@ public class NetworkTopology {
         numOfDatanodes -= ((InnerNode)node).getNumOfLeaves();
       }
     }
+    if (numOfDatanodes == 0) {
+      throw new InvalidTopologyException(
+          "Failed to find datanode (scope=\"" + String.valueOf(scope) +
+          "\" excludedScope=\"" + String.valueOf(excludedScope) + "\").");
+    }
     int leaveIndex = r.nextInt(numOfDatanodes);
     return innerNode.getLeaf(leaveIndex, node);
   }
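The guard matters because an exclusion covering the entire scope leaves zero candidates, and previously that surfaced one line later as an unrelated error from the random pick. A one-liner (not from the patch) showing what r.nextInt(numOfDatanodes) does when numOfDatanodes is 0:

    import java.util.Random;

    public class NextIntZeroDemo {
      public static void main(String[] args) {
        // Throws java.lang.IllegalArgumentException: bound must be positive
        new Random().nextInt(0);
      }
    }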
@@ -332,6 +332,10 @@ Trunk (Unreleased)
     HDFS-5849. Removing ACL from an inode fails if it has only a default ACL.
     (cnauroth)
 
+    HDFS-5623. NameNode: add tests for skipping ACL enforcement when permission
+    checks are disabled, user is superuser or user is member of supergroup.
+    (cnauroth)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -429,6 +433,12 @@ Release 2.4.0 - UNRELEASED
     HDFS-5935. New Namenode UI FS browser should throw smarter error messages.
     (Travis Thompson via jing9)
 
+    HDFS-5939. WebHdfs returns misleading error code and logs nothing if trying
+    to create a file with no DNs in cluster. (Yongjun Zhang via jing9)
+
+    HDFS-6006. Remove duplicate code in FSNameSystem#getFileInfo.
+    (Akira Ajisaka via cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
@@ -3453,9 +3453,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     HdfsFileStatus stat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
-    if (!DFSUtil.isValidName(src)) {
-      throw new InvalidPathException("Invalid file name: " + src);
-    }
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
@@ -7601,10 +7598,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
   AclStatus getAclStatus(String src) throws IOException {
     aclConfigFlag.checkForApiCall();
+    FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
+      if (isPermissionEnabled) {
+        checkPermission(pc, src, false, null, null, null, null);
+      }
       return dir.getAclStatus(src);
     } finally {
       readUnlock();
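A note on the pattern, inferred from the surrounding code rather than stated by the patch: the permission checker is resolved and the operation category checked once before the lock so a standby NameNode fails fast without lock contention, and checkOperation runs again inside the lock because an HA state transition can occur between the first check and lock acquisition. In miniature:

    // Hedged sketch of the read-path idiom; the names match the hunk above.
    checkOperation(OperationCategory.READ);    // cheap pre-check, no lock held
    readLock();
    try {
      checkOperation(OperationCategory.READ);  // re-validate under the lock
      // ... perform the read ...
    } finally {
      readUnlock();
    }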
@@ -103,6 +103,7 @@ import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -244,8 +245,12 @@ public class NamenodeWebHdfsMethods {
       final String path, final HttpOpParam.Op op, final long openOffset,
       final long blocksize,
       final Param<?, ?>... parameters) throws URISyntaxException, IOException {
-    final DatanodeInfo dn = chooseDatanode(namenode, path, op, openOffset,
-        blocksize);
+    final DatanodeInfo dn;
+    try {
+      dn = chooseDatanode(namenode, path, op, openOffset, blocksize);
+    } catch (InvalidTopologyException ite) {
+      throw new IOException("Failed to find datanode, suggest to check cluster health.", ite);
+    }
 
     final String delegationQuery;
     if (!UserGroupInformation.isSecurityEnabled()) {
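The catch block is a boundary-translation idiom: NetworkTopology.InvalidTopologyException is unchecked, so without it the error would escape the WebHDFS handler as an opaque server failure; wrapping it in an IOException preserves the cause chain and gives the client an actionable message. A generic sketch with a hypothetical helper (not part of the patch):

    import java.io.IOException;
    import java.util.concurrent.Callable;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.net.NetworkTopology;

    // Translate the unchecked topology failure into a checked IOException at
    // the service boundary, keeping the original exception as the cause.
    static DatanodeInfo chooseOrExplain(Callable<DatanodeInfo> chooser)
        throws IOException {
      try {
        return chooser.call();
      } catch (NetworkTopology.InvalidTopologyException ite) {
        throw new IOException(
            "Failed to find datanode, suggest to check cluster health.", ite);
      } catch (Exception e) {
        throw new IOException(e);
      }
    }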
@@ -27,6 +27,9 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  * Helper methods useful for writing ACL tests.
@@ -100,6 +103,43 @@ public final class AclTestHelpers {
       .build();
   }
 
+  /**
+   * Asserts that permission is denied to the given fs/user for the given file.
+   *
+   * @param fs FileSystem to check
+   * @param user UserGroupInformation owner of fs
+   * @param pathToCheck Path file to check
+   * @throws Exception if there is an unexpected error
+   */
+  public static void assertFilePermissionDenied(FileSystem fs,
+      UserGroupInformation user, Path pathToCheck) throws Exception {
+    try {
+      DFSTestUtil.readFileBuffer(fs, pathToCheck);
+      fail("expected AccessControlException for user " + user + ", path = " +
+        pathToCheck);
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
+  /**
+   * Asserts that permission is granted to the given fs/user for the given file.
+   *
+   * @param fs FileSystem to check
+   * @param user UserGroupInformation owner of fs
+   * @param pathToCheck Path file to check
+   * @throws Exception if there is an unexpected error
+   */
+  public static void assertFilePermissionGranted(FileSystem fs,
+      UserGroupInformation user, Path pathToCheck) throws Exception {
+    try {
+      DFSTestUtil.readFileBuffer(fs, pathToCheck);
+    } catch (AccessControlException e) {
+      fail("expected permission granted for user " + user + ", path = " +
+        pathToCheck);
+    }
+  }
+
 /**
  * Asserts the value of the FsPermission bits on the inode of a specific path.
  *
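In use (a hedged sketch; fsAsBruce, fsAsDiana, BRUCE, DIANA, and bruceFile are identifiers from FSAclBaseTest further down in this patch), the helpers reduce each ACL expectation to a single line:

    // Deny diana through a named ACL entry, then assert both outcomes.
    fsAsBruce.modifyAclEntries(bruceFile, Lists.newArrayList(
        aclEntry(ACCESS, USER, "diana", NONE)));
    assertFilePermissionDenied(fsAsDiana, DIANA, bruceFile);
    assertFilePermissionGranted(fsAsBruce, BRUCE, bruceFile);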
@@ -27,18 +27,26 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 
 import com.google.common.collect.Lists;
 
@@ -47,24 +55,42 @@ import com.google.common.collect.Lists;
  * also covers interaction of setPermission with inodes that have ACLs.
  */
 public abstract class FSAclBaseTest {
+  private static final UserGroupInformation BRUCE =
+    UserGroupInformation.createUserForTesting("bruce", new String[] { });
+  private static final UserGroupInformation DIANA =
+    UserGroupInformation.createUserForTesting("diana", new String[] { });
+  private static final UserGroupInformation SUPERGROUP_MEMBER =
+    UserGroupInformation.createUserForTesting("super", new String[] {
+      DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT });
 
   protected static MiniDFSCluster cluster;
-  protected static FileSystem fs;
+  protected static Configuration conf;
   private static int pathCount = 0;
   private static Path path;
 
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
+  private FileSystem fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember;
+
   @AfterClass
-  public static void shutdown() throws Exception {
-    IOUtils.cleanup(null, fs);
+  public static void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
   @Before
-  public void setUp() {
+  public void setUp() throws Exception {
     pathCount += 1;
     path = new Path("/p" + pathCount);
+    initFileSystems();
   }
 
+  @After
+  public void destroyFileSystems() {
+    IOUtils.cleanup(null, fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember);
+    fs = fsAsBruce = fsAsDiana = fsAsSupergroupMember = null;
+  }
+
   @Test
@@ -1036,6 +1062,188 @@ public abstract class FSAclBaseTest {
     assertAclFeature(dirPath, true);
   }
 
+  @Test
+  public void testSkipAclEnforcementPermsDisabled() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    fsAsBruce.modifyAclEntries(bruceFile, Lists.newArrayList(
+      aclEntry(ACCESS, USER, "diana", NONE)));
+    assertFilePermissionDenied(fsAsDiana, DIANA, bruceFile);
+    try {
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+      destroyFileSystems();
+      restartCluster();
+      initFileSystems();
+      assertFilePermissionGranted(fsAsDiana, DIANA, bruceFile);
+    } finally {
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+      restartCluster();
+    }
+  }
+
+  @Test
+  public void testSkipAclEnforcementSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    fsAsBruce.modifyAclEntries(bruceFile, Lists.newArrayList(
+      aclEntry(ACCESS, USER, "diana", NONE)));
+    assertFilePermissionGranted(fs, DIANA, bruceFile);
+    assertFilePermissionGranted(fsAsBruce, DIANA, bruceFile);
+    assertFilePermissionDenied(fsAsDiana, DIANA, bruceFile);
+    assertFilePermissionGranted(fsAsSupergroupMember, SUPERGROUP_MEMBER,
+      bruceFile);
+  }
+
+  @Test
+  public void testModifyAclEntriesMustBeOwnerOrSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    List<AclEntry> aclSpec = Lists.newArrayList(
+      aclEntry(ACCESS, USER, "diana", ALL));
+    fsAsBruce.modifyAclEntries(bruceFile, aclSpec);
+    fs.modifyAclEntries(bruceFile, aclSpec);
+    fsAsSupergroupMember.modifyAclEntries(bruceFile, aclSpec);
+    exception.expect(AccessControlException.class);
+    fsAsDiana.modifyAclEntries(bruceFile, aclSpec);
+  }
+
+  @Test
+  public void testRemoveAclEntriesMustBeOwnerOrSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    List<AclEntry> aclSpec = Lists.newArrayList(
+      aclEntry(ACCESS, USER, "diana"));
+    fsAsBruce.removeAclEntries(bruceFile, aclSpec);
+    fs.removeAclEntries(bruceFile, aclSpec);
+    fsAsSupergroupMember.removeAclEntries(bruceFile, aclSpec);
+    exception.expect(AccessControlException.class);
+    fsAsDiana.removeAclEntries(bruceFile, aclSpec);
+  }
+
+  @Test
+  public void testRemoveDefaultAclMustBeOwnerOrSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    fsAsBruce.removeDefaultAcl(bruceFile);
+    fs.removeDefaultAcl(bruceFile);
+    fsAsSupergroupMember.removeDefaultAcl(bruceFile);
+    exception.expect(AccessControlException.class);
+    fsAsDiana.removeDefaultAcl(bruceFile);
+  }
+
+  @Test
+  public void testRemoveAclMustBeOwnerOrSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    fsAsBruce.removeAcl(bruceFile);
+    fs.removeAcl(bruceFile);
+    fsAsSupergroupMember.removeAcl(bruceFile);
+    exception.expect(AccessControlException.class);
+    fsAsDiana.removeAcl(bruceFile);
+  }
+
+  @Test
+  public void testSetAclMustBeOwnerOrSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    List<AclEntry> aclSpec = Lists.newArrayList(
+      aclEntry(ACCESS, USER, READ_WRITE),
+      aclEntry(ACCESS, USER, "diana", READ_WRITE),
+      aclEntry(ACCESS, GROUP, READ),
+      aclEntry(ACCESS, OTHER, READ));
+    fsAsBruce.setAcl(bruceFile, aclSpec);
+    fs.setAcl(bruceFile, aclSpec);
+    fsAsSupergroupMember.setAcl(bruceFile, aclSpec);
+    exception.expect(AccessControlException.class);
+    fsAsDiana.setAcl(bruceFile, aclSpec);
+  }
+
+  @Test
+  public void testGetAclStatusRequiresTraverseOrSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    fsAsBruce.setAcl(bruceDir, Lists.newArrayList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, "diana", READ),
+      aclEntry(ACCESS, GROUP, NONE),
+      aclEntry(ACCESS, OTHER, NONE)));
+    fsAsBruce.getAclStatus(bruceFile);
+    fs.getAclStatus(bruceFile);
+    fsAsSupergroupMember.getAclStatus(bruceFile);
+    exception.expect(AccessControlException.class);
+    fsAsDiana.getAclStatus(bruceFile);
+  }
+
+  /**
+   * Creates a FileSystem for the super-user.
+   *
+   * @return FileSystem for super-user
+   * @throws Exception if creation fails
+   */
+  protected FileSystem createFileSystem() throws Exception {
+    return cluster.getFileSystem();
+  }
+
+  /**
+   * Creates a FileSystem for a specific user.
+   *
+   * @param user UserGroupInformation specific user
+   * @return FileSystem for specific user
+   * @throws Exception if creation fails
+   */
+  protected FileSystem createFileSystem(UserGroupInformation user)
+      throws Exception {
+    return DFSTestUtil.getFileSystemAs(user, cluster.getConfiguration(0));
+  }
+
+  /**
+   * Initializes all FileSystem instances used in the tests.
+   *
+   * @throws Exception if initialization fails
+   */
+  private void initFileSystems() throws Exception {
+    fs = createFileSystem();
+    fsAsBruce = createFileSystem(BRUCE);
+    fsAsDiana = createFileSystem(DIANA);
+    fsAsSupergroupMember = createFileSystem(SUPERGROUP_MEMBER);
+  }
+
+  /**
+   * Restarts the cluster without formatting, so all data is preserved.
+   *
+   * @throws Exception if restart fails
+   */
+  private void restartCluster() throws Exception {
+    shutdown();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(false)
+      .build();
+    cluster.waitActive();
+  }
+
 /**
  * Asserts whether or not the inode for the test path has an AclFeature.
  *
@@ -1075,7 +1283,7 @@ public abstract class FSAclBaseTest {
    * @param perm short expected permission bits
    * @throws IOException thrown if there is an I/O error
    */
-  private static void assertPermission(short perm) throws IOException {
+  private void assertPermission(short perm) throws IOException {
     assertPermission(path, perm);
   }
 
@@ -1086,7 +1294,7 @@ public abstract class FSAclBaseTest {
    * @param perm short expected permission bits
    * @throws IOException thrown if there is an I/O error
    */
-  private static void assertPermission(Path pathToCheck, short perm)
+  private void assertPermission(Path pathToCheck, short perm)
       throws IOException {
     AclTestHelpers.assertPermission(fs, pathToCheck, perm);
   }
@@ -17,11 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.BeforeClass;
 
@@ -33,11 +30,9 @@ public class TestNameNodeAcl extends FSAclBaseTest {
 
   @BeforeClass
   public static void init() throws Exception {
-    Configuration conf = new Configuration();
+    conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
-    fs = cluster.getFileSystem();
-    assertTrue(fs instanceof DistributedFileSystem);
   }
 }
@@ -695,43 +695,6 @@ public class TestAclWithSnapshot {
   }
 }
 
-  /**
-   * Asserts that permission is denied to the given fs/user for the given file.
-   *
-   * @param fs FileSystem to check
-   * @param user UserGroupInformation owner of fs
-   * @param pathToCheck Path file to check
-   * @throws Exception if there is an unexpected error
-   */
-  private static void assertFilePermissionDenied(FileSystem fs,
-      UserGroupInformation user, Path pathToCheck) throws Exception {
-    try {
-      fs.open(pathToCheck).close();
-      fail("expected AccessControlException for user " + user + ", path = " +
-        pathToCheck);
-    } catch (AccessControlException e) {
-      // expected
-    }
-  }
-
-  /**
-   * Asserts that permission is granted to the given fs/user for the given file.
-   *
-   * @param fs FileSystem to check
-   * @param user UserGroupInformation owner of fs
-   * @param pathToCheck Path file to check
-   * @throws Exception if there is an unexpected error
-   */
-  private static void assertFilePermissionGranted(FileSystem fs,
-      UserGroupInformation user, Path pathToCheck) throws Exception {
-    try {
-      fs.open(pathToCheck).close();
-    } catch (AccessControlException e) {
-      fail("expected permission granted for user " + user + ", path = " +
-        pathToCheck);
-    }
-  }
-
 /**
  * Asserts the value of the FsPermission bits on the inode of the test path.
  *
@@ -36,9 +36,10 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
-import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
@@ -289,6 +290,31 @@ public class TestWebHDFS {
     }
   }
 
+  /**
+   * Test for catching the "no datanode" IOException when trying to create a
+   * file while no datanode is running.
+   */
+  @Test(timeout=300000)
+  public void testCreateWithNoDN() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+      cluster.waitActive();
+      FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+          WebHdfsFileSystem.SCHEME);
+      fs.create(new Path("/testnodatanode"));
+      Assert.fail("No exception was thrown");
+    } catch (IOException ex) {
+      GenericTestUtils.assertExceptionContains("Failed to find datanode", ex);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
 /**
  * WebHdfs should be enabled by default after HDFS-5532
  *
@@ -17,12 +17,11 @@
  */
 package org.apache.hadoop.hdfs.web;
 
-import static org.junit.Assert.*;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -34,12 +33,10 @@ public class TestWebHDFSAcl extends FSAclBaseTest {
 
   @BeforeClass
   public static void init() throws Exception {
-    Configuration conf = WebHdfsTestUtil.createConf();
+    conf = WebHdfsTestUtil.createConf();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
-    fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
-    assertTrue(fs instanceof WebHdfsFileSystem);
   }
 
 /**
@@ -51,4 +48,29 @@ public class TestWebHDFSAcl extends FSAclBaseTest {
   @Ignore
   public void testDefaultAclNewSymlinkIntermediate() {
   }
+
+  /**
+   * Overridden to provide a WebHdfsFileSystem wrapper for the super-user.
+   *
+   * @return WebHdfsFileSystem for super-user
+   * @throws Exception if creation fails
+   */
+  @Override
+  protected WebHdfsFileSystem createFileSystem() throws Exception {
+    return WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
+  }
+
+  /**
+   * Overridden to provide a WebHdfsFileSystem wrapper for a specific user.
+   *
+   * @param user UserGroupInformation specific user
+   * @return WebHdfsFileSystem for specific user
+   * @throws Exception if creation fails
+   */
+  @Override
+  protected WebHdfsFileSystem createFileSystem(UserGroupInformation user)
+      throws Exception {
+    return WebHdfsTestUtil.getWebHdfsFileSystemAs(user, conf,
+        WebHdfsFileSystem.SCHEME);
+  }
 }
@@ -846,15 +846,15 @@
       </comparator>
       <comparator>
         <type>RegexpComparator</type>
-        <expected-output>^user:charlie:rwx\s+#effective:r-x$</expected-output>
+        <expected-output>^user:charlie:rwx\t#effective:r-x$</expected-output>
       </comparator>
       <comparator>
         <type>RegexpComparator</type>
-        <expected-output>^group::-wx\s+#effective:--x$</expected-output>
+        <expected-output>^group::-wx\t#effective:--x$</expected-output>
       </comparator>
       <comparator>
         <type>RegexpComparator</type>
-        <expected-output>^group:sales:rwx\s+#effective:r-x$</expected-output>
+        <expected-output>^group:sales:rwx\t#effective:r-x$</expected-output>
       </comparator>
       <comparator>
         <type>SubstringComparator</type>
@@ -870,15 +870,15 @@
       </comparator>
       <comparator>
         <type>RegexpComparator</type>
-        <expected-output>^default:user:charlie:rwx\s+#effective:rw-$</expected-output>
+        <expected-output>^default:user:charlie:rwx\t#effective:rw-$</expected-output>
       </comparator>
       <comparator>
         <type>RegexpComparator</type>
-        <expected-output>^default:group::r-x\s+#effective:r--$</expected-output>
+        <expected-output>^default:group::r-x\t#effective:r--$</expected-output>
      </comparator>
       <comparator>
         <type>RegexpComparator</type>
-        <expected-output>^default:group:sales:rwx\s+#effective:rw-$</expected-output>
+        <expected-output>^default:group:sales:rwx\t#effective:rw-$</expected-output>
       </comparator>
       <comparator>
         <type>SubstringComparator</type>
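The \s+ to \t tightening is safe because \s already matches a tab; the old patterns simply also tolerated the space padding that HADOOP-10361 removed. A quick check (not from the patch):

    import java.util.regex.Pattern;

    public class AclRegexDemo {
      public static void main(String[] args) {
        String line = "user:charlie:rwx\t#effective:r-x";
        // Old pattern: any whitespace run, so tab or space padding both pass.
        System.out.println(
            Pattern.matches("^user:charlie:rwx\\s+#effective:r-x$", line)); // true
        // New pattern: exactly one tab, pinned to the updated getfacl output.
        System.out.println(
            Pattern.matches("^user:charlie:rwx\\t#effective:r-x$", line)); // true
      }
    }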
@@ -352,6 +352,8 @@ Release 2.4.0 - UNRELEASED
     transits from standby to active mode so as to assimilate any changes that
     happened while it was in standby mode. (Xuan Gong via vinodkv)
 
+    YARN-1760. TestRMAdminService assumes CapacityScheduler. (kasha)
+
 Release 2.3.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -62,7 +62,7 @@ import org.junit.Test;
 
 public class TestRMAdminService {
 
-  private final Configuration configuration = new YarnConfiguration();
+  private Configuration configuration;
   private MockRM rm = null;
   private FileSystem fs;
   private Path workingPath;
@@ -70,7 +70,7 @@ public class TestRMAdminService {
 
   @Before
   public void setup() throws IOException {
-    Configuration.addDefaultResource(YarnConfiguration.CS_CONFIGURATION_FILE);
+    configuration = new YarnConfiguration();
     fs = FileSystem.get(configuration);
     workingPath =
         new Path(new File("target", this.getClass().getSimpleName()
@@ -94,9 +94,16 @@ public class TestRMAdminService {
     fs.delete(tmpDir, true);
   }
 
+  private void useCapacityScheduler() {
+    configuration.set(YarnConfiguration.RM_SCHEDULER,
+        CapacityScheduler.class.getCanonicalName());
+    configuration.addResource(YarnConfiguration.CS_CONFIGURATION_FILE);
+  }
+
   @Test
   public void testAdminRefreshQueuesWithLocalConfigurationProvider()
       throws IOException, YarnException {
+    useCapacityScheduler();
     rm = new MockRM(configuration);
     rm.init(configuration);
     rm.start();
@@ -119,6 +126,7 @@ public class TestRMAdminService {
       throws IOException, YarnException {
     configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
         "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
+    useCapacityScheduler();
     try {
       rm = new MockRM(configuration);
       rm.init(configuration);
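The YARN-1760 change turns on a Configuration API distinction: Configuration.addDefaultResource is static and JVM-wide, so loading capacity-scheduler.xml in setup() leaked CapacityScheduler settings into every test, while the instance-level addResource in useCapacityScheduler() scopes the file to a single Configuration object. A minimal contrast (illustrative only):

    import org.apache.hadoop.conf.Configuration;

    public class ConfScopeDemo {
      public static void main(String[] args) {
        // JVM-wide: every Configuration created from now on loads the file.
        Configuration.addDefaultResource("capacity-scheduler.xml");
        // Instance-scoped: only this object sees the file.
        Configuration conf = new Configuration();
        conf.addResource("capacity-scheduler.xml");
      }
    }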