HDFS-12895. RBF: Add ACL support for mount table. Contributed by Yiqun Lin.
(cherry picked from commit ee028bfdf1)
parent 1011bfd220, commit 61e16bff63
@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.federation.router;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;

import java.io.IOException;
import java.net.InetSocketAddress;

@ -35,9 +38,12 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableE
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RPC.Server;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.AbstractService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@ -65,6 +71,14 @@ public class RouterAdminServer extends AbstractService
private final Server adminServer;
private final InetSocketAddress adminAddress;

/**
* Permission related info used for constructing new router permission
* checker instance.
*/
private static String routerOwner;
private static String superGroup;
private static boolean isPermissionEnabled;

public RouterAdminServer(Configuration conf, Router router)
throws IOException {
super(RouterAdminServer.class.getName());

@ -96,6 +110,7 @@ public class RouterAdminServer extends AbstractService
LOG.info("Admin server binding to {}:{}",
bindHost, confRpcAddress.getPort());

initializePermissionSettings(this.conf);
this.adminServer = new RPC.Builder(this.conf)
.setProtocol(RouterAdminProtocolPB.class)
.setInstance(clientNNPbService)

@ -112,6 +127,22 @@ public class RouterAdminServer extends AbstractService
router.setAdminServerAddress(this.adminAddress);
}

/**
* Initialize permission related settings.
*
* @param routerConf
* @throws IOException
*/
private static void initializePermissionSettings(Configuration routerConf)
throws IOException {
routerOwner = UserGroupInformation.getCurrentUser().getShortUserName();
superGroup = routerConf.get(
DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
isPermissionEnabled = routerConf.getBoolean(DFS_PERMISSIONS_ENABLED_KEY,
DFS_PERMISSIONS_ENABLED_DEFAULT);
}

/** Allow access to the client RPC server for testing. */
@VisibleForTesting
Server getAdminServer() {

@ -180,4 +211,44 @@ public class RouterAdminServer extends AbstractService
GetMountTableEntriesRequest request) throws IOException {
return getMountTableStore().getMountTableEntries(request);
}

/**
* Get a new permission checker used for making mount table access
* control. This method will be invoked during each RPC call in router
* admin server.
*
* @return Router permission checker
* @throws AccessControlException
*/
public static RouterPermissionChecker getPermissionChecker()
throws AccessControlException {
if (!isPermissionEnabled) {
return null;
}

try {
return new RouterPermissionChecker(routerOwner, superGroup,
NameNode.getRemoteUser());
} catch (IOException e) {
throw new AccessControlException(e);
}
}

/**
* Get super user name.
*
* @return String super user name.
*/
public static String getSuperUser() {
return routerOwner;
}

/**
* Get super group name.
*
* @return String super group name.
*/
public static String getSuperGroup(){
return superGroup;
}
}
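The getPermissionChecker() call above returns null whenever dfs.permissions.enabled is false, so every caller is expected to guard the check and fall through when no checker is available. A minimal sketch of that caller pattern, assuming only the classes added by this patch (the MountTableAclGuard class and checkWrite method are illustrative names, not part of the patch; the real usage appears in MountTableStoreImpl below):

package org.apache.hadoop.hdfs.server.federation.router;

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
import org.apache.hadoop.security.AccessControlException;

public class MountTableAclGuard {
  /**
   * Enforce WRITE access on a mount table entry before mutating it.
   * A null checker means dfs.permissions.enabled is false, so the
   * operation proceeds without any ACL evaluation.
   */
  public static void checkWrite(MountTable entry)
      throws AccessControlException {
    RouterPermissionChecker pc = RouterAdminServer.getPermissionChecker();
    if (pc != null && entry != null) {
      pc.checkPermission(entry, FsAction.WRITE);
    }
  }
}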
@ -0,0 +1,82 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.federation.router;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
import org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;

/**
* Class that helps in checking permissions in Router-based federation.
*/
public class RouterPermissionChecker extends FSPermissionChecker {
static final Log LOG = LogFactory.getLog(RouterPermissionChecker.class);

/** Mount table default permission. */
public static final short MOUNT_TABLE_PERMISSION_DEFAULT = 00755;

public RouterPermissionChecker(String routerOwner, String supergroup,
UserGroupInformation callerUgi) {
super(routerOwner, supergroup, callerUgi, null);
}

/**
* Whether a mount table entry can be accessed by the current context.
*
* @param mountTable
* MountTable being accessed
* @param access
* type of action being performed on the mount table entry
* @throws AccessControlException
* if mount table cannot be accessed
*/
public void checkPermission(MountTable mountTable, FsAction access)
throws AccessControlException {
if (isSuperUser()) {
return;
}

FsPermission mode = mountTable.getMode();
if (getUser().equals(mountTable.getOwnerName())
&& mode.getUserAction().implies(access)) {
return;
}

if (isMemberOfGroup(mountTable.getGroupName())
&& mode.getGroupAction().implies(access)) {
return;
}

if (!getUser().equals(mountTable.getOwnerName())
&& !isMemberOfGroup(mountTable.getGroupName())
&& mode.getOtherAction().implies(access)) {
return;
}

throw new AccessControlException(
"Permission denied while accessing mount table "
+ mountTable.getSourcePath()
+ ": user " + getUser() + " does not have " + access.toString()
+ " permissions.");
}
}
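checkPermission() above walks the owner, group, and other bits of the entry's mode, in that order. A standalone illustration (not part of this patch) of how an octal mode such as 0455, used later in TestRouterAdminCLI, maps onto those bits, using only FsPermission and FsAction from hadoop-common:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class ModeBitsExample {
  public static void main(String[] args) {
    // 0455 octal: owner r--, group r-x, other r-x
    FsPermission mode = new FsPermission((short) 0455);
    // The owner may read (list) the entry but may not modify it
    System.out.println(mode.getUserAction().implies(FsAction.READ));   // true
    System.out.println(mode.getUserAction().implies(FsAction.WRITE));  // false
    // Everyone else may still read (list) the entry
    System.out.println(mode.getOtherAction().implies(FsAction.READ));  // true
  }
}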
@ -24,6 +24,9 @@ import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.server.federation.router.RouterAdminServer;
import org.apache.hadoop.hdfs.server.federation.router.RouterPermissionChecker;
import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;

@ -36,6 +39,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableE
import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
import org.apache.hadoop.hdfs.server.federation.store.records.Query;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Time;

/**

@ -52,7 +56,15 @@ public class MountTableStoreImpl extends MountTableStore {
@Override
public AddMountTableEntryResponse addMountTableEntry(
AddMountTableEntryRequest request) throws IOException {
boolean status = getDriver().put(request.getEntry(), false, true);
MountTable mountTable = request.getEntry();
if (mountTable != null) {
RouterPermissionChecker pc = RouterAdminServer.getPermissionChecker();
if (pc != null) {
pc.checkPermission(mountTable, FsAction.WRITE);
}
}

boolean status = getDriver().put(mountTable, false, true);
AddMountTableEntryResponse response =
AddMountTableEntryResponse.newInstance();
response.setStatus(status);

@ -62,8 +74,15 @@ public class MountTableStoreImpl extends MountTableStore {
@Override
public UpdateMountTableEntryResponse updateMountTableEntry(
UpdateMountTableEntryRequest request) throws IOException {
MountTable entry = request.getEntry();
boolean status = getDriver().put(entry, true, true);
MountTable mountTable = request.getEntry();
if (mountTable != null) {
RouterPermissionChecker pc = RouterAdminServer.getPermissionChecker();
if (pc != null) {
pc.checkPermission(mountTable, FsAction.WRITE);
}
}

boolean status = getDriver().put(mountTable, true, true);
UpdateMountTableEntryResponse response =
UpdateMountTableEntryResponse.newInstance();
response.setStatus(status);

@ -77,8 +96,17 @@ public class MountTableStoreImpl extends MountTableStore {
final MountTable partial = MountTable.newInstance();
partial.setSourcePath(srcPath);
final Query<MountTable> query = new Query<>(partial);
int removedRecords = getDriver().remove(getRecordClass(), query);
boolean status = (removedRecords == 1);
final MountTable deleteEntry = getDriver().get(getRecordClass(), query);

boolean status = false;
if (deleteEntry != null) {
RouterPermissionChecker pc = RouterAdminServer.getPermissionChecker();
if (pc != null) {
pc.checkPermission(deleteEntry, FsAction.WRITE);
}
status = getDriver().remove(deleteEntry);
}

RemoveMountTableEntryResponse response =
RemoveMountTableEntryResponse.newInstance();
response.setStatus(status);

@ -88,12 +116,13 @@ public class MountTableStoreImpl extends MountTableStore {
@Override
public GetMountTableEntriesResponse getMountTableEntries(
GetMountTableEntriesRequest request) throws IOException {

RouterPermissionChecker pc =
RouterAdminServer.getPermissionChecker();
// Get all values from the cache
List<MountTable> records = getCachedRecords();

// Sort and filter
Collections.sort(records);
Collections.sort(records, MountTable.SOURCE_COMPARATOR);
String reqSrcPath = request.getSrcPath();
if (reqSrcPath != null && !reqSrcPath.isEmpty()) {
// Return only entries beneath this path

@ -103,6 +132,15 @@ public class MountTableStoreImpl extends MountTableStore {
String srcPath = record.getSourcePath();
if (!srcPath.startsWith(reqSrcPath)) {
it.remove();
} else if (pc != null) {
// do the READ permission check
try {
pc.checkPermission(record, FsAction.READ);
} catch (AccessControlException ignored) {
// Remove this mount table entry if it cannot
// be accessed by current user.
it.remove();
}
}
}
}
@ -28,9 +28,13 @@ import java.util.TreeMap;

import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
import org.apache.hadoop.hdfs.server.federation.router.RouterPermissionChecker;
import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@ -127,6 +131,15 @@ public abstract class MountTable extends BaseRecord {
// Set the serialized dest string
record.setDestinations(locations);

// Set permission fields
UserGroupInformation ugi = NameNode.getRemoteUser();
record.setOwnerName(ugi.getShortUserName());
String group = ugi.getGroups().isEmpty() ? ugi.getShortUserName()
: ugi.getPrimaryGroupName();
record.setGroupName(group);
record.setMode(new FsPermission(
RouterPermissionChecker.MOUNT_TABLE_PERMISSION_DEFAULT));

// Validate
record.validate();
return record;

@ -193,6 +206,48 @@ public abstract class MountTable extends BaseRecord {
*/
public abstract void setDestOrder(DestinationOrder order);

/**
* Get owner name of this mount table entry.
*
* @return Owner name
*/
public abstract String getOwnerName();

/**
* Set owner name of this mount table entry.
*
* @param owner Owner name for mount table entry
*/
public abstract void setOwnerName(String owner);

/**
* Get group name of this mount table entry.
*
* @return Group name
*/
public abstract String getGroupName();

/**
* Set group name of this mount table entry.
*
* @param group Group name for mount table entry
*/
public abstract void setGroupName(String group);

/**
* Get permission of this mount table entry.
*
* @return FsPermission permission mode
*/
public abstract FsPermission getMode();

/**
* Set permission for this mount table entry.
*
* @param mode Permission for mount table entry
*/
public abstract void setMode(FsPermission mode);

/**
* Get the default location.
* @return The default location.

@ -235,6 +290,19 @@ public abstract class MountTable extends BaseRecord {
if (this.isReadOnly()) {
sb.append("[RO]");
}

if (this.getOwnerName() != null) {
sb.append("[owner:").append(this.getOwnerName()).append("]");
}

if (this.getGroupName() != null) {
sb.append("[group:").append(this.getGroupName()).append("]");
}

if (this.getMode() != null) {
sb.append("[mode:").append(this.getMode()).append("]");
}

return sb.toString();
}
@ -21,6 +21,7 @@ import java.io.IOException;
import java.util.LinkedList;
import java.util.List;

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto.Builder;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto.DestOrder;

@ -28,6 +29,8 @@ import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProt
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoteLocationProto;
import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
import org.apache.hadoop.hdfs.server.federation.router.RouterAdminServer;
import org.apache.hadoop.hdfs.server.federation.router.RouterPermissionChecker;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.FederationProtocolPBTranslator;
import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;

@ -189,6 +192,64 @@ public class MountTablePBImpl extends MountTable implements PBRecord {
}
}

@Override
public String getOwnerName() {
MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
if (!proto.hasOwnerName()) {
return RouterAdminServer.getSuperUser();
}
return proto.getOwnerName();
}

@Override
public void setOwnerName(String owner) {
Builder builder = this.translator.getBuilder();
if (owner == null) {
builder.clearOwnerName();
} else {
builder.setOwnerName(owner);
}
}

@Override
public String getGroupName() {
MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
if (!proto.hasGroupName()) {
return RouterAdminServer.getSuperGroup();
}
return proto.getGroupName();
}

@Override
public void setGroupName(String group) {
Builder builder = this.translator.getBuilder();
if (group == null) {
builder.clearGroupName();
} else {
builder.setGroupName(group);
}
}

@Override
public FsPermission getMode() {
MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
short mode = RouterPermissionChecker.MOUNT_TABLE_PERMISSION_DEFAULT;
if (proto.hasMode()) {
mode = (short) proto.getMode();
}
return new FsPermission(mode);
}

@Override
public void setMode(FsPermission mode) {
Builder builder = this.translator.getBuilder();
if (mode == null) {
builder.clearMode();
} else {
builder.setMode(mode.toShort());
}
}

private DestinationOrder convert(DestOrder order) {
switch (order) {
case LOCAL:
@ -46,7 +46,7 @@ import org.apache.hadoop.security.UserGroupInformation;
*
* Some of the helper methods are guarded by {@link FSNamesystem#readLock()}.
*/
class FSPermissionChecker implements AccessControlEnforcer {
public class FSPermissionChecker implements AccessControlEnforcer {
static final Log LOG = LogFactory.getLog(UserGroupInformation.class);

private static String getPath(byte[][] components, int start, int end) {

@ -86,7 +86,7 @@ class FSPermissionChecker implements AccessControlEnforcer {
private final INodeAttributeProvider attributeProvider;

FSPermissionChecker(String fsOwner, String supergroup,
protected FSPermissionChecker(String fsOwner, String supergroup,
UserGroupInformation callerUgi,
INodeAttributeProvider attributeProvider) {
this.fsOwner = fsOwner;
@ -26,6 +26,7 @@ import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;

@ -77,7 +78,7 @@ public class RouterAdmin extends Configured implements Tool {
public void printUsage() {
String usage = "Federation Admin Tools:\n"
+ "\t[-add <source> <nameservice> <destination> "
+ "[-readonly]\n"
+ "[-readonly] -owner <owner> -group <group> -mode <mode>]\n"
+ "\t[-rm <source>]\n"
+ "\t[-ls <path>]\n";
System.out.println(usage);

@ -193,6 +194,9 @@ public class RouterAdmin extends Configured implements Tool {

// Optional parameters
boolean readOnly = false;
String owner = null;
String group = null;
FsPermission mode = null;
DestinationOrder order = DestinationOrder.HASH;
while (i < parameters.length) {
if (parameters[i].equals("-readonly")) {

@ -204,11 +208,23 @@ public class RouterAdmin extends Configured implements Tool {
} catch(Exception e) {
System.err.println("Cannot parse order: " + parameters[i]);
}
} else if (parameters[i].equals("-owner")) {
i++;
owner = parameters[i];
} else if (parameters[i].equals("-group")) {
i++;
group = parameters[i];
} else if (parameters[i].equals("-mode")) {
i++;
short modeValue = Short.parseShort(parameters[i], 8);
mode = new FsPermission(modeValue);
}

i++;
}

return addMount(mount, nss, dest, readOnly, order);
return addMount(mount, nss, dest, readOnly, order,
new ACLEntity(owner, group, mode));
}

/**

@ -219,11 +235,13 @@ public class RouterAdmin extends Configured implements Tool {
* @param dest Destination path.
* @param readonly If the mount point is read only.
* @param order Order of the destination locations.
* @param aclInfo the ACL info for mount point.
* @return If the mount point was added.
* @throws IOException Error adding the mount point.
*/
public boolean addMount(String mount, String[] nss, String dest,
boolean readonly, DestinationOrder order) throws IOException {
boolean readonly, DestinationOrder order, ACLEntity aclInfo)
throws IOException {
// Get the existing entry
MountTableManager mountTable = client.getMountTableManager();
GetMountTableEntriesRequest getRequest =

@ -251,6 +269,20 @@ public class RouterAdmin extends Configured implements Tool {
if (order != null) {
newEntry.setDestOrder(order);
}

// Set ACL info for mount table entry
if (aclInfo.getOwner() != null) {
newEntry.setOwnerName(aclInfo.getOwner());
}

if (aclInfo.getGroup() != null) {
newEntry.setGroupName(aclInfo.getGroup());
}

if (aclInfo.getMode() != null) {
newEntry.setMode(aclInfo.getMode());
}

AddMountTableEntryRequest request =
AddMountTableEntryRequest.newInstance(newEntry);
AddMountTableEntryResponse addResponse =

@ -273,6 +305,20 @@ public class RouterAdmin extends Configured implements Tool {
if (order != null) {
existingEntry.setDestOrder(order);
}

// Update ACL info of mount table entry
if (aclInfo.getOwner() != null) {
existingEntry.setOwnerName(aclInfo.getOwner());
}

if (aclInfo.getGroup() != null) {
existingEntry.setGroupName(aclInfo.getGroup());
}

if (aclInfo.getMode() != null) {
existingEntry.setMode(aclInfo.getMode());
}

UpdateMountTableEntryRequest updateRequest =
UpdateMountTableEntryRequest.newInstance(existingEntry);
UpdateMountTableEntryResponse updateResponse =

@ -323,8 +369,8 @@ public class RouterAdmin extends Configured implements Tool {
private static void printMounts(List<MountTable> entries) {
System.out.println("Mount Table Entries:");
System.out.println(String.format(
"%-25s %-25s",
"Source", "Destinations"));
"%-25s %-25s %-25s %-25s %-25s",
"Source", "Destinations", "Owner", "Group", "Mode"));
for (MountTable entry : entries) {
StringBuilder destBuilder = new StringBuilder();
for (RemoteLocation location : entry.getDestinations()) {

@ -334,8 +380,38 @@ public class RouterAdmin extends Configured implements Tool {
destBuilder.append(String.format("%s->%s", location.getNameserviceId(),
location.getDest()));
}
System.out.println(String.format("%-25s %-25s", entry.getSourcePath(),
System.out.print(String.format("%-25s %-25s", entry.getSourcePath(),
destBuilder.toString()));

System.out.println(String.format(" %-25s %-25s %-25s",
entry.getOwnerName(), entry.getGroupName(), entry.getMode()));
}
}

/**
* Inner class that stores ACL info of mount table.
*/
static class ACLEntity {
private final String owner;
private final String group;
private final FsPermission mode;

ACLEntity(String owner, String group, FsPermission mode) {
this.owner = owner;
this.group = group;
this.mode = mode;
}

public String getOwner() {
return owner;
}

public String getGroup() {
return group;
}

public FsPermission getMode() {
return mode;
}
}
}
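With the new flags wired into RouterAdmin, a mount point can be created with explicit ACLs through ToolRunner, as the test added later in this commit does. A minimal sketch, assuming a reachable router admin endpoint; the path, nameservice id, owner, and group values are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
import org.apache.hadoop.util.ToolRunner;

public class AddMountWithAclExample {
  public static void main(String[] args) throws Exception {
    // The router admin address (DFSConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY)
    // is taken from the default configuration here; TestRouterAdminCLI below
    // sets it explicitly on its Configuration object.
    Configuration conf = new Configuration();
    String[] argv = new String[] {
        "-add", "/data", "ns0", "/data",
        "-owner", "hdfs", "-group", "supergroup", "-mode", "0755"};
    int exitCode = ToolRunner.run(conf, new RouterAdmin(conf), argv);
    System.exit(exitCode);
  }
}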
@ -129,6 +129,10 @@ message MountTableRecordProto {
RANDOM = 2;
}
optional DestOrder destOrder = 6 [default = HASH];

optional string ownerName = 10;
optional string groupName = 11;
optional int32 mode = 12;
}

message AddMountTableEntryRequestProto {
@ -335,6 +335,9 @@
<th>Target path</th>
<th>Order</th>
<th>Read only</th>
<th>Owner</th>
<th>Group</th>
<th>Permission</th>
<th>Date Modified</th>
<th>Date Created</th>
</tr>

@ -347,6 +350,9 @@
<td>{path}</td>
<td>{order}</td>
<td class="dfshealth-node-icon dfshealth-mount-read-only-{readonly}"/>
<td>{ownerName}</td>
<td>{groupName}</td>
<td>{mode}</td>
<td>{dateModified}</td>
<td>{dateCreated}</td>
</tr>
@ -423,7 +423,7 @@ Runs the DFS router. See [Router](./HDFSRouterFederation.html#Router) for more i
Usage:

hdfs dfsrouteradmin
[-add <source> <nameservice> <destination> [-readonly]]
[-add <source> <nameservice> <destination> [-readonly] -owner <owner> -group <group> -mode <mode>]
[-rm <source>]
[-ls <path>]
@ -190,6 +190,14 @@ It also supports mount points that disallow writes:

If a mount point is not set, the Router will map it to the default namespace `dfs.federation.router.default.nameserviceId`.
Mount table entries have UNIX-like *permissions*, which restrict which users and groups can access a mount point. Write permissions allow users to add, update, or remove mount points. Read permissions allow users to list mount points. Execute permissions are unused.

Mount table permissions can be set with the following command:

    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /tmp ns1 /tmp -owner root -group supergroup -mode 0755

The mode option is a UNIX-style permission for the mount table entry. Permissions are specified in octal, e.g. 0755. By default, this is set to 0755.
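For reference, the -mode value is parsed as an octal short and wrapped in an FsPermission, matching the flag handling in RouterAdmin above. A standalone, illustrative snippet (not part of this patch):

import org.apache.hadoop.fs.permission.FsPermission;

public class ParseModeExample {
  public static void main(String[] args) {
    // "0755" is parsed in base 8, exactly as the -mode flag is handled.
    short modeValue = Short.parseShort("0755", 8);
    FsPermission mode = new FsPermission(modeValue);
    System.out.println(mode); // prints rwxr-xr-x
  }
}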
Client configuration
--------------------
@ -84,6 +84,9 @@ public class TestFederationMetrics extends TestMetricsBase {
json.getString("nameserviceId"));
assertEquals(entry.getDefaultLocation().getDest(),
json.getString("path"));
assertEquals(entry.getOwnerName(), json.getString("ownerName"));
assertEquals(entry.getGroupName(), json.getString("groupName"));
assertEquals(entry.getMode().toString(), json.getString("mode"));
assertNotNullAndNotEmpty(json.getString("dateCreated"));
assertNotNullAndNotEmpty(json.getString("dateModified"));
match++;
@ -0,0 +1,182 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.federation.router;

import static org.junit.Assert.assertEquals;

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

/**
* Tests Router admin commands.
*/
public class TestRouterAdminCLI {
private static StateStoreDFSCluster cluster;
private static RouterContext routerContext;
private static StateStoreService stateStore;

private static RouterAdmin admin;
private static RouterClient client;

private static final String TEST_USER = "test-user";

private final ByteArrayOutputStream out = new ByteArrayOutputStream();
private static final PrintStream OLD_OUT = System.out;

@BeforeClass
public static void globalSetUp() throws Exception {
cluster = new StateStoreDFSCluster(false, 1);
// Build and start a router with State Store + admin + RPC
Configuration conf = new RouterConfigBuilder()
.stateStore()
.admin()
.rpc()
.build();
cluster.addRouterOverrides(conf);

// Start routers
cluster.startRouters();

routerContext = cluster.getRandomRouter();
Router router = routerContext.getRouter();
stateStore = router.getStateStore();

Configuration routerConf = new Configuration();
InetSocketAddress routerSocket = router.getAdminServerAddress();
routerConf.setSocketAddr(DFSConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
routerSocket);
admin = new RouterAdmin(routerConf);
client = routerContext.getAdminClient();
}

@AfterClass
public static void tearDown() {
cluster.stopRouter(routerContext);
cluster.shutdown();
cluster = null;
}

@Test
public void testMountTableDefaultACL() throws Exception {
String[] argv = new String[] {"-add", "/testpath0", "ns0", "/testdir0"};
Assert.assertEquals(0, ToolRunner.run(admin, argv));

stateStore.loadCache(MountTableStoreImpl.class, true);
GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
.newInstance("/testpath0");
GetMountTableEntriesResponse getResponse = client.getMountTableManager()
.getMountTableEntries(getRequest);
MountTable mountTable = getResponse.getEntries().get(0);

UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String group = ugi.getGroups().isEmpty() ? ugi.getShortUserName()
: ugi.getPrimaryGroupName();
assertEquals(ugi.getShortUserName(), mountTable.getOwnerName());
assertEquals(group, mountTable.getGroupName());
assertEquals((short) 0755, mountTable.getMode().toShort());
}

@Test
public void testMountTablePermissions() throws Exception {
// re-set system out for testing
System.setOut(new PrintStream(out));
// use superuser to add new mount table with only read permission
String[] argv = new String[] {"-add", "/testpath2-1", "ns0", "/testdir2-1",
"-owner", TEST_USER, "-group", TEST_USER, "-mode", "0455"};
assertEquals(0, ToolRunner.run(admin, argv));

String superUser = UserGroupInformation.
getCurrentUser().getShortUserName();
// use normal user as current user to test
UserGroupInformation remoteUser = UserGroupInformation
.createRemoteUser(TEST_USER);
UserGroupInformation.setLoginUser(remoteUser);

// verify read permission by executing other commands
verifyExecutionResult("/testpath2-1", true, -1, -1);

// add new mount table with only write permission
argv = new String[] {"-add", "/testpath2-2", "ns0", "/testdir2-2",
"-owner", TEST_USER, "-group", TEST_USER, "-mode", "0255"};
assertEquals(0, ToolRunner.run(admin, argv));
verifyExecutionResult("/testpath2-2", false, 0, 0);

// set mount table entry with read and write permission
argv = new String[] {"-add", "/testpath2-3", "ns0", "/testdir2-3",
"-owner", TEST_USER, "-group", TEST_USER, "-mode", "0755"};
assertEquals(0, ToolRunner.run(admin, argv));
verifyExecutionResult("/testpath2-3", true, 0, 0);

// set back system out and login user
System.setOut(OLD_OUT);
remoteUser = UserGroupInformation.createRemoteUser(superUser);
UserGroupInformation.setLoginUser(remoteUser);
}

/**
* Verify router admin commands execution result.
*
* @param mount
* target mount table
* @param canRead
* whether the user can list mount tables under the specified mount
* @param addCommandCode
* expected return code of add command executed for specified mount
* @param rmCommandCode
* expected return code of rm command executed for specified mount
* @throws Exception
*/
private void verifyExecutionResult(String mount, boolean canRead,
int addCommandCode, int rmCommandCode) throws Exception {
String[] argv = null;
stateStore.loadCache(MountTableStoreImpl.class, true);

out.reset();
// execute ls command
argv = new String[] {"-ls", mount};
assertEquals(0, ToolRunner.run(admin, argv));
assertEquals(canRead, out.toString().contains(mount));

// execute add/update command
argv = new String[] {"-add", mount, "ns0", mount + "newdir"};
assertEquals(addCommandCode, ToolRunner.run(admin, argv));

stateStore.loadCache(MountTableStoreImpl.class, true);
// execute remove command
argv = new String[] {"-rm", mount};
assertEquals(rmCommandCode, ToolRunner.run(admin, argv));
}
}