HDDS-1541. Implement addAcl,removeAcl,setAcl,getAcl for Key. Contributed by Ajay Kumar. (#885)

Ajay Yadav 2019-06-05 14:42:10 -07:00 committed by Xiaoyu Yao
parent 0b1e288deb
commit 3b1c2577d7
24 changed files with 841 additions and 237 deletions

View File

@@ -405,15 +405,7 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
           .setKeyName(bucketArgs.getEncryptionKey()).build();
     }
-    List<OzoneAcl> listOfAcls = new ArrayList<>();
-    //User ACL
-    listOfAcls.add(new OzoneAcl(ACLIdentityType.USER,
-        ugi.getUserName(), userRights));
-    //Group ACLs of the User
-    List<String> userGroups = Arrays.asList(UserGroupInformation
-        .createRemoteUser(ugi.getUserName()).getGroupNames());
-    userGroups.stream().forEach((group) -> listOfAcls.add(
-        new OzoneAcl(ACLIdentityType.GROUP, group, groupRights)));
+    List<OzoneAcl> listOfAcls = getAclList();
     //ACLs from BucketArgs
     if(bucketArgs.getAcls() != null) {
       listOfAcls.addAll(bucketArgs.getAcls());
@@ -437,6 +429,16 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
     ozoneManagerClient.createBucket(builder.build());
   }
 
+  /**
+   * Helper function to get default acl list for current user.
+   *
+   * @return listOfAcls
+   * */
+  private List<OzoneAcl> getAclList() {
+    return OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(),
+        userRights, groupRights);
+  }
+
   @Override
   public void addBucketAcls(
       String volumeName, String bucketName, List<OzoneAcl> addAcls)
@@ -629,6 +631,7 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
         .setType(HddsProtos.ReplicationType.valueOf(type.toString()))
         .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue()))
         .addAllMetadata(metadata)
+        .setAcls(getAclList())
         .build();
 
     OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
@@ -819,6 +822,7 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
         .setKeyName(keyName)
         .setType(HddsProtos.ReplicationType.valueOf(type.toString()))
         .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue()))
+        .setAcls(getAclList())
         .build();
     OmMultipartInfo multipartInfo = ozoneManagerClient
         .initiateMultipartUpload(keyArgs);
@@ -848,6 +852,7 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
         .setIsMultipartKey(true)
         .setMultipartUploadID(uploadID)
         .setMultipartUploadPartNumber(partNumber)
+        .setAcls(getAclList())
         .build();
 
     OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
@@ -963,7 +968,10 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
   public void createDirectory(String volumeName, String bucketName,
       String keyName) throws IOException {
     OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setKeyName(keyName).build();
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setAcls(getAclList())
+        .build();
     ozoneManagerClient.createDirectory(keyArgs);
   }
 
@@ -990,6 +998,7 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
         .setDataSize(size)
         .setType(HddsProtos.ReplicationType.valueOf(type.name()))
         .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue()))
+        .setAcls(getAclList())
         .build();
     OpenKeySession keySession =
         ozoneManagerClient.createFile(keyArgs, overWrite, recursive);
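For context, the RpcClient changes above feed the caller's default user and group ACLs into every key create, multipart initiate, directory create and file create, while the feature named in the commit title surfaces through the ObjectStore addAcl/removeAcl/setAcl/getAcl calls against a KEY-typed OzoneObj, as the tests further down exercise. A rough usage sketch follows; the OzoneClientFactory/ObjectStore wiring and the vol1/bucket1/key1 names are illustrative assumptions, not part of this diff.

import java.util.List;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;

public final class KeyAclSketch {
  public static void main(String[] args) throws Exception {
    // Assumed client bootstrap; any configured Ozone cluster would do.
    OzoneClient client =
        OzoneClientFactory.getRpcClient(new OzoneConfiguration());
    ObjectStore store = client.getObjectStore();

    // Address an existing key (volume/bucket/key names are placeholders).
    OzoneObj key = new OzoneObjInfo.Builder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("key1")
        .setResType(OzoneObj.ResourceType.KEY)
        .setStoreType(OzoneObj.StoreType.OZONE)
        .build();

    // Grant an extra right, read the ACLs back, then revoke it again.
    OzoneAcl readAcl = new OzoneAcl(ACLIdentityType.USER, "testuser",
        ACLType.READ_ACL);
    store.addAcl(key, readAcl);
    List<OzoneAcl> acls = store.getAcl(key);
    acls.forEach(System.out::println);
    store.removeAcl(key, readAcl);

    // Replace whatever is currently set with an explicit list.
    store.setAcl(key, acls);
    client.close();
  }
}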

View File

@@ -46,9 +46,10 @@ public class OzoneAcl {
   private ACLIdentityType type;
   private String name;
   private BitSet aclBitSet;
+  public static final BitSet ZERO_BITSET = new BitSet(0);
 
   /**
-   * Constructor for OzoneAcl.
+   * Default constructor.
    */
   public OzoneAcl() {
   }

View File

@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.helpers;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.Auditable;
@@ -45,13 +46,15 @@ public final class OmKeyArgs implements Auditable {
   private final int multipartUploadPartNumber;
   private Map<String, String> metadata;
   private boolean refreshPipeline;
+  private List<OzoneAcl> acls;
 
   @SuppressWarnings("parameternumber")
   private OmKeyArgs(String volumeName, String bucketName, String keyName,
       long dataSize, ReplicationType type, ReplicationFactor factor,
       List<OmKeyLocationInfo> locationInfoList, boolean isMultipart,
       String uploadID, int partNumber,
-      Map<String, String> metadataMap, boolean refreshPipeline) {
+      Map<String, String> metadataMap, boolean refreshPipeline,
+      List<OzoneAcl> acls) {
     this.volumeName = volumeName;
     this.bucketName = bucketName;
     this.keyName = keyName;
@@ -64,6 +67,7 @@ public final class OmKeyArgs implements Auditable {
     this.multipartUploadPartNumber = partNumber;
     this.metadata = metadataMap;
     this.refreshPipeline = refreshPipeline;
+    this.acls = acls;
   }
 
   public boolean getIsMultipartKey() {
@@ -86,6 +90,10 @@ public final class OmKeyArgs implements Auditable {
     return factor;
   }
 
+  public List<OzoneAcl> getAcls() {
+    return acls;
+  }
+
   public String getVolumeName() {
     return volumeName;
   }
@@ -166,6 +174,7 @@ public final class OmKeyArgs implements Auditable {
     private int multipartUploadPartNumber;
     private Map<String, String> metadata = new HashMap<>();
     private boolean refreshPipeline;
+    private List<OzoneAcl> acls;
 
     public Builder setVolumeName(String volume) {
       this.volumeName = volume;
@@ -202,6 +211,11 @@ public final class OmKeyArgs implements Auditable {
       return this;
     }
 
+    public Builder setAcls(List<OzoneAcl> listOfAcls) {
+      this.acls = listOfAcls;
+      return this;
+    }
+
     public Builder setIsMultipartKey(boolean isMultipart) {
       this.isMultipartKey = isMultipart;
       return this;
@@ -235,7 +249,7 @@ public final class OmKeyArgs implements Auditable {
     public OmKeyArgs build() {
       return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, type,
           factor, locationInfoList, isMultipartKey, multipartUploadID,
-          multipartUploadPartNumber, metadata, refreshPipeline);
+          multipartUploadPartNumber, metadata, refreshPipeline, acls);
     }
   }
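As a quick illustration of the new builder field, a caller can now attach an ACL list while building OmKeyArgs; the sketch below mirrors what the updated tests do, with placeholder names and an empty list (RpcClient instead passes the defaults from OzoneUtils.getAclList):

import java.util.Collections;

import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;

public final class OmKeyArgsSketch {
  public static void main(String[] args) {
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName("vol1")               // placeholder names
        .setBucketName("bucket1")
        .setKeyName("key1")
        .setDataSize(1024)
        .setAcls(Collections.emptyList())    // no extra ACLs in this sketch
        .build();
    System.out.println(keyArgs.getKeyName() + " acls=" + keyArgs.getAcls());
  }
}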

View File

@@ -28,6 +28,7 @@ import java.util.stream.Collectors;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 import org.apache.hadoop.util.Time;
@@ -50,6 +51,10 @@ public final class OmKeyInfo extends WithMetadata {
   private HddsProtos.ReplicationType type;
   private HddsProtos.ReplicationFactor factor;
   private FileEncryptionInfo encInfo;
+  /**
+   * ACL Information.
+   */
+  private List<OzoneAclInfo> acls;
 
   @SuppressWarnings("parameternumber")
   OmKeyInfo(String volumeName, String bucketName, String keyName,
@@ -58,7 +63,7 @@ public final class OmKeyInfo extends WithMetadata {
       HddsProtos.ReplicationType type,
       HddsProtos.ReplicationFactor factor,
       Map<String, String> metadata,
-      FileEncryptionInfo encInfo) {
+      FileEncryptionInfo encInfo, List<OzoneAclInfo> acls) {
     this.volumeName = volumeName;
     this.bucketName = bucketName;
     this.keyName = keyName;
@@ -81,6 +86,7 @@ public final class OmKeyInfo extends WithMetadata {
     this.type = type;
     this.metadata = metadata;
     this.encInfo = encInfo;
+    this.acls = acls;
   }
 
   public String getVolumeName() {
@@ -216,6 +222,10 @@ public final class OmKeyInfo extends WithMetadata {
     return encInfo;
   }
 
+  public List<OzoneAclInfo> getAcls() {
+    return acls;
+  }
+
   /**
    * Builder of OmKeyInfo.
    */
@@ -232,6 +242,7 @@ public final class OmKeyInfo extends WithMetadata {
     private HddsProtos.ReplicationFactor factor;
     private Map<String, String> metadata;
     private FileEncryptionInfo encInfo;
+    private List<OzoneAclInfo> acls;
 
     public Builder() {
       this.metadata = new HashMap<>();
@@ -299,11 +310,16 @@ public final class OmKeyInfo extends WithMetadata {
       return this;
     }
 
+    public Builder setAcls(List<OzoneAclInfo> listOfAcls) {
+      this.acls = listOfAcls;
+      return this;
+    }
+
     public OmKeyInfo build() {
       return new OmKeyInfo(
           volumeName, bucketName, keyName, omKeyLocationInfoGroups,
           dataSize, creationTime, modificationTime, type, factor, metadata,
-          encInfo);
+          encInfo, acls);
     }
   }
@@ -327,6 +343,9 @@ public final class OmKeyInfo extends WithMetadata {
     if (encInfo != null) {
       kb.setFileEncryptionInfo(OMPBHelper.convert(encInfo));
     }
+    if(acls != null) {
+      kb.addAllAcls(acls);
+    }
     return kb.build();
   }
@@ -345,7 +364,8 @@ public final class OmKeyInfo extends WithMetadata {
         keyInfo.getFactor(),
         KeyValueUtil.getFromProtobuf(keyInfo.getMetadataList()),
         keyInfo.hasFileEncryptionInfo() ? OMPBHelper.convert(keyInfo
-            .getFileEncryptionInfo()): null);
+            .getFileEncryptionInfo()): null,
+        keyInfo.getAclsList());
   }
 
   @Override

View File

@@ -36,6 +36,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Objects;
 
+import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights.ALL;
@@ -81,8 +82,17 @@ public class OmOzoneAclMap {
     if (!getMap(aclType).containsKey(acl.getName())) {
       getMap(aclType).put(acl.getName(), acl.getAclBitSet());
     } else {
+      // Check if we are adding new rights to existing acl.
+      BitSet temp = (BitSet) acl.getAclBitSet().clone();
+      BitSet curRights = (BitSet) getMap(aclType).get(acl.getName()).clone();
+      temp.or(curRights);
+
+      if (temp.equals(curRights)) {
         // throw exception if acl is already added.
-      throw new OMException("Acl " + acl + " already exist.", INVALID_REQUEST);
+        throw new OMException("Acl " + acl + " already exist.",
+            INVALID_REQUEST);
+      }
+      getMap(aclType).get(acl.getName()).or(acl.getAclBitSet());
     }
   }
@@ -105,9 +115,25 @@ public class OmOzoneAclMap {
     Objects.requireNonNull(acl, "Acl should not be null.");
     OzoneAclType aclType = OzoneAclType.valueOf(acl.getType().name());
     if (getMap(aclType).containsKey(acl.getName())) {
+      BitSet aclRights = getMap(aclType).get(acl.getName());
+      BitSet bits = (BitSet) acl.getAclBitSet().clone();
+      bits.and(aclRights);
+
+      if (bits.equals(ZERO_BITSET)) {
+        // throw exception if acl doesn't exist.
+        throw new OMException("Acl [" + acl + "] doesn't exist.",
+            INVALID_REQUEST);
+      }
+
+      acl.getAclBitSet().and(aclRights);
+      aclRights.xor(acl.getAclBitSet());
+
+      // Remove the acl as all rights are already set to 0.
+      if (aclRights.equals(ZERO_BITSET)) {
         getMap(aclType).remove(acl.getName());
+      }
     } else {
-      // throw exception if acl is already added.
+      // throw exception if acl doesn't exist.
       throw new OMException("Acl [" + acl + "] doesn't exist.",
           INVALID_REQUEST);
     }

View File

@@ -668,6 +668,11 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName());
 
+    if(args.getAcls() != null) {
+      keyArgs.addAllAcls(args.getAcls().stream().distinct().map(a ->
+          OzoneAcl.toProtobuf(a)).collect(Collectors.toList()));
+    }
+
     if (args.getFactor() != null) {
       keyArgs.setFactor(args.getFactor());
     }
@@ -991,6 +996,8 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
         .setBucketName(omKeyArgs.getBucketName())
         .setKeyName(omKeyArgs.getKeyName())
         .setFactor(omKeyArgs.getFactor())
+        .addAllAcls(omKeyArgs.getAcls().stream().map(a ->
+            OzoneAcl.toProtobuf(a)).collect(Collectors.toList()))
         .setType(omKeyArgs.getType());
     multipartInfoInitiateRequest.setKeyArgs(keyArgs.build());
@@ -1276,6 +1283,8 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
+        .addAllAcls(args.getAcls().stream().map(a ->
+            OzoneAcl.toProtobuf(a)).collect(Collectors.toList()))
         .build();
     CreateDirectoryRequest request = CreateDirectoryRequest.newBuilder()
         .setKeyArgs(keyArgs)
@@ -1412,6 +1421,8 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
         .setDataSize(args.getDataSize())
         .setType(args.getType())
         .setFactor(args.getFactor())
+        .addAllAcls(args.getAcls().stream().map(a ->
+            OzoneAcl.toProtobuf(a)).collect(Collectors.toList()))
         .build();
     CreateFileRequest createFileRequest = CreateFileRequest.newBuilder()
         .setKeyArgs(keyArgs)

View File

@@ -16,10 +16,10 @@
  */
 package org.apache.hadoop.ozone.security.acl;
 
-import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 
-import java.util.StringTokenizer;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 
 /**
  * Class representing an ozone object.
@@ -45,16 +45,14 @@ public final class OzoneObjInfo extends OzoneObj {
     case VOLUME:
       return getVolumeName();
     case BUCKET:
-      return getVolumeName() + OzoneConsts.OZONE_URI_DELIMITER
-          + getBucketName();
+      return getVolumeName() + OZONE_URI_DELIMITER + getBucketName();
     case KEY:
-      return getVolumeName() + OzoneConsts.OZONE_URI_DELIMITER
-          + getBucketName() + OzoneConsts.OZONE_URI_DELIMITER + getKeyName();
+      return getVolumeName() + OZONE_URI_DELIMITER + getBucketName()
+          + OZONE_URI_DELIMITER + getKeyName();
     default:
       throw new IllegalArgumentException("Unknown resource " +
           "type" + getResourceType());
     }
   }
 
   @Override
@@ -77,25 +75,36 @@ public final class OzoneObjInfo extends OzoneObj {
     Builder builder = new Builder()
         .setResType(ResourceType.valueOf(proto.getResType().name()))
         .setStoreType(StoreType.valueOf(proto.getStoreType().name()));
-    StringTokenizer tokenizer = new StringTokenizer(proto.getPath(),
-        OzoneConsts.OZONE_URI_DELIMITER);
+    String[] tokens = StringUtils.splitPreserveAllTokens(proto.getPath(),
+        OZONE_URI_DELIMITER);
+    if(tokens == null) {
+      throw new IllegalArgumentException("Unexpected path:" + proto.getPath());
+    }
     // Set volume name.
-    if (tokenizer.hasMoreTokens()) {
-      builder.setVolumeName(tokenizer.nextToken());
-    }
-    // Set bucket name.
-    if (tokenizer.hasMoreTokens()) {
-      builder.setBucketName(tokenizer.nextToken());
-    }
-    // Set key name
-    if (tokenizer.hasMoreTokens()) {
-      StringBuffer sb = new StringBuffer();
-      while (tokenizer.hasMoreTokens()) {
-        sb.append(OzoneConsts.OZONE_URI_DELIMITER);
-        sb.append(tokenizer.nextToken());
-        sb.append(OzoneConsts.OZONE_URI_DELIMITER);
-      }
-      builder.setKeyName(sb.toString());
-    }
+    switch (proto.getResType()) {
+    case VOLUME:
+      builder.setVolumeName(tokens[0]);
+      break;
+    case BUCKET:
+      if (tokens.length < 2) {
+        throw new IllegalArgumentException("Unexpected argument for " +
+            "Ozone key. Path:" + proto.getPath());
+      }
+      builder.setVolumeName(tokens[0]);
+      builder.setBucketName(tokens[1]);
+      break;
+    case KEY:
+      if (tokens.length != 3) {
+        throw new IllegalArgumentException("Unexpected argument for " +
+            "Ozone key. Path:" + proto.getPath());
+      }
+      builder.setVolumeName(tokens[0]);
+      builder.setBucketName(tokens[1]);
+      builder.setKeyName(tokens[2]);
+      break;
+    default:
+      throw new IllegalArgumentException("Unexpected type for " +
+          "Ozone key. Type:" + proto.getResType());
+    }
     return builder.build();
   }
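The rewritten fromProtobuf above switches from StringTokenizer to commons-lang3 splitPreserveAllTokens so empty path segments are kept instead of silently collapsed, then validates the token count per resource type. A small standalone sketch of that parsing step; the "vol1/bucket1/key1" path and the "/" delimiter are illustrative stand-ins for proto.getPath() and OzoneConsts.OZONE_URI_DELIMITER:

import org.apache.commons.lang3.StringUtils;

public final class ObjPathParseSketch {
  // Assumed to match OzoneConsts.OZONE_URI_DELIMITER.
  private static final String DELIM = "/";

  public static void main(String[] args) {
    String path = "vol1/bucket1/key1";
    String[] tokens = StringUtils.splitPreserveAllTokens(path, DELIM);
    // tokens[0] -> volume, tokens[1] -> bucket, tokens[2] -> key
    System.out.println("volume=" + tokens[0]
        + " bucket=" + tokens[1] + " key=" + tokens[2]);

    // Unlike StringTokenizer, adjacent delimiters are not collapsed, so a
    // malformed path still yields a token per segment (here an empty one).
    String[] bad = StringUtils.splitPreserveAllTokens("vol1//key1", DELIM);
    System.out.println(bad.length + " tokens, middle='" + bad[1] + "'");
  }
}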

View File

@@ -23,6 +23,8 @@ import java.net.UnknownHostException;
 import java.nio.charset.Charset;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Locale;
 import java.util.TimeZone;
 import java.util.UUID;
@@ -31,11 +33,16 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
 import org.apache.ratis.util.TimeDuration;
 
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP;
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;
+
 /**
  * Set of Utility functions used in ozone.
  */
@@ -236,4 +243,26 @@ public final class OzoneUtils {
     return getTimeDuration(conf, key, defaultValue)
         .toLong(TimeUnit.MILLISECONDS);
   }
+
+  /**
+   * Helper function to get default acl list for current user.
+   *
+   * @param userName
+   * @param userGroups
+   * @return listOfAcls
+   * */
+  public static List<OzoneAcl> getAclList(String userName,
+      List<String> userGroups, ACLType userRights, ACLType groupRights) {
+
+    List<OzoneAcl> listOfAcls = new ArrayList<>();
+
+    // User ACL.
+    listOfAcls.add(new OzoneAcl(USER, userName, userRights));
+    if(userGroups != null) {
+      // Group ACLs of the User.
+      userGroups.stream().forEach((group) -> listOfAcls.add(
+          new OzoneAcl(GROUP, group, groupRights)));
+    }
+    return listOfAcls;
+  }
 }
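A rough caller-side sketch of the new helper, using the current login user; the ACLType values are placeholders, since RpcClient takes userRights/groupRights from configuration rather than hard-coding them, and the web.utils package for OzoneUtils is an assumption based on the file shown above.

import java.util.List;

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.security.UserGroupInformation;

public final class DefaultAclSketch {
  public static void main(String[] args) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // Illustrative rights; real callers read these from OzoneAclConfig.
    List<OzoneAcl> defaults = OzoneUtils.getAclList(
        ugi.getUserName(), ugi.getGroups(), ACLType.ALL, ACLType.READ);
    defaults.forEach(acl -> System.out.println(acl));
  }
}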

View File

@@ -615,6 +615,7 @@ message KeyArgs {
     optional string multipartUploadID = 9;
     optional uint32 multipartNumber = 10;
     repeated hadoop.hdds.KeyValue metadata = 11;
+    repeated OzoneAclInfo acls = 12;
 }
 
 message KeyLocation {
@@ -652,6 +653,7 @@ message KeyInfo {
     optional uint64 latestVersion = 10;
     repeated hadoop.hdds.KeyValue metadata = 11;
     optional FileEncryptionInfoProto fileEncryptionInfo = 12;
+    repeated OzoneAclInfo acls = 13;
 }
 
 message OzoneFileStatusProto {

View File

@@ -84,8 +84,10 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
 import org.apache.hadoop.ozone.s3.util.OzoneS3Util;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
+import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Time;
@@ -94,6 +96,9 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.lang3.RandomUtils;
+
+import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.either;
 import org.junit.Assert;
@@ -607,8 +612,8 @@ public abstract class TestOzoneRpcClientAbstract {
     String keyName = UUID.randomUUID().toString();
 
     OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes().length, ReplicationType.STAND_ALONE,
-        ReplicationFactor.ONE, new HashMap<>());
+        value.getBytes().length, STAND_ALONE,
+        ONE, new HashMap<>());
     out.write(value.getBytes());
     out.close();
     OzoneKey key = bucket.getKey(keyName);
@@ -617,8 +622,8 @@ public abstract class TestOzoneRpcClientAbstract {
     byte[] fileContent = new byte[value.getBytes().length];
     is.read(fileContent);
     Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-        keyName, ReplicationType.STAND_ALONE,
-        ReplicationFactor.ONE));
+        keyName, STAND_ALONE,
+        ONE));
     Assert.assertEquals(value, new String(fileContent));
     Assert.assertTrue(key.getCreationTime() >= currentTime);
     Assert.assertTrue(key.getModificationTime() >= currentTime);
@@ -639,7 +644,7 @@ public abstract class TestOzoneRpcClientAbstract {
     // create the initial key with size 0, write will allocate the first block.
     OzoneOutputStream out = bucket.createKey(keyName, 0,
-        ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>());
+        STAND_ALONE, ONE, new HashMap<>());
     out.write(value.getBytes());
     out.close();
     OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
@@ -677,7 +682,7 @@ public abstract class TestOzoneRpcClientAbstract {
     OzoneOutputStream out = bucket.createKey(keyName,
         value.getBytes().length, ReplicationType.RATIS,
-        ReplicationFactor.ONE, new HashMap<>());
+        ONE, new HashMap<>());
     out.write(value.getBytes());
     out.close();
     OzoneKey key = bucket.getKey(keyName);
@@ -687,7 +692,7 @@ public abstract class TestOzoneRpcClientAbstract {
     is.read(fileContent);
     is.close();
     Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-        keyName, ReplicationType.RATIS, ReplicationFactor.ONE));
+        keyName, ReplicationType.RATIS, ONE));
     Assert.assertEquals(value, new String(fileContent));
     Assert.assertTrue(key.getCreationTime() >= currentTime);
     Assert.assertTrue(key.getModificationTime() >= currentTime);
@@ -832,7 +837,7 @@ public abstract class TestOzoneRpcClientAbstract {
     // Write data into a key
     OzoneOutputStream out = bucket.createKey(keyName,
         value.getBytes().length, ReplicationType.RATIS,
-        ReplicationFactor.ONE, new HashMap<>());
+        ONE, new HashMap<>());
     out.write(value.getBytes());
     out.close();
@@ -904,8 +909,8 @@ public abstract class TestOzoneRpcClientAbstract {
     //String keyValue = "this is a test value.glx";
     // create the initial key with size 0, write will allocate the first block.
     OzoneOutputStream out = bucket.createKey(keyName,
-        keyValue.getBytes().length, ReplicationType.STAND_ALONE,
-        ReplicationFactor.ONE, new HashMap<>());
+        keyValue.getBytes().length, STAND_ALONE,
+        ONE, new HashMap<>());
     out.write(keyValue.getBytes());
     out.close();
@@ -993,7 +998,7 @@ public abstract class TestOzoneRpcClientAbstract {
     // Write data into a key
     OzoneOutputStream out = bucket.createKey(keyName,
         value.getBytes().length, ReplicationType.RATIS,
-        ReplicationFactor.ONE, new HashMap<>());
+        ONE, new HashMap<>());
     out.write(value.getBytes());
     out.close();
@@ -1161,8 +1166,8 @@ public abstract class TestOzoneRpcClientAbstract {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes().length, ReplicationType.STAND_ALONE,
-        ReplicationFactor.ONE, new HashMap<>());
+        value.getBytes().length, STAND_ALONE,
+        ONE, new HashMap<>());
     out.write(value.getBytes());
     out.close();
     OzoneKey key = bucket.getKey(keyName);
@@ -1185,8 +1190,8 @@ public abstract class TestOzoneRpcClientAbstract {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OzoneOutputStream out = bucket.createKey(fromKeyName,
-        value.getBytes().length, ReplicationType.STAND_ALONE,
-        ReplicationFactor.ONE, new HashMap<>());
+        value.getBytes().length, STAND_ALONE,
+        ONE, new HashMap<>());
     out.write(value.getBytes());
     out.close();
     OzoneKey key = bucket.getKey(fromKeyName);
@@ -1380,25 +1385,25 @@ public abstract class TestOzoneRpcClientAbstract {
       byte[] value = RandomStringUtils.randomAscii(10240).getBytes();
       OzoneOutputStream one = volAbucketA.createKey(
           keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
+          value.length, STAND_ALONE, ONE,
           new HashMap<>());
       one.write(value);
       one.close();
       OzoneOutputStream two = volAbucketB.createKey(
           keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
+          value.length, STAND_ALONE, ONE,
           new HashMap<>());
       two.write(value);
       two.close();
       OzoneOutputStream three = volBbucketA.createKey(
           keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
+          value.length, STAND_ALONE, ONE,
           new HashMap<>());
       three.write(value);
       three.close();
       OzoneOutputStream four = volBbucketB.createKey(
           keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
+          value.length, STAND_ALONE, ONE,
           new HashMap<>());
       four.write(value);
       four.close();
@@ -1413,25 +1418,25 @@ public abstract class TestOzoneRpcClientAbstract {
       byte[] value = RandomStringUtils.randomAscii(10240).getBytes();
       OzoneOutputStream one = volAbucketA.createKey(
           keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
+          value.length, STAND_ALONE, ONE,
           new HashMap<>());
       one.write(value);
       one.close();
       OzoneOutputStream two = volAbucketB.createKey(
           keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
+          value.length, STAND_ALONE, ONE,
           new HashMap<>());
       two.write(value);
       two.close();
       OzoneOutputStream three = volBbucketA.createKey(
           keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
+          value.length, STAND_ALONE, ONE,
           new HashMap<>());
       three.write(value);
       three.close();
       OzoneOutputStream four = volBbucketB.createKey(
           keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
+          value.length, STAND_ALONE, ONE,
           new HashMap<>());
       four.write(value);
       four.close();
@@ -1512,7 +1517,7 @@ public abstract class TestOzoneRpcClientAbstract {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
+        STAND_ALONE, ONE);
 
     assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
@@ -1524,7 +1529,7 @@ public abstract class TestOzoneRpcClientAbstract {
     // Call initiate multipart upload for the same key again, this should
     // generate a new uploadID.
     multipartInfo = bucket.initiateMultipartUpload(keyName,
-        ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
+        STAND_ALONE, ONE);
 
     assertNotNull(multipartInfo);
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
@@ -1580,7 +1585,7 @@ public abstract class TestOzoneRpcClientAbstract {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
+        STAND_ALONE, ONE);
 
     assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
@@ -1618,7 +1623,7 @@ public abstract class TestOzoneRpcClientAbstract {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
+        STAND_ALONE, ONE);
 
     assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
@@ -1746,7 +1751,6 @@ public abstract class TestOzoneRpcClientAbstract {
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     doMultipartUpload(bucket, keyName, (byte)98);
-
   }
@@ -1782,18 +1786,18 @@ public abstract class TestOzoneRpcClientAbstract {
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     // Initiate multipart upload
-    String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType
-        .STAND_ALONE, ReplicationFactor.ONE);
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
 
     // Upload Parts
     Map<Integer, String> partsMap = new TreeMap<>();
 
     // Uploading part 1 with less than min size
-    String partName = uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(
-        UTF_8));
+    String partName = uploadPart(bucket, keyName, uploadID, 1,
+        "data".getBytes(UTF_8));
     partsMap.put(1, partName);
 
-    partName = uploadPart(bucket, keyName, uploadID, 2, "data".getBytes(
-        UTF_8));
+    partName = uploadPart(bucket, keyName, uploadID, 2,
+        "data".getBytes(UTF_8));
     partsMap.put(2, partName);
@@ -1815,8 +1819,8 @@ public abstract class TestOzoneRpcClientAbstract {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType
-        .STAND_ALONE, ReplicationFactor.ONE);
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
 
     // We have not uploaded any parts, but passing some list it should throw
     // error.
@@ -1840,8 +1844,8 @@ public abstract class TestOzoneRpcClientAbstract {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType
-        .STAND_ALONE, ReplicationFactor.ONE);
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
     uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
 
     // We have not uploaded any parts, but passing some list it should throw
@@ -1865,8 +1869,8 @@ public abstract class TestOzoneRpcClientAbstract {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType
-        .STAND_ALONE, ReplicationFactor.ONE);
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
     uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
 
     // We have not uploaded any parts, but passing some list it should throw
@@ -1905,8 +1909,8 @@ public abstract class TestOzoneRpcClientAbstract {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType
-        .STAND_ALONE, ReplicationFactor.ONE);
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
     bucket.abortMultipartUpload(keyName, uploadID);
   }
@@ -1921,8 +1925,8 @@ public abstract class TestOzoneRpcClientAbstract {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType
-        .STAND_ALONE, ReplicationFactor.ONE);
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
     uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
     bucket.abortMultipartUpload(keyName, uploadID);
   }
@@ -1939,8 +1943,8 @@ public abstract class TestOzoneRpcClientAbstract {
     OzoneBucket bucket = volume.getBucket(bucketName);
     Map<Integer, String> partsMap = new TreeMap<>();
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType
-        .STAND_ALONE, ReplicationFactor.ONE);
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
     String partName1 = uploadPart(bucket, keyName, uploadID, 1,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(1, partName1);
@@ -1956,7 +1960,7 @@ public abstract class TestOzoneRpcClientAbstract {
     OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
         bucket.listParts(keyName, uploadID, 0, 3);
 
-    Assert.assertEquals(ReplicationType.STAND_ALONE,
+    Assert.assertEquals(STAND_ALONE,
         ozoneMultipartUploadPartListParts.getReplicationType());
     Assert.assertEquals(3,
         ozoneMultipartUploadPartListParts.getPartInfoList().size());
@@ -1990,8 +1994,8 @@ public abstract class TestOzoneRpcClientAbstract {
     OzoneBucket bucket = volume.getBucket(bucketName);
     Map<Integer, String> partsMap = new TreeMap<>();
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType
-        .STAND_ALONE, ReplicationFactor.ONE);
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
     String partName1 = uploadPart(bucket, keyName, uploadID, 1,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(1, partName1);
@@ -2007,7 +2011,7 @@ public abstract class TestOzoneRpcClientAbstract {
     OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
         bucket.listParts(keyName, uploadID, 0, 2);
 
-    Assert.assertEquals(ReplicationType.STAND_ALONE,
+    Assert.assertEquals(STAND_ALONE,
         ozoneMultipartUploadPartListParts.getReplicationType());
 
     Assert.assertEquals(2,
@@ -2095,8 +2099,8 @@ public abstract class TestOzoneRpcClientAbstract {
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType
-        .STAND_ALONE, ReplicationFactor.ONE);
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
 
     uploadPart(bucket, keyName, uploadID, 1,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -2108,7 +2112,7 @@ public abstract class TestOzoneRpcClientAbstract {
     Assert.assertEquals(0,
         ozoneMultipartUploadPartListParts.getPartInfoList().size());
 
-    Assert.assertEquals(ReplicationType.STAND_ALONE,
+    Assert.assertEquals(STAND_ALONE,
         ozoneMultipartUploadPartListParts.getReplicationType());
 
     // As we don't have any parts with greater than partNumberMarker and list
@@ -2138,48 +2142,14 @@ public abstract class TestOzoneRpcClientAbstract {
   public void testNativeAclsForVolume() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     store.createVolume(volumeName);
-    OzoneVolume volume = store.getVolume(volumeName);
 
     OzoneObj ozObj = new OzoneObjInfo.Builder()
         .setVolumeName(volumeName)
         .setResType(OzoneObj.ResourceType.VOLUME)
         .setStoreType(OzoneObj.StoreType.OZONE)
         .build();
-    // Get acls for volume.
-    List<OzoneAcl> volAcls = store.getAcl(ozObj);
-    volAcls.forEach(a -> assertTrue(volume.getAcls().contains(a)));
 
-    // Remove all acl's.
-    for (OzoneAcl a : volAcls) {
-      store.removeAcl(ozObj, a);
-    }
-    List<OzoneAcl> newAcls = store.getAcl(ozObj);
-    OzoneVolume finalVolume = store.getVolume(volumeName);
-    assertTrue(finalVolume.getAcls().size() == 0);
-    assertTrue(newAcls.size() == 0);
-
-    // Add acl's and then call getAcl.
-    for (OzoneAcl a : volAcls) {
-      // Try removing an acl which doesn't exist, it should return false.
-      assertFalse(finalVolume.getAcls().contains(a));
-      assertFalse(store.removeAcl(ozObj, a));
-      assertTrue(store.addAcl(ozObj, a));
-      finalVolume = store.getVolume(volumeName);
-      assertTrue(finalVolume.getAcls().contains(a));
-
-      // Call addAcl again, this time operation will fail as
-      // acl is already added.
-      assertFalse(store.addAcl(ozObj, a));
-    }
-    assertTrue(finalVolume.getAcls().size() == volAcls.size());
-
-    // Reset acl's.
-    store.setAcl(ozObj, newAcls);
-    finalVolume = store.getVolume(volumeName);
-    newAcls = store.getAcl(ozObj);
-    assertTrue(newAcls.size() == 0);
-    assertTrue(finalVolume.getAcls().size() == 0);
+    validateOzoneAcl(ozObj);
   }
 
   @Test
@@ -2199,42 +2169,138 @@ public abstract class TestOzoneRpcClientAbstract {
         .setResType(OzoneObj.ResourceType.BUCKET)
         .setStoreType(OzoneObj.StoreType.OZONE)
         .build();
+
+    validateOzoneAcl(ozObj);
+  }
+
+  @Test
+  public void testNativeAclsForKey() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String key1 = UUID.randomUUID().toString();
+    String key2 = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    assertNotNull("Bucket creation failed", bucket);
+
+    writeKey(key1, bucket);
+    writeKey(key2, bucket);
+
+    OzoneObj ozObj = new OzoneObjInfo.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(key1)
+        .setResType(OzoneObj.ResourceType.KEY)
+        .setStoreType(OzoneObj.StoreType.OZONE)
+        .build();
+
+    validateOzoneAcl(ozObj);
+  }
+
+  /**
+   * Helper function to get default acl list for current user.
+   *
+   * @return list of default Acls.
+   * @throws IOException
+   * */
+  private List<OzoneAcl> getAclList(OzoneConfiguration conf)
+      throws IOException {
+    List<OzoneAcl> listOfAcls = new ArrayList<>();
+    //User ACL
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class);
+    ACLType userRights = aclConfig.getUserDefaultRights();
+    ACLType groupRights = aclConfig.getGroupDefaultRights();
+
+    listOfAcls.add(new OzoneAcl(ACLIdentityType.USER,
+        ugi.getUserName(), userRights));
+    //Group ACLs of the User
+    List<String> userGroups = Arrays.asList(UserGroupInformation
+        .createRemoteUser(ugi.getUserName()).getGroupNames());
+    userGroups.stream().forEach((group) -> listOfAcls.add(
+        new OzoneAcl(ACLIdentityType.GROUP, group, groupRights)));
+    return listOfAcls;
+  }
+
+  /**
+   * Helper function to validate ozone Acl for given object.
+   * @param ozObj
+   * */
+  private void validateOzoneAcl(OzoneObj ozObj) throws IOException {
     // Get acls for volume.
-    List<OzoneAcl> volAcls = store.getAcl(ozObj);
-    volAcls.forEach(a -> assertTrue(bucket.getAcls().contains(a)));
+    List<OzoneAcl> expectedAcls = getAclList(new OzoneConfiguration());
+
+    // Case:1 Add new acl permission to existing acl.
+    if(expectedAcls.size()>0) {
+      OzoneAcl oldAcl = expectedAcls.get(0);
+      OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(),
+          ACLType.READ_ACL);
+      // Verify that operation successful.
+      assertTrue(store.addAcl(ozObj, newAcl));
+      List<OzoneAcl> acls = store.getAcl(ozObj);
+
+      assertTrue(acls.size() == expectedAcls.size());
+      boolean aclVerified = false;
+      for(OzoneAcl acl: acls) {
+        if(acl.getName().equals(newAcl.getName())) {
+          assertTrue(acl.getAclList().contains(ACLType.READ_ACL));
+          aclVerified = true;
+        }
+      }
+      assertTrue("New acl expected but not found.", aclVerified);
+      aclVerified = false;
+
+      // Case:2 Remove newly added acl permission.
+      assertTrue(store.removeAcl(ozObj, newAcl));
+      acls = store.getAcl(ozObj);
+      assertTrue(acls.size() == expectedAcls.size());
+      for(OzoneAcl acl: acls) {
+        if(acl.getName().equals(newAcl.getName())) {
+          assertFalse(acl.getAclList().contains(ACLType.READ_ACL));
+          aclVerified = true;
+        }
+      }
+      assertTrue("New acl expected but not found.", aclVerified);
+    } else {
+      fail("Default acl should not be empty.");
+    }
+
+    List<OzoneAcl> keyAcls = store.getAcl(ozObj);
+    expectedAcls.forEach(a -> assertTrue(keyAcls.contains(a)));
 
     // Remove all acl's.
-    for (OzoneAcl a : volAcls) {
-      assertTrue(store.removeAcl(ozObj, a));
+    for (OzoneAcl a : expectedAcls) {
+      store.removeAcl(ozObj, a);
     }
     List<OzoneAcl> newAcls = store.getAcl(ozObj);
-    OzoneBucket finalBuck = volume.getBucket(bucketName);
-    assertTrue(finalBuck.getAcls().size() == 0);
     assertTrue(newAcls.size() == 0);
 
     // Add acl's and then call getAcl.
-    for (OzoneAcl a : volAcls) {
-      // Try removing an acl which doesn't exist, it should return false.
-      assertFalse(finalBuck.getAcls().contains(a));
-      assertFalse(store.removeAcl(ozObj, a));
+    int aclCount = 0;
+    for (OzoneAcl a : expectedAcls) {
+      aclCount++;
+      // Add acl should succeed.
       assertTrue(store.addAcl(ozObj, a));
-      finalBuck = volume.getBucket(bucketName);
-      assertTrue(finalBuck.getAcls().contains(a));
-
-      // Call addAcl again, this time operation will return false as
-      // acl is already added.
-      assertFalse(store.addAcl(ozObj, a));
+      assertTrue(store.getAcl(ozObj).size() == aclCount);
    }
-    assertTrue(finalBuck.getAcls().size() == volAcls.size());
+    newAcls = store.getAcl(ozObj);
+    assertTrue(newAcls.size() == expectedAcls.size());
+    List<OzoneAcl> finalNewAcls = newAcls;
+    expectedAcls.forEach(a -> assertTrue(finalNewAcls.contains(a)));
 
     // Reset acl's.
-    store.setAcl(ozObj, newAcls);
-    finalBuck = volume.getBucket(bucketName);
+    store.setAcl(ozObj, new ArrayList<>());
     newAcls = store.getAcl(ozObj);
     assertTrue(newAcls.size() == 0);
-    assertTrue(finalBuck.getAcls().size() == 0);
+  }
+
+  private void writeKey(String key1, OzoneBucket bucket) throws IOException {
+    OzoneOutputStream out = bucket.createKey(key1, 1024, STAND_ALONE,
+        ONE, new HashMap<>());
+    out.write(RandomStringUtils.random(1024).getBytes());
+    out.close();
   }
 
   private byte[] generateData(int size, byte val) {

View File

@@ -112,6 +112,7 @@ public class TestMultipleContainerReadWrite {
     String dataString = RandomStringUtils.randomAscii(3 * (int)OzoneConsts.MB);
     KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
     keyArgs.setSize(3 * (int)OzoneConsts.MB);
+    keyArgs.setUserName(userName);
 
     try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
       outputStream.write(dataString.getBytes());
@@ -190,6 +191,7 @@ public class TestMultipleContainerReadWrite {
     String dataString = RandomStringUtils.randomAscii(500);
     KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
     keyArgs.setSize(500);
+    keyArgs.setUserName(userName);
 
     try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
       outputStream.write(dataString.getBytes());

View File

@@ -44,6 +44,7 @@ import org.junit.rules.ExpectedException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
@@ -116,6 +117,7 @@ public class TestOmBlockVersioning {
         .setKeyName(keyName)
         .setDataSize(1000)
         .setRefreshPipeline(true)
+        .setAcls(new ArrayList<>())
         .build();
 
     // 1st update, version 0
@@ -220,6 +222,7 @@ public class TestOmBlockVersioning {
     String dataString = RandomStringUtils.randomAlphabetic(100);
     KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setUserName(userName);
     // this write will create 1st version with one block
     try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
       stream.write(dataString.getBytes());

View File

@@ -486,6 +486,7 @@ public class TestOzoneManager {
     String dataString = RandomStringUtils.randomAscii(100);
     KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
     keyArgs.setSize(100);
+    keyArgs.setUserName(userName);
     try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
       stream.write(dataString.getBytes());
     }
@@ -525,6 +526,7 @@ public class TestOzoneManager {
     String dataString = RandomStringUtils.randomAscii(100);
     KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
     keyArgs.setSize(100);
+    keyArgs.setUserName(userName);
     try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
       stream.write(dataString.getBytes());
     }
@@ -567,6 +569,7 @@ public class TestOzoneManager {
 
     KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
     keyArgs.setSize(100);
+    keyArgs.setUserName(userName);
     String dataString = RandomStringUtils.randomAscii(100);
     try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
       stream.write(dataString.getBytes());
@@ -577,6 +580,7 @@ public class TestOzoneManager {
     // That is this overwrite only overwrites the keys on OM. We need to
     // garbage collect those blocks from datanode.
     KeyArgs keyArgs2 = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs2.setUserName(userName);
     storageHandler.newKeyWriter(keyArgs2);
     Assert
         .assertEquals(numKeyAllocateFails, omMetrics.getNumKeyAllocateFails());


@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.om;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.RandomStringUtils;
@ -134,6 +135,7 @@ public class TestScmSafeMode {
.setBucketName(bucketName)
.setKeyName(keyName)
.setDataSize(1000)
.setAcls(Collections.emptyList())
.build();
OmVolumeArgs volArgs = new OmVolumeArgs.Builder()
.setAdminName(adminName)


@ -66,7 +66,9 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
/**
@ -444,6 +446,8 @@ public final class DistributedStorageHandler implements StorageHandler {
@Override
public OutputStream newKeyWriter(KeyArgs args) throws IOException,
OzoneException {
Objects.requireNonNull(args.getUserName(),
"Username should not be null");
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
.setVolumeName(args.getVolumeName())
.setBucketName(args.getBucketName())
@ -451,6 +455,9 @@ public final class DistributedStorageHandler implements StorageHandler {
.setDataSize(args.getSize())
.setType(xceiverClientManager.getType())
.setFactor(xceiverClientManager.getFactor())
.setAcls(OzoneUtils.getAclList(args.getUserName(),
args.getGroups() != null ? Arrays.asList(args.getGroups()) : null,
ACLType.ALL, ACLType.ALL))
.build();
// contact OM to allocate a block for key.
OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
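The ACLType.ALL defaults above come from OzoneUtils.getAclList, which expands the caller's user name and group list into one ACL entry per principal. The following is an illustrative sketch of that expansion only (not part of the change); the DefaultAclSketch class, the Acl record and the plain-string rights are stand-ins for the real OzoneAcl/OzoneUtils types:

import java.util.ArrayList;
import java.util.List;

// Illustrative sketch: mirrors what a default-acl helper such as
// OzoneUtils.getAclList(user, groups, userRights, groupRights) is expected to
// produce -- one USER entry for the owner plus one GROUP entry per group.
public final class DefaultAclSketch {

  record Acl(String type, String name, String rights) { }

  static List<Acl> defaultAcls(String user, List<String> groups,
      String userRights, String groupRights) {
    List<Acl> acls = new ArrayList<>();
    acls.add(new Acl("USER", user, userRights));   // owner entry
    if (groups != null) {
      groups.forEach(g -> acls.add(new Acl("GROUP", g, groupRights)));
    }
    return acls;
  }

  public static void main(String[] args) {
    // Prints a USER entry for "alice" followed by a GROUP entry for "dev".
    System.out.println(defaultAcls("alice", List.of("dev"), "ALL", "ALL"));
  }
}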


@ -17,6 +17,8 @@
package org.apache.hadoop.ozone.om;
import java.io.IOException;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;
import java.util.Objects;
@ -40,6 +42,7 @@ import org.iq80.leveldb.DBException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
/**
@ -404,15 +407,32 @@ public class BucketManagerImpl implements BucketManager {
throw new OMException("Bucket " + bucket + " is not found", throw new OMException("Bucket " + bucket + " is not found",
BUCKET_NOT_FOUND); BUCKET_NOT_FOUND);
} }
List<OzoneAcl> list = bucketInfo.getAcls();
if(!validateAddAcl(acl, list)) { // Case 1: When we are adding more rights to existing user/group.
// New acl can't be added as it is not consistent with existing ACLs. boolean addToExistingAcl = false;
LOG.info("New acl:{} can't be added as it is not consistent with " + for(OzoneAcl a: bucketInfo.getAcls()) {
"existing ACLs:{}.", acl, StringUtils.join(",", list)); if(a.getName().equals(acl.getName()) &&
a.getType().equals(acl.getType())) {
BitSet bits = (BitSet) acl.getAclBitSet().clone();
bits.or(a.getAclBitSet());
if (bits.equals(a.getAclBitSet())) {
return false; return false;
} }
list.add(acl); a.getAclBitSet().or(acl.getAclBitSet());
OmBucketInfo updatedBucket = OmBucketInfo.newBuilder() addToExistingAcl = true;
break;
}
}
// Case 2: When a completely new acl is added.
if(!addToExistingAcl) {
List<OzoneAcl> newAcls = bucketInfo.getAcls();
if(newAcls == null) {
newAcls = new ArrayList<>();
}
newAcls.add(acl);
bucketInfo = OmBucketInfo.newBuilder()
.setVolumeName(bucketInfo.getVolumeName()) .setVolumeName(bucketInfo.getVolumeName())
.setBucketName(bucketInfo.getBucketName()) .setBucketName(bucketInfo.getBucketName())
.setStorageType(bucketInfo.getStorageType()) .setStorageType(bucketInfo.getStorageType())
@ -420,11 +440,11 @@ public class BucketManagerImpl implements BucketManager {
.setCreationTime(bucketInfo.getCreationTime()) .setCreationTime(bucketInfo.getCreationTime())
.setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo()) .setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo())
.addAllMetadata(bucketInfo.getMetadata()) .addAllMetadata(bucketInfo.getMetadata())
.setAcls(list) .setAcls(newAcls)
.build(); .build();
// TODO:HDDS-1619 OM HA changes required for all acl operations. }
metadataManager.getBucketTable().put(dbBucketKey, updatedBucket); metadataManager.getBucketTable().put(dbBucketKey, bucketInfo);
} catch (IOException ex) { } catch (IOException ex) {
if (!(ex instanceof OMException)) { if (!(ex instanceof OMException)) {
LOG.error("Add acl operation failed for bucket:{}/{} acl:{}", LOG.error("Add acl operation failed for bucket:{}/{} acl:{}",
@ -466,26 +486,31 @@ public class BucketManagerImpl implements BucketManager {
throw new OMException("Bucket " + bucket + " is not found", throw new OMException("Bucket " + bucket + " is not found",
BUCKET_NOT_FOUND); BUCKET_NOT_FOUND);
} }
List<OzoneAcl> list = bucketInfo.getAcls();
if (!list.contains(acl)) { // When we are removing subset of rights from existing acl.
// Return false if acl doesn't exist in current ACLs. for(OzoneAcl a: bucketInfo.getAcls()) {
LOG.info("Acl:{} not found in existing ACLs:{}.", acl, if(a.getName().equals(acl.getName()) &&
StringUtils.join(",", list)); a.getType().equals(acl.getType())) {
BitSet bits = (BitSet) acl.getAclBitSet().clone();
bits.and(a.getAclBitSet());
if (bits.equals(ZERO_BITSET)) {
return false; return false;
} }
list.remove(acl); bits = (BitSet) acl.getAclBitSet().clone();
OmBucketInfo updatedBucket = OmBucketInfo.newBuilder() bits.and(a.getAclBitSet());
.setVolumeName(bucketInfo.getVolumeName()) a.getAclBitSet().xor(bits);
.setBucketName(bucketInfo.getBucketName())
.setStorageType(bucketInfo.getStorageType())
.setIsVersionEnabled(bucketInfo.getIsVersionEnabled())
.setCreationTime(bucketInfo.getCreationTime())
.setBucketEncryptionKey(bucketInfo.getEncryptionKeyInfo())
.addAllMetadata(bucketInfo.getMetadata())
.setAcls(list)
.build();
metadataManager.getBucketTable().put(dbBucketKey, updatedBucket); if(a.getAclBitSet().equals(ZERO_BITSET)) {
bucketInfo.getAcls().remove(a);
}
break;
} else {
return false;
}
}
metadataManager.getBucketTable().put(dbBucketKey, bucketInfo);
} catch (IOException ex) { } catch (IOException ex) {
if (!(ex instanceof OMException)) { if (!(ex instanceof OMException)) {
LOG.error("Remove acl operation failed for bucket:{}/{} acl:{}", LOG.error("Remove acl operation failed for bucket:{}/{} acl:{}",
@ -552,23 +577,6 @@ public class BucketManagerImpl implements BucketManager {
return true;
}
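Both bucket-level paths above come down to BitSet arithmetic: addAcl ORs the requested rights into a copy of the stored bits to see whether anything new is being granted, and removeAcl ANDs them to find the overlap before XOR-ing it out. A standalone sketch of both checks (illustrative only, plain java.util.BitSet; the bit-position-to-right mapping such as READ/WRITE/DELETE is an arbitrary assumption for the example):

import java.util.BitSet;

public final class AclBitSetSketch {

  // addAcl-style merge: returns false when the requested rights are already present.
  static boolean add(BitSet existing, BitSet requested) {
    BitSet bits = (BitSet) requested.clone();
    bits.or(existing);
    if (bits.equals(existing)) {
      return false;             // nothing new to grant
    }
    existing.or(requested);     // merge the new rights in
    return true;
  }

  // removeAcl-style subtraction: returns false when none of the requested rights are present.
  static boolean remove(BitSet existing, BitSet requested) {
    BitSet overlap = (BitSet) requested.clone();
    overlap.and(existing);
    if (overlap.isEmpty()) {
      return false;             // nothing to take away
    }
    existing.xor(overlap);      // drop only the overlapping rights
    return true;
  }

  public static void main(String[] args) {
    BitSet existing = new BitSet();
    existing.set(0);            // say bit 0 = READ
    existing.set(1);            // say bit 1 = WRITE

    BitSet writeOnly = new BitSet();
    writeOnly.set(1);
    System.out.println(add(existing, writeOnly));    // false: WRITE already granted

    BitSet delete = new BitSet();
    delete.set(2);              // say bit 2 = DELETE
    System.out.println(add(existing, delete));       // true: existing becomes {0, 1, 2}

    System.out.println(remove(existing, writeOnly)); // true: existing becomes {0, 2}
    System.out.println(existing.isEmpty());          // false; when true, the whole acl entry is dropped
  }
}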
/**
* Validates if a new acl addition is consistent with current ACL list.
* @param newAcl new acl to be added.
* @param currentAcls list of acls.
*
* @return true if newAcl addition to existing acls is valid, else false.
* */
private boolean validateAddAcl(OzoneAcl newAcl, List<OzoneAcl> currentAcls) {
// Check 1: Check for duplicate.
if(currentAcls.contains(newAcl)) {
return false;
}
return true;
}
/**
* Returns list of ACLs for given Ozone object.
*


@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@ -35,6 +36,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.KeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.KeyLocation;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.utils.BackgroundService;
import java.io.IOException;
@ -285,4 +287,42 @@ public interface KeyManager extends OzoneManagerFS {
String keyName, String uploadID, int partNumberMarker,
int maxParts) throws IOException;
/**
* Add acl for Ozone object. Return true if acl is added successfully else
* false.
* @param obj Ozone object for which acl should be added.
* @param acl ozone acl to be added.
*
* @throws IOException if there is error.
* */
boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException;
/**
* Remove acl for Ozone object. Return true if acl is removed successfully
* else false.
* @param obj Ozone object.
* @param acl Ozone acl to be removed.
*
* @throws IOException if there is error.
* */
boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException;
/**
* Acls to be set for given Ozone object. This operation resets the ACL for
* the given object to the list of ACLs provided in the argument.
* @param obj Ozone object.
* @param acls List of acls.
*
* @throws IOException if there is error.
* */
boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException;
/**
* Returns list of ACLs for given Ozone object.
* @param obj Ozone object.
*
* @throws IOException if there is error.
* */
List<OzoneAcl> getAcl(OzoneObj obj) throws IOException;
}
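Taken together, the four methods give keys the same ACL surface that volumes and buckets already expose. A hedged usage sketch of this interface follows; the KeyAclUsageSketch class is hypothetical, and it assumes the caller has already built a key-scoped OzoneObj and an OzoneAcl, since their construction is not part of this interface:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.KeyManager;
import org.apache.hadoop.ozone.security.acl.OzoneObj;

// Illustrative walk-through of the key ACL surface; keyObj and acl are supplied
// by the caller and are assumed to reference an existing key.
final class KeyAclUsageSketch {
  static void exercise(KeyManager keyManager, OzoneObj keyObj, OzoneAcl acl)
      throws IOException {
    boolean added = keyManager.addAcl(keyObj, acl);        // merge one acl into the key
    List<OzoneAcl> current = keyManager.getAcl(keyObj);    // read back the stored acls
    boolean removed = keyManager.removeAcl(keyObj, acl);   // take the same acl out again
    System.out.println(added + " " + current.size() + " " + removed);
  }
}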


@ -26,11 +26,13 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.security.GeneralSecurityException;
import java.security.PrivilegedExceptionAction;
import java.util.stream.Collectors;
import com.google.common.base.Strings;
import org.apache.commons.codec.digest.DigestUtils;
@ -49,6 +51,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
@ -74,7 +77,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.KeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.KeyLocation;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.ozone.common.BlockGroup;
@ -108,6 +115,10 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY;
import static org.apache.hadoop.util.Time.monotonicNow;
import org.slf4j.Logger;
@ -218,14 +229,14 @@ public class KeyManagerImpl implements KeyManager {
if (metadataManager.getVolumeTable().get(volumeKey) == null) {
LOG.error("volume not found: {}", volumeName);
throw new OMException("Volume not found",
VOLUME_NOT_FOUND);
}
// if the volume exists but bucket does not exist, throw bucket not found
// exception
LOG.error("bucket not found: {}/{} ", volumeName, bucketName);
throw new OMException("Bucket not found",
BUCKET_NOT_FOUND);
}
}
@ -243,7 +254,7 @@ public class KeyManagerImpl implements KeyManager {
if (metadataManager.getBucketTable().get(bucketKey) == null) {
LOG.error("bucket not found: {}/{} ", volumeName, bucketName);
throw new OMException("Bucket not found",
BUCKET_NOT_FOUND);
}
}
@ -266,7 +277,7 @@ public class KeyManagerImpl implements KeyManager {
LOG.error("Allocate block for a key not in open status in meta store" +
" /{}/{}/{} with ID {}", volumeName, bucketName, keyName, clientID);
throw new OMException("Open Key not found",
KEY_NOT_FOUND);
}
OmKeyLocationInfo omKeyLocationInfo =
@ -295,7 +306,7 @@ public class KeyManagerImpl implements KeyManager {
LOG.error("Allocate block for a key not in open status in meta store" +
" /{}/{}/{} with ID {}", volumeName, bucketName, keyName, clientID);
throw new OMException("Open Key not found",
KEY_NOT_FOUND);
}
// current version not committed, so new blocks coming now are added to
@ -402,6 +413,9 @@ public class KeyManagerImpl implements KeyManager {
@Override
public OpenKeySession openKey(OmKeyArgs args) throws IOException {
Preconditions.checkNotNull(args);
Preconditions.checkNotNull(args.getAcls(), "Default acls " +
"should be set.");
String volumeName = args.getVolumeName();
String bucketName = args.getBucketName();
String keyName = args.getKeyName();
@ -582,7 +596,7 @@ public class KeyManagerImpl implements KeyManager {
ReplicationFactor factor,
ReplicationType type, long size,
FileEncryptionInfo encInfo) {
OmKeyInfo.Builder builder = new OmKeyInfo.Builder()
.setVolumeName(keyArgs.getVolumeName())
.setBucketName(keyArgs.getBucketName())
.setKeyName(keyArgs.getKeyName())
@ -593,8 +607,12 @@ public class KeyManagerImpl implements KeyManager {
.setDataSize(size)
.setReplicationType(type)
.setReplicationFactor(factor)
.setFileEncryptionInfo(encInfo);
if(keyArgs.getAcls() != null) {
builder.setAcls(keyArgs.getAcls().stream().map(a ->
OzoneAcl.toProtobuf(a)).collect(Collectors.toList()));
}
return builder.build();
}
@Override
@ -615,7 +633,7 @@ public class KeyManagerImpl implements KeyManager {
OmKeyInfo keyInfo = metadataManager.getOpenKeyTable().get(openKey);
if (keyInfo == null) {
throw new OMException("Commit a key without corresponding entry " +
objectKey, KEY_NOT_FOUND);
}
keyInfo.setDataSize(args.getDataSize());
keyInfo.setModificationTime(Time.now());
@ -655,7 +673,7 @@ public class KeyManagerImpl implements KeyManager {
LOG.debug("volume:{} bucket:{} Key:{} not found", LOG.debug("volume:{} bucket:{} Key:{} not found",
volumeName, bucketName, keyName); volumeName, bucketName, keyName);
throw new OMException("Key not found", throw new OMException("Key not found",
OMException.ResultCodes.KEY_NOT_FOUND); KEY_NOT_FOUND);
} }
if (grpcBlockTokenEnabled) { if (grpcBlockTokenEnabled) {
String remoteUser = getRemoteUser().getShortUserName(); String remoteUser = getRemoteUser().getShortUserName();
@ -700,7 +718,7 @@ public class KeyManagerImpl implements KeyManager {
LOG.debug("Get key failed for volume:{} bucket:{} key:{}", LOG.debug("Get key failed for volume:{} bucket:{} key:{}",
volumeName, bucketName, keyName, ex); volumeName, bucketName, keyName, ex);
throw new OMException(ex.getMessage(), throw new OMException(ex.getMessage(),
OMException.ResultCodes.KEY_NOT_FOUND); KEY_NOT_FOUND);
} finally { } finally {
metadataManager.getLock().releaseBucketLock(volumeName, bucketName); metadataManager.getLock().releaseBucketLock(volumeName, bucketName);
} }
@ -733,7 +751,7 @@ public class KeyManagerImpl implements KeyManager {
+ "Key: {} not found.", volumeName, bucketName, fromKeyName, + "Key: {} not found.", volumeName, bucketName, fromKeyName,
toKeyName, fromKeyName); toKeyName, fromKeyName);
throw new OMException("Key not found", throw new OMException("Key not found",
OMException.ResultCodes.KEY_NOT_FOUND); KEY_NOT_FOUND);
} }
// A rename is a no-op if the target and source name is same. // A rename is a no-op if the target and source name is same.
@ -790,7 +808,7 @@ public class KeyManagerImpl implements KeyManager {
OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
if (keyInfo == null) { if (keyInfo == null) {
throw new OMException("Key not found", throw new OMException("Key not found",
OMException.ResultCodes.KEY_NOT_FOUND); KEY_NOT_FOUND);
} else { } else {
// directly delete key with no blocks from db. This key need not be // directly delete key with no blocks from db. This key need not be
// moved to deleted table. // moved to deleted table.
@ -922,6 +940,8 @@ public class KeyManagerImpl implements KeyManager {
.setReplicationFactor(keyArgs.getFactor()) .setReplicationFactor(keyArgs.getFactor())
.setOmKeyLocationInfos(Collections.singletonList( .setOmKeyLocationInfos(Collections.singletonList(
new OmKeyLocationInfoGroup(0, locations))) new OmKeyLocationInfoGroup(0, locations)))
.setAcls(keyArgs.getAcls().stream().map(a ->
OzoneAcl.toProtobuf(a)).collect(Collectors.toList()))
.build(); .build();
DBStore store = metadataManager.getStore(); DBStore store = metadataManager.getStore();
try (BatchOperation batch = store.initBatchOperation()) { try (BatchOperation batch = store.initBatchOperation()) {
@ -1155,13 +1175,13 @@ public class KeyManagerImpl implements KeyManager {
.setDataSize(size)
.setOmKeyLocationInfos(
Collections.singletonList(keyLocationInfoGroup))
.setAcls(omKeyArgs.getAcls().stream().map(a ->
OzoneAcl.toProtobuf(a)).collect(Collectors.toList())).build();
} else {
// Already a version exists, so we should add it as a new version.
// But now as versioning is not supported, just following the commit
// key approach. When versioning support comes, then we can uncomment
// below code keyInfo.addNewVersion(locations);
keyInfo.updateLocationInfoList(locations);
}
DBStore store = metadataManager.getStore();
@ -1330,6 +1350,305 @@ public class KeyManagerImpl implements KeyManager {
}
}
/**
* Add acl for Ozone object. Return true if acl is added successfully else
* false.
*
* @param obj Ozone object for which acl should be added.
* @param acl ozone acl to be added.
* @throws IOException if there is error.
*/
@Override
public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
validateOzoneObj(obj);
String volume = obj.getVolumeName();
String bucket = obj.getBucketName();
String keyName = obj.getKeyName();
metadataManager.getLock().acquireBucketLock(volume, bucket);
try {
validateBucket(volume, bucket);
String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
Table keyTable;
if (keyInfo == null) {
keyInfo = metadataManager.getOpenKeyTable().get(objectKey);
if (keyInfo == null) {
throw new OMException("Key not found. Key:" +
objectKey, KEY_NOT_FOUND);
}
keyTable = metadataManager.getOpenKeyTable();
} else {
keyTable = metadataManager.getKeyTable();
}
List<OzoneAclInfo> newAcls = new ArrayList<>(keyInfo.getAcls());
OzoneAclInfo newAcl = null;
for(OzoneAclInfo a: keyInfo.getAcls()) {
if(a.getName().equals(acl.getName())) {
List<OzoneAclRights> rights =
new ArrayList<>(a.getRightsList());
for (IAccessAuthorizer.ACLType aclType : acl.getAclList()) {
rights.add(OzoneAclRights.valueOf(aclType.name()));
}
newAcl = OzoneAclInfo.newBuilder()
.setType(a.getType())
.setName(a.getName())
.addAllRights(rights)
.build();
newAcls.remove(a);
newAcls.add(newAcl);
break;
}
}
if(newAcl == null) {
newAcls.add(OzoneAcl.toProtobuf(acl));
}
OmKeyInfo newObj = new OmKeyInfo.Builder()
.setBucketName(keyInfo.getBucketName())
.setKeyName(keyInfo.getKeyName())
.setReplicationFactor(keyInfo.getFactor())
.setReplicationType(keyInfo.getType())
.setVolumeName(keyInfo.getVolumeName())
.setOmKeyLocationInfos(keyInfo.getKeyLocationVersions())
.setCreationTime(keyInfo.getCreationTime())
.setModificationTime(keyInfo.getModificationTime())
.setAcls(newAcls)
.setDataSize(keyInfo.getDataSize())
.setFileEncryptionInfo(keyInfo.getFileEncryptionInfo())
.build();
keyTable.put(objectKey, newObj);
} catch (IOException ex) {
if (!(ex instanceof OMException)) {
LOG.error("Add acl operation failed for key:{}/{}/{}", volume,
bucket, keyName, ex);
}
throw ex;
} finally {
metadataManager.getLock().releaseBucketLock(volume, bucket);
}
return true;
}
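When the key already carries an entry for the same principal, addAcl extends that entry's rights list rather than appending a second entry. The union step, reduced to plain Java, is sketched below; the RightsUnionSketch class and the string stand-ins for OzoneAclRights are assumptions, and using a set here is merely one way to keep the merged list free of duplicates:

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Sketch of the rights-union performed when an acl for the same name already exists.
public final class RightsUnionSketch {
  static List<String> union(List<String> existing, List<String> requested) {
    Set<String> merged = new LinkedHashSet<>(existing); // keep order, drop duplicates
    merged.addAll(requested);
    return new ArrayList<>(merged);
  }

  public static void main(String[] args) {
    System.out.println(union(List.of("READ", "WRITE"), List.of("WRITE", "DELETE")));
    // -> [READ, WRITE, DELETE]
  }
}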
/**
* Remove acl for Ozone object. Return true if acl is removed successfully
* else false.
*
* @param obj Ozone object.
* @param acl Ozone acl to be removed.
* @throws IOException if there is error.
*/
@Override
public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
validateOzoneObj(obj);
String volume = obj.getVolumeName();
String bucket = obj.getBucketName();
String keyName = obj.getKeyName();
metadataManager.getLock().acquireBucketLock(volume, bucket);
try {
validateBucket(volume, bucket);
String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
Table keyTable;
if (keyInfo == null) {
keyInfo = metadataManager.getOpenKeyTable().get(objectKey);
if (keyInfo == null) {
throw new OMException("Key not found. Key:" +
objectKey, KEY_NOT_FOUND);
}
keyTable = metadataManager.getOpenKeyTable();
} else {
keyTable = metadataManager.getKeyTable();
}
List<OzoneAclInfo> newAcls = new ArrayList<>(keyInfo.getAcls());
OzoneAclInfo newAcl = OzoneAcl.toProtobuf(acl);
if(newAcls.contains(OzoneAcl.toProtobuf(acl))) {
newAcls.remove(newAcl);
} else {
// Acl to be removed might be a subset of existing acls.
for(OzoneAclInfo a: keyInfo.getAcls()) {
if(a.getName().equals(acl.getName())) {
List<OzoneAclRights> rights =
new ArrayList<>(a.getRightsList());
for (IAccessAuthorizer.ACLType aclType : acl.getAclList()) {
rights.remove(OzoneAclRights.valueOf(aclType.name()));
}
newAcl = OzoneAclInfo.newBuilder()
.setType(a.getType())
.setName(a.getName())
.addAllRights(rights)
.build();
newAcls.remove(a);
newAcls.add(newAcl);
break;
}
}
if(newAcl == null) {
newAcls.add(OzoneAcl.toProtobuf(acl));
}
}
OmKeyInfo newObj = new OmKeyInfo.Builder()
.setBucketName(keyInfo.getBucketName())
.setKeyName(keyInfo.getKeyName())
.setReplicationFactor(keyInfo.getFactor())
.setReplicationType(keyInfo.getType())
.setVolumeName(keyInfo.getVolumeName())
.setOmKeyLocationInfos(keyInfo.getKeyLocationVersions())
.setCreationTime(keyInfo.getCreationTime())
.setModificationTime(keyInfo.getModificationTime())
.setAcls(newAcls)
.setDataSize(keyInfo.getDataSize())
.setFileEncryptionInfo(keyInfo.getFileEncryptionInfo())
.build();
keyTable.put(objectKey, newObj);
} catch (IOException ex) {
if (!(ex instanceof OMException)) {
LOG.error("Remove acl operation failed for key:{}/{}/{}", volume,
bucket, keyName, ex);
}
throw ex;
} finally {
metadataManager.getLock().releaseBucketLock(volume, bucket);
}
return true;
}
/**
* Acls to be set for given Ozone object. This operation resets the ACL for the
* given object to the list of ACLs provided in the argument.
*
* @param obj Ozone object.
* @param acls List of acls.
* @throws IOException if there is error.
*/
@Override
public boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException {
validateOzoneObj(obj);
String volume = obj.getVolumeName();
String bucket = obj.getBucketName();
String keyName = obj.getKeyName();
metadataManager.getLock().acquireBucketLock(volume, bucket);
try {
validateBucket(volume, bucket);
String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
Table keyTable;
if (keyInfo == null) {
keyInfo = metadataManager.getOpenKeyTable().get(objectKey);
if (keyInfo == null) {
throw new OMException("Key not found. Key:" +
objectKey, KEY_NOT_FOUND);
}
keyTable = metadataManager.getOpenKeyTable();
} else {
keyTable = metadataManager.getKeyTable();
}
List<OzoneAclInfo> newAcls = new ArrayList<>();
for (OzoneAcl a : acls) {
newAcls.add(OzoneAcl.toProtobuf(a));
}
OmKeyInfo newObj = new OmKeyInfo.Builder()
.setBucketName(keyInfo.getBucketName())
.setKeyName(keyInfo.getKeyName())
.setReplicationFactor(keyInfo.getFactor())
.setReplicationType(keyInfo.getType())
.setVolumeName(keyInfo.getVolumeName())
.setOmKeyLocationInfos(keyInfo.getKeyLocationVersions())
.setCreationTime(keyInfo.getCreationTime())
.setModificationTime(keyInfo.getModificationTime())
.setAcls(newAcls)
.setDataSize(keyInfo.getDataSize())
.setFileEncryptionInfo(keyInfo.getFileEncryptionInfo())
.build();
keyTable.put(objectKey, newObj);
} catch (IOException ex) {
if (!(ex instanceof OMException)) {
LOG.error("Set acl operation failed for key:{}/{}/{}", volume,
bucket, keyName, ex);
}
throw ex;
} finally {
metadataManager.getLock().releaseBucketLock(volume, bucket);
}
return true;
}
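Because setAcl replaces the stored list wholesale, handing it an empty list clears every acl on the key, which is what the client-side reset in the test at the top of this change relies on. A minimal sketch against the KeyManager interface (the ResetKeyAclsSketch class is hypothetical and keyObj is assumed to reference an existing key):

import java.io.IOException;
import java.util.Collections;

import org.apache.hadoop.ozone.om.KeyManager;
import org.apache.hadoop.ozone.security.acl.OzoneObj;

// Sketch: resetting a key's acls by passing setAcl an empty list.
final class ResetKeyAclsSketch {
  static void reset(KeyManager keyManager, OzoneObj keyObj) throws IOException {
    keyManager.setAcl(keyObj, Collections.emptyList());
    System.out.println(keyManager.getAcl(keyObj).isEmpty()); // expected: true
  }
}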
/**
* Returns list of ACLs for given Ozone object.
*
* @param obj Ozone object.
* @throws IOException if there is error.
*/
@Override
public List<OzoneAcl> getAcl(OzoneObj obj) throws IOException {
validateOzoneObj(obj);
String volume = obj.getVolumeName();
String bucket = obj.getBucketName();
String keyName = obj.getKeyName();
metadataManager.getLock().acquireBucketLock(volume, bucket);
try {
validateBucket(volume, bucket);
String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
if (keyInfo == null) {
keyInfo = metadataManager.getOpenKeyTable().get(objectKey);
if (keyInfo == null) {
throw new OMException("Key not found. Key:" +
objectKey, KEY_NOT_FOUND);
}
}
List<OzoneAcl> acls = new ArrayList<>();
for (OzoneAclInfo a : keyInfo.getAcls()) {
acls.add(OzoneAcl.fromProtobuf(a));
}
return acls;
} catch (IOException ex) {
if (!(ex instanceof OMException)) {
LOG.error("Get acl operation failed for key:{}/{}/{}", volume,
bucket, keyName, ex);
}
throw ex;
} finally {
metadataManager.getLock().releaseBucketLock(volume, bucket);
}
}
/**
* Helper method to validate ozone object.
* @param obj Ozone object to be validated.
* */
private void validateOzoneObj(OzoneObj obj) throws OMException {
Objects.requireNonNull(obj);
if (!obj.getResourceType().equals(KEY)) {
throw new IllegalArgumentException("Unexpected argument passed to " +
"KeyManager. OzoneObj type:" + obj.getResourceType());
}
String volume = obj.getVolumeName();
String bucket = obj.getBucketName();
String keyName = obj.getKeyName();
if (Strings.isNullOrEmpty(volume)) {
throw new OMException("Volume name is required.", VOLUME_NOT_FOUND);
}
if (Strings.isNullOrEmpty(bucket)) {
throw new OMException("Bucket name is required.", BUCKET_NOT_FOUND);
}
if (Strings.isNullOrEmpty(keyName)) {
throw new OMException("Key name is required.", KEY_NOT_FOUND);
}
}
/**
* OzoneFS api to get file status for an entry.
*
@ -1420,7 +1739,7 @@ public class KeyManagerImpl implements KeyManager {
return;
}
OmKeyInfo dirDbKeyInfo =
createDirectoryKey(volumeName, bucketName, keyName, args.getAcls());
String dirDbKey = metadataManager
.getOzoneKey(volumeName, bucketName, dirDbKeyInfo.getKeyName());
metadataManager.getKeyTable().put(dirDbKey, dirDbKeyInfo);
@ -1430,7 +1749,7 @@ public class KeyManagerImpl implements KeyManager {
}
private OmKeyInfo createDirectoryKey(String volumeName, String bucketName,
String keyName, List<OzoneAcl> acls) throws IOException {
// verify bucket exists
OmBucketInfo bucketInfo = getBucketInfo(volumeName, bucketName);
@ -1448,6 +1767,8 @@ public class KeyManagerImpl implements KeyManager {
.setReplicationType(ReplicationType.RATIS)
.setReplicationFactor(ReplicationFactor.ONE)
.setFileEncryptionInfo(encInfo)
.setAcls(acls.stream().map(a ->
OzoneAcl.toProtobuf(a)).collect(Collectors.toList()))
.build();
}


@ -2971,9 +2971,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
switch (obj.getResourceType()) {
case VOLUME:
return volumeManager.addAcl(obj, acl);
case BUCKET:
return bucketManager.addAcl(obj, acl);
case KEY:
return keyManager.addAcl(obj, acl);
default:
throw new OMException("Unexpected resource type: " +
obj.getResourceType(), INVALID_REQUEST);
@ -3001,6 +3002,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
case BUCKET:
return bucketManager.removeAcl(obj, acl);
case KEY:
return keyManager.removeAcl(obj, acl);
default:
throw new OMException("Unexpected resource type: " +
obj.getResourceType(), INVALID_REQUEST);
@ -3025,9 +3028,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
switch (obj.getResourceType()) {
case VOLUME:
return volumeManager.setAcl(obj, acls);
case BUCKET:
return bucketManager.setAcl(obj, acls);
case KEY:
return keyManager.setAcl(obj, acls);
default:
throw new OMException("Unexpected resource type: " +
obj.getResourceType(), INVALID_REQUEST);
@ -3050,9 +3054,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
switch (obj.getResourceType()) {
case VOLUME:
return volumeManager.getAcl(obj);
case BUCKET:
return bucketManager.getAcl(obj);
case KEY:
return keyManager.getAcl(obj);
default:
throw new OMException("Unexpected resource type: " +
obj.getResourceType(), INVALID_REQUEST);


@ -542,7 +542,7 @@ public class VolumeManagerImpl implements VolumeManager {
try {
volumeArgs.addAcl(acl);
} catch (OMException ex) {
LOG.debug("Add acl failed.", ex);
return false;
}
metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs);
@ -592,7 +592,7 @@ public class VolumeManagerImpl implements VolumeManager {
try {
volumeArgs.removeAcl(acl);
} catch (OMException ex) {
LOG.debug("Remove acl failed.", ex);
return false;
}
metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs);


@ -170,6 +170,8 @@ public final class OzoneManagerRatisServer {
omResponse.setMessage(stateMachineException.getCause().getMessage());
omResponse.setStatus(parseErrorStatus(
stateMachineException.getCause().getMessage()));
LOG.debug("Error while executing ratis request. " +
"stateMachineException: ", stateMachineException);
return omResponse.build();
}


@ -576,6 +576,8 @@ public class OzoneManagerRequestHandler implements RequestHandler {
.setIsMultipartKey(keyArgs.getIsMultipartKey())
.setMultipartUploadID(keyArgs.getMultipartUploadID())
.setMultipartUploadPartNumber(keyArgs.getMultipartNumber())
.setAcls(keyArgs.getAclsList().stream().map(a ->
OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()))
.build();
if (keyArgs.hasDataSize()) {
omKeyArgs.setDataSize(keyArgs.getDataSize());
@ -825,6 +827,8 @@ public class OzoneManagerRequestHandler implements RequestHandler {
.setBucketName(keyArgs.getBucketName())
.setKeyName(keyArgs.getKeyName())
.setType(keyArgs.getType())
.setAcls(keyArgs.getAclsList().stream().map(a ->
OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()))
.setFactor(keyArgs.getFactor())
.build();
OmMultipartInfo multipartInfo = impl.initiateMultipartUpload(omKeyArgs);
@ -847,6 +851,8 @@ public class OzoneManagerRequestHandler implements RequestHandler {
.setBucketName(keyArgs.getBucketName())
.setKeyName(keyArgs.getKeyName())
.setType(keyArgs.getType())
.setAcls(keyArgs.getAclsList().stream().map(a ->
OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()))
.setFactor(keyArgs.getFactor())
.build();
OmMultipartInfo multipartInfo =
@ -905,6 +911,8 @@ public class OzoneManagerRequestHandler implements RequestHandler {
.setVolumeName(keyArgs.getVolumeName())
.setBucketName(keyArgs.getBucketName())
.setKeyName(keyArgs.getKeyName())
.setAcls(keyArgs.getAclsList().stream().map(a ->
OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()))
.setMultipartUploadID(keyArgs.getMultipartUploadID())
.build();
OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = impl
@ -1050,6 +1058,8 @@ public class OzoneManagerRequestHandler implements RequestHandler {
.setVolumeName(keyArgs.getVolumeName())
.setBucketName(keyArgs.getBucketName())
.setKeyName(keyArgs.getKeyName())
.setAcls(keyArgs.getAclsList().stream().map(a ->
OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()))
.build();
impl.createDirectory(omKeyArgs);
}
@ -1064,6 +1074,8 @@ public class OzoneManagerRequestHandler implements RequestHandler {
.setDataSize(keyArgs.getDataSize())
.setType(keyArgs.getType())
.setFactor(keyArgs.getFactor())
.setAcls(keyArgs.getAclsList().stream().map(a ->
OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()))
.build();
OpenKeySession keySession =
impl.createFile(omKeyArgs, request.getIsOverwrite(),
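Each of these handlers performs the same translation: the wire-level OzoneAclInfo entries on KeyArgs are mapped to client-side OzoneAcl objects with OzoneAcl.fromProtobuf before the OmKeyArgs is built, and the reverse direction uses OzoneAcl.toProtobuf. A small sketch of that round trip; the AclConversionSketch class is hypothetical, while the two conversion methods are the ones used throughout this change:

import java.util.List;
import java.util.stream.Collectors;

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;

// Sketch of the acl conversion used by the request handler: protobuf -> OzoneAcl -> protobuf.
final class AclConversionSketch {
  static List<OzoneAcl> fromWire(List<OzoneAclInfo> wireAcls) {
    return wireAcls.stream().map(OzoneAcl::fromProtobuf).collect(Collectors.toList());
  }

  static List<OzoneAclInfo> toWire(List<OzoneAcl> acls) {
    return acls.stream().map(OzoneAcl::toProtobuf).collect(Collectors.toList());
  }
}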


@ -22,6 +22,7 @@ package org.apache.hadoop.ozone.om;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@ -192,6 +193,7 @@ public class TestKeyDeletingService {
.setVolumeName(volumeName)
.setBucketName(bucketName)
.setKeyName(keyName)
.setAcls(Collections.emptyList())
.setLocationInfoList(new ArrayList<>())
.build();
//Open, Commit and Delete the Keys in the Key Manager.


@ -49,6 +49,8 @@ import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.*;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
@ -60,6 +62,7 @@ import org.junit.Test;
import org.mockito.Mockito;
import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;
/**
* Test class for @{@link KeyManagerImpl}.
@ -173,11 +176,14 @@ public class TestKeyManagerImpl {
@Test
public void openKeyFailureInSafeMode() throws Exception {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
KeyManager keyManager1 = new KeyManagerImpl(mockScmBlockLocationProtocol,
metadataManager, conf, "om1", null);
OmKeyArgs keyArgs = createBuilder()
.setKeyName(KEY_NAME)
.setDataSize(1000)
.setAcls(OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(),
ALL, ALL))
.build();
LambdaTestUtils.intercept(OMException.class,
"SafeModePrecheck failed for allocateBlock", () -> {
@ -355,7 +361,7 @@ public class TestKeyManagerImpl {
}
}
private OmKeyArgs createKeyArgs(String toKeyName) throws IOException {
return createBuilder().setKeyName(toKeyName).build();
}
@ -542,12 +548,15 @@ public class TestKeyManagerImpl {
return keyNames;
}
private OmKeyArgs.Builder createBuilder() throws IOException {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
return new OmKeyArgs.Builder()
.setBucketName(BUCKET_NAME)
.setFactor(ReplicationFactor.ONE)
.setDataSize(0)
.setType(ReplicationType.STAND_ALONE)
.setAcls(OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(),
ALL, ALL))
.setVolumeName(VOLUME_NAME);
}
}