HDDS-1600. Add userName and IPAddress as part of OMRequest. (#857)
parent 97607f3dce
commit 580b639908
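HDDS-1600 adds the caller's user name and remote IP address to every OM write request: a new optional UserInfo field on OMRequest is filled in during OMClientRequest#preExecute from the RPC layer (ProtobufRpcEngine.Server) and read back when the request is validated and applied, so ACL checks no longer depend on having the original RPC call context available. As part of the same change, ACL failures now surface as OMException with new PERMISSION_DENIED and TIMEOUT result codes, and the old OzoneAclException is removed.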
@@ -196,5 +196,9 @@ public class OMException extends IOException {
     FILE_ALREADY_EXISTS,

     NOT_A_FILE,
+
+    PERMISSION_DENIED, // Error codes used during acl validation
+
+    TIMEOUT // Error codes used during acl validation
   }
 }

@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.security.acl;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.exceptions.OMException;

 import java.util.BitSet;
@@ -35,11 +36,11 @@ public interface IAccessAuthorizer {
    *
    * @param ozoneObject object for which access needs to be checked.
    * @param context Context object encapsulating all user related information.
-   * @throws OzoneAclException
+   * @throws org.apache.hadoop.ozone.om.exceptions.OMException
    * @return true if user has access else false.
    */
   boolean checkAccess(IOzoneObj ozoneObject, RequestContext context)
-      throws OzoneAclException;
+      throws OMException;

   /**
    * ACL rights.

@@ -16,6 +16,8 @@
  */
 package org.apache.hadoop.ozone.security.acl;

+import org.apache.hadoop.ozone.om.exceptions.OMException;
+
 /**
  * Default implementation for {@link IAccessAuthorizer}.
  * */
@@ -23,7 +25,7 @@ public class OzoneAccessAuthorizer implements IAccessAuthorizer {

   @Override
   public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context)
-      throws OzoneAclException {
+      throws OMException {
     return true;
   }
 }

@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.security.acl;
-
-import java.io.IOException;
-
-/**
- * Timeout exception thrown by Ozone. Ex: When checking ACLs for an Object if
- * security manager is not able to process the request in configured time than
- * {@link OzoneAclException} should be thrown.
- */
-public class OzoneAclException extends IOException {
-
-  private ErrorCode errorCode;
-
-  /**
-   * Constructs a new exception with {@code null} as its detail message. The
-   * cause is not initialized, and may subsequently be initialized by a call to
-   * {@link #initCause}.
-   */
-  public OzoneAclException() {
-    super("");
-  }
-
-  /**
-   * Constructs a new exception with {@code null} as its detail message. The
-   * cause is not initialized, and may subsequently be initialized by a call to
-   * {@link #initCause}.
-   */
-  public OzoneAclException(String errorMsg, ErrorCode code, Throwable ex) {
-    super(errorMsg, ex);
-    this.errorCode = code;
-  }
-
-  /**
-   * Constructs a new exception with {@code null} as its detail message. The
-   * cause is not initialized, and may subsequently be initialized by a call to
-   * {@link #initCause}.
-   */
-  public OzoneAclException(String errorMsg, ErrorCode code) {
-    super(errorMsg);
-    this.errorCode = code;
-  }
-
-  /**
-   * Error codes for OzoneAclException.
-   */
-  public enum ErrorCode {
-    TIMEOUT,
-    PERMISSION_DENIED,
-    OTHER
-  }
-
-  public ErrorCode getErrorCode() {
-    return errorCode;
-  }
-}

@@ -102,6 +102,9 @@ message OMRequest {

     required string clientId = 3;

+    optional UserInfo userInfo = 4;
+
+
     optional CreateVolumeRequest createVolumeRequest = 11;
     optional SetVolumePropertyRequest setVolumePropertyRequest = 12;
     optional CheckVolumeAccessRequest checkVolumeAccessRequest = 13;
@@ -271,6 +274,8 @@ enum Status {
     DIRECTORY_NOT_FOUND = 45;
     FILE_ALREADY_EXISTS = 46;
     NOT_A_FILE = 47;
+    PERMISSION_DENIED = 48;
+    TIMEOUT = 49;
 }


@@ -284,6 +289,16 @@ message VolumeInfo {
     required uint64 creationTime = 7;
 }

+/**
+    User information which will be extracted during RPC context and used
+    during validating Acl.
+*/
+message UserInfo {
+    optional string userName = 1;
+    optional string remoteAddress = 3;
+}
+
+
 /**
     Creates a volume
 */

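For reference, the new message is used through the ordinary generated protobuf builder API. A minimal, hypothetical fragment (the literal values and the someRequest variable are placeholders, not part of the patch):

    OzoneManagerProtocolProtos.UserInfo userInfo =
        OzoneManagerProtocolProtos.UserInfo.newBuilder()
            .setUserName("testuser")         // normally UserGroupInformation#getUserName()
            .setRemoteAddress("127.0.0.1")   // normally InetAddress#getHostAddress()
            .build();

    // Stamp it onto a request and read it back on the apply path.
    OMRequest stamped = someRequest.toBuilder().setUserInfo(userInfo).build();
    if (stamped.hasUserInfo()) {
      String user = stamped.getUserInfo().getUserName();
      String addr = stamped.getUserInfo().getRemoteAddress();
    }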
@@ -24,10 +24,10 @@ import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneTestUtils;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.IOzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneAclException;
 import org.apache.hadoop.ozone.security.acl.RequestContext;
 import org.apache.hadoop.ozone.web.handlers.BucketArgs;
 import org.apache.hadoop.ozone.web.handlers.KeyArgs;
@@ -122,7 +122,7 @@ public class TestOmAcls {
     createVolumeArgs.setAdminName(adminUser);
     createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
     logCapturer.clearOutput();
-    OzoneTestUtils.expectOmException(ResultCodes.INTERNAL_ERROR,
+    OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
         () -> storageHandler.createVolume(createVolumeArgs));
     assertTrue(logCapturer.getOutput().contains("doesn't have CREATE " +
         "permission to access volume"));
@@ -131,7 +131,7 @@ public class TestOmAcls {
     bucketArgs.setAddAcls(new LinkedList<>());
     bucketArgs.setRemoveAcls(new LinkedList<>());
     bucketArgs.setStorageType(StorageType.DISK);
-    OzoneTestUtils.expectOmException(ResultCodes.INTERNAL_ERROR,
+    OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
         () -> storageHandler.createBucket(bucketArgs));
     assertTrue(logCapturer.getOutput().contains("doesn't have CREATE " +
         "permission to access bucket"));
@@ -155,7 +155,7 @@ public class TestOmAcls {
     // write a key without specifying size at all
     String keyName = "testKey";
     KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    OzoneTestUtils.expectOmException(ResultCodes.INTERNAL_ERROR,
+    OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
         () -> storageHandler.newKeyWriter(keyArgs));
     assertTrue(logCapturer.getOutput().contains("doesn't have READ permission" +
         " to access key"));
@@ -169,7 +169,7 @@ class OzoneAccessAuthrizerTest implements IAccessAuthorizer {

   @Override
   public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context)
-      throws OzoneAclException {
+      throws OMException {
     return false;
   }
 }

@@ -82,6 +82,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
     </dependency>

+    <dependency>
+      <groupId>org.jmockit</groupId>
+      <artifactId>jmockit</artifactId>
+      <version>1.24</version>
+      <scope>test</scope>
+    </dependency>
+
   </dependencies>
   <build>
     <plugins>

@@ -24,6 +24,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.BlockingService;

+import java.net.InetAddress;
 import java.security.PrivateKey;
 import java.security.PublicKey;
 import java.security.KeyPair;
@@ -126,8 +127,6 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
 import org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneAclException;
-import org.apache.hadoop.ozone.security.acl.OzoneAclException.ErrorCode;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType;
 import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType;
@@ -1733,18 +1732,36 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * @param vol - name of volume
    * @param bucket - bucket name
    * @param key - key
-   * @throws OzoneAclException
+   * @throws OMException
    */
   private void checkAcls(ResourceType resType, StoreType store,
       ACLType acl, String vol, String bucket, String key)
-      throws OzoneAclException {
-    if(!isAclEnabled) {
-      return;
-    }
+      throws OMException {
+    checkAcls(resType, store, acl, vol, bucket, key,
+        ProtobufRpcEngine.Server.getRemoteUser(),
+        ProtobufRpcEngine.Server.getRemoteIp());
+  }
+
+  /**
+   * CheckAcls for the ozone object.
+   * @param resType
+   * @param storeType
+   * @param aclType
+   * @param vol
+   * @param bucket
+   * @param key
+   * @param ugi
+   * @param remoteAddress
+   * @throws OMException
+   */
+  @SuppressWarnings("parameternumber")
+  public void checkAcls(ResourceType resType, StoreType storeType,
+      ACLType aclType, String vol, String bucket, String key,
+      UserGroupInformation ugi, InetAddress remoteAddress)
+      throws OMException {
     OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
         .setResType(resType)
-        .setStoreType(store)
+        .setStoreType(storeType)
         .setVolumeName(vol)
         .setBucketName(bucket)
         .setKeyName(key).build();
@@ -1753,17 +1770,26 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         .setClientUgi(user)
         .setIp(ProtobufRpcEngine.Server.getRemoteIp())
         .setAclType(ACLIdentityType.USER)
-        .setAclRights(acl)
+        .setAclRights(aclType)
         .build();
     if (!accessAuthorizer.checkAccess(obj, context)) {
       LOG.warn("User {} doesn't have {} permission to access {}",
-          user.getUserName(), acl, resType);
-      throw new OzoneAclException("User " + user.getUserName() + " doesn't " +
-          "have " + acl + " permission to access " + resType,
-          ErrorCode.PERMISSION_DENIED);
+          user.getUserName(), aclType, resType);
+      throw new OMException("User " + user.getUserName() + " doesn't " +
+          "have " + aclType + " permission to access " + resType,
+          ResultCodes.PERMISSION_DENIED);
     }
   }

+  /**
+   *
+   * Return true if Ozone acl's are enabled, else false.
+   * @return boolean
+   */
+  public boolean getAclsEnabled() {
+    return isAclEnabled;
+  }
+
   /**
    * Changes the owner of a volume.
    *
@@ -2406,8 +2432,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    */
   @Override
   public void deleteBucket(String volume, String bucket) throws IOException {
-    checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, volume,
-        bucket, null);
+    if (isAclEnabled) {
+      checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, volume,
+          bucket, null);
+    }
     Map<String, String> auditMap = buildAuditMap(volume);
     auditMap.put(OzoneConsts.BUCKET, bucket);
     try {

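The second checkAcls overload added here takes the caller identity explicitly instead of reading it from the RPC call context, which is what lets code that runs outside an RPC handler thread still enforce ACLs. A hedged usage sketch (om, the names and the literal values are assumptions for illustration):

    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("testuser");
    InetAddress remoteAddress = InetAddress.getByName("127.0.0.1");

    // Throws OMException with ResultCodes.PERMISSION_DENIED when the authorizer denies access.
    om.checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE,
        "vol1", "bucket1", null, ugi, remoteAddress);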
@@ -19,13 +19,23 @@
 package org.apache.hadoop.ozone.om.request;

 import java.io.IOException;
+import java.net.InetAddress;

+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;

+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.security.UserGroupInformation;

 /**
  * OMClientRequest provides methods which every write OM request should
@@ -50,8 +60,10 @@ public abstract class OMClientRequest {
    * @return OMRequest that will be serialized and handed off to Ratis for
    *         consensus.
    */
-  public abstract OMRequest preExecute(OzoneManager ozoneManager)
-      throws IOException;
+  public OMRequest preExecute(OzoneManager ozoneManager)
+      throws IOException {
+    return getOmRequest().toBuilder().setUserInfo(getUserInfo()).build();
+  }

   /**
    * Validate the OMRequest and update the cache.
@@ -70,4 +82,95 @@ public abstract class OMClientRequest {
     return omRequest;
   }

+  /**
+   * Get User information from the OMRequest.
+   * @return User Info.
+   */
+  public OzoneManagerProtocolProtos.UserInfo getUserInfo() {
+    UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
+    InetAddress remoteAddress = ProtobufRpcEngine.Server.getRemoteIp();
+    OzoneManagerProtocolProtos.UserInfo.Builder userInfo =
+        OzoneManagerProtocolProtos.UserInfo.newBuilder();
+
+    // Added not null checks, as in UT's these values might be null.
+    if (user != null) {
+      userInfo.setUserName(user.getUserName());
+    }
+
+    if (remoteAddress != null) {
+      userInfo.setRemoteAddress(remoteAddress.getHostAddress()).build();
+    }
+
+    return userInfo.build();
+  }
+
+  /**
+   * Check Acls of ozone object.
+   * @param ozoneManager
+   * @param resType
+   * @param storeType
+   * @param aclType
+   * @param vol
+   * @param bucket
+   * @param key
+   * @throws IOException
+   */
+  public void checkAcls(OzoneManager ozoneManager,
+      OzoneObj.ResourceType resType,
+      OzoneObj.StoreType storeType, IAccessAuthorizer.ACLType aclType,
+      String vol, String bucket, String key) throws IOException {
+    ozoneManager.checkAcls(resType, storeType, aclType, vol, bucket, key,
+        createUGI(), getRemoteAddress());
+  }
+
+  /**
+   * Return UGI object created from OMRequest userInfo. If userInfo is not
+   * set, returns null.
+   * @return UserGroupInformation.
+   */
+  @VisibleForTesting
+  public UserGroupInformation createUGI() {
+    if (omRequest.hasUserInfo()) {
+      return UserGroupInformation.createRemoteUser(
+          omRequest.getUserInfo().getUserName());
+    } else {
+      // This will never happen, as for every OM request preExecute, we
+      // should add userInfo.
+      return null;
+    }
+  }
+
+  /**
+   * Return InetAddress created from OMRequest userInfo. If userInfo is not
+   * set, returns null.
+   * @return InetAddress
+   * @throws IOException
+   */
+  @VisibleForTesting
+  public InetAddress getRemoteAddress() throws IOException {
+    if (omRequest.hasUserInfo()) {
+      return InetAddress.getByName(omRequest.getUserInfo()
+          .getRemoteAddress());
+    } else {
+      return null;
+    }
+  }
+
+  /**
+   * Set parameters needed for return error response to client.
+   * @param omResponse
+   * @param ex - IOException
+   * @return error response need to be returned to client - OMResponse.
+   */
+  protected OMResponse createErrorOMResponse(OMResponse.Builder omResponse,
+      IOException ex) {
+
+    omResponse.setSuccess(false);
+    if (ex.getMessage() != null) {
+      omResponse.setMessage(ex.getMessage());
+    }
+    omResponse.setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex));
+    return omResponse.build();
+  }
+
 }

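Taken together, the new OMClientRequest helpers give every write request a user-identity round trip: preExecute stamps the UserInfo captured from the RPC layer, and createUGI()/getRemoteAddress() rebuild it wherever the request is applied. A schematic sketch (variable names are assumptions; the new TestOMClientRequestWithUserInfo below exercises the same path):

    // Leader side, while the RPC call context is still available:
    OMRequest stamped = omBucketCreateRequest.preExecute(ozoneManager);
    // stamped.hasUserInfo() is now true.

    // Apply side, where no RPC context exists:
    OMClientRequest applied = new OMBucketCreateRequest(stamped);
    UserGroupInformation ugi = applied.createUGI();          // from userInfo.userName
    InetAddress remoteAddress = applied.getRemoteAddress();  // from userInfo.remoteAddress
    applied.checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET,
        OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.CREATE,
        volumeName, bucketName, null);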
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.request.bucket;
 import java.io.IOException;

 import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -34,6 +33,7 @@ import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@@ -50,7 +50,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.utils.db.cache.CacheKey;
 import org.apache.hadoop.utils.db.cache.CacheValue;
@@ -94,8 +95,8 @@ public class OMBucketCreateRequest extends OMClientRequest {
     }

     newCreateBucketRequest.setBucketInfo(newBucketInfo.build());
-    return getOmRequest().toBuilder().setCreateBucketRequest(
-        newCreateBucketRequest.build()).build();
+    return getOmRequest().toBuilder().setUserInfo(getUserInfo())
+        .setCreateBucketRequest(newCreateBucketRequest.build()).build();
   }

   @Override
@@ -114,15 +115,29 @@ public class OMBucketCreateRequest extends OMClientRequest {
     OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
         OzoneManagerProtocolProtos.Type.CreateBucket).setStatus(
         OzoneManagerProtocolProtos.Status.OK);
-    OmBucketInfo omBucketInfo = null;
+    OmBucketInfo omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
+
+    try {
+      // check Acl
+      if (ozoneManager.getAclsEnabled()) {
+        checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET,
+            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.CREATE,
+            volumeName, bucketName, null);
+      }
+    } catch (IOException ex) {
+      LOG.error("Bucket creation failed for bucket:{} in volume:{}",
+          bucketName, volumeName, ex);
+      omMetrics.incNumBucketCreateFails();
+      return new OMBucketCreateResponse(omBucketInfo,
+          createErrorOMResponse(omResponse, ex));
+    }
+
+    String volumeKey = metadataManager.getVolumeKey(volumeName);
+    String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);

     metadataManager.getLock().acquireVolumeLock(volumeName);
     metadataManager.getLock().acquireBucketLock(volumeName, bucketName);

     try {
-      String volumeKey = metadataManager.getVolumeKey(volumeName);
-      String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-
       //Check if the volume exists
       if (metadataManager.getVolumeTable().get(volumeKey) == null) {
@@ -137,7 +152,6 @@ public class OMBucketCreateRequest extends OMClientRequest {
             OMException.ResultCodes.BUCKET_ALREADY_EXISTS);
       }

-      omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
       LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
       omMetrics.incNumBuckets();

@@ -145,22 +159,21 @@ public class OMBucketCreateRequest extends OMClientRequest {
       metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey),
           new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));

-      // TODO: check acls.
+      // return response.
+      omResponse.setCreateBucketResponse(
+          CreateBucketResponse.newBuilder().build());
+      return new OMBucketCreateResponse(omBucketInfo, omResponse.build());
+
     } catch (IOException ex) {
       omMetrics.incNumBucketCreateFails();
       LOG.error("Bucket creation failed for bucket:{} in volume:{}",
          bucketName, volumeName, ex);
-      omResponse.setStatus(
-          OzoneManagerRatisUtils.exceptionToResponseStatus(ex));
-      omResponse.setMessage(ex.getMessage());
-      omResponse.setSuccess(false);
+      return new OMBucketCreateResponse(omBucketInfo,
+          createErrorOMResponse(omResponse, ex));
     } finally {
       metadataManager.getLock().releaseBucketLock(volumeName, bucketName);
       metadataManager.getLock().releaseVolumeLock(volumeName);
     }
-    omResponse.setCreateBucketResponse(
-        CreateBucketResponse.newBuilder().build());
-    return new OMBucketCreateResponse(omBucketInfo, omResponse.build());
   }

@@ -21,10 +21,13 @@ package org.apache.hadoop.ozone.om.request.bucket;
 import java.io.IOException;

 import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
@@ -32,12 +35,12 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .DeleteBucketResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.utils.db.cache.CacheKey;
 import org.apache.hadoop.utils.db.cache.CacheValue;

@@ -52,12 +55,6 @@ public class OMBucketDeleteRequest extends OMClientRequest {
     super(omRequest);
   }

-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    // For Delete Bucket there are no preExecute steps
-    return getOmRequest();
-  }
-
   @Override
   public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       long transactionLogIndex) {
@@ -69,14 +66,28 @@ public class OMBucketDeleteRequest extends OMClientRequest {
     String volumeName = omRequest.getDeleteBucketRequest().getVolumeName();
     String bucketName = omRequest.getDeleteBucketRequest().getBucketName();

-    // acquire lock
-    omMetadataManager.getLock().acquireBucketLock(volumeName, bucketName);
-
     // Generate end user response
     OMResponse.Builder omResponse = OMResponse.newBuilder()
         .setStatus(OzoneManagerProtocolProtos.Status.OK)
         .setCmdType(omRequest.getCmdType());

+    try {
+      // check Acl
+      if (ozoneManager.getAclsEnabled()) {
+        checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET,
+            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
+            volumeName, bucketName, null);
+      }
+    } catch (IOException ex) {
+      omMetrics.incNumBucketDeleteFails();
+      LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName,
+          volumeName, ex);
+      return new OMBucketDeleteResponse(volumeName, bucketName,
+          createErrorOMResponse(omResponse, ex));
+    }
+
+    // acquire lock
+    omMetadataManager.getLock().acquireBucketLock(volumeName, bucketName);
     try {
       // No need to check volume exists here, as bucket cannot be created
       // with out volume creation.
@@ -101,17 +112,21 @@ public class OMBucketDeleteRequest extends OMClientRequest {
       omMetadataManager.getBucketTable().addCacheEntry(
           new CacheKey<>(bucketKey),
           new CacheValue<>(Optional.absent(), transactionLogIndex));
-      // TODO: check acls.

       // return response.
       omResponse.setDeleteBucketResponse(
           DeleteBucketResponse.newBuilder().build());
+      return new OMBucketDeleteResponse(volumeName, bucketName,
+          omResponse.build());
+
     } catch (IOException ex) {
       omMetrics.incNumBucketDeleteFails();
       LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName,
          volumeName, ex);
-      omResponse.setSuccess(false).setMessage(ex.getMessage())
-          .setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex));
+      return new OMBucketDeleteResponse(volumeName, bucketName,
+          createErrorOMResponse(omResponse, ex));
     } finally {
       omMetadataManager.getLock().releaseBucketLock(volumeName, bucketName);
     }
-    return new OMBucketDeleteResponse(volumeName, bucketName,
-        omResponse.build());
   }
 }

@@ -22,10 +22,14 @@ import java.io.IOException;
 import java.util.List;

 import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+
 import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -44,8 +48,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .SetBucketPropertyResponse;
 import org.apache.hadoop.utils.db.cache.CacheKey;
 import org.apache.hadoop.utils.db.cache.CacheValue;

@@ -59,10 +63,6 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
   public OMBucketSetPropertyRequest(OMRequest omRequest) {
     super(omRequest);
   }
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    return getOmRequest();
-  }

   @Override
   public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
@@ -87,8 +87,6 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
     String volumeName = bucketArgs.getVolumeName();
     String bucketName = bucketArgs.getBucketName();

-    // acquire lock
-    omMetadataManager.getLock().acquireBucketLock(volumeName, bucketName);

     OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
         OzoneManagerProtocolProtos.Type.CreateBucket).setStatus(
@@ -96,6 +94,27 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
     OmBucketInfo omBucketInfo = null;

+    try {
+      // check Acl
+      if (ozoneManager.getAclsEnabled()) {
+        checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET,
+            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
+            volumeName, bucketName, null);
+      }
+    } catch (IOException ex) {
+      if (omMetrics != null) {
+        omMetrics.incNumBucketUpdateFails();
+      }
+      LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
+          bucketName, volumeName, ex);
+      return new OMBucketSetPropertyResponse(omBucketInfo,
+          createErrorOMResponse(omResponse, ex));
+    }
+
+    // acquire lock
+    omMetadataManager.getLock().acquireBucketLock(volumeName, bucketName);
+
     try {

       String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
       OmBucketInfo oldBucketInfo =
           omMetadataManager.getBucketTable().get(bucketKey);
@@ -151,19 +170,22 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
           new CacheKey<>(bucketKey),
           new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));

-      // TODO: check acls.
       // return response.
       omResponse.setSetBucketPropertyResponse(
           SetBucketPropertyResponse.newBuilder().build());
+      return new OMBucketSetPropertyResponse(omBucketInfo, omResponse.build());
+
     } catch (IOException ex) {
       if (omMetrics != null) {
         omMetrics.incNumBucketUpdateFails();
       }
       LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
          bucketName, volumeName, ex);
-      omResponse.setSuccess(false).setMessage(ex.getMessage())
-          .setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex));
+      return new OMBucketSetPropertyResponse(omBucketInfo,
+          createErrorOMResponse(omResponse, ex));
     } finally {
       omMetadataManager.getLock().releaseBucketLock(volumeName, bucketName);
     }
-    return new OMBucketSetPropertyResponse(omBucketInfo, omResponse.build());
   }

   /**

@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.response;

 import java.io.IOException;

+import com.google.common.base.Preconditions;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
@@ -33,6 +34,7 @@ public abstract class OMClientResponse {
   private OMResponse omResponse;

   public OMClientResponse(OMResponse omResponse) {
+    Preconditions.checkNotNull(omResponse);
     this.omResponse = omResponse;
   }

@@ -23,6 +23,7 @@ import java.io.IOException;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
 import org.apache.hadoop.utils.db.BatchOperation;
@@ -43,11 +44,14 @@ public final class OMBucketCreateResponse extends OMClientResponse {
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
-    String dbBucketKey =
-        omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-            omBucketInfo.getBucketName());
-    omMetadataManager.getBucketTable().putWithBatch(batchOperation, dbBucketKey,
-        omBucketInfo);
+
+    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
+      String dbBucketKey =
+          omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
+              omBucketInfo.getBucketName());
+      omMetadataManager.getBucketTable().putWithBatch(batchOperation,
+          dbBucketKey, omBucketInfo);
+    }
   }

   public OmBucketInfo getOmBucketInfo() {

@@ -44,10 +44,12 @@ public final class OMBucketDeleteResponse extends OMClientResponse {
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
-    String dbBucketKey =
-        omMetadataManager.getBucketKey(volumeName, bucketName);
-    omMetadataManager.getBucketTable().deleteWithBatch(batchOperation,
-        dbBucketKey);
+    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
+      String dbBucketKey =
+          omMetadataManager.getBucketKey(volumeName, bucketName);
+      omMetadataManager.getBucketTable().deleteWithBatch(batchOperation,
+          dbBucketKey);
+    }
   }

   public String getVolumeName() {

@@ -22,6 +22,7 @@ import java.io.IOException;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
 import org.apache.hadoop.utils.db.BatchOperation;
@@ -41,11 +42,13 @@ public class OMBucketSetPropertyResponse extends OMClientResponse {
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
-    String dbBucketKey =
-        omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-            omBucketInfo.getBucketName());
-    omMetadataManager.getBucketTable().putWithBatch(batchOperation, dbBucketKey,
-        omBucketInfo);
+    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
+      String dbBucketKey =
+          omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
+              omBucketInfo.getBucketName());
+      omMetadataManager.getBucketTable().putWithBatch(batchOperation,
+          dbBucketKey, omBucketInfo);
+    }
   }

 }

@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request;
+
+import java.net.InetAddress;
+import java.util.UUID;
+
+import mockit.Mock;
+import mockit.MockUp;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import static org.mockito.Mockito.when;
+
+/**
+ * Test OMClient Request with user information.
+ */
+public class TestOMClientRequestWithUserInfo {
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  private OzoneManager ozoneManager;
+  private OMMetrics omMetrics;
+  private OMMetadataManager omMetadataManager;
+  private UserGroupInformation userGroupInformation =
+      UserGroupInformation.createRemoteUser("temp");
+  private InetAddress inetAddress;
+
+  @Before
+  public void setup() throws Exception {
+    ozoneManager = Mockito.mock(OzoneManager.class);
+    omMetrics = OMMetrics.create();
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+        folder.newFolder().getAbsolutePath());
+    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
+    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
+    inetAddress = InetAddress.getByName("127.0.0.1");
+
+    new MockUp<ProtobufRpcEngine.Server>() {
+      @Mock
+      public UserGroupInformation getRemoteUser() {
+        return userGroupInformation;
+      }
+
+      public InetAddress getRemoteAddress() {
+        return inetAddress;
+      }
+    };
+  }
+
+  @Test
+  public void testUserInfo() throws Exception {
+
+    String bucketName = UUID.randomUUID().toString();
+    String volumeName = UUID.randomUUID().toString();
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        TestOMRequestUtils.createBucketRequest(bucketName, volumeName, true,
+            OzoneManagerProtocolProtos.StorageTypeProto.DISK);
+
+    OMBucketCreateRequest omBucketCreateRequest =
+        new OMBucketCreateRequest(omRequest);
+
+    Assert.assertFalse(omRequest.hasUserInfo());
+
+    OzoneManagerProtocolProtos.OMRequest modifiedRequest =
+        omBucketCreateRequest.preExecute(ozoneManager);
+
+    Assert.assertTrue(modifiedRequest.hasUserInfo());
+
+    // Now pass modified request to OMBucketCreateRequest and check ugi and
+    // remote Address.
+    omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest);
+
+    InetAddress remoteAddress = omBucketCreateRequest.getRemoteAddress();
+    UserGroupInformation ugi = omBucketCreateRequest.createUGI();
+
+
+    // Now check we have original user info and remote address or not.
+    // Here from OMRequest user info, converted to UGI and InetAddress.
+    Assert.assertEquals(inetAddress.getHostAddress(),
+        remoteAddress.getHostAddress());
+    Assert.assertEquals(userGroupInformation.getUserName(), ugi.getUserName());
+  }
+}

@@ -19,13 +19,17 @@

 package org.apache.hadoop.ozone.om.request;

+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.util.Time;

-import java.util.UUID;
-
 /**
  * Helper class to test OMClientRequest classes.
  */
@@ -70,4 +74,33 @@ public final class TestOMRequestUtils {
     omMetadataManager.getVolumeTable().put(
         omMetadataManager.getVolumeKey(volumeName), omVolumeArgs);
   }
+
+  public static OzoneManagerProtocolProtos.OMRequest createBucketRequest(
+      String bucketName, String volumeName, boolean isVersionEnabled,
+      OzoneManagerProtocolProtos.StorageTypeProto storageTypeProto) {
+    OzoneManagerProtocolProtos.BucketInfo bucketInfo =
+        OzoneManagerProtocolProtos.BucketInfo.newBuilder()
+            .setBucketName(bucketName)
+            .setVolumeName(volumeName)
+            .setIsVersionEnabled(isVersionEnabled)
+            .setStorageType(storageTypeProto)
+            .addAllMetadata(getMetadataList()).build();
+    OzoneManagerProtocolProtos.CreateBucketRequest.Builder req =
+        OzoneManagerProtocolProtos.CreateBucketRequest.newBuilder();
+    req.setBucketInfo(bucketInfo);
+    return OzoneManagerProtocolProtos.OMRequest.newBuilder()
+        .setCreateBucketRequest(req)
+        .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
+        .setClientId(UUID.randomUUID().toString()).build();
+  }
+
+  public static List< HddsProtos.KeyValue> getMetadataList() {
+    List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue(
+        "value1").build());
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue(
+        "value2").build());
+    return metadataList;
+  }
+
 }

@@ -19,8 +19,6 @@

 package org.apache.hadoop.ozone.om.request.bucket;

-import java.util.ArrayList;
-import java.util.List;
 import java.util.UUID;

 import org.junit.After;
@@ -43,9 +41,9 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .StorageTypeProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.util.Time;
@@ -109,8 +107,8 @@ public class TestOMBucketCreateRequest {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();

-    OMRequest originalRequest = createBucketRequest(bucketName, volumeName,
-        false, StorageTypeProto.SSD);
+    OMRequest originalRequest = TestOMRequestUtils.createBucketRequest(
+        bucketName, volumeName, false, StorageTypeProto.SSD);

     OMBucketCreateRequest omBucketCreateRequest =
         new OMBucketCreateRequest(originalRequest);
@@ -161,8 +159,9 @@ public class TestOMBucketCreateRequest {
   private OMBucketCreateRequest doPreExecute(String volumeName,
       String bucketName) throws Exception {
     addCreateVolumeToTable(volumeName, omMetadataManager);
-    OMRequest originalRequest = createBucketRequest(bucketName, volumeName,
-        false, StorageTypeProto.SSD);
+    OMRequest originalRequest =
+        TestOMRequestUtils.createBucketRequest(bucketName, volumeName, false,
+            StorageTypeProto.SSD);

     OMBucketCreateRequest omBucketCreateRequest =
         new OMBucketCreateRequest(originalRequest);
@@ -239,32 +238,4 @@ public class TestOMBucketCreateRequest {
         omMetadataManager.getVolumeKey(volumeName), omVolumeArgs);
   }

-
-  public static OMRequest createBucketRequest(String bucketName,
-      String volumeName, boolean isVersionEnabled,
-      StorageTypeProto storageTypeProto) {
-    OzoneManagerProtocolProtos.BucketInfo bucketInfo =
-        OzoneManagerProtocolProtos.BucketInfo.newBuilder()
-            .setBucketName(bucketName)
-            .setVolumeName(volumeName)
-            .setIsVersionEnabled(isVersionEnabled)
-            .setStorageType(storageTypeProto)
-            .addAllMetadata(getMetadataList()).build();
-    OzoneManagerProtocolProtos.CreateBucketRequest.Builder req =
-        OzoneManagerProtocolProtos.CreateBucketRequest.newBuilder();
-    req.setBucketInfo(bucketInfo);
-    return OMRequest.newBuilder().setCreateBucketRequest(req)
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
-        .setClientId(UUID.randomUUID().toString()).build();
-  }
-
-  public static List< HddsProtos.KeyValue> getMetadataList() {
-    List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
-    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue(
-        "value1").build());
-    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue(
-        "value2").build());
-    return metadataList;
-  }
-
 }

@@ -84,9 +84,8 @@ public class TestOMBucketDeleteRequest {
     OMBucketDeleteRequest omBucketDeleteRequest =
         new OMBucketDeleteRequest(omRequest);

-    // As preExecute of DeleteBucket request is do nothing, requests should
-    // be same.
-    Assert.assertEquals(omRequest,
+    // As user info gets added.
+    Assert.assertNotEquals(omRequest,
         omBucketDeleteRequest.preExecute(ozoneManager));
   }

@@ -90,7 +90,8 @@ public class TestOMBucketSetPropertyRequest {
     OMBucketSetPropertyRequest omBucketSetPropertyRequest =
         new OMBucketSetPropertyRequest(omRequest);

-    Assert.assertEquals(omRequest,
+    // As user info gets added.
+    Assert.assertNotEquals(omRequest,
         omBucketSetPropertyRequest.preExecute(ozoneManager));
   }
