HDDS-1600. Add userName and IPAddress as part of OMRequest. (#857)

Bharat Viswanadham 2019-06-04 12:31:44 -07:00 committed by GitHub
parent 97607f3dce
commit 580b639908
21 changed files with 471 additions and 198 deletions

View File

@@ -196,5 +196,9 @@ public class OMException extends IOException {
    FILE_ALREADY_EXISTS,
    NOT_A_FILE,
+    PERMISSION_DENIED, // Error codes used during acl validation
+    TIMEOUT // Error codes used during acl validation
  }
}

View File

@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.security.acl;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.exceptions.OMException;

import java.util.BitSet;
@@ -35,11 +36,11 @@ public interface IAccessAuthorizer {
   *
   * @param ozoneObject object for which access needs to be checked.
   * @param context Context object encapsulating all user related information.
-   * @throws OzoneAclException
+   * @throws org.apache.hadoop.ozone.om.exceptions.OMException
   * @return true if user has access else false.
   */
  boolean checkAccess(IOzoneObj ozoneObject, RequestContext context)
-      throws OzoneAclException;
+      throws OMException;

  /**
   * ACL rights.

View File

@@ -16,6 +16,8 @@
 */
package org.apache.hadoop.ozone.security.acl;

+import org.apache.hadoop.ozone.om.exceptions.OMException;
+
/**
 * Default implementation for {@link IAccessAuthorizer}.
 * */
@@ -23,7 +25,7 @@ public class OzoneAccessAuthorizer implements IAccessAuthorizer {
  @Override
  public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context)
-      throws OzoneAclException {
+      throws OMException {
    return true;
  }
}
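For custom IAccessAuthorizer implementations maintained outside this tree, the migration implied by the two files above is mechanical: declare and throw OMException (with one of the new result codes) where OzoneAclException was used before. A minimal sketch, assuming only the interfaces shown in this commit; the class name and the deny-all rule are illustrative, not part of the change:

package org.apache.hadoop.ozone.security.acl;

import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;

/** Illustrative authorizer that denies every request. */
public class DenyAllAuthorizer implements IAccessAuthorizer {
  @Override
  public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context)
      throws OMException {
    if (context == null) {
      // Hard failures are surfaced the same way OzoneManager does in this commit.
      throw new OMException("Request context is missing",
          ResultCodes.PERMISSION_DENIED);
    }
    return false;
  }
}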

View File

@@ -1,71 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.ozone.security.acl;
import java.io.IOException;
/**
* Timeout exception thrown by Ozone. Ex: When checking ACLs for an Object if
* security manager is not able to process the request in configured time than
* {@link OzoneAclException} should be thrown.
*/
public class OzoneAclException extends IOException {
private ErrorCode errorCode;
/**
* Constructs a new exception with {@code null} as its detail message. The
* cause is not initialized, and may subsequently be initialized by a call to
* {@link #initCause}.
*/
public OzoneAclException() {
super("");
}
/**
* Constructs a new exception with {@code null} as its detail message. The
* cause is not initialized, and may subsequently be initialized by a call to
* {@link #initCause}.
*/
public OzoneAclException(String errorMsg, ErrorCode code, Throwable ex) {
super(errorMsg, ex);
this.errorCode = code;
}
/**
* Constructs a new exception with {@code null} as its detail message. The
* cause is not initialized, and may subsequently be initialized by a call to
* {@link #initCause}.
*/
public OzoneAclException(String errorMsg, ErrorCode code) {
super(errorMsg);
this.errorCode = code;
}
/**
* Error codes for OzoneAclException.
*/
public enum ErrorCode {
TIMEOUT,
PERMISSION_DENIED,
OTHER
}
public ErrorCode getErrorCode() {
return errorCode;
}
}

View File

@@ -102,6 +102,9 @@ message OMRequest {
    required string clientId = 3;

+    optional UserInfo userInfo = 4;
+
    optional CreateVolumeRequest createVolumeRequest = 11;
    optional SetVolumePropertyRequest setVolumePropertyRequest = 12;
    optional CheckVolumeAccessRequest checkVolumeAccessRequest = 13;
@@ -271,6 +274,8 @@ enum Status {
    DIRECTORY_NOT_FOUND = 45;
    FILE_ALREADY_EXISTS = 46;
    NOT_A_FILE = 47;
+    PERMISSION_DENIED = 48;
+    TIMEOUT = 49;
}
@@ -284,6 +289,16 @@ message VolumeInfo {
    required uint64 creationTime = 7;
}

+/**
+    User information which will be extracted during RPC context and used
+    during validating Acl.
+*/
+message UserInfo {
+    optional string userName = 1;
+    optional string remoteAddress = 3;
+}
+
/**
    Creates a volume
*/
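For orientation before the server-side changes that follow, here is a hedged sketch of how the new UserInfo message rides inside an OMRequest, assuming only the classes generated from this .proto; the helper class itself is hypothetical and not part of the commit:

package org.apache.hadoop.ozone.om.request;

import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;

/** Hypothetical helper showing the round trip of the new userName/remoteAddress fields. */
public final class UserInfoExample {

  private UserInfoExample() {
  }

  /** Stamp a request with caller identity, mirroring OMClientRequest#preExecute below. */
  public static OMRequest withUserInfo(OMRequest request, String userName,
      String remoteAddress) {
    OzoneManagerProtocolProtos.UserInfo userInfo =
        OzoneManagerProtocolProtos.UserInfo.newBuilder()
            .setUserName(userName)
            .setRemoteAddress(remoteAddress)
            .build();
    return request.toBuilder().setUserInfo(userInfo).build();
  }

  /** Read the user name back defensively; both fields are optional in the proto. */
  public static String userNameOrNull(OMRequest request) {
    if (request.hasUserInfo() && request.getUserInfo().hasUserName()) {
      return request.getUserInfo().getUserName();
    }
    return null;
  }
}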

View File

@@ -24,10 +24,10 @@ import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneTestUtils;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.IOzoneObj;
-import org.apache.hadoop.ozone.security.acl.OzoneAclException;
import org.apache.hadoop.ozone.security.acl.RequestContext;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;
@@ -122,7 +122,7 @@
    createVolumeArgs.setAdminName(adminUser);
    createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
    logCapturer.clearOutput();
-    OzoneTestUtils.expectOmException(ResultCodes.INTERNAL_ERROR,
+    OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
        () -> storageHandler.createVolume(createVolumeArgs));
    assertTrue(logCapturer.getOutput().contains("doesn't have CREATE " +
        "permission to access volume"));
@@ -131,7 +131,7 @@
    bucketArgs.setAddAcls(new LinkedList<>());
    bucketArgs.setRemoveAcls(new LinkedList<>());
    bucketArgs.setStorageType(StorageType.DISK);
-    OzoneTestUtils.expectOmException(ResultCodes.INTERNAL_ERROR,
+    OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
        () -> storageHandler.createBucket(bucketArgs));
    assertTrue(logCapturer.getOutput().contains("doesn't have CREATE " +
        "permission to access bucket"));
@@ -155,7 +155,7 @@
    // write a key without specifying size at all
    String keyName = "testKey";
    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    OzoneTestUtils.expectOmException(ResultCodes.INTERNAL_ERROR,
+    OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
        () -> storageHandler.newKeyWriter(keyArgs));
    assertTrue(logCapturer.getOutput().contains("doesn't have READ permission" +
        " to access key"));
@@ -169,7 +169,7 @@ class OzoneAccessAuthrizerTest implements IAccessAuthorizer {
  @Override
  public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context)
-      throws OzoneAclException {
+      throws OMException {
    return false;
  }
}

View File

@@ -82,6 +82,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
      <scope>test</scope>
    </dependency>
+    <dependency>
+      <groupId>org.jmockit</groupId>
+      <artifactId>jmockit</artifactId>
+      <version>1.24</version>
+      <scope>test</scope>
+    </dependency>
  </dependencies>
  <build>
    <plugins>

View File

@@ -24,6 +24,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.protobuf.BlockingService;

+import java.net.InetAddress;
import java.security.PrivateKey;
import java.security.PublicKey;
import java.security.KeyPair;
@@ -126,8 +127,6 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
import org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneAclException;
-import org.apache.hadoop.ozone.security.acl.OzoneAclException.ErrorCode;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType;
import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType;
@@ -1733,18 +1732,36 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   * @param vol - name of volume
   * @param bucket - bucket name
   * @param key - key
-   * @throws OzoneAclException
+   * @throws OMException
   */
  private void checkAcls(ResourceType resType, StoreType store,
      ACLType acl, String vol, String bucket, String key)
-      throws OzoneAclException {
-    if(!isAclEnabled) {
-      return;
-    }
+      throws OMException {
+    checkAcls(resType, store, acl, vol, bucket, key,
+        ProtobufRpcEngine.Server.getRemoteUser(),
+        ProtobufRpcEngine.Server.getRemoteIp());
+  }
/**
* CheckAcls for the ozone object.
* @param resType
* @param storeType
* @param aclType
* @param vol
* @param bucket
* @param key
* @param ugi
* @param remoteAddress
* @throws OMException
*/
@SuppressWarnings("parameternumber")
public void checkAcls(ResourceType resType, StoreType storeType,
ACLType aclType, String vol, String bucket, String key,
UserGroupInformation ugi, InetAddress remoteAddress)
throws OMException {
    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
        .setResType(resType)
-        .setStoreType(store)
+        .setStoreType(storeType)
        .setVolumeName(vol)
        .setBucketName(bucket)
        .setKeyName(key).build();
@@ -1753,17 +1770,26 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
        .setClientUgi(user)
        .setIp(ProtobufRpcEngine.Server.getRemoteIp())
        .setAclType(ACLIdentityType.USER)
-        .setAclRights(acl)
+        .setAclRights(aclType)
        .build();
    if (!accessAuthorizer.checkAccess(obj, context)) {
      LOG.warn("User {} doesn't have {} permission to access {}",
-          user.getUserName(), acl, resType);
-      throw new OzoneAclException("User " + user.getUserName() + " doesn't " +
-          "have " + acl + " permission to access " + resType,
-          ErrorCode.PERMISSION_DENIED);
+          user.getUserName(), aclType, resType);
+      throw new OMException("User " + user.getUserName() + " doesn't " +
+          "have " + aclType + " permission to access " + resType,
+          ResultCodes.PERMISSION_DENIED);
    }
  }
/**
*
* Return true if Ozone acl's are enabled, else false.
* @return boolean
*/
public boolean getAclsEnabled() {
return isAclEnabled;
}
  /**
   * Changes the owner of a volume.
   *
@@ -2406,8 +2432,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   */
  @Override
  public void deleteBucket(String volume, String bucket) throws IOException {
+    if (isAclEnabled) {
      checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, volume,
          bucket, null);
+    }
    Map<String, String> auditMap = buildAuditMap(volume);
    auditMap.put(OzoneConsts.BUCKET, bucket);
    try {

View File

@@ -19,13 +19,23 @@
package org.apache.hadoop.ozone.om.request;

import java.io.IOException;
+import java.net.InetAddress;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
    .OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OMResponse;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.security.UserGroupInformation;

/**
 * OMClientRequest provides methods which every write OM request should
@@ -50,8 +60,10 @@ public abstract class OMClientRequest {
   * @return OMRequest that will be serialized and handed off to Ratis for
   * consensus.
   */
-  public abstract OMRequest preExecute(OzoneManager ozoneManager)
-      throws IOException;
+  public OMRequest preExecute(OzoneManager ozoneManager)
+      throws IOException {
+    return getOmRequest().toBuilder().setUserInfo(getUserInfo()).build();
+  }

  /**
   * Validate the OMRequest and update the cache.
@@ -70,4 +82,95 @@ public abstract class OMClientRequest {
    return omRequest;
  }
/**
* Get User information from the OMRequest.
* @return User Info.
*/
public OzoneManagerProtocolProtos.UserInfo getUserInfo() {
UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
InetAddress remoteAddress = ProtobufRpcEngine.Server.getRemoteIp();
OzoneManagerProtocolProtos.UserInfo.Builder userInfo =
OzoneManagerProtocolProtos.UserInfo.newBuilder();
// Added not null checks, as in UT's these values might be null.
if (user != null) {
userInfo.setUserName(user.getUserName());
}
if (remoteAddress != null) {
userInfo.setRemoteAddress(remoteAddress.getHostAddress()).build();
}
return userInfo.build();
}
/**
* Check Acls of ozone object.
* @param ozoneManager
* @param resType
* @param storeType
* @param aclType
* @param vol
* @param bucket
* @param key
* @throws IOException
*/
public void checkAcls(OzoneManager ozoneManager,
OzoneObj.ResourceType resType,
OzoneObj.StoreType storeType, IAccessAuthorizer.ACLType aclType,
String vol, String bucket, String key) throws IOException {
ozoneManager.checkAcls(resType, storeType, aclType, vol, bucket, key,
createUGI(), getRemoteAddress());
}
/**
* Return UGI object created from OMRequest userInfo. If userInfo is not
* set, returns null.
* @return UserGroupInformation.
*/
@VisibleForTesting
public UserGroupInformation createUGI() {
if (omRequest.hasUserInfo()) {
return UserGroupInformation.createRemoteUser(
omRequest.getUserInfo().getUserName());
} else {
// This will never happen, as for every OM request preExecute, we
// should add userInfo.
return null;
}
}
/**
* Return InetAddress created from OMRequest userInfo. If userInfo is not
* set, returns null.
* @return InetAddress
* @throws IOException
*/
@VisibleForTesting
public InetAddress getRemoteAddress() throws IOException {
if (omRequest.hasUserInfo()) {
return InetAddress.getByName(omRequest.getUserInfo()
.getRemoteAddress());
} else {
return null;
}
}
/**
* Set parameters needed for return error response to client.
* @param omResponse
* @param ex - IOException
* @return error response need to be returned to client - OMResponse.
*/
protected OMResponse createErrorOMResponse(OMResponse.Builder omResponse,
IOException ex) {
omResponse.setSuccess(false);
if (ex.getMessage() != null) {
omResponse.setMessage(ex.getMessage());
}
omResponse.setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex));
return omResponse.build();
}
}

View File

@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.request.bucket;
import java.io.IOException;

import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -34,6 +33,7 @@ import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@@ -50,7 +50,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
    .OMResponse;
import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.db.cache.CacheKey;
import org.apache.hadoop.utils.db.cache.CacheValue;
@@ -94,8 +95,8 @@ public class OMBucketCreateRequest extends OMClientRequest {
    }
    newCreateBucketRequest.setBucketInfo(newBucketInfo.build());
-    return getOmRequest().toBuilder().setCreateBucketRequest(
-        newCreateBucketRequest.build()).build();
+    return getOmRequest().toBuilder().setUserInfo(getUserInfo())
+        .setCreateBucketRequest(newCreateBucketRequest.build()).build();
  }

  @Override
@@ -114,15 +115,29 @@ public class OMBucketCreateRequest extends OMClientRequest {
    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
        OzoneManagerProtocolProtos.Type.CreateBucket).setStatus(
        OzoneManagerProtocolProtos.Status.OK);
-    OmBucketInfo omBucketInfo = null;
+    OmBucketInfo omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET,
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.CREATE,
volumeName, bucketName, null);
}
} catch (IOException ex) {
LOG.error("Bucket creation failed for bucket:{} in volume:{}",
bucketName, volumeName, ex);
omMetrics.incNumBucketCreateFails();
return new OMBucketCreateResponse(omBucketInfo,
createErrorOMResponse(omResponse, ex));
}
String volumeKey = metadataManager.getVolumeKey(volumeName);
String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
    metadataManager.getLock().acquireVolumeLock(volumeName);
    metadataManager.getLock().acquireBucketLock(volumeName, bucketName);
    try {
-      String volumeKey = metadataManager.getVolumeKey(volumeName);
-      String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
      //Check if the volume exists
      if (metadataManager.getVolumeTable().get(volumeKey) == null) {
@@ -137,7 +152,6 @@
            OMException.ResultCodes.BUCKET_ALREADY_EXISTS);
      }
-      omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
      LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
      omMetrics.incNumBuckets();
@@ -145,22 +159,21 @@
      metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey),
          new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
-      // TODO: check acls.
+      // return response.
+      omResponse.setCreateBucketResponse(
+          CreateBucketResponse.newBuilder().build());
+      return new OMBucketCreateResponse(omBucketInfo, omResponse.build());
    } catch (IOException ex) {
      omMetrics.incNumBucketCreateFails();
      LOG.error("Bucket creation failed for bucket:{} in volume:{}",
          bucketName, volumeName, ex);
-      omResponse.setStatus(
-          OzoneManagerRatisUtils.exceptionToResponseStatus(ex));
-      omResponse.setMessage(ex.getMessage());
-      omResponse.setSuccess(false);
+      return new OMBucketCreateResponse(omBucketInfo,
+          createErrorOMResponse(omResponse, ex));
    } finally {
      metadataManager.getLock().releaseBucketLock(volumeName, bucketName);
      metadataManager.getLock().releaseVolumeLock(volumeName);
    }
-    omResponse.setCreateBucketResponse(
-        CreateBucketResponse.newBuilder().build());
-    return new OMBucketCreateResponse(omBucketInfo, omResponse.build());
  }

View File

@@ -21,10 +21,13 @@ package org.apache.hadoop.ozone.om.request.bucket;
import java.io.IOException;

import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OzoneManager;
@@ -32,12 +35,12 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .DeleteBucketResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
    .OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
    .OMResponse;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
import org.apache.hadoop.utils.db.cache.CacheKey;
import org.apache.hadoop.utils.db.cache.CacheValue;
@@ -52,12 +55,6 @@ public class OMBucketDeleteRequest extends OMClientRequest {
    super(omRequest);
  }
@Override
public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
// For Delete Bucket there are no preExecute steps
return getOmRequest();
}
  @Override
  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
      long transactionLogIndex) {
@@ -69,14 +66,28 @@
    String volumeName = omRequest.getDeleteBucketRequest().getVolumeName();
    String bucketName = omRequest.getDeleteBucketRequest().getBucketName();

-    // acquire lock
-    omMetadataManager.getLock().acquireBucketLock(volumeName, bucketName);

    // Generate end user response
    OMResponse.Builder omResponse = OMResponse.newBuilder()
        .setStatus(OzoneManagerProtocolProtos.Status.OK)
        .setCmdType(omRequest.getCmdType());
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET,
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
volumeName, bucketName, null);
}
} catch (IOException ex) {
omMetrics.incNumBucketDeleteFails();
LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName,
volumeName, ex);
return new OMBucketDeleteResponse(volumeName, bucketName,
createErrorOMResponse(omResponse, ex));
}
// acquire lock
omMetadataManager.getLock().acquireBucketLock(volumeName, bucketName);
    try {
      // No need to check volume exists here, as bucket cannot be created
      // with out volume creation.
@@ -101,17 +112,21 @@
      omMetadataManager.getBucketTable().addCacheEntry(
          new CacheKey<>(bucketKey),
          new CacheValue<>(Optional.absent(), transactionLogIndex));
-      // TODO: check acls.
+      // return response.
omResponse.setDeleteBucketResponse(
DeleteBucketResponse.newBuilder().build());
return new OMBucketDeleteResponse(volumeName, bucketName,
omResponse.build());
    } catch (IOException ex) {
      omMetrics.incNumBucketDeleteFails();
      LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName,
          volumeName, ex);
-      omResponse.setSuccess(false).setMessage(ex.getMessage())
-          .setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex));
+      return new OMBucketDeleteResponse(volumeName, bucketName,
+          createErrorOMResponse(omResponse, ex));
    } finally {
      omMetadataManager.getLock().releaseBucketLock(volumeName, bucketName);
    }
-    return new OMBucketDeleteResponse(volumeName, bucketName,
-        omResponse.build());
  }
}

View File

@@ -22,10 +22,14 @@ import java.io.IOException;
import java.util.List;

import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -44,8 +48,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
    .OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
    .OMResponse;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .SetBucketPropertyResponse;
import org.apache.hadoop.utils.db.cache.CacheKey;
import org.apache.hadoop.utils.db.cache.CacheValue;
@@ -59,10 +63,6 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
  public OMBucketSetPropertyRequest(OMRequest omRequest) {
    super(omRequest);
  }
@Override
public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
return getOmRequest();
}
  @Override
  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
@@ -87,8 +87,6 @@
    String volumeName = bucketArgs.getVolumeName();
    String bucketName = bucketArgs.getBucketName();

-    // acquire lock
-    omMetadataManager.getLock().acquireBucketLock(volumeName, bucketName);

    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
        OzoneManagerProtocolProtos.Type.CreateBucket).setStatus(
@@ -96,6 +94,27 @@
    OmBucketInfo omBucketInfo = null;

    try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET,
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
volumeName, bucketName, null);
}
} catch (IOException ex) {
if (omMetrics != null) {
omMetrics.incNumBucketUpdateFails();
}
LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
bucketName, volumeName, ex);
return new OMBucketSetPropertyResponse(omBucketInfo,
createErrorOMResponse(omResponse, ex));
}
// acquire lock
omMetadataManager.getLock().acquireBucketLock(volumeName, bucketName);
try {
      String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
      OmBucketInfo oldBucketInfo =
          omMetadataManager.getBucketTable().get(bucketKey);
@@ -151,19 +170,22 @@
      omMetadataManager.getBucketTable().addCacheEntry(
          new CacheKey<>(bucketKey),
          new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
-      // TODO: check acls.
+      // return response.
omResponse.setSetBucketPropertyResponse(
SetBucketPropertyResponse.newBuilder().build());
return new OMBucketSetPropertyResponse(omBucketInfo, omResponse.build());
    } catch (IOException ex) {
      if (omMetrics != null) {
        omMetrics.incNumBucketUpdateFails();
      }
      LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
          bucketName, volumeName, ex);
-      omResponse.setSuccess(false).setMessage(ex.getMessage())
-          .setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex));
+      return new OMBucketSetPropertyResponse(omBucketInfo,
+          createErrorOMResponse(omResponse, ex));
    } finally {
      omMetadataManager.getLock().releaseBucketLock(volumeName, bucketName);
    }
-    return new OMBucketSetPropertyResponse(omBucketInfo, omResponse.build());
  }

  /**

View File

@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.response;
import java.io.IOException;

+import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
    .OMResponse;
@@ -33,6 +34,7 @@ public abstract class OMClientResponse {
  private OMResponse omResponse;

  public OMClientResponse(OMResponse omResponse) {
+    Preconditions.checkNotNull(omResponse);
    this.omResponse = omResponse;
  }

View File

@@ -23,6 +23,7 @@ import java.io.IOException;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
    .OMResponse;
import org.apache.hadoop.utils.db.BatchOperation;
@@ -43,11 +44,14 @@ public final class OMBucketCreateResponse extends OMClientResponse {
  @Override
  public void addToDBBatch(OMMetadataManager omMetadataManager,
      BatchOperation batchOperation) throws IOException {
+    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
      String dbBucketKey =
          omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
              omBucketInfo.getBucketName());
-      omMetadataManager.getBucketTable().putWithBatch(batchOperation, dbBucketKey,
-          omBucketInfo);
+      omMetadataManager.getBucketTable().putWithBatch(batchOperation,
+          dbBucketKey, omBucketInfo);
+    }
  }

  public OmBucketInfo getOmBucketInfo() {

View File

@@ -44,11 +44,13 @@ public final class OMBucketDeleteResponse extends OMClientResponse {
  @Override
  public void addToDBBatch(OMMetadataManager omMetadataManager,
      BatchOperation batchOperation) throws IOException {
+    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
      String dbBucketKey =
          omMetadataManager.getBucketKey(volumeName, bucketName);
      omMetadataManager.getBucketTable().deleteWithBatch(batchOperation,
          dbBucketKey);
    }
+  }

  public String getVolumeName() {
    return volumeName;

View File

@@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
    .OMResponse;
import org.apache.hadoop.utils.db.BatchOperation;
@@ -41,11 +42,13 @@ public class OMBucketSetPropertyResponse extends OMClientResponse {
  @Override
  public void addToDBBatch(OMMetadataManager omMetadataManager,
      BatchOperation batchOperation) throws IOException {
+    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
      String dbBucketKey =
          omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
              omBucketInfo.getBucketName());
-      omMetadataManager.getBucketTable().putWithBatch(batchOperation, dbBucketKey,
-          omBucketInfo);
+      omMetadataManager.getBucketTable().putWithBatch(batchOperation,
+          dbBucketKey, omBucketInfo);
+    }
  }
}

View File

@@ -0,0 +1,119 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.ozone.om.request;
import java.net.InetAddress;
import java.util.UUID;
import mockit.Mock;
import mockit.MockUp;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest;
import org.apache.hadoop.security.UserGroupInformation;
import static org.mockito.Mockito.when;
/**
* Test OMClient Request with user information.
*/
public class TestOMClientRequestWithUserInfo {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private OzoneManager ozoneManager;
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
private UserGroupInformation userGroupInformation =
UserGroupInformation.createRemoteUser("temp");
private InetAddress inetAddress;
@Before
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
inetAddress = InetAddress.getByName("127.0.0.1");
new MockUp<ProtobufRpcEngine.Server>() {
@Mock
public UserGroupInformation getRemoteUser() {
return userGroupInformation;
}
public InetAddress getRemoteAddress() {
return inetAddress;
}
};
}
@Test
public void testUserInfo() throws Exception {
String bucketName = UUID.randomUUID().toString();
String volumeName = UUID.randomUUID().toString();
OzoneManagerProtocolProtos.OMRequest omRequest =
TestOMRequestUtils.createBucketRequest(bucketName, volumeName, true,
OzoneManagerProtocolProtos.StorageTypeProto.DISK);
OMBucketCreateRequest omBucketCreateRequest =
new OMBucketCreateRequest(omRequest);
Assert.assertFalse(omRequest.hasUserInfo());
OzoneManagerProtocolProtos.OMRequest modifiedRequest =
omBucketCreateRequest.preExecute(ozoneManager);
Assert.assertTrue(modifiedRequest.hasUserInfo());
// Now pass modified request to OMBucketCreateRequest and check ugi and
// remote Address.
omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest);
InetAddress remoteAddress = omBucketCreateRequest.getRemoteAddress();
UserGroupInformation ugi = omBucketCreateRequest.createUGI();
// Now check we have original user info and remote address or not.
// Here from OMRequest user info, converted to UGI and InetAddress.
Assert.assertEquals(inetAddress.getHostAddress(),
remoteAddress.getHostAddress());
Assert.assertEquals(userGroupInformation.getUserName(), ugi.getUserName());
}
}

View File

@@ -19,13 +19,17 @@
package org.apache.hadoop.ozone.om.request;

+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.util.Time;
-
-import java.util.UUID;

/**
 * Helper class to test OMClientRequest classes.
 */
@@ -70,4 +74,33 @@
    omMetadataManager.getVolumeTable().put(
        omMetadataManager.getVolumeKey(volumeName), omVolumeArgs);
  }
public static OzoneManagerProtocolProtos.OMRequest createBucketRequest(
String bucketName, String volumeName, boolean isVersionEnabled,
OzoneManagerProtocolProtos.StorageTypeProto storageTypeProto) {
OzoneManagerProtocolProtos.BucketInfo bucketInfo =
OzoneManagerProtocolProtos.BucketInfo.newBuilder()
.setBucketName(bucketName)
.setVolumeName(volumeName)
.setIsVersionEnabled(isVersionEnabled)
.setStorageType(storageTypeProto)
.addAllMetadata(getMetadataList()).build();
OzoneManagerProtocolProtos.CreateBucketRequest.Builder req =
OzoneManagerProtocolProtos.CreateBucketRequest.newBuilder();
req.setBucketInfo(bucketInfo);
return OzoneManagerProtocolProtos.OMRequest.newBuilder()
.setCreateBucketRequest(req)
.setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
.setClientId(UUID.randomUUID().toString()).build();
}
public static List< HddsProtos.KeyValue> getMetadataList() {
List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue(
"value1").build());
metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue(
"value2").build());
return metadataList;
}
}

View File

@@ -19,8 +19,6 @@
package org.apache.hadoop.ozone.om.request.bucket;

-import java.util.ArrayList;
-import java.util.List;
import java.util.UUID;

import org.junit.After;
@@ -43,9 +41,9 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
    .OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
    .StorageTypeProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.util.Time;
@@ -109,8 +107,8 @@ public class TestOMBucketCreateRequest {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();

-    OMRequest originalRequest = createBucketRequest(bucketName, volumeName,
-        false, StorageTypeProto.SSD);
+    OMRequest originalRequest = TestOMRequestUtils.createBucketRequest(
+        bucketName, volumeName, false, StorageTypeProto.SSD);

    OMBucketCreateRequest omBucketCreateRequest =
        new OMBucketCreateRequest(originalRequest);
@@ -161,8 +159,9 @@
  private OMBucketCreateRequest doPreExecute(String volumeName,
      String bucketName) throws Exception {
    addCreateVolumeToTable(volumeName, omMetadataManager);
-    OMRequest originalRequest = createBucketRequest(bucketName, volumeName,
-        false, StorageTypeProto.SSD);
+    OMRequest originalRequest =
+        TestOMRequestUtils.createBucketRequest(bucketName, volumeName, false,
+            StorageTypeProto.SSD);

    OMBucketCreateRequest omBucketCreateRequest =
        new OMBucketCreateRequest(originalRequest);
@@ -239,32 +238,4 @@
        omMetadataManager.getVolumeKey(volumeName), omVolumeArgs);
  }
public static OMRequest createBucketRequest(String bucketName,
String volumeName, boolean isVersionEnabled,
StorageTypeProto storageTypeProto) {
OzoneManagerProtocolProtos.BucketInfo bucketInfo =
OzoneManagerProtocolProtos.BucketInfo.newBuilder()
.setBucketName(bucketName)
.setVolumeName(volumeName)
.setIsVersionEnabled(isVersionEnabled)
.setStorageType(storageTypeProto)
.addAllMetadata(getMetadataList()).build();
OzoneManagerProtocolProtos.CreateBucketRequest.Builder req =
OzoneManagerProtocolProtos.CreateBucketRequest.newBuilder();
req.setBucketInfo(bucketInfo);
return OMRequest.newBuilder().setCreateBucketRequest(req)
.setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
.setClientId(UUID.randomUUID().toString()).build();
}
public static List< HddsProtos.KeyValue> getMetadataList() {
List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue(
"value1").build());
metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue(
"value2").build());
return metadataList;
}
}

View File

@@ -84,9 +84,8 @@ public class TestOMBucketDeleteRequest {
    OMBucketDeleteRequest omBucketDeleteRequest =
        new OMBucketDeleteRequest(omRequest);

-    // As preExecute of DeleteBucket request is do nothing, requests should
-    // be same.
-    Assert.assertEquals(omRequest,
+    // As user info gets added.
+    Assert.assertNotEquals(omRequest,
        omBucketDeleteRequest.preExecute(ozoneManager));
  }

View File

@@ -90,7 +90,8 @@ public class TestOMBucketSetPropertyRequest {
    OMBucketSetPropertyRequest omBucketSetPropertyRequest =
        new OMBucketSetPropertyRequest(omRequest);

-    Assert.assertEquals(omRequest,
+    // As user info gets added.
+    Assert.assertNotEquals(omRequest,
        omBucketSetPropertyRequest.preExecute(ozoneManager));
  }