HDDS-1856. Make required changes for Non-HA to use new HA code in OM. (#1174)

Bharat Viswanadham 2019-07-30 22:39:03 -07:00 committed by GitHub
parent 0f2dad6679
commit e5c4131485
52 changed files with 1054 additions and 916 deletions


@ -1378,7 +1378,6 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
OZONE_OM_HANDLER_COUNT_DEFAULT);
RPC.setProtocolEngine(configuration, OzoneManagerProtocolPB.class,
ProtobufRpcEngine.class);
this.omServerProtocol = new OzoneManagerProtocolServerSideTranslatorPB(
this, omRatisServer, isRatisEnabled);
@ -1461,6 +1460,11 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
scheduleOMMetricsWriteTask = null;
}
omRpcServer.stop();
// When Ratis is not enabled, we need to call stop() to stop the
// OzoneManagerDoubleBuffer in the OM server protocol.
if (!isRatisEnabled) {
omServerProtocol.stop();
}
if (omRatisServer != null) {
omRatisServer.stop();
}
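
The stop() on the server-side translator referenced above is not part of the hunks shown here. A minimal sketch of the delegation it implies, assuming the translator keeps a reference to the OzoneManagerDoubleBuffer it creates in the Non-HA path (field name and method body are hypothetical):

// Sketch, not the actual commit: in the Non-HA path the translator owns the
// double buffer, so stopping the translator shuts down the flush daemon.
public void stop() {
  if (!isRatisEnabled && ozoneManagerDoubleBuffer != null) {
    ozoneManagerDoubleBuffer.stop();
  }
}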


@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.ratis;
import java.io.IOException;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
@ -61,6 +62,15 @@ public class OzoneManagerDoubleBuffer {
private Queue<DoubleBufferEntry<OMClientResponse>> currentBuffer;
private Queue<DoubleBufferEntry<OMClientResponse>> readyBuffer;
// Queue that holds the futures returned by the add() method.
private volatile Queue<CompletableFuture<Void>> currentFutureQueue;
// When currentBuffer is swapped with readyBuffer, currentFutureQueue is
// swapped with readyFutureQueue as well. After the flush completes in the
// flushTransactions daemon thread, the futures in readyFutureQueue are
// completed and the queue is cleared.
private volatile Queue<CompletableFuture<Void>> readyFutureQueue;
private Daemon daemon;
private final OMMetadataManager omMetadataManager;
private final AtomicLong flushedTransactionCount = new AtomicLong(0);
@ -71,10 +81,29 @@ public class OzoneManagerDoubleBuffer {
private final OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot;
private final boolean isRatisEnabled;
public OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager,
OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot) {
this(omMetadataManager, ozoneManagerRatisSnapShot, true);
}
public OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager,
OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot,
boolean isRatisEnabled) {
this.currentBuffer = new ConcurrentLinkedQueue<>();
this.readyBuffer = new ConcurrentLinkedQueue<>();
this.isRatisEnabled = isRatisEnabled;
if (!isRatisEnabled) {
this.currentFutureQueue = new ConcurrentLinkedQueue<>();
this.readyFutureQueue = new ConcurrentLinkedQueue<>();
} else {
this.currentFutureQueue = null;
this.readyFutureQueue = null;
}
this.omMetadataManager = omMetadataManager;
this.ozoneManagerRatisSnapShot = ozoneManagerRatisSnapShot;
this.ozoneManagerDoubleBufferMetrics =
@ -88,6 +117,9 @@ public class OzoneManagerDoubleBuffer {
}
/**
* Runs in a background thread, batches the transactions in currentBuffer,
* and commits them to the DB.
@ -138,6 +170,15 @@ public class OzoneManagerDoubleBuffer {
// set metrics.
updateMetrics(flushedTransactionsSize);
if (!isRatisEnabled) {
// Once all the entries are flushed, we can complete their futures.
readyFutureQueue.iterator().forEachRemaining((entry) -> {
entry.complete(null);
});
readyFutureQueue.clear();
}
}
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
@ -248,10 +289,20 @@ public class OzoneManagerDoubleBuffer {
* @param response
* @param transactionIndex
*/
public synchronized void add(OMClientResponse response,
public synchronized CompletableFuture<Void> add(OMClientResponse response,
long transactionIndex) {
currentBuffer.add(new DoubleBufferEntry<>(transactionIndex, response));
notify();
if (!isRatisEnabled) {
CompletableFuture<Void> future = new CompletableFuture<>();
currentFutureQueue.add(future);
return future;
} else {
// In the HA (Ratis enabled) case the caller does not use the returned
// future, so there is no future to create or track here.
return null;
}
}
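
Since add() now hands back the flush future in the Non-HA path, the caller can block until the flush daemon has committed the batch before replying to the client. A minimal usage sketch; the helper method and variable names are assumed for illustration:

// Sketch: submit a response in the Non-HA path and wait until it has been
// flushed to the DB. The future is completed by the flush daemon once the
// batch containing this entry has been committed.
private void addToBufferAndWait(OzoneManagerDoubleBuffer doubleBuffer,
    OMClientResponse omClientResponse, long transactionIndex)
    throws InterruptedException, java.util.concurrent.ExecutionException {
  CompletableFuture<Void> flushFuture =
      doubleBuffer.add(omClientResponse, transactionIndex);
  if (flushFuture != null) {   // null when Ratis (HA) is enabled
    flushFuture.get();
  }
}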
/**
@ -279,6 +330,13 @@ public class OzoneManagerDoubleBuffer {
Queue<DoubleBufferEntry<OMClientResponse>> temp = currentBuffer;
currentBuffer = readyBuffer;
readyBuffer = temp;
if (!isRatisEnabled) {
// Swap future queue.
Queue<CompletableFuture<Void>> tempFuture = currentFutureQueue;
currentFutureQueue = readyFutureQueue;
readyFutureQueue = tempFuture;
}
}
@VisibleForTesting


@ -0,0 +1,15 @@
package org.apache.hadoop.ozone.om.ratis.utils;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import java.util.concurrent.CompletableFuture;
/**
* Helper interface for adding OMClientResponses to the
* OzoneManagerDoubleBuffer and returning the corresponding flush future.
*/
public interface OzoneManagerDoubleBufferHelper {
CompletableFuture<Void> add(OMClientResponse response,
long transactionIndex);
}
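
Because OzoneManagerDoubleBuffer.add(OMClientResponse, long) has exactly this shape, the helper can be wired up as a method reference. A minimal sketch using the three-argument constructor shown above (variable names illustrative):

// Sketch: a Non-HA double buffer exposed to request handlers through the
// helper interface via a method reference.
OzoneManagerDoubleBuffer doubleBuffer = new OzoneManagerDoubleBuffer(
    omMetadataManager, ozoneManagerRatisSnapShot, false /* isRatisEnabled */);
OzoneManagerDoubleBufferHelper doubleBufferHelper = doubleBuffer::add;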


@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.audit.AuditEventStatus;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@ -85,7 +86,8 @@ public abstract class OMClientRequest implements RequestAuditor {
* @return the response that will be returned to the client.
*/
public abstract OMClientResponse validateAndUpdateCache(
OzoneManager ozoneManager, long transactionLogIndex);
OzoneManager ozoneManager, long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper);
@VisibleForTesting
public OMRequest getOmRequest() {
@ -213,5 +215,4 @@ public abstract class OMClientRequest implements RequestAuditor {
auditMap.put(OzoneConsts.VOLUME, volume);
return auditMap;
}
}


@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.request.bucket;
import java.io.IOException;
import com.google.common.base.Optional;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -106,7 +107,8 @@ public class OMBucketCreateRequest extends OMClientRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
OMMetrics omMetrics = ozoneManager.getMetrics();
omMetrics.incNumBucketCreates();
@ -125,6 +127,13 @@ public class OMBucketCreateRequest extends OMClientRequest {
AuditLogger auditLogger = ozoneManager.getAuditLogger();
OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
String volumeKey = metadataManager.getVolumeKey(volumeName);
String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
IOException exception = null;
boolean acquiredBucketLock = false;
boolean acquiredVolumeLock = false;
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -132,24 +141,9 @@ public class OMBucketCreateRequest extends OMClientRequest {
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.CREATE,
volumeName, bucketName, null);
}
} catch (IOException ex) {
LOG.error("Bucket creation failed for bucket:{} in volume:{}",
bucketName, volumeName, ex);
omMetrics.incNumBucketCreateFails();
auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_BUCKET,
omBucketInfo.toAuditMap(), ex, userInfo));
return new OMBucketCreateResponse(omBucketInfo,
createErrorOMResponse(omResponse, ex));
}
String volumeKey = metadataManager.getVolumeKey(volumeName);
String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
IOException exception = null;
boolean acquiredBucketLock = false;
metadataManager.getLock().acquireLock(VOLUME_LOCK, volumeName);
try {
acquiredVolumeLock = metadataManager.getLock().acquireLock(VOLUME_LOCK,
volumeName);
acquiredBucketLock = metadataManager.getLock().acquireLock(BUCKET_LOCK,
volumeName, bucketName);
//Check if the volume exists
@ -169,15 +163,27 @@ public class OMBucketCreateRequest extends OMClientRequest {
metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey),
new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
omResponse.setCreateBucketResponse(
CreateBucketResponse.newBuilder().build());
omClientResponse = new OMBucketCreateResponse(omBucketInfo,
omResponse.build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMBucketCreateResponse(omBucketInfo,
createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredBucketLock) {
metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
}
metadataManager.getLock().releaseLock(VOLUME_LOCK, volumeName);
if (acquiredVolumeLock) {
metadataManager.getLock().releaseLock(VOLUME_LOCK, volumeName);
}
}
// Performing audit logging outside of the lock.
@ -188,15 +194,12 @@ public class OMBucketCreateRequest extends OMClientRequest {
if (exception == null) {
LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
omMetrics.incNumBuckets();
omResponse.setCreateBucketResponse(
CreateBucketResponse.newBuilder().build());
return new OMBucketCreateResponse(omBucketInfo, omResponse.build());
return omClientResponse;
} else {
omMetrics.incNumBucketCreateFails();
LOG.error("Bucket creation failed for bucket:{} in volume:{}",
bucketName, volumeName, exception);
return new OMBucketCreateResponse(omBucketInfo,
createErrorOMResponse(omResponse, exception));
return omClientResponse;
}
}
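
The remaining request classes in this commit apply the same restructuring shown above for bucket creation: build the success response inside try, build the error response in catch, and in finally register the response with the double buffer through the helper before releasing any locks. A condensed sketch of the shared pattern; the response class and the elided steps are placeholders:

// Shared validateAndUpdateCache() pattern (sketch):
OMClientResponse omClientResponse = null;
try {
  // ... ACL checks, lock acquisition, table/cache updates ...
  omClientResponse = new SomeOMResponse(result, omResponse.build());
} catch (IOException ex) {
  exception = ex;
  omClientResponse = new SomeOMResponse(null,
      createErrorOMResponse(omResponse, exception));
} finally {
  if (omClientResponse != null) {
    // Hand the response to the double buffer; in the Non-HA path the
    // returned future completes once the response is flushed to the DB.
    omClientResponse.setFlushFuture(
        ozoneManagerDoubleBufferHelper.add(omClientResponse,
            transactionLogIndex));
  }
  // ... release any locks acquired above ...
}
return omClientResponse;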


@ -22,6 +22,7 @@ import java.io.IOException;
import java.util.Map;
import com.google.common.base.Optional;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -63,7 +64,8 @@ public class OMBucketDeleteRequest extends OMClientRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
OMMetrics omMetrics = ozoneManager.getMetrics();
omMetrics.incNumBucketDeletes();
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
@ -83,8 +85,10 @@ public class OMBucketDeleteRequest extends OMClientRequest {
auditMap.put(OzoneConsts.BUCKET, bucketName);
OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
IOException exception = null;
boolean acquiredLock = false;
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -92,21 +96,12 @@ public class OMBucketDeleteRequest extends OMClientRequest {
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
volumeName, bucketName, null);
}
} catch (IOException ex) {
omMetrics.incNumBucketDeleteFails();
LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName,
volumeName, ex);
auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_BUCKET,
auditMap, ex, userInfo));
return new OMBucketDeleteResponse(volumeName, bucketName,
createErrorOMResponse(omResponse, ex));
}
IOException exception = null;
// acquire lock
omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName,
bucketName);
try {
// acquire lock
acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK,
volumeName, bucketName);
// No need to check whether the volume exists here, as a bucket cannot be
// created without volume creation.
//Check if bucket exists
@ -131,11 +126,26 @@ public class OMBucketDeleteRequest extends OMClientRequest {
new CacheKey<>(bucketKey),
new CacheValue<>(Optional.absent(), transactionLogIndex));
omResponse.setDeleteBucketResponse(
DeleteBucketResponse.newBuilder().build());
// Build the success response; it is added to the double buffer in the
// finally block below.
omClientResponse = new OMBucketDeleteResponse(volumeName, bucketName,
omResponse.build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMBucketDeleteResponse(volumeName, bucketName,
createErrorOMResponse(omResponse, exception));
} finally {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredLock) {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
}
}
// Performing audit logging outside of the lock.
@ -145,16 +155,12 @@ public class OMBucketDeleteRequest extends OMClientRequest {
// return response.
if (exception == null) {
LOG.debug("Deleted bucket:{} in volume:{}", bucketName, volumeName);
omResponse.setDeleteBucketResponse(
DeleteBucketResponse.newBuilder().build());
return new OMBucketDeleteResponse(volumeName, bucketName,
omResponse.build());
return omClientResponse;
} else {
omMetrics.incNumBucketDeleteFails();
LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName,
volumeName, exception);
return new OMBucketDeleteResponse(volumeName, bucketName,
createErrorOMResponse(omResponse, exception));
return omClientResponse;
}
}
}


@ -25,6 +25,7 @@ import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.OMAction;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -73,7 +74,8 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
SetBucketPropertyRequest setBucketPropertyRequest =
@ -101,7 +103,9 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
AuditLogger auditLogger = ozoneManager.getAuditLogger();
OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
IOException exception = null;
boolean acquiredLock = false;
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -109,23 +113,11 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
volumeName, bucketName, null);
}
} catch (IOException ex) {
LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
bucketName, volumeName, ex);
omMetrics.incNumBucketUpdateFails();
auditLog(auditLogger, buildAuditMessage(OMAction.UPDATE_BUCKET,
omBucketArgs.toAuditMap(), ex, userInfo));
return new OMBucketSetPropertyResponse(omBucketInfo,
createErrorOMResponse(omResponse, ex));
}
IOException exception = null;
// acquire lock
omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName,
bucketName);
try {
// acquire lock
acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK,
volumeName, bucketName);
String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
OmBucketInfo oldBucketInfo =
@ -182,11 +174,24 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
new CacheKey<>(bucketKey),
new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
omResponse.setSetBucketPropertyResponse(
SetBucketPropertyResponse.newBuilder().build());
omClientResponse = new OMBucketSetPropertyResponse(omBucketInfo,
omResponse.build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMBucketSetPropertyResponse(omBucketInfo,
createErrorOMResponse(omResponse, exception));
} finally {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredLock) {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
}
}
// Performing audit logging outside of the lock.
@ -197,15 +202,12 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
if (exception == null) {
LOG.debug("Setting bucket property for bucket:{} in volume:{}",
bucketName, volumeName);
omResponse.setSetBucketPropertyResponse(
SetBucketPropertyResponse.newBuilder().build());
return new OMBucketSetPropertyResponse(omBucketInfo, omResponse.build());
return omClientResponse;
} else {
LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
bucketName, volumeName, exception);
omMetrics.incNumBucketUpdateFails();
return new OMBucketSetPropertyResponse(omBucketInfo,
createErrorOMResponse(omResponse, exception));
return omClientResponse;
}
}


@ -26,6 +26,7 @@ import java.util.Map;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -100,7 +101,8 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
KeyArgs keyArgs = getOmRequest().getCreateDirectoryRequest().getKeyArgs();
@ -123,8 +125,7 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
boolean acquiredLock = false;
IOException exception = null;
OmKeyInfo dirKeyInfo = null;
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -154,13 +155,13 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
BUCKET_NOT_FOUND);
}
// Need to check if any files exist in the given path; if they do, we
// cannot create a directory with the given key.
OMFileRequest.OMDirectoryResult omDirectoryResult =
OMFileRequest.verifyFilesInPath(omMetadataManager,
volumeName, bucketName, keyName, Paths.get(keyName));
OmKeyInfo dirKeyInfo = null;
if (omDirectoryResult == FILE_EXISTS ||
omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
throw new OMException("Unable to create directory: " +keyName
@ -180,9 +181,21 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
// exception? Current KeyManagerImpl code does just return, following
// similar approach.
omResponse.setCreateDirectoryResponse(
CreateDirectoryResponse.newBuilder());
omClientResponse = new OMDirectoryCreateResponse(dirKeyInfo,
omResponse.build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMDirectoryCreateResponse(null,
createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredLock) {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
@ -195,16 +208,12 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
if (exception == null) {
LOG.debug("Directory is successfully created for Key: {} in " +
"volume/bucket:{}/{}", keyName, volumeName, bucketName);
omResponse.setCreateDirectoryResponse(
CreateDirectoryResponse.newBuilder());
return new OMDirectoryCreateResponse(dirKeyInfo,
omResponse.build());
return omClientResponse;
} else {
LOG.error("CreateDirectory failed for Key: {} in volume/bucket:{}/{}",
keyName, volumeName, bucketName, exception);
omMetrics.incNumCreateDirectoryFails();
return new OMDirectoryCreateResponse(null,
createErrorOMResponse(omResponse, exception));
return omClientResponse;
}
}


@ -30,6 +30,7 @@ import javax.annotation.Nonnull;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -147,7 +148,8 @@ public class OMFileCreateRequest extends OMKeyRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest();
KeyArgs keyArgs = createFileRequest.getKeyArgs();
@ -174,7 +176,7 @@ public class OMFileCreateRequest extends OMKeyRequest {
OmKeyInfo omKeyInfo = null;
final List<OmKeyLocationInfo> locations = new ArrayList<>();
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -265,19 +267,31 @@ public class OMFileCreateRequest extends OMKeyRequest {
keyName), keyArgs.getDataSize(), locations,
encryptionInfo.orNull());
omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo,
locations, encryptionInfo.orNull(), exception,
createFileRequest.getClientID(), transactionLogIndex, volumeName,
bucketName, keyName, ozoneManager,
OMAction.CREATE_FILE);
} catch (IOException ex) {
exception = ex;
omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo,
locations, encryptionInfo.orNull(), exception,
createFileRequest.getClientID(), transactionLogIndex,
volumeName, bucketName, keyName, ozoneManager,
OMAction.CREATE_FILE);
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredLock) {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
}
}
return prepareCreateKeyResponse(keyArgs, omKeyInfo, locations,
encryptionInfo.orNull(), exception, createFileRequest.getClientID(),
transactionLogIndex, volumeName, bucketName, keyName, ozoneManager,
OMAction.CREATE_FILE);
return omClientResponse;
}


@ -25,6 +25,7 @@ import java.util.Map;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -136,7 +137,8 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
OzoneManagerProtocolProtos.AllocateBlockRequest allocateBlockRequest =
getOmRequest().getAllocateBlockRequest();
@ -165,6 +167,8 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
OzoneManagerProtocolProtos.Type.AllocateBlock).setStatus(
OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
IOException exception = null;
OmKeyInfo omKeyInfo = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -172,39 +176,17 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
volumeName, bucketName, keyName);
}
} catch (IOException ex) {
LOG.error("AllocateBlock failed for Key: {} in volume/bucket:{}/{}",
keyName, bucketName, volumeName, ex);
omMetrics.incNumBlockAllocateCallFails();
auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap,
ex, getOmRequest().getUserInfo()));
return new OMAllocateBlockResponse(null, -1L,
createErrorOMResponse(omResponse, ex));
}
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
try {
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
validateBucketAndVolume(omMetadataManager, volumeName,
bucketName);
} catch (IOException ex) {
LOG.error("AllocateBlock failed for Key: {} in volume/bucket:{}/{}",
keyName, bucketName, volumeName, ex);
omMetrics.incNumBlockAllocateCallFails();
auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap,
ex, getOmRequest().getUserInfo()));
return new OMAllocateBlockResponse(null, -1L,
createErrorOMResponse(omResponse, ex));
}
String openKey = omMetadataManager.getOpenKey(
volumeName, bucketName, keyName, clientID);
String openKey = omMetadataManager.getOpenKey(
volumeName, bucketName, keyName, clientID);
IOException exception = null;
OmKeyInfo omKeyInfo = null;
// Here we don't acquire bucket/volume lock because for a single client
// allocateBlock is called in serial fashion.
// Here we don't acquire bucket/volume lock because for a single client
// allocateBlock is called in serial fashion.
try {
omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
if (omKeyInfo == null) {
throw new OMException("Open Key not found " + openKey, KEY_NOT_FOUND);
@ -229,18 +211,23 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap,
exception, getOmRequest().getUserInfo()));
OMClientResponse omClientResponse = null;
if (exception == null) {
omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder()
.setKeyLocation(blockLocation).build());
return new OMAllocateBlockResponse(omKeyInfo, clientID,
omResponse.build());
omClientResponse = new OMAllocateBlockResponse(omKeyInfo,
clientID, omResponse.build());
} else {
omMetrics.incNumBlockAllocateCallFails();
return new OMAllocateBlockResponse(null, -1L,
omClientResponse = new OMAllocateBlockResponse(null, -1L,
createErrorOMResponse(omResponse, exception));
}
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
return omClientResponse;
}
}


@ -25,6 +25,7 @@ import java.util.stream.Collectors;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -38,7 +39,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse;
import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.CommitKeyRequest;
@ -87,7 +87,8 @@ public class OMKeyCommitRequest extends OMKeyRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest();
@ -109,6 +110,11 @@ public class OMKeyCommitRequest extends OMKeyRequest {
OzoneManagerProtocolProtos.Type.CommitKey).setStatus(
OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
IOException exception = null;
OmKeyInfo omKeyInfo = null;
OMClientResponse omClientResponse = null;
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -116,32 +122,20 @@ public class OMKeyCommitRequest extends OMKeyRequest {
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
volumeName, bucketName, keyName);
}
} catch (IOException ex) {
LOG.error("CommitKey failed for Key: {} in volume/bucket:{}/{}",
keyName, bucketName, volumeName, ex);
omMetrics.incNumKeyCommitFails();
auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
ex, getOmRequest().getUserInfo()));
return new OMKeyCreateResponse(null, -1L,
createErrorOMResponse(omResponse, ex));
}
List<OmKeyLocationInfo> locationInfoList = commitKeyArgs
.getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf)
.collect(Collectors.toList());
List<OmKeyLocationInfo> locationInfoList = commitKeyArgs
.getKeyLocationsList().stream()
.map(OmKeyLocationInfo::getFromProtobuf)
.collect(Collectors.toList());
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
String dbOzoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
keyName);
String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName,
keyName, commitKeyRequest.getClientID());
String dbOzoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
keyName);
String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName,
keyName, commitKeyRequest.getClientID());
omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName,
bucketName);
omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName,
bucketName);
IOException exception = null;
OmKeyInfo omKeyInfo = null;
try {
validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey);
if (omKeyInfo == null) {
@ -164,9 +158,20 @@ public class OMKeyCommitRequest extends OMKeyRequest {
new CacheKey<>(dbOzoneKey),
new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
omResponse.setCommitKeyResponse(CommitKeyResponse.newBuilder().build());
omClientResponse =
new OMKeyCommitResponse(omKeyInfo, commitKeyRequest.getClientID(),
omResponse.build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMKeyCommitResponse(null, -1L,
createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
}
@ -188,13 +193,12 @@ public class OMKeyCommitRequest extends OMKeyRequest {
if (omKeyInfo.getKeyLocationVersions().size() == 1) {
omMetrics.incNumKeys();
}
return new OMKeyCommitResponse(omKeyInfo, commitKeyRequest.getClientID(),
omResponse.build());
return omClientResponse;
} else {
LOG.error("CommitKey failed for Key: {} in volume/bucket:{}/{}",
keyName, bucketName, volumeName, exception);
omMetrics.incNumKeyCommitFails();
return new OMKeyCommitResponse(null, -1L,
createErrorOMResponse(omResponse, exception));
return omClientResponse;
}
}


@ -25,6 +25,7 @@ import java.util.stream.Collectors;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -140,7 +141,8 @@ public class OMKeyCreateRequest extends OMKeyRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest();
@ -159,6 +161,7 @@ public class OMKeyCreateRequest extends OMKeyRequest {
Optional<FileEncryptionInfo> encryptionInfo = Optional.absent();
IOException exception = null;
boolean acquireLock = false;
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -182,22 +185,29 @@ public class OMKeyCreateRequest extends OMKeyRequest {
omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs,
omMetadataManager.getOzoneKey(volumeName, bucketName, keyName),
keyArgs.getDataSize(), locations, encryptionInfo.orNull());
omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo,
locations, encryptionInfo.orNull(), exception,
createKeyRequest.getClientID(), transactionLogIndex, volumeName,
bucketName, keyName, ozoneManager, OMAction.ALLOCATE_KEY);
} catch (IOException ex) {
LOG.error("Key open failed for volume:{} bucket:{} key:{}",
volumeName, bucketName, keyName, ex);
exception = ex;
omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo, locations,
encryptionInfo.orNull(), exception, createKeyRequest.getClientID(),
transactionLogIndex, volumeName, bucketName, keyName, ozoneManager,
OMAction.ALLOCATE_KEY);
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquireLock) {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
}
}
return prepareCreateKeyResponse(keyArgs, omKeyInfo, locations,
encryptionInfo.orNull(), exception, createKeyRequest.getClientID(),
transactionLogIndex, volumeName, bucketName, keyName, ozoneManager,
OMAction.ALLOCATE_KEY);
return omClientResponse;
}
}


@ -22,6 +22,7 @@ import java.io.IOException;
import java.util.Map;
import com.google.common.base.Optional;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -34,7 +35,6 @@ import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@ -82,7 +82,8 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest();
OzoneManagerProtocolProtos.KeyArgs deleteKeyArgs =
@ -104,7 +105,10 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
OzoneManagerProtocolProtos.OMResponse.newBuilder().setCmdType(
OzoneManagerProtocolProtos.Type.DeleteKey).setStatus(
OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
IOException exception = null;
boolean acquiredLock = false;
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -112,32 +116,18 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.DELETE,
volumeName, bucketName, keyName);
}
} catch (IOException ex) {
LOG.error("Delete failed for Key: {} in volume/bucket:{}/{}",
keyName, bucketName, volumeName, ex);
omMetrics.incNumKeyDeleteFails();
auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap,
ex, userInfo));
return new OMKeyCreateResponse(null, -1L,
createErrorOMResponse(omResponse, ex));
}
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
String objectKey = omMetadataManager.getOzoneKey(
volumeName, bucketName, keyName);
String objectKey = omMetadataManager.getOzoneKey(
volumeName, bucketName, keyName);
omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName,
bucketName);
IOException exception = null;
OmKeyInfo omKeyInfo = null;
try {
acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK,
volumeName, bucketName);
// Not doing bucket/volume checks here. In this way we can avoid db
// checks for them.
// TODO: Once we have volume/bucket full cache, we can add
// them back, as these checks will be inexpensive at that time.
omKeyInfo = omMetadataManager.getKeyTable().get(objectKey);
OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(objectKey);
if (omKeyInfo == null) {
throw new OMException("Key not found", KEY_NOT_FOUND);
@ -154,11 +144,25 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
// validation, so we don't need to add to cache.
// TODO: Revisit if we need it later.
omClientResponse = new OMKeyDeleteResponse(omKeyInfo,
deleteKeyArgs.getModificationTime(),
omResponse.setDeleteKeyResponse(
DeleteKeyResponse.newBuilder()).build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMKeyDeleteResponse(null, 0,
createErrorOMResponse(omResponse, exception));
} finally {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredLock) {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
}
}
// Performing audit logging outside of the lock.
@ -168,14 +172,10 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
// return response.
if (exception == null) {
omMetrics.decNumKeys();
return new OMKeyDeleteResponse(
omKeyInfo, deleteKeyArgs.getModificationTime(),
omResponse.setDeleteKeyResponse(
DeleteKeyResponse.newBuilder()).build());
return omClientResponse;
} else {
omMetrics.incNumKeyDeleteFails();
return new OMKeyDeleteResponse(null, 0,
createErrorOMResponse(omResponse, exception));
return omClientResponse;
}
}


@ -1,6 +1,7 @@
package org.apache.hadoop.ozone.om.request.key;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@ -28,7 +29,8 @@ public class OMKeyPurgeRequest extends OMKeyRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
PurgeKeysRequest purgeKeysRequest = getOmRequest().getPurgeKeysRequest();
List<String> purgeKeysList = purgeKeysRequest.getKeysList();
@ -43,6 +45,11 @@ public class OMKeyPurgeRequest extends OMKeyRequest {
.setSuccess(true)
.build();
return new OMKeyPurgeResponse(purgeKeysList, omResponse);
OMClientResponse omClientResponse = new OMKeyPurgeResponse(purgeKeysList,
omResponse);
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
return omClientResponse;
}
}


@ -23,6 +23,7 @@ import java.util.Map;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -83,7 +84,8 @@ public class OMKeyRenameRequest extends OMKeyRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest();
@ -107,6 +109,11 @@ public class OMKeyRenameRequest extends OMKeyRequest {
OzoneManagerProtocolProtos.Type.CommitKey).setStatus(
OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
boolean acquiredLock = false;
OMClientResponse omClientResponse = null;
IOException exception = null;
OmKeyInfo fromKeyValue = null;
try {
if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
throw new OMException("Key name is empty",
@ -118,25 +125,9 @@ public class OMKeyRenameRequest extends OMKeyRequest {
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
volumeName, bucketName, fromKeyName);
}
} catch (IOException ex) {
LOG.error(
"Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. "
+ "Key: {} not found.", volumeName, bucketName, fromKeyName,
toKeyName, fromKeyName);
omMetrics.incNumKeyRenameFails();
auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap,
ex, getOmRequest().getUserInfo()));
return new OMKeyRenameResponse(null, null, null,
createErrorOMResponse(omResponse, ex));
}
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName,
bucketName);
IOException exception = null;
OmKeyInfo fromKeyValue = null;
try {
acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK,
volumeName, bucketName);
// Not doing bucket/volume checks here. In this way we can avoid db
// checks for them.
@ -177,11 +168,23 @@ public class OMKeyRenameRequest extends OMKeyRequest {
keyTable.addCacheEntry(new CacheKey<>(toKey),
new CacheValue<>(Optional.of(fromKeyValue), transactionLogIndex));
omClientResponse = new OMKeyRenameResponse(fromKeyValue, toKeyName,
fromKeyName, omResponse.setRenameKeyResponse(
RenameKeyResponse.newBuilder()).build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMKeyRenameResponse(null, null, null,
createErrorOMResponse(omResponse, exception));
} finally {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredLock) {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
}
}
@ -189,16 +192,16 @@ public class OMKeyRenameRequest extends OMKeyRequest {
exception, getOmRequest().getUserInfo()));
if (exception == null) {
return new OMKeyRenameResponse(fromKeyValue, toKeyName, fromKeyName,
omResponse.setRenameKeyResponse(
RenameKeyResponse.newBuilder()).build());
LOG.debug("Rename Key is successfully completed for volume:{} bucket:{}" +
" fromKey:{} toKey:{}. ", volumeName, bucketName, fromKeyName,
toKeyName);
return omClientResponse;
} else {
LOG.error(
"Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. "
+ "Key: {} not found.", volumeName, bucketName, fromKeyName,
toKeyName, fromKeyName);
return new OMKeyRenameResponse(null, null, null,
createErrorOMResponse(omResponse, exception));
return omClientResponse;
}
}
}


@ -25,6 +25,7 @@ import java.util.Map;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -113,7 +114,8 @@ public class S3BucketCreateRequest extends OMVolumeRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
S3CreateBucketRequest s3CreateBucketRequest =
getOmRequest().getCreateS3BucketRequest();
@ -141,15 +143,13 @@ public class S3BucketCreateRequest extends OMVolumeRequest {
// ahead and create a bucket.
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
IOException exception = null;
VolumeList volumeList = null;
OmVolumeArgs omVolumeArgs = null;
OmBucketInfo omBucketInfo = null;
boolean volumeCreated = false;
boolean acquiredVolumeLock = false;
boolean acquiredUserLock = false;
boolean acquiredS3Lock = false;
String volumeName = formatOzoneVolumeName(userName);
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -167,6 +167,7 @@ public class S3BucketCreateRequest extends OMVolumeRequest {
OMException.ResultCodes.S3_BUCKET_ALREADY_EXISTS);
}
OMVolumeCreateResponse omVolumeCreateResponse = null;
try {
acquiredVolumeLock =
omMetadataManager.getLock().acquireLock(VOLUME_LOCK, volumeName);
@ -176,16 +177,18 @@ public class S3BucketCreateRequest extends OMVolumeRequest {
// ozone volume.
String volumeKey = omMetadataManager.getVolumeKey(volumeName);
if (!omMetadataManager.getVolumeTable().isExist(volumeKey)) {
omVolumeArgs = createOmVolumeArgs(volumeName, userName,
OmVolumeArgs omVolumeArgs = createOmVolumeArgs(volumeName, userName,
s3CreateBucketRequest.getS3CreateVolumeInfo()
.getCreationTime());
volumeList = omMetadataManager.getUserTable().get(
VolumeList volumeList = omMetadataManager.getUserTable().get(
omMetadataManager.getUserKey(userName));
volumeList = addVolumeToOwnerList(volumeList,
volumeName, userName, ozoneManager.getMaxUserVolumeCount());
createVolume(omMetadataManager, omVolumeArgs, volumeList, volumeKey,
omMetadataManager.getUserKey(userName), transactionLogIndex);
volumeCreated = true;
omVolumeCreateResponse = new OMVolumeCreateResponse(omVolumeArgs,
volumeList, omResponse.build());
}
} finally {
if (acquiredUserLock) {
@ -198,7 +201,8 @@ public class S3BucketCreateRequest extends OMVolumeRequest {
// check if ozone bucket exists, if it does not exist create ozone
// bucket
omBucketInfo = createBucket(omMetadataManager, volumeName, s3BucketName,
OmBucketInfo omBucketInfo = createBucket(omMetadataManager, volumeName,
s3BucketName,
s3CreateBucketRequest.getS3CreateVolumeInfo().getCreationTime(),
transactionLogIndex);
@ -207,9 +211,25 @@ public class S3BucketCreateRequest extends OMVolumeRequest {
new CacheKey<>(s3BucketName), new CacheValue<>(
Optional.of(formatS3MappingName(volumeName, s3BucketName)),
transactionLogIndex));
OMBucketCreateResponse omBucketCreateResponse =
new OMBucketCreateResponse(omBucketInfo, omResponse.build());
omClientResponse = new S3BucketCreateResponse(omVolumeCreateResponse,
omBucketCreateResponse, s3BucketName,
formatS3MappingName(volumeName, s3BucketName),
omResponse.setCreateS3BucketResponse(
S3CreateBucketResponse.newBuilder()).build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new S3BucketCreateResponse(null, null, null, null,
createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredS3Lock) {
omMetadataManager.getLock().releaseLock(S3_BUCKET_LOCK, s3BucketName);
}
@ -227,25 +247,16 @@ public class S3BucketCreateRequest extends OMVolumeRequest {
OMVolumeCreateResponse omVolumeCreateResponse = null;
if (volumeCreated) {
omMetrics.incNumVolumes();
omVolumeCreateResponse = new OMVolumeCreateResponse(omVolumeArgs,
volumeList, omResponse.build());
}
omMetrics.incNumBuckets();
OMBucketCreateResponse omBucketCreateResponse =
new OMBucketCreateResponse(omBucketInfo, omResponse.build());
omMetrics.incNumS3Buckets();
return new S3BucketCreateResponse(omVolumeCreateResponse,
omBucketCreateResponse, s3BucketName,
formatS3MappingName(volumeName, s3BucketName),
omResponse.setCreateS3BucketResponse(
S3CreateBucketResponse.newBuilder()).build());
return omClientResponse;
} else {
LOG.error("S3Bucket Creation Failed for userName: {}, s3BucketName {}, " +
"VolumeName {}", userName, s3BucketName, volumeName);
omMetrics.incNumS3BucketCreateFails();
return new S3BucketCreateResponse(null, null, null, null,
createErrorOMResponse(omResponse, exception));
return omClientResponse;
}
}


@ -23,6 +23,7 @@ import java.util.HashMap;
import java.util.Map;
import com.google.common.base.Optional;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -86,7 +87,8 @@ public class S3BucketDeleteRequest extends OMVolumeRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
S3DeleteBucketRequest s3DeleteBucketRequest =
getOmRequest().getDeleteS3BucketRequest();
@ -103,6 +105,7 @@ public class S3BucketDeleteRequest extends OMVolumeRequest {
boolean acquiredBucketLock = false;
String volumeName = null;
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -137,9 +140,22 @@ public class S3BucketDeleteRequest extends OMVolumeRequest {
new CacheKey<>(s3BucketName),
new CacheValue<>(Optional.absent(), transactionLogIndex));
}
omResponse.setDeleteS3BucketResponse(
OzoneManagerProtocolProtos.S3DeleteBucketResponse.newBuilder());
omClientResponse = new S3BucketDeleteResponse(s3BucketName, volumeName,
omResponse.build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new S3BucketDeleteResponse(null, null,
createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredBucketLock) {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
s3BucketName);
@ -161,16 +177,13 @@ public class S3BucketDeleteRequest extends OMVolumeRequest {
LOG.debug("S3Bucket {} successfully deleted", s3BucketName);
omMetrics.decNumS3Buckets();
omMetrics.decNumBuckets();
omResponse.setDeleteS3BucketResponse(
OzoneManagerProtocolProtos.S3DeleteBucketResponse.newBuilder());
return new S3BucketDeleteResponse(s3BucketName, volumeName,
omResponse.build());
return omClientResponse;
} else {
LOG.error("S3Bucket Deletion failed for S3Bucket:{}", s3BucketName,
exception);
omMetrics.incNumS3BucketDeleteFails();
return new S3BucketDeleteResponse(null, null,
createErrorOMResponse(omResponse, exception));
return omClientResponse;
}
}


@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponse;
@ -84,7 +85,8 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
MultipartInfoInitiateRequest multipartInfoInitiateRequest =
getOmRequest().getInitiateMultiPartUploadRequest();
@ -104,6 +106,12 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
IOException exception = null;
OmMultipartKeyInfo multipartKeyInfo = null;
OmKeyInfo omKeyInfo = null;
OMResponse.Builder omResponse = OMResponse.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
.setStatus(OzoneManagerProtocolProtos.Status.OK)
.setSuccess(true);
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -167,9 +175,26 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
new CacheKey<>(multipartKey),
new CacheValue<>(Optional.of(multipartKeyInfo), transactionLogIndex));
omClientResponse =
new S3InitiateMultipartUploadResponse(multipartKeyInfo, omKeyInfo,
omResponse.setInitiateMultiPartUploadResponse(
MultipartInfoInitiateResponse.newBuilder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
.setKeyName(keyName)
.setMultipartUploadID(keyArgs.getMultipartUploadID()))
.build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new S3InitiateMultipartUploadResponse(null, null,
createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredBucketLock) {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
@ -177,10 +202,6 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
}
OMResponse.Builder omResponse = OMResponse.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
.setStatus(OzoneManagerProtocolProtos.Status.OK)
.setSuccess(true);
// audit log
auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
@ -192,22 +213,14 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
"Volume/Bucket {}/{} is successfully completed", keyName,
volumeName, bucketName);
return new S3InitiateMultipartUploadResponse(multipartKeyInfo, omKeyInfo,
omResponse.setInitiateMultiPartUploadResponse(
MultipartInfoInitiateResponse.newBuilder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
.setKeyName(keyName)
.setMultipartUploadID(keyArgs.getMultipartUploadID()))
.build());
return omClientResponse;
} else {
ozoneManager.getMetrics().incNumInitiateMultipartUploadFails();
LOG.error("S3 InitiateMultipart Upload request for Key {} in " +
"Volume/Bucket {}/{} is failed", keyName, volumeName, bucketName,
exception);
return new S3InitiateMultipartUploadResponse(null, null,
createErrorOMResponse(omResponse, exception));
return omClientResponse;
}
}
}


@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart;
import java.io.IOException;
import com.google.common.base.Optional;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -77,7 +78,8 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
OzoneManagerProtocolProtos.KeyArgs keyArgs =
getOmRequest().getAbortMultiPartUploadRequest().getKeyArgs();
@ -90,6 +92,11 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
IOException exception = null;
OmMultipartKeyInfo multipartKeyInfo = null;
String multipartKey = null;
OMResponse.Builder omResponse = OMResponse.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.AbortMultiPartUpload)
.setStatus(OzoneManagerProtocolProtos.Status.OK)
.setSuccess(true);
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -132,9 +139,21 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
new CacheValue<>(Optional.absent(), transactionLogIndex));
}
omClientResponse = new S3MultipartUploadAbortResponse(multipartKey,
keyArgs.getModificationTime(), multipartKeyInfo,
omResponse.setAbortMultiPartUploadResponse(
MultipartUploadAbortResponse.newBuilder()).build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new S3MultipartUploadAbortResponse(multipartKey,
keyArgs.getModificationTime(), multipartKeyInfo,
createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredLock) {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
@ -146,28 +165,18 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
OMAction.ABORT_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs),
exception, getOmRequest().getUserInfo()));
OMResponse.Builder omResponse = OMResponse.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.AbortMultiPartUpload)
.setStatus(OzoneManagerProtocolProtos.Status.OK)
.setSuccess(true);
if (exception == null) {
LOG.debug("Abort Multipart request is successfully completed for " +
"KeyName {} in VolumeName/Bucket {}/{}", keyName, volumeName,
bucketName);
return new S3MultipartUploadAbortResponse(multipartKey,
keyArgs.getModificationTime(), multipartKeyInfo,
omResponse.setAbortMultiPartUploadResponse(
MultipartUploadAbortResponse.newBuilder()).build());
} else {
ozoneManager.getMetrics().incNumAbortMultipartUploadFails();
LOG.error("Abort Multipart request is failed for " +
"KeyName {} in VolumeName/Bucket {}/{}", keyName, volumeName,
bucketName, exception);
return new S3MultipartUploadAbortResponse(multipartKey,
keyArgs.getModificationTime(), multipartKeyInfo,
createErrorOMResponse(omResponse, exception));
}
return omClientResponse;
}
}


@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.s3.multipart
@ -44,6 +45,8 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.db.cache.CacheKey;
import org.apache.hadoop.utils.db.cache.CacheValue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.stream.Collectors;
@ -56,6 +59,10 @@ import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_L
*/
public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
private static final Logger LOG =
LoggerFactory.getLogger(S3MultipartUploadCommitPartRequest.class);
public S3MultipartUploadCommitPartRequest(OMRequest omRequest) {
super(omRequest);
}
@ -74,7 +81,8 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
getOmRequest().getCommitMultiPartUploadRequest();
@ -89,13 +97,19 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
ozoneManager.getMetrics().incNumCommitMultipartUploadParts();
boolean acquiredLock = false;
OmMultipartKeyInfo multipartKeyInfo = null;
OmKeyInfo omKeyInfo = null;
String openKey = null;
String multipartKey = null;
OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo = null;
IOException exception = null;
String partName = null;
OMResponse.Builder omResponse = OMResponse.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload)
.setStatus(OzoneManagerProtocolProtos.Status.OK)
.setSuccess(true);
OMClientResponse omClientResponse = null;
OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo = null;
String openKey = null;
OmKeyInfo omKeyInfo = null;
String multipartKey = null;
OmMultipartKeyInfo multipartKeyInfo = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -111,11 +125,11 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
String uploadID = keyArgs.getMultipartUploadID();
multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName,
keyName, uploadID);
multipartKey = omMetadataManager.getMultipartKey(volumeName,
bucketName, keyName, uploadID);
multipartKeyInfo = omMetadataManager
.getMultipartInfoTable().get(multipartKey);
multipartKeyInfo =
omMetadataManager.getMultipartInfoTable().get(multipartKey);
long clientID = multipartCommitUploadPartRequest.getClientID();
@ -124,7 +138,6 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
if (omKeyInfo == null) {
throw new OMException("Failed to commit Multipart Upload key, as " +
openKey + "entry is not found in the openKey table", KEY_NOT_FOUND);
@ -180,9 +193,23 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
new CacheValue<>(Optional.absent(), transactionLogIndex));
}
omResponse.setCommitMultiPartUploadResponse(
MultipartCommitUploadPartResponse.newBuilder().setPartName(partName));
omClientResponse = new S3MultipartUploadCommitPartResponse(multipartKey,
openKey, keyArgs.getModificationTime(), omKeyInfo, multipartKeyInfo,
oldPartKeyInfo, omResponse.build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new S3MultipartUploadCommitPartResponse(multipartKey,
openKey, keyArgs.getModificationTime(), omKeyInfo, multipartKeyInfo,
oldPartKeyInfo, createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredLock) {
omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
bucketName);
@ -194,24 +221,16 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY, buildKeyArgsAuditMap(keyArgs),
exception, getOmRequest().getUserInfo()));
OMResponse.Builder omResponse = OMResponse.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload)
.setStatus(OzoneManagerProtocolProtos.Status.OK)
.setSuccess(true);
if (exception == null) {
omResponse.setCommitMultiPartUploadResponse(
MultipartCommitUploadPartResponse.newBuilder().setPartName(partName));
return new S3MultipartUploadCommitPartResponse(multipartKey, openKey,
keyArgs.getModificationTime(), omKeyInfo, multipartKeyInfo,
oldPartKeyInfo, omResponse.build());
} else {
ozoneManager.getMetrics().incNumCommitMultipartUploadPartFails();
return new S3MultipartUploadCommitPartResponse(multipartKey, openKey,
keyArgs.getModificationTime(), omKeyInfo, multipartKeyInfo,
oldPartKeyInfo, createErrorOMResponse(omResponse, exception));
LOG.debug("MultipartUpload Commit is successfully for Key:{} in " +
"Volume/Bucket {}/{}", keyName, volumeName, bucketName);
} else {
LOG.error("MultipartUpload Commit is failed for Key:{} in " +
"Volume/Bucket {}/{}", keyName, volumeName, bucketName, exception);
ozoneManager.getMetrics().incNumCommitMultipartUploadPartFails();
}
return omClientResponse;
}
}

View File

@ -19,12 +19,14 @@
package org.apache.hadoop.ozone.om.request.volume;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.OMAction;
import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@ -84,7 +86,8 @@ public class OMVolumeCreateRequest extends OMVolumeRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
CreateVolumeRequest createVolumeRequest =
getOmRequest().getCreateVolumeRequest();
@ -101,41 +104,34 @@ public class OMVolumeCreateRequest extends OMVolumeRequest {
OzoneManagerProtocolProtos.Type.CreateVolume).setStatus(
OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
OmVolumeArgs omVolumeArgs = null;
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
AuditLogger auditLogger = ozoneManager.getAuditLogger();
OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
// Doing this here so that the protobuf conversion happens outside of the lock.
boolean acquiredVolumeLock = false;
boolean acquiredUserLock = false;
IOException exception = null;
OMClientResponse omClientResponse = null;
OmVolumeArgs omVolumeArgs = null;
Map<String, String> auditMap = new HashMap<>();
try {
omVolumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
auditMap = omVolumeArgs.toAuditMap();
// check Acl
if (ozoneManager.getAclsEnabled()) {
checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME,
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.CREATE, volume,
null, null);
}
} catch (IOException ex) {
omMetrics.incNumVolumeCreateFails();
auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_VOLUME,
buildVolumeAuditMap(volume), ex, userInfo));
LOG.error("Volume creation failed for user:{} volume:{}", owner, volume,
ex);
return new OMVolumeCreateResponse(omVolumeArgs, null,
createErrorOMResponse(omResponse, ex));
}
VolumeList volumeList = null;
boolean acquiredUserLock = false;
IOException exception = null;
VolumeList volumeList = null;
// acquire lock.
acquiredVolumeLock = omMetadataManager.getLock().acquireLock(VOLUME_LOCK,
volume);
// acquire lock.
omMetadataManager.getLock().acquireLock(VOLUME_LOCK, volume);
try {
acquiredUserLock = omMetadataManager.getLock().acquireLock(USER_LOCK,
owner);
String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
OmVolumeArgs dbVolumeArgs =
@ -147,7 +143,12 @@ public class OMVolumeCreateRequest extends OMVolumeRequest {
volumeList = addVolumeToOwnerList(volumeList, volume, owner,
ozoneManager.getMaxUserVolumeCount());
createVolume(omMetadataManager, omVolumeArgs, volumeList, dbVolumeKey,
dbUserKey, transactionLogIndex);
dbUserKey, transactionLogIndex);
omResponse.setCreateVolumeResponse(CreateVolumeResponse.newBuilder()
.build());
omClientResponse = new OMVolumeCreateResponse(omVolumeArgs, volumeList,
omResponse.build());
LOG.debug("volume:{} successfully created", omVolumeArgs.getVolume());
} else {
LOG.debug("volume:{} already exists", omVolumeArgs.getVolume());
@ -157,34 +158,38 @@ public class OMVolumeCreateRequest extends OMVolumeRequest {
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMVolumeCreateResponse(null, null,
createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredUserLock) {
omMetadataManager.getLock().releaseLock(USER_LOCK, owner);
}
omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
if (acquiredVolumeLock) {
omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
}
}
// Performing audit logging outside of the lock.
auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_VOLUME,
omVolumeArgs.toAuditMap(), exception, userInfo));
auditLog(ozoneManager.getAuditLogger(),
buildAuditMessage(OMAction.CREATE_VOLUME, auditMap, exception,
getOmRequest().getUserInfo()));
// return response after releasing lock.
if (exception == null) {
LOG.debug("created volume:{} for user:{}", omVolumeArgs.getVolume(),
owner);
LOG.debug("created volume:{} for user:{}", volume, owner);
omMetrics.incNumVolumes();
omResponse.setCreateVolumeResponse(CreateVolumeResponse.newBuilder()
.build());
return new OMVolumeCreateResponse(omVolumeArgs, volumeList,
omResponse.build());
} else {
LOG.error("Volume creation failed for user:{} volume:{}", owner,
volumeInfo.getVolume(), exception);
volume, exception);
omMetrics.incNumVolumeCreateFails();
return new OMVolumeCreateResponse(omVolumeArgs, volumeList,
createErrorOMResponse(omResponse, exception));
}
return omClientResponse;
}
}

View File

@ -22,17 +22,16 @@ import java.io.IOException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.OMAction;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
@ -65,7 +64,8 @@ public class OMVolumeDeleteRequest extends OMVolumeRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
DeleteVolumeRequest deleteVolumeRequest =
getOmRequest().getDeleteVolumeRequest();
@ -80,9 +80,12 @@ public class OMVolumeDeleteRequest extends OMVolumeRequest {
OzoneManagerProtocolProtos.Type.DeleteVolume).setStatus(
OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
AuditLogger auditLogger = ozoneManager.getAuditLogger();
OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
boolean acquiredUserLock = false;
boolean acquiredVolumeLock = false;
IOException exception = null;
String owner = null;
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -90,25 +93,12 @@ public class OMVolumeDeleteRequest extends OMVolumeRequest {
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.DELETE, volume,
null, null);
}
} catch (IOException ex) {
LOG.error("Volume deletion failed for volume:{}", volume, ex);
omMetrics.incNumVolumeDeleteFails();
auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_VOLUME,
buildVolumeAuditMap(volume), ex, userInfo));
return new OMVolumeCreateResponse(null, null,
createErrorOMResponse(omResponse, ex));
}
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
OmVolumeArgs omVolumeArgs = null;
OzoneManagerProtocolProtos.VolumeList newVolumeList = null;
OmVolumeArgs omVolumeArgs = null;
String owner = null;
boolean acquiredUserLock = false;
IOException exception = null;
OzoneManagerProtocolProtos.VolumeList newVolumeList = null;
omMetadataManager.getLock().acquireLock(VOLUME_LOCK, volume);
try {
acquiredVolumeLock = omMetadataManager.getLock().acquireLock(VOLUME_LOCK,
volume);
owner = getVolumeInfo(omMetadataManager, volume).getOwnerName();
acquiredUserLock = omMetadataManager.getLock().acquireLock(USER_LOCK,
owner);
@ -134,34 +124,44 @@ public class OMVolumeDeleteRequest extends OMVolumeRequest {
new CacheKey<>(dbVolumeKey), new CacheValue<>(Optional.absent(),
transactionLogIndex));
omResponse.setDeleteVolumeResponse(
DeleteVolumeResponse.newBuilder().build());
omClientResponse = new OMVolumeDeleteResponse(volume, owner,
newVolumeList, omResponse.build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMVolumeDeleteResponse(null, null, null,
createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredUserLock) {
omMetadataManager.getLock().releaseLock(USER_LOCK, owner);
}
omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
if (acquiredVolumeLock) {
omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
}
}
// Performing audit logging outside of the lock.
auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_VOLUME,
buildVolumeAuditMap(volume), exception, userInfo));
auditLog(ozoneManager.getAuditLogger(),
buildAuditMessage(OMAction.DELETE_VOLUME, buildVolumeAuditMap(volume),
exception, getOmRequest().getUserInfo()));
// return response after releasing lock.
if (exception == null) {
LOG.debug("Volume deleted for user:{} volume:{}", owner, volume);
omMetrics.decNumVolumes();
omResponse.setDeleteVolumeResponse(
DeleteVolumeResponse.newBuilder().build());
return new OMVolumeDeleteResponse(volume, owner, newVolumeList,
omResponse.build());
} else {
LOG.error("Volume deletion failed for user:{} volume:{}",
owner, volume, exception);
omMetrics.incNumVolumeDeleteFails();
return new OMVolumeDeleteResponse(null, null, null,
createErrorOMResponse(omResponse, exception));
}
return omClientResponse;
}
@ -187,3 +187,4 @@ public class OMVolumeDeleteRequest extends OMVolumeRequest {
}
}

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.request.volume;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
@ -65,7 +66,8 @@ public class OMVolumeSetOwnerRequest extends OMVolumeRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
SetVolumePropertyRequest setVolumePropertyRequest =
getOmRequest().getSetVolumePropertyRequest();
@ -95,6 +97,13 @@ public class OMVolumeSetOwnerRequest extends OMVolumeRequest {
Map<String, String> auditMap = buildVolumeAuditMap(volume);
auditMap.put(OzoneConsts.OWNER, newOwner);
boolean acquiredUserLocks = false;
boolean acquiredVolumeLock = false;
IOException exception = null;
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
String oldOwner = null;
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -102,29 +111,21 @@ public class OMVolumeSetOwnerRequest extends OMVolumeRequest {
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL,
volume, null, null);
}
} catch (IOException ex) {
LOG.error("Changing volume ownership failed for user:{} volume:{}",
newOwner, volume);
omMetrics.incNumVolumeUpdateFails();
auditLog(auditLogger, buildAuditMessage(OMAction.SET_OWNER, auditMap,
ex, userInfo));
return new OMVolumeSetOwnerResponse(null, null, null, null,
createErrorOMResponse(omResponse, ex));
}
long maxUserVolumeCount = ozoneManager.getMaxUserVolumeCount();
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
String oldOwner = null;
OzoneManagerProtocolProtos.VolumeList oldOwnerVolumeList = null;
OzoneManagerProtocolProtos.VolumeList newOwnerVolumeList = null;
OmVolumeArgs omVolumeArgs = null;
IOException exception = null;
boolean acquiredUserLocks = false;
long maxUserVolumeCount = ozoneManager.getMaxUserVolumeCount();
String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
OzoneManagerProtocolProtos.VolumeList oldOwnerVolumeList = null;
OzoneManagerProtocolProtos.VolumeList newOwnerVolumeList = null;
OmVolumeArgs omVolumeArgs = null;
acquiredVolumeLock = omMetadataManager.getLock().acquireLock(VOLUME_LOCK,
volume);
omMetadataManager.getLock().acquireLock(VOLUME_LOCK, volume);
try {
omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey);
if (omVolumeArgs == null) {
@ -165,13 +166,28 @@ public class OMVolumeSetOwnerRequest extends OMVolumeRequest {
new CacheKey<>(dbVolumeKey),
new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
omResponse.setSetVolumePropertyResponse(
SetVolumePropertyResponse.newBuilder().build());
omClientResponse = new OMVolumeSetOwnerResponse(oldOwner,
oldOwnerVolumeList, newOwnerVolumeList, omVolumeArgs,
omResponse.build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMVolumeSetOwnerResponse(null, null, null, null,
createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquiredUserLocks) {
omMetadataManager.getLock().releaseMultiUserLock(newOwner, oldOwner);
}
omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
if (acquiredVolumeLock) {
omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
}
}
// Performing audit logging outside of the lock.
@ -182,16 +198,12 @@ public class OMVolumeSetOwnerRequest extends OMVolumeRequest {
if (exception == null) {
LOG.debug("Successfully changed Owner of Volume {} from {} -> {}", volume,
oldOwner, newOwner);
omResponse.setSetVolumePropertyResponse(
SetVolumePropertyResponse.newBuilder().build());
return new OMVolumeSetOwnerResponse(oldOwner, oldOwnerVolumeList,
newOwnerVolumeList, omVolumeArgs, omResponse.build());
} else {
LOG.error("Changing volume ownership failed for user:{} volume:{}",
newOwner, volume, exception);
omMetrics.incNumVolumeUpdateFails();
return new OMVolumeSetOwnerResponse(null, null, null, null,
createErrorOMResponse(omResponse, exception));
}
return omClientResponse;
}
}

View File

@ -23,6 +23,7 @@ import java.util.Map;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -66,7 +67,8 @@ public class OMVolumeSetQuotaRequest extends OMVolumeRequest {
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex) {
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
SetVolumePropertyRequest setVolumePropertyRequest =
getOmRequest().getSetVolumePropertyRequest();
@ -98,6 +100,10 @@ public class OMVolumeSetQuotaRequest extends OMVolumeRequest {
auditMap.put(OzoneConsts.QUOTA,
String.valueOf(setVolumePropertyRequest.getQuotaInBytes()));
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
IOException exception = null;
boolean acquireVolumeLock = false;
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
@ -105,24 +111,11 @@ public class OMVolumeSetQuotaRequest extends OMVolumeRequest {
OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE, volume,
null, null);
}
} catch (IOException ex) {
LOG.error("Changing volume quota failed for volume:{} quota:{}", volume,
setVolumePropertyRequest.getQuotaInBytes(), ex);
omMetrics.incNumVolumeUpdateFails();
auditLog(auditLogger, buildAuditMessage(OMAction.SET_QUOTA, auditMap,
ex, userInfo));
return new OMVolumeSetQuotaResponse(null,
createErrorOMResponse(omResponse, ex));
}
OmVolumeArgs omVolumeArgs = null;
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
IOException exception = null;
OmVolumeArgs omVolumeArgs = null;
omMetadataManager.getLock().acquireLock(VOLUME_LOCK, volume);
try {
acquireVolumeLock = omMetadataManager.getLock().acquireLock(VOLUME_LOCK,
volume);
String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey);
@ -138,10 +131,23 @@ public class OMVolumeSetQuotaRequest extends OMVolumeRequest {
new CacheKey<>(dbVolumeKey),
new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
omResponse.setSetVolumePropertyResponse(
SetVolumePropertyResponse.newBuilder().build());
omClientResponse = new OMVolumeSetQuotaResponse(omVolumeArgs,
omResponse.build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMVolumeSetQuotaResponse(null,
createErrorOMResponse(omResponse, exception));
} finally {
omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquireVolumeLock) {
omMetadataManager.getLock().releaseLock(VOLUME_LOCK, volume);
}
}
// Performing audit logging outside of the lock.
@ -150,17 +156,17 @@ public class OMVolumeSetQuotaRequest extends OMVolumeRequest {
// return response after releasing lock.
if (exception == null) {
omResponse.setSetVolumePropertyResponse(
SetVolumePropertyResponse.newBuilder().build());
return new OMVolumeSetQuotaResponse(omVolumeArgs, omResponse.build());
LOG.debug("Changing volume quota is successfully completed for volume: " +
"{} quota:{}", volume, setVolumePropertyRequest.getQuotaInBytes());
} else {
omMetrics.incNumVolumeUpdateFails();
LOG.error("Changing volume quota failed for volume:{} quota:{}", volume,
setVolumePropertyRequest.getQuotaInBytes(), exception);
return new OMVolumeSetQuotaResponse(null,
createErrorOMResponse(omResponse, exception));
}
return omClientResponse;
}
}

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.om.response;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.OMMetadataManager;
@ -32,6 +33,7 @@ import org.apache.hadoop.utils.db.BatchOperation;
public abstract class OMClientResponse {
private OMResponse omResponse;
private CompletableFuture<Void> flushFuture = null;
public OMClientResponse(OMResponse omResponse) {
Preconditions.checkNotNull(omResponse);
@ -55,5 +57,13 @@ public abstract class OMClientResponse {
return omResponse;
}
public void setFlushFuture(CompletableFuture<Void> flushFuture) {
this.flushFuture = flushFuture;
}
public CompletableFuture<Void> getFlushFuture() {
return flushFuture;
}
}
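A short, hypothetical illustration (not code from this commit) of why the response keeps the flush future around: a caller, such as the non-HA request path, can block on it until the double-buffer flush has reached the DB, assuming the flush daemon completes the future once the batch is written.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

public final class FlushFutureSketch {
  public static void main(String[] args) throws Exception {
    CompletableFuture<Void> flushFuture = new CompletableFuture<>();

    // Stand-in for the double-buffer flush daemon: it completes the future
    // once the batched transactions have been written out.
    Thread flushDaemon = new Thread(() -> {
      try {
        TimeUnit.MILLISECONDS.sleep(100);   // pretend to flush a batch
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      flushFuture.complete(null);
    });
    flushDaemon.start();

    // Stand-in for the caller holding the response: wait until the response
    // has been durably flushed before replying to the client.
    flushFuture.get();
    System.out.println("response flushed, safe to reply");
  }
}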

View File

@ -79,14 +79,7 @@ public class OzoneManagerHARequestHandlerImpl
OzoneManagerRatisUtils.createClientRequest(omRequest);
OMClientResponse omClientResponse =
omClientRequest.validateAndUpdateCache(getOzoneManager(),
transactionLogIndex);
// Add OMClient Response to double buffer.
// Each OMClient Response should handle what needs to be done in error
// case.
ozoneManagerDoubleBuffer.add(omClientResponse, transactionLogIndex);
transactionLogIndex, ozoneManagerDoubleBuffer::add);
return omClientResponse.getOMResponse();
default:
// As all request types are not changed yet, we need to call handle

View File

@ -22,6 +22,7 @@ import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.NotLeaderException;
import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer;
import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
import org.apache.hadoop.ozone.om.request.OMClientRequest;
@ -52,6 +53,7 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements
private final RequestHandler handler;
private final boolean isRatisEnabled;
private final OzoneManager ozoneManager;
private final OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer;
/**
* Constructs an instance of the server handler.
@ -65,6 +67,12 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements
handler = new OzoneManagerRequestHandler(impl);
this.omRatisServer = ratisServer;
this.isRatisEnabled = enableRatis;
this.ozoneManagerDoubleBuffer =
new OzoneManagerDoubleBuffer(ozoneManager.getMetadataManager(), (i) -> {
// Do nothing.
// For non-HA OM there is no need to save the transaction index,
// because the caller waits until the double buffer flushes the DB to disk.
}, isRatisEnabled);
}
@ -191,4 +199,10 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements
private OMResponse submitRequestDirectlyToOM(OMRequest request) {
return handler.handle(request);
}
public void stop() {
if (!isRatisEnabled) {
ozoneManagerDoubleBuffer.stop();
}
}
}

View File

@ -0,0 +1,83 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.ozone.om.request.bucket;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
* Base test class for Bucket request.
*/
@SuppressWarnings("visibilityModifier")
public class TestBucketRequest {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
protected OzoneManager ozoneManager;
protected OMMetrics omMetrics;
protected OMMetadataManager omMetadataManager;
protected AuditLogger auditLogger;
// A no-op ozoneManagerDoubleBufferHelper; these tests do not need a real double buffer.
protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
((response, transactionIndex) -> {
return null;
});
@Before
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
auditLogger = Mockito.mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
}
@After
public void stop() {
omMetrics.unRegister();
Mockito.framework().clearInlineMocks();
}
}

View File

@ -21,22 +21,11 @@ package org.apache.hadoop.ozone.om.request.bucket;
import java.util.UUID;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.junit.Assert;
import org.junit.Test;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@ -50,44 +39,10 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.util.Time;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
* Tests OMBucketCreateRequest class, which handles CreateBucket request.
*/
public class TestOMBucketCreateRequest {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private OzoneManager ozoneManager;
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
private AuditLogger auditLogger;
@Before
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
auditLogger = Mockito.mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
}
@After
public void stop() {
omMetrics.unRegister();
}
public class TestOMBucketCreateRequest extends TestBucketRequest {
@Test
public void testPreExecute() throws Exception {
@ -129,7 +84,8 @@ public class TestOMBucketCreateRequest {
Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey));
OMClientResponse omClientResponse =
omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1);
omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
OMResponse omResponse = omClientResponse.getOMResponse();
Assert.assertNotNull(omResponse.getCreateBucketResponse());
@ -155,7 +111,8 @@ public class TestOMBucketCreateRequest {
// Try create same bucket again
OMClientResponse omClientResponse =
omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 2);
omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 2,
ozoneManagerDoubleBufferHelper);
OMResponse omResponse = omClientResponse.getOMResponse();
Assert.assertNotNull(omResponse.getCreateBucketResponse());
@ -192,7 +149,8 @@ public class TestOMBucketCreateRequest {
OMClientResponse omClientResponse =
omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1);
omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
// Now that validateAndUpdateCache has added an entry to the cache, get
// should return a non-null value.

View File

@ -21,22 +21,8 @@ package org.apache.hadoop.ozone.om.request.bucket;
import java.util.UUID;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@ -45,42 +31,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
* Tests OMBucketDeleteRequest class which handles DeleteBucket request.
*/
public class TestOMBucketDeleteRequest {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private OzoneManager ozoneManager;
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
private AuditLogger auditLogger;
@Before
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
auditLogger = Mockito.mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
}
@After
public void stop() {
omMetrics.unRegister();
}
public class TestOMBucketDeleteRequest extends TestBucketRequest {
@Test
public void testPreExecute() throws Exception {
@ -111,7 +65,8 @@ public class TestOMBucketDeleteRequest {
TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
omMetadataManager);
omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1);
omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
Assert.assertNull(omMetadataManager.getBucketTable().get(
omMetadataManager.getBucketKey(volumeName, bucketName)));
@ -131,7 +86,8 @@ public class TestOMBucketDeleteRequest {
OMClientResponse omClientResponse =
omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1);
omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
Assert.assertNull(omMetadataManager.getBucketTable().get(
omMetadataManager.getBucketKey(volumeName, bucketName)));

View File

@ -21,22 +21,9 @@ package org.apache.hadoop.ozone.om.request.bucket;
import java.util.UUID;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@ -47,42 +34,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.SetBucketPropertyRequest;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
* Tests OMBucketSetPropertyRequest class which handles OMSetBucketProperty
* request.
*/
public class TestOMBucketSetPropertyRequest {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private OzoneManager ozoneManager;
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
private AuditLogger auditLogger;
@Before
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
auditLogger = Mockito.mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
}
@After
public void stop() {
omMetrics.unRegister();
}
public class TestOMBucketSetPropertyRequest extends TestBucketRequest {
@Test
public void testPreExecute() throws Exception {
@ -119,7 +75,8 @@ public class TestOMBucketSetPropertyRequest {
new OMBucketSetPropertyRequest(omRequest);
OMClientResponse omClientResponse =
omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1);
omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(true,
omMetadataManager.getBucketTable().get(
@ -146,7 +103,8 @@ public class TestOMBucketSetPropertyRequest {
new OMBucketSetPropertyRequest(omRequest);
OMClientResponse omClientResponse =
omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1);
omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
omClientResponse.getOMResponse().getStatus());

View File

@ -23,6 +23,7 @@ import java.util.UUID;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.utils.db.cache.CacheKey;
import org.junit.After;
@ -65,7 +66,11 @@ public class TestOMDirectoryCreateRequest {
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
private AuditLogger auditLogger;
// A no-op ozoneManagerDoubleBufferHelper; these tests do not need a real double buffer.
private OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
((response, transactionIndex) -> {
return null;
});
@Before
public void setup() throws Exception {
@ -136,7 +141,8 @@ public class TestOMDirectoryCreateRequest {
omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest);
OMClientResponse omClientResponse =
omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.OK);
@ -167,7 +173,8 @@ public class TestOMDirectoryCreateRequest {
omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest);
OMClientResponse omClientResponse =
omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND);
@ -207,7 +214,8 @@ public class TestOMDirectoryCreateRequest {
omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest);
OMClientResponse omClientResponse =
omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.OK);
@ -251,7 +259,8 @@ public class TestOMDirectoryCreateRequest {
omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest);
OMClientResponse omClientResponse =
omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.OK);
@ -295,7 +304,8 @@ public class TestOMDirectoryCreateRequest {
omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest);
OMClientResponse omClientResponse =
omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS);

View File

@ -140,7 +140,8 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
OMClientResponse omFileCreateResponse =
omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
omFileCreateResponse.getOMResponse().getStatus());
@ -193,7 +194,8 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
OMClientResponse omFileCreateResponse =
omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(BUCKET_NOT_FOUND,
omFileCreateResponse.getOMResponse().getStatus());
@ -299,7 +301,8 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
OMClientResponse omFileCreateResponse =
omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
if (fail) {
Assert.assertTrue(omFileCreateResponse.getOMResponse()

View File

@ -80,7 +80,8 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
Assert.assertTrue(omKeyLocationInfo.size() == 0);
OMClientResponse omAllocateBlockResponse =
omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L);
omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
omAllocateBlockResponse.getOMResponse().getStatus());
@ -126,7 +127,8 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
OMClientResponse omAllocateBlockResponse =
omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L);
omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertTrue(omAllocateBlockResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND);
@ -147,7 +149,8 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
TestOMRequestUtils.addVolumeToDB(volumeName, "ozone", omMetadataManager);
OMClientResponse omAllocateBlockResponse =
omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L);
omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertTrue(omAllocateBlockResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND);
@ -169,7 +172,8 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
OMClientResponse omAllocateBlockResponse =
omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L);
omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertTrue(omAllocateBlockResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND);

View File

@ -81,7 +81,7 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
OMClientResponse omClientResponse =
omKeyCommitRequest.validateAndUpdateCache(ozoneManager,
100L);
100L, ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
omClientResponse.getOMResponse().getStatus());
@ -134,7 +134,7 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
OMClientResponse omClientResponse =
omKeyCommitRequest.validateAndUpdateCache(ozoneManager,
100L);
100L, ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
omClientResponse.getOMResponse().getStatus());
@ -166,7 +166,7 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
OMClientResponse omClientResponse =
omKeyCommitRequest.validateAndUpdateCache(ozoneManager,
100L);
100L, ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
omClientResponse.getOMResponse().getStatus());
@ -200,7 +200,7 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
OMClientResponse omClientResponse =
omKeyCommitRequest.validateAndUpdateCache(ozoneManager,
100L);
100L, ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND,
omClientResponse.getOMResponse().getStatus());

View File

@ -76,7 +76,8 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
Assert.assertNull(omKeyInfo);
OMClientResponse omKeyCreateResponse =
omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
omKeyCreateResponse.getOMResponse().getStatus());
@ -139,7 +140,8 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
Assert.assertNull(omKeyInfo);
OMClientResponse omKeyCreateResponse =
omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(
OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR,
@ -176,7 +178,8 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
Assert.assertNull(omKeyInfo);
OMClientResponse omKeyCreateResponse =
omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
omKeyCreateResponse.getOMResponse().getStatus());
@ -215,7 +218,8 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
Assert.assertNull(omKeyInfo);
OMClientResponse omKeyCreateResponse =
omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L);
omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
omKeyCreateResponse.getOMResponse().getStatus());

View File

@ -71,7 +71,7 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
OMClientResponse omClientResponse =
omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
100L);
100L, ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
omClientResponse.getOMResponse().getStatus());
@ -101,7 +101,7 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
OMClientResponse omClientResponse =
omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
100L);
100L, ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND,
omClientResponse.getOMResponse().getStatus());
@ -121,7 +121,7 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
OMClientResponse omClientResponse =
omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
100L);
100L, ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND,
omClientResponse.getOMResponse().getStatus());

View File

@ -107,7 +107,8 @@ public class TestOMKeyPurgeRequestAndResponse extends TestOMKeyRequest {
new OMKeyPurgeRequest(preExecutedRequest);
OMClientResponse omClientResponse =
omKeyPurgeRequest.validateAndUpdateCache(ozoneManager, 100L);
omKeyPurgeRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
OMResponse omResponse = OMResponse.newBuilder()
.setPurgeKeysResponse(PurgeKeysResponse.getDefaultInstance())

View File

@ -56,7 +56,8 @@ public class TestOMKeyRenameRequest extends TestOMKeyRequest {
new OMKeyRenameRequest(modifiedOmRequest);
OMClientResponse omKeyRenameResponse =
omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L);
omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
omKeyRenameResponse.getOMResponse().getStatus());
@ -103,7 +104,8 @@ public class TestOMKeyRenameRequest extends TestOMKeyRequest {
new OMKeyRenameRequest(modifiedOmRequest);
OMClientResponse omKeyRenameResponse =
omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L);
omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND,
omKeyRenameResponse.getOMResponse().getStatus());
@ -125,7 +127,8 @@ public class TestOMKeyRenameRequest extends TestOMKeyRequest {
new OMKeyRenameRequest(modifiedOmRequest);
OMClientResponse omKeyRenameResponse =
omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L);
omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND,
omKeyRenameResponse.getOMResponse().getStatus());
@ -149,7 +152,8 @@ public class TestOMKeyRenameRequest extends TestOMKeyRequest {
new OMKeyRenameRequest(modifiedOmRequest);
OMClientResponse omKeyRenameResponse =
omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L);
omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_KEY_NAME,
omKeyRenameResponse.getOMResponse().getStatus());
@ -174,7 +178,8 @@ public class TestOMKeyRenameRequest extends TestOMKeyRequest {
new OMKeyRenameRequest(modifiedOmRequest);
OMClientResponse omKeyRenameResponse =
omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L);
omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_KEY_NAME,
omKeyRenameResponse.getOMResponse().getStatus());

View File

@ -22,6 +22,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
@ -82,6 +83,12 @@ public class TestOMKeyRequest {
protected long scmBlockSize = 1000L;
protected long dataSize;
// A no-op ozoneManagerDoubleBufferHelper; these tests do not need a real double buffer.
protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
((response, transactionIndex) -> {
return null;
});
@Before
public void setup() throws Exception {

View File

@ -22,24 +22,11 @@ package org.apache.hadoop.ozone.om.request.s3.bucket;
import java.util.UUID;
import org.apache.commons.lang.RandomStringUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@ -49,45 +36,11 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import static org.junit.Assert.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
* Tests S3BucketCreateRequest class, which handles S3 CreateBucket request.
*/
public class TestS3BucketCreateRequest {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private OzoneManager ozoneManager;
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
private AuditLogger auditLogger;
@Before
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
auditLogger = Mockito.mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
}
@After
public void stop() {
omMetrics.unRegister();
Mockito.framework().clearInlineMocks();
}
public class TestS3BucketCreateRequest extends TestS3BucketRequest {
@Test
public void testPreExecute() throws Exception {
@ -152,7 +105,8 @@ public class TestS3BucketCreateRequest {
// Try create same bucket again
OMClientResponse omClientResponse =
s3BucketCreateRequest.validateAndUpdateCache(ozoneManager, 2);
s3BucketCreateRequest.validateAndUpdateCache(ozoneManager, 2,
ozoneManagerDoubleBufferHelper);
OMResponse omResponse = omClientResponse.getOMResponse();
Assert.assertNotNull(omResponse.getCreateBucketResponse());
@ -177,7 +131,8 @@ public class TestS3BucketCreateRequest {
// Try create same bucket again
OMClientResponse omClientResponse =
s3BucketCreateRequest.validateAndUpdateCache(ozoneManager, 2);
s3BucketCreateRequest.validateAndUpdateCache(ozoneManager, 2,
ozoneManagerDoubleBufferHelper);
OMResponse omResponse = omClientResponse.getOMResponse();
Assert.assertNotNull(omResponse.getCreateBucketResponse());
@ -213,7 +168,8 @@ public class TestS3BucketCreateRequest {
OMClientResponse omClientResponse =
s3BucketCreateRequest.validateAndUpdateCache(ozoneManager, 1);
s3BucketCreateRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
// Now that validateAndUpdateCache has added an entry to the cache, get
// should return a non-null value.

View File

@ -22,22 +22,9 @@ package org.apache.hadoop.ozone.om.request.s3.bucket;
import java.util.UUID;
import org.apache.commons.lang.RandomStringUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
@ -47,43 +34,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
import org.apache.hadoop.test.GenericTestUtils;
import static org.junit.Assert.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
* Tests S3BucketDelete Request.
*/
public class TestS3BucketDeleteRequest {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private OzoneManager ozoneManager;
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
private AuditLogger auditLogger;
@Before
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
auditLogger = Mockito.mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
}
@After
public void stop() {
omMetrics.unRegister();
Mockito.framework().clearInlineMocks();
}
public class TestS3BucketDeleteRequest extends TestS3BucketRequest {
@Test
public void testPreExecute() throws Exception {
@ -104,7 +59,8 @@ public class TestS3BucketDeleteRequest {
new S3BucketDeleteRequest(omRequest);
OMClientResponse s3BucketDeleteResponse =
s3BucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1L);
s3BucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
s3BucketDeleteResponse.getOMResponse().getStatus());
@ -120,7 +76,8 @@ public class TestS3BucketDeleteRequest {
new S3BucketDeleteRequest(omRequest);
OMClientResponse s3BucketDeleteResponse =
s3BucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1L);
s3BucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.S3_BUCKET_NOT_FOUND,
s3BucketDeleteResponse.getOMResponse().getStatus());

View File

@ -0,0 +1,82 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.ozone.om.request.s3.bucket;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
* Base test class for S3 Bucket request.
*/
@SuppressWarnings("visibilityModifier")
public class TestS3BucketRequest {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
protected OzoneManager ozoneManager;
protected OMMetrics omMetrics;
protected OMMetadataManager omMetadataManager;
protected AuditLogger auditLogger;
// Just setting ozoneManagerDoubleBufferHelper to a no-op for these tests.
protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
((response, transactionIndex) -> {
return null;
});
@Before
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
auditLogger = Mockito.mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
}
@After
public void stop() {
omMetrics.unRegister();
Mockito.framework().clearInlineMocks();
}
}
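For reference, the two-argument lambda above compiles because OzoneManagerDoubleBufferHelper is used as a functional interface. This patch only imports the type, so the following is a hypothetical sketch of its shape: the method name add and the CompletableFuture<Void> return type are assumptions inferred from the (response, transactionIndex) lambda and the fact that it may return null; only the package and parameter shape are visible in this diff.

package org.apache.hadoop.ozone.om.ratis.utils;

import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.ozone.om.response.OMClientResponse;

/**
 * Hypothetical sketch of the single-method interface the no-op test lambda
 * implements. Method name and return type are assumptions, not taken from
 * this patch.
 */
@FunctionalInterface
public interface OzoneManagerDoubleBufferHelper {

  CompletableFuture<Void> add(OMClientResponse response,
      long transactionIndex);
}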

View File

@ -60,7 +60,7 @@ public class TestS3InitiateMultipartUploadRequest
OMClientResponse omClientResponse =
s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
100L);
100L, ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
omClientResponse.getOMResponse().getStatus());
@ -106,7 +106,7 @@ public class TestS3InitiateMultipartUploadRequest
OMClientResponse omClientResponse =
s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
100L);
100L, ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
omClientResponse.getOMResponse().getStatus());
@ -136,7 +136,7 @@ public class TestS3InitiateMultipartUploadRequest
OMClientResponse omClientResponse =
s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
100L);
100L, ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
omClientResponse.getOMResponse().getStatus());

View File

@ -36,9 +36,10 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
@ -56,6 +57,12 @@ public class TestS3MultipartRequest {
protected OMMetadataManager omMetadataManager;
protected AuditLogger auditLogger;
// Just setting ozoneManagerDoubleBufferHelper to a no-op for these tests.
protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
((response, transactionIndex) -> {
return null;
});
@Before
public void setup() throws Exception {

View File

@ -45,7 +45,7 @@ public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest {
OMClientResponse omClientResponse =
s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
1L);
1L, ozoneManagerDoubleBufferHelper);
String multipartUploadID = omClientResponse.getOMResponse()
.getInitiateMultiPartUploadResponse().getMultipartUploadID();
@ -58,7 +58,8 @@ public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest {
new S3MultipartUploadAbortRequest(abortMPURequest);
omClientResponse =
s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L);
s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
ozoneManagerDoubleBufferHelper);
String multipartKey = omMetadataManager.getMultipartKey(volumeName,
@ -92,7 +93,8 @@ public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest {
new S3MultipartUploadAbortRequest(abortMPURequest);
OMClientResponse omClientResponse =
s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L);
s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
ozoneManagerDoubleBufferHelper);
// Check table and response.
Assert.assertEquals(
@ -119,7 +121,8 @@ public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest {
new S3MultipartUploadAbortRequest(abortMPURequest);
OMClientResponse omClientResponse =
s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L);
s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
ozoneManagerDoubleBufferHelper);
// Check table and response.
Assert.assertEquals(
@ -147,7 +150,8 @@ public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest {
new S3MultipartUploadAbortRequest(abortMPURequest);
OMClientResponse omClientResponse =
s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L);
s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
ozoneManagerDoubleBufferHelper);
// Check table and response.
Assert.assertEquals(

View File

@ -65,7 +65,7 @@ public class TestS3MultipartUploadCommitPartRequest
OMClientResponse omClientResponse =
s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
1L);
1L, ozoneManagerDoubleBufferHelper);
long clientID = Time.now();
String multipartUploadID = omClientResponse.getOMResponse()
@ -84,7 +84,7 @@ public class TestS3MultipartUploadCommitPartRequest
omClientResponse =
s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
2L);
2L, ozoneManagerDoubleBufferHelper);
Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
@ -129,7 +129,7 @@ public class TestS3MultipartUploadCommitPartRequest
OMClientResponse omClientResponse =
s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
2L);
2L, ozoneManagerDoubleBufferHelper);
Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
@ -168,7 +168,7 @@ public class TestS3MultipartUploadCommitPartRequest
OMClientResponse omClientResponse =
s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
2L);
2L, ozoneManagerDoubleBufferHelper);
Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND);
@ -200,7 +200,7 @@ public class TestS3MultipartUploadCommitPartRequest
OMClientResponse omClientResponse =
s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
2L);
2L, ozoneManagerDoubleBufferHelper);
Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND);

View File

@ -21,25 +21,12 @@ package org.apache.hadoop.ozone.om.request.volume;
import java.util.UUID;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.CreateVolumeRequest;
@ -48,44 +35,15 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.VolumeInfo;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
* Tests create volume request.
*/
public class TestOMVolumeCreateRequest {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private OzoneManager ozoneManager;
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
private AuditLogger auditLogger;
public class TestOMVolumeCreateRequest extends TestOMVolumeRequest {
@Before
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L);
auditLogger = Mockito.mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
}
@After
public void stop() {
omMetrics.unRegister();
Mockito.framework().clearInlineMocks();
}
@Test
public void testPreExecute() throws Exception {
String volumeName = UUID.randomUUID().toString();
@ -113,7 +71,8 @@ public class TestOMVolumeCreateRequest {
try {
OMClientResponse omClientResponse =
omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1);
omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
} catch (IllegalArgumentException ex){
GenericTestUtils.assertExceptionContains("should be greater than zero",
ex);
@ -147,7 +106,8 @@ public class TestOMVolumeCreateRequest {
omVolumeCreateRequest = new OMVolumeCreateRequest(modifiedRequest);
OMClientResponse omClientResponse =
omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1);
omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
OzoneManagerProtocolProtos.OMResponse omResponse =
omClientResponse.getOMResponse();
@ -187,7 +147,8 @@ public class TestOMVolumeCreateRequest {
modifiedRequest = omVolumeCreateRequest.preExecute(ozoneManager);
omClientResponse =
omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 2L);
omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 2L,
ozoneManagerDoubleBufferHelper);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
omClientResponse.getOMResponse().getStatus());
@ -219,7 +180,8 @@ public class TestOMVolumeCreateRequest {
omVolumeCreateRequest = new OMVolumeCreateRequest(modifiedRequest);
OMClientResponse omClientResponse =
omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1);
omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
OzoneManagerProtocolProtos.OMResponse omResponse =
omClientResponse.getOMResponse();

View File

@ -21,71 +21,24 @@ package org.apache.hadoop.ozone.om.request.volume;
import java.util.UUID;
import com.google.common.base.Optional;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Assert;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.utils.db.cache.CacheKey;
import org.apache.hadoop.utils.db.cache.CacheValue;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.DeleteVolumeRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Tests delete volume request.
*/
public class TestOMVolumeDeleteRequest {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private OzoneManager ozoneManager;
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
private AuditLogger auditLogger;
@Before
public void setup() throws Exception {
ozoneManager = mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
auditLogger = Mockito.mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
}
@After
public void stop() {
omMetrics.unRegister();
Mockito.framework().clearInlineMocks();
}
public class TestOMVolumeDeleteRequest extends TestOMVolumeRequest {
@Test
public void testPreExecute() throws Exception {
@ -124,7 +77,8 @@ public class TestOMVolumeDeleteRequest {
Assert.assertNotNull(omMetadataManager.getUserTable().get(ownerKey));
OMClientResponse omClientResponse =
omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1);
omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
OzoneManagerProtocolProtos.OMResponse omResponse =
omClientResponse.getOMResponse();
@ -154,7 +108,8 @@ public class TestOMVolumeDeleteRequest {
omVolumeDeleteRequest.preExecute(ozoneManager);
OMClientResponse omClientResponse =
omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1);
omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
OzoneManagerProtocolProtos.OMResponse omResponse =
omClientResponse.getOMResponse();
@ -191,7 +146,8 @@ public class TestOMVolumeDeleteRequest {
TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
OMClientResponse omClientResponse =
omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1L);
omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1L,
ozoneManagerDoubleBufferHelper);
OzoneManagerProtocolProtos.OMResponse omResponse =
omClientResponse.getOMResponse();

View File

@ -0,0 +1,80 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.ozone.om.request.volume;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
* Base test class for Volume request.
*/
@SuppressWarnings("visibilitymodifier")
public class TestOMVolumeRequest {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
protected OzoneManager ozoneManager;
protected OMMetrics omMetrics;
protected OMMetadataManager omMetadataManager;
protected AuditLogger auditLogger;
// Just setting ozoneManagerDoubleBufferHelper to a no-op for these tests.
protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
((response, transactionIndex) -> {
return null;
});
@Before
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L);
auditLogger = Mockito.mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
}
@After
public void stop() {
omMetrics.unRegister();
Mockito.framework().clearInlineMocks();
}
}
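Usage note: with this base class in place, a volume-request test only contributes @Test methods; the mocked OzoneManager, the OmMetadataManagerImpl backed by a temporary folder, and the no-op double-buffer helper are all inherited. A hypothetical, self-contained subclass illustrating what the inherited setup guarantees (the class and test names are invented for illustration; the assertions mirror the stubs in setup() above):

package org.apache.hadoop.ozone.om.request.volume;

import org.junit.Assert;
import org.junit.Test;

/**
 * Illustrative subclass of TestOMVolumeRequest; not part of this patch.
 */
public class TestOMVolumeRequestInheritedSetupExample
    extends TestOMVolumeRequest {

  @Test
  public void testInheritedMocks() {
    // The mocked OzoneManager hands back the real OmMetadataManagerImpl
    // created in the inherited setup().
    Assert.assertSame(omMetadataManager, ozoneManager.getMetadataManager());

    // The max user volume count is stubbed to 10 in setup().
    Assert.assertEquals(10L, ozoneManager.getMaxUserVolumeCount());

    // The inherited double-buffer helper is intentionally a no-op, so the
    // request tests above can pass it straight to validateAndUpdateCache.
    Assert.assertNotNull(ozoneManagerDoubleBufferHelper);
  }
}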

View File

@ -20,65 +20,19 @@ package org.apache.hadoop.ozone.om.request.volume;
import java.util.UUID;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
* Tests set volume property request.
*/
public class TestOMVolumeSetOwnerRequest {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private OzoneManager ozoneManager;
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
private AuditLogger auditLogger;
@Before
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L);
auditLogger = Mockito.mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
}
@After
public void stop() {
omMetrics.unRegister();
Mockito.framework().clearInlineMocks();
}
public class TestOMVolumeSetOwnerRequest extends TestOMVolumeRequest {
@Test
public void testPreExecute() throws Exception {
@ -121,7 +75,8 @@ public class TestOMVolumeSetOwnerRequest {
OMClientResponse omClientResponse =
omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1);
omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
OzoneManagerProtocolProtos.OMResponse omResponse =
omClientResponse.getOMResponse();
@ -168,7 +123,8 @@ public class TestOMVolumeSetOwnerRequest {
omVolumeSetOwnerRequest.preExecute(ozoneManager);
OMClientResponse omClientResponse =
omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1);
omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
OzoneManagerProtocolProtos.OMResponse omResponse =
omClientResponse.getOMResponse();
@ -193,7 +149,8 @@ public class TestOMVolumeSetOwnerRequest {
omVolumeSetOwnerRequest.preExecute(ozoneManager);
OMClientResponse omClientResponse =
omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1);
omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
OzoneManagerProtocolProtos.OMResponse omResponse =
omClientResponse.getOMResponse();

View File

@ -20,64 +20,20 @@ package org.apache.hadoop.ozone.om.request.volume;
import java.util.UUID;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
* Tests set volume property request.
*/
public class TestOMVolumeSetQuotaRequest {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private OzoneManager ozoneManager;
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
private AuditLogger auditLogger;
@Before
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
auditLogger = Mockito.mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
}
@After
public void stop() {
omMetrics.unRegister();
Mockito.framework().clearInlineMocks();
}
public class TestOMVolumeSetQuotaRequest extends TestOMVolumeRequest {
@Test
public void testPreExecute() throws Exception {
@ -125,7 +81,8 @@ public class TestOMVolumeSetQuotaRequest {
OMClientResponse omClientResponse =
omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1);
omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
OzoneManagerProtocolProtos.OMResponse omResponse =
omClientResponse.getOMResponse();
@ -158,7 +115,8 @@ public class TestOMVolumeSetQuotaRequest {
omVolumeSetQuotaRequest.preExecute(ozoneManager);
OMClientResponse omClientResponse =
omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1);
omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
OzoneManagerProtocolProtos.OMResponse omResponse =
omClientResponse.getOMResponse();
@ -184,7 +142,8 @@ public class TestOMVolumeSetQuotaRequest {
omVolumeSetQuotaRequest.preExecute(ozoneManager);
OMClientResponse omClientResponse =
omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1);
omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1,
ozoneManagerDoubleBufferHelper);
OzoneManagerProtocolProtos.OMResponse omResponse =
omClientResponse.getOMResponse();