From cdfbec47ce050b79ec37b35e38f88b9eff6bf168 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Mon, 7 Jan 2019 11:09:14 -0800 Subject: [PATCH] HDDS-930. Multipart Upload: Abort multiupload request. Contributed by Bharat Viswanadham. --- .../hadoop/ozone/client/OzoneBucket.java | 11 +++ .../ozone/client/protocol/ClientProtocol.java | 11 +++ .../hadoop/ozone/client/rest/RestClient.java | 7 ++ .../hadoop/ozone/client/rpc/RpcClient.java | 15 ++++ .../om/protocol/OzoneManagerProtocol.java | 7 ++ ...ManagerProtocolClientSideTranslatorPB.java | 35 +++++++++ .../src/main/proto/OzoneManagerProtocol.proto | 12 +++ .../ozone/client/rpc/TestOzoneRpcClient.java | 62 +++++++++++++++ .../apache/hadoop/ozone/om/KeyManager.java | 7 ++ .../hadoop/ozone/om/KeyManagerImpl.java | 77 ++++++++++++++++++- .../org/apache/hadoop/ozone/om/OMMetrics.java | 19 +++++ .../apache/hadoop/ozone/om/OzoneManager.java | 19 +++++ .../ozone/om/exceptions/OMException.java | 1 + ...ManagerProtocolServerSideTranslatorPB.java | 34 ++++++++ 14 files changed, 316 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index e6a6e12f1ff..9d1571699e6 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -386,6 +386,17 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload(String key, partsMap); } + /** + * Abort multipart upload request. + * @param keyName + * @param uploadID + * @throws IOException + */ + public void abortMultipartUpload(String keyName, String uploadID) throws + IOException { + proxy.abortMultipartUpload(volumeName, name, keyName, uploadID); + } + /** * An Iterator to iterate over {@link OzoneKey} list. 
*/ diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 5960943c1f2..8d5d3a2bf59 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -434,4 +434,15 @@ OmMultipartUploadCompleteInfo completeMultipartUpload(String volumeName, String bucketName, String keyName, String uploadID, Map partsMap) throws IOException; + /** + * Abort Multipart upload request for the given key with given uploadID. + * @param volumeName + * @param bucketName + * @param keyName + * @param uploadID + * @throws IOException + */ + void abortMultipartUpload(String volumeName, + String bucketName, String keyName, String uploadID) throws IOException; + } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java index 85e559fbcc7..9b373aeec5f 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java @@ -985,4 +985,11 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( throw new UnsupportedOperationException("Ozone REST protocol does not " + "support this operation."); } + + @Override + public void abortMultipartUpload(String volumeName, + String bucketName, String keyName, String uploadID) throws IOException { + throw new UnsupportedOperationException("Ozone REST protocol does not " + + "support this operation."); + } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 
72da5f8816b..b6fb6b5d027 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -727,6 +727,7 @@ public OzoneOutputStream createMultipartKey(String volumeName, .setDataSize(size) .setIsMultipartKey(true) .setMultipartUploadID(uploadID) + .setMultipartUploadPartNumber(partNumber) .build(); OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); @@ -780,4 +781,18 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( } + @Override + public void abortMultipartUpload(String volumeName, + String bucketName, String keyName, String uploadID) throws IOException { + HddsClientUtils.verifyResourceName(volumeName, bucketName); + HddsClientUtils.checkNotNull(keyName, uploadID); + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setMultipartUploadID(uploadID) + .build(); + ozoneManagerClient.abortMultipartUpload(omKeyArgs); + } + } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 09da1abdd0d..9b4a42e3ae0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -338,5 +338,12 @@ OmMultipartUploadCompleteInfo completeMultipartUpload( OmKeyArgs omKeyArgs, OmMultipartUploadList multipartUploadList) throws IOException; + /** + * Abort multipart upload. 
+ * @param omKeyArgs + * @throws IOException + */ + void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException; + } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 220e839f1ea..460fcb3816e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -90,6 +90,10 @@ .MultipartInfoInitiateRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .MultipartInfoInitiateResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. + MultipartUploadAbortRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .MultipartUploadAbortResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .MultipartUploadCompleteRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -633,6 +637,10 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { keyArgs.setMultipartUploadID(args.getMultipartUploadID()); } + if (args.getMultipartUploadPartNumber() > 0) { + keyArgs.setMultipartNumber(args.getMultipartUploadPartNumber()); + } + keyArgs.setIsMultipartKey(args.getIsMultipartKey()); @@ -1053,6 +1061,33 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( return info; } + @Override + public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException { + KeyArgs.Builder keyArgs = KeyArgs.newBuilder() + .setVolumeName(omKeyArgs.getVolumeName()) + .setBucketName(omKeyArgs.getBucketName()) + .setKeyName(omKeyArgs.getKeyName()) + .setMultipartUploadID(omKeyArgs.getMultipartUploadID()); + + 
MultipartUploadAbortRequest.Builder multipartUploadAbortRequest = + MultipartUploadAbortRequest.newBuilder(); + multipartUploadAbortRequest.setKeyArgs(keyArgs); + + OMRequest omRequest = createOMRequest( + Type.AbortMultiPartUpload) + .setAbortMultiPartUploadRequest(multipartUploadAbortRequest.build()) + .build(); + + MultipartUploadAbortResponse response = + submitRequest(omRequest).getAbortMultiPartUploadResponse(); + + if (response.getStatus() != Status.OK) { + throw new IOException("Abort multipart upload failed, error:" + + response.getStatus()); + } + + } + public List getServiceList() throws IOException { ServiceListRequest req = ServiceListRequest.newBuilder().build(); diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index b32d324b3e3..eed3e140cfa 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -66,6 +66,7 @@ enum Type { InitiateMultiPartUpload = 45; CommitMultiPartUpload = 46; CompleteMultiPartUpload = 47; + AbortMultiPartUpload = 48; ServiceList = 51; } @@ -107,6 +108,7 @@ message OMRequest { optional MultipartInfoInitiateRequest initiateMultiPartUploadRequest = 45; optional MultipartCommitUploadPartRequest commitMultiPartUploadRequest = 46; optional MultipartUploadCompleteRequest completeMultiPartUploadRequest = 47; + optional MultipartUploadAbortRequest abortMultiPartUploadRequest = 48; optional ServiceListRequest serviceListRequest = 51; } @@ -149,6 +151,7 @@ message OMResponse { optional MultipartInfoInitiateResponse initiateMultiPartUploadResponse = 45; optional MultipartCommitUploadPartResponse commitMultiPartUploadResponse = 46; optional MultipartUploadCompleteResponse completeMultiPartUploadResponse = 47; + optional MultipartUploadAbortResponse abortMultiPartUploadResponse = 48; optional ServiceListResponse ServiceListResponse = 51; } @@ -184,6 +187,7 @@ enum 
Status { MISSING_UPLOAD_PARTS = 28; COMPLETE_MULTIPART_UPLOAD_ERROR = 29; ENTITY_TOO_SMALL = 30; + ABORT_MULTIPART_UPLOAD_FAILED = 31; } @@ -608,6 +612,14 @@ message Part { required string partName = 2; } +message MultipartUploadAbortRequest { + required KeyArgs keyArgs = 1; +} + +message MultipartUploadAbortResponse { + required Status status = 1; +} + /** The OM service that takes care of Ozone namespace. */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java index f82da1644b3..47922cb8dca 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java @@ -1665,6 +1665,68 @@ public void testMultipartUploadWithMissingParts() throws Exception { } } + @Test + public void testAbortUploadFail() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + + try { + bucket.abortMultipartUpload(keyName, "random"); + fail("testAbortUploadFail failed"); + } catch (IOException ex) { + GenericTestUtils.assertExceptionContains( + "NO_SUCH_MULTIPART_UPLOAD_ERROR", ex); + } + } + + + @Test + public void testAbortUploadSuccessWithOutAnyParts() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = 
volume.getBucket(bucketName); + + try { + String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType + .STAND_ALONE, ReplicationFactor.ONE); + bucket.abortMultipartUpload(keyName, uploadID); + } catch (IOException ex) { + fail("testAbortUploadSuccess failed"); + } + } + + @Test + public void testAbortUploadSuccessWithParts() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + + try { + String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType + .STAND_ALONE, ReplicationFactor.ONE); + uploadPart(bucket, keyName, uploadID, 1, "data".getBytes("UTF-8")); + bucket.abortMultipartUpload(keyName, uploadID); + } catch (IOException ex) { + fail("testAbortUploadSuccess failed"); + } + } + private byte[] generateData(int size, byte val) { byte[] chars = new byte[size]; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 29e0c608b1c..9c885b41a97 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -213,4 +213,11 @@ OmMultipartCommitUploadPartInfo commitMultipartUploadPart( */ OmMultipartUploadCompleteInfo completeMultipartUpload(OmKeyArgs omKeyArgs, OmMultipartUploadList multipartUploadList) throws IOException; + + /** + * Abort multipart upload request. 
+ * @param omKeyArgs + * @throws IOException + */ + void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 1347e1cabcb..2e8979a4e60 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -213,6 +213,8 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { try { if (args.getIsMultipartKey()) { + Preconditions.checkArgument(args.getMultipartUploadPartNumber() > 0, + "PartNumber Should be greater than zero"); // When key is multipart upload part key, we should take replication // type and replication factor from original key which has done // initiate multipart upload. If we have not found any such, we throw @@ -686,8 +688,16 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( keyInfo.setDataSize(omKeyArgs.getDataSize()); keyInfo.updateLocationInfoList(omKeyArgs.getLocationInfoList()); - partName = keyName + clientID; + partName = metadataManager.getOzoneKey(volumeName, bucketName, keyName) + + clientID; if (multipartKeyInfo == null) { + // This can occur when user started uploading part by the time commit + // of that part happens, in between the user might have requested + // abort multipart upload. If we just throw exception, then the data + // will not be garbage collected, so move this part to delete table + // and throw error + // Move this part to delete table. 
+ metadataManager.getDeletedTable().put(partName, keyInfo); throw new OMException("No such Multipart upload is with specified " + "uploadId " + uploadID, ResultCodes.NO_SUCH_MULTIPART_UPLOAD); } else { @@ -886,4 +896,69 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( metadataManager.getLock().releaseBucketLock(volumeName, bucketName); } } + + @Override + public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException { + + Preconditions.checkNotNull(omKeyArgs); + String volumeName = omKeyArgs.getVolumeName(); + String bucketName = omKeyArgs.getBucketName(); + String keyName = omKeyArgs.getKeyName(); + String uploadID = omKeyArgs.getMultipartUploadID(); + Preconditions.checkNotNull(uploadID, "uploadID cannot be null"); + metadataManager.getLock().acquireBucketLock(volumeName, bucketName); + + try { + String multipartKey = metadataManager.getMultipartKey(volumeName, + bucketName, keyName, uploadID); + OmMultipartKeyInfo multipartKeyInfo = metadataManager + .getMultipartInfoTable().get(multipartKey); + OmKeyInfo openKeyInfo = metadataManager.getOpenKeyTable().get( + multipartKey); + + // If there is no entry in openKeyTable, then there is no multipart + // upload initiated for this key. 
+ if (openKeyInfo == null) { + LOG.error("Abort Multipart Upload Failed: volume: " + volumeName + + " bucket: " + bucketName + " key: " + keyName + " with error no " + + "such uploadID:" + uploadID); + throw new OMException("Abort Multipart Upload Failed: volume: " + + volumeName + " bucket: " + bucketName + " key: " + keyName, + ResultCodes.NO_SUCH_MULTIPART_UPLOAD); + } else { + // Move all the parts to delete table + TreeMap partKeyInfoMap = multipartKeyInfo + .getPartKeyInfoList(); + DBStore store = metadataManager.getStore(); + try (BatchOperation batch = store.initBatchOperation()) { + for (Map.Entry partKeyInfoEntry : partKeyInfoMap + .entrySet()) { + PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue(); + OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf( + partKeyInfo.getPartKeyInfo()); + metadataManager.getDeletedTable().putWithBatch(batch, + partKeyInfo.getPartName(), currentKeyPartInfo); + } + // Finally delete the entry from the multipart info table and open + // key table + metadataManager.getMultipartInfoTable().deleteWithBatch(batch, + multipartKey); + metadataManager.getOpenKeyTable().deleteWithBatch(batch, + multipartKey); + store.commitBatchOperation(batch); + } + } + } catch (OMException ex) { + throw ex; + } catch (IOException ex) { + LOG.error("Abort Multipart Upload Failed: volume: " + volumeName + + " bucket: " + bucketName + " key: " + keyName, ex); + throw new OMException(ex.getMessage(), ResultCodes + .ABORT_MULTIPART_UPLOAD_FAILED); + } finally { + metadataManager.getLock().releaseBucketLock(volumeName, bucketName); + } + + } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index 89e1679b7c0..83142f6087f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -90,6 +90,8
@@ public class OMMetrics { private @Metric MutableCounterLong numCommitMultipartUploadParts; private @Metric MutableCounterLong getNumCommitMultipartUploadPartFails; private @Metric MutableCounterLong numCompleteMultipartUploadFails; + private @Metric MutableCounterLong numAbortMultipartUploads; + private @Metric MutableCounterLong numAbortMultipartUploadFails; // Metrics for total number of volumes, buckets and keys @@ -258,6 +260,15 @@ public void incNumCompleteMultipartUploadFails() { numCompleteMultipartUploadFails.incr(); } + public void incNumAbortMultipartUploads() { + numKeyOps.incr(); + numAbortMultipartUploads.incr(); + } + + public void incNumAbortMultipartUploadFails() { + numAbortMultipartUploadFails.incr(); + } + public void incNumGetServiceLists() { numGetServiceLists.incr(); } @@ -576,6 +587,14 @@ public long getNumInitiateMultipartUploadFails() { return numInitiateMultipartUploadFails.value(); } + public long getNumAbortMultipartUploads() { + return numAbortMultipartUploads.value(); + } + + public long getNumAbortMultipartUploadFails() { + return numAbortMultipartUploadFails.value(); + } + public void unRegister() { MetricsSystem ms = DefaultMetricsSystem.instance(); ms.unregisterSource(SOURCE_NAME); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 941b80c7757..d8352471c57 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -1672,6 +1672,25 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( } } + @Override + public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException { + + Map auditMap = (omKeyArgs == null) ? 
new LinkedHashMap<>() : + omKeyArgs.toAuditMap(); + metrics.incNumAbortMultipartUploads(); + try { + keyManager.abortMultipartUpload(omKeyArgs); + AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction + .COMPLETE_MULTIPART_UPLOAD, auditMap)); + } catch (IOException ex) { + metrics.incNumAbortMultipartUploadFails(); + AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction + .COMPLETE_MULTIPART_UPLOAD, auditMap, ex)); + throw ex; + } + + } + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index 58f75315c66..4da941adde7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -123,6 +123,7 @@ public enum ResultCodes { MISSING_UPLOAD_PARTS, COMPLETE_MULTIPART_UPLOAD_FAILED, ENTITY_TOO_SMALL, + ABORT_MULTIPART_UPLOAD_FAILED, INVALID_REQUEST; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index de88bc61c07..c7b86a0aa18 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -98,6 +98,10 @@ .LookupKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .LookupKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. 
+ MultipartUploadAbortRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .MultipartUploadAbortResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .MultipartCommitUploadPartRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -346,6 +350,12 @@ private OMResponse submitRequestToOM(OMRequest request) responseBuilder.setCompleteMultiPartUploadResponse( completeMultipartUploadResponse); break; + case AbortMultiPartUpload: + MultipartUploadAbortResponse multipartUploadAbortResponse = + abortMultipartUpload(request.getAbortMultiPartUploadRequest()); + responseBuilder.setAbortMultiPartUploadResponse( + multipartUploadAbortResponse); + break; case ServiceList: ServiceListResponse serviceListResponse = getServiceList( request.getServiceListRequest()); @@ -415,6 +425,8 @@ private Status exceptionToResponseStatus(IOException ex) { return Status.MISSING_UPLOAD_PARTS; case ENTITY_TOO_SMALL: return Status.ENTITY_TOO_SMALL; + case ABORT_MULTIPART_UPLOAD_FAILED: + return Status.ABORT_MULTIPART_UPLOAD_FAILED; default: return Status.INTERNAL_ERROR; } @@ -913,4 +925,26 @@ private MultipartUploadCompleteResponse completeMultipartUpload( } return response.build(); } + + private MultipartUploadAbortResponse abortMultipartUpload( + MultipartUploadAbortRequest multipartUploadAbortRequest) { + MultipartUploadAbortResponse.Builder response = + MultipartUploadAbortResponse.newBuilder(); + + try { + KeyArgs keyArgs = multipartUploadAbortRequest.getKeyArgs(); + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() + .setVolumeName(keyArgs.getVolumeName()) + .setBucketName(keyArgs.getBucketName()) + .setKeyName(keyArgs.getKeyName()) + .setMultipartUploadID(keyArgs.getMultipartUploadID()) + .build(); + impl.abortMultipartUpload(omKeyArgs); + response.setStatus(Status.OK); + } catch (IOException ex) { + response.setStatus(exceptionToResponseStatus(ex)); + } + return response.build(); + } + }