HDDS-901. MultipartUpload: S3 API for Initiate multipart upload. Contributed by Bharat Viswanadham.

Márton Elek 2019-01-07 12:06:22 +01:00
parent d14c56d150
commit 992dd9d189
5 changed files with 253 additions and 0 deletions

New file: S3 multipart upload smoketest (Robot Framework)

@@ -0,0 +1,42 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
Documentation       S3 gateway test with aws cli
Library             OperatingSystem
Library             String
Resource            ../commonlib.robot
Resource            commonawslib.robot
Test Setup          Setup s3 tests

*** Variables ***
${ENDPOINT_URL}     http://s3g:9878
${BUCKET}           generated

*** Test Cases ***

Initiate Multipart Upload
    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key multipartKey
    ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
    Should contain      ${result}               ${BUCKET}
    Should contain      ${result}               multipartKey
    Should contain      ${result}               UploadId
    # initiate again
    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key multipartKey
    ${nextUploadID} =   Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
    Should contain      ${result}               ${BUCKET}
    Should contain      ${result}               multipartKey
    Should contain      ${result}               UploadId
    Should Not Be Equal    ${uploadID}    ${nextUploadID}
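
For reference, the same call the smoketest issues through the AWS CLI can be made from Java with the AWS SDK (v1). A minimal sketch, not part of this commit: the endpoint, bucket, and key mirror the test variables above, and it assumes credentials are available from the SDK's default provider chain.

import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;

public class InitiateMultipartUploadExample {
  public static void main(String[] args) {
    // Endpoint and region are assumptions matching the smoketest setup.
    AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
            "http://s3g:9878", "us-east-1"))
        .withPathStyleAccessEnabled(true)
        .build();

    InitiateMultipartUploadResult result = s3.initiateMultipartUpload(
        new InitiateMultipartUploadRequest("generated", "multipartKey"));

    // Each call returns a fresh upload id, as the test above asserts.
    System.out.println("UploadId: " + result.getUploadId());
  }
}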

New file: org/apache/hadoop/ozone/s3/endpoint/MultipartUploadInitiateResponse.java

@@ -0,0 +1,69 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.hadoop.ozone.s3.endpoint;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;

/**
* Response for Initiate Multipart Upload request.
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlRootElement(name = "InitiateMultipartUploadResult",
namespace = "http://s3.amazonaws.com/doc/2006-03-01/")
public class MultipartUploadInitiateResponse {

  @XmlElement(name = "Bucket")
  private String bucket;

  @XmlElement(name = "Key")
  private String key;

  @XmlElement(name = "UploadId")
  private String uploadID;

  public String getBucket() {
    return bucket;
  }

  public void setBucket(String bucket) {
    this.bucket = bucket;
  }

  public String getKey() {
    return key;
  }

  public void setKey(String key) {
    this.key = key;
  }

  public String getUploadID() {
    return uploadID;
  }

  public void setUploadID(String uploadID) {
    this.uploadID = uploadID;
  }
}
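
To make the wire format concrete, the XML this JAXB type serializes to can be previewed with a few lines of marshalling code. A minimal sketch, not part of this commit; the field values are illustrative only.

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;

public class MarshalDemo {
  public static void main(String[] args) throws Exception {
    MultipartUploadInitiateResponse r = new MultipartUploadInitiateResponse();
    r.setBucket("generated");
    r.setKey("multipartKey");
    r.setUploadID("example-upload-id");

    Marshaller m = JAXBContext
        .newInstance(MultipartUploadInitiateResponse.class)
        .createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    // Prints an InitiateMultipartUploadResult element in the
    // http://s3.amazonaws.com/doc/2006-03-01/ namespace with
    // Bucket, Key and UploadId children.
    m.marshal(r, System.out);
  }
}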

Changed file: org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java

@@ -21,11 +21,15 @@ import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.HEAD;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.Response.Status;
@@ -44,6 +48,7 @@ import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.s3.SignedChunksInputStream;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
@@ -340,6 +345,51 @@ public class ObjectEndpoint extends EndpointBase {
  }
  /**
   * Initiate multipart upload request: handles POST with the "uploads"
   * query parameter and returns the upload id identifying the new upload.
   */
  @POST
  @Produces(MediaType.APPLICATION_XML)
  public Response initiateMultipartUpload(
      @PathParam("bucket") String bucket,
      @PathParam("path") String key,
      @QueryParam("uploads") String uploads)
      throws IOException, OS3Exception {

    OzoneBucket ozoneBucket = getBucket(bucket);
    String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);

    // Fall back to the default S3 storage type when the client does not
    // send an x-amz-storage-class header; reject unknown values.
    ReplicationType replicationType;
    ReplicationFactor replicationFactor;
    if (storageType == null || storageType.isEmpty()) {
      replicationType = S3StorageType.getDefault().getType();
      replicationFactor = S3StorageType.getDefault().getFactor();
    } else {
      try {
        replicationType = S3StorageType.valueOf(storageType).getType();
        replicationFactor = S3StorageType.valueOf(storageType).getFactor();
      } catch (IllegalArgumentException ex) {
        throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT,
            storageType);
      }
    }

    OmMultipartInfo multipartInfo = ozoneBucket
        .initiateMultipartUpload(key, replicationType, replicationFactor);

    MultipartUploadInitiateResponse multipartUploadInitiateResponse =
        new MultipartUploadInitiateResponse();
    multipartUploadInitiateResponse.setBucket(bucket);
    multipartUploadInitiateResponse.setKey(key);
    multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID());

    return Response.status(Status.OK).entity(multipartUploadInitiateResponse)
        .build();
  }
  @VisibleForTesting
  public void setHeaders(HttpHeaders headers) {
    this.headers = headers;
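
The storage-class handling above also has an error path: an unrecognized x-amz-storage-class value is mapped to INVALID_ARGUMENT. A minimal test sketch of that branch, not part of this commit, written in the style of TestInitiateMultipartUpload further down and reusing its imports; the "RANDOM" header value is illustrative, and it assumes OS3Exception exposes its error code via getCode().

  @Test
  public void testInitiateMultipartUploadInvalidStorageClass()
      throws Exception {
    String bucket = "s3bucket";
    String key = "key1";
    OzoneClientStub client = new OzoneClientStub();
    client.getObjectStore().createS3Bucket("ozone", bucket);

    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
    // "RANDOM" is an illustrative value that is not a valid S3StorageType.
    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("RANDOM");

    ObjectEndpoint rest = new ObjectEndpoint();
    rest.setHeaders(headers);
    rest.setClient(client);

    try {
      rest.initiateMultipartUpload(bucket, key, "");
      fail("Expected OS3Exception for an unknown storage class");
    } catch (OS3Exception ex) {
      // Assumption: OS3Exception exposes the error code via getCode().
      assertEquals(S3ErrorTable.INVALID_ARGUMENT.getCode(), ex.getCode());
    }
  }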

Changed file: OzoneBucketStub.java (in-memory OzoneBucket test stub)

@@ -28,6 +28,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.UUID;
import java.util.stream.Collectors;

import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;

/**
 * In-memory ozone bucket for testing.
@@ -46,6 +48,7 @@ public class OzoneBucketStub extends OzoneBucket {
  private Map<String, byte[]> keyContents = new HashMap<>();
  private Map<String, String> multipartUploadIdMap = new HashMap<>();

  /**
   * Constructs OzoneBucket instance.
   *
@@ -147,4 +150,14 @@
      throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public OmMultipartInfo initiateMultipartUpload(String keyName,
      ReplicationType type,
      ReplicationFactor factor)
      throws IOException {
    // Stub behavior: hand out a fresh random upload id on every call and
    // remember the most recent id per key.
    String uploadID = UUID.randomUUID().toString();
    multipartUploadIdMap.put(keyName, uploadID);
    return new OmMultipartInfo(getVolumeName(), getName(), keyName, uploadID);
  }
}

New file: org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java

@@ -0,0 +1,79 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.hadoop.ozone.s3.endpoint;

import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Response;

import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.junit.Test;
import org.mockito.Mockito;

import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.when;

/**
* This class tests Initiate Multipart Upload request.
*/
public class TestInitiateMultipartUpload {

  @Test
  public void testInitiateMultipartUpload() throws Exception {
    String bucket = "s3bucket";
    String key = "key1";
    OzoneClientStub client = new OzoneClientStub();
    client.getObjectStore().createS3Bucket("ozone", bucket);
    String volumeName = client.getObjectStore().getOzoneVolumeName(bucket);
    OzoneVolume volume = client.getObjectStore().getVolume(volumeName);
    OzoneBucket ozoneBucket = volume.getBucket("s3bucket");

    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
        "STANDARD");

    ObjectEndpoint rest = new ObjectEndpoint();
    rest.setHeaders(headers);
    rest.setClient(client);

    Response response = rest.initiateMultipartUpload(bucket, key, "");

    assertEquals(200, response.getStatus());
    MultipartUploadInitiateResponse multipartUploadInitiateResponse =
        (MultipartUploadInitiateResponse) response.getEntity();
    assertNotNull(multipartUploadInitiateResponse.getUploadID());
    String uploadID = multipartUploadInitiateResponse.getUploadID();

    // Calling again for the same key should return a different uploadID.
    response = rest.initiateMultipartUpload(bucket, key, "");

    assertEquals(200, response.getStatus());
    multipartUploadInitiateResponse =
        (MultipartUploadInitiateResponse) response.getEntity();
    assertNotNull(multipartUploadInitiateResponse.getUploadID());
    assertNotEquals(uploadID, multipartUploadInitiateResponse.getUploadID());
  }
}