Merge pull request #309 from andreisavu/681-s3-rrs

Issue 681. Enhance jclouds to support aws-s3 Reduced Redundancy Storage (RRS)
This commit is contained in:
Adrian Cole 2012-01-11 15:47:45 -08:00
commit f7b32779a7
18 changed files with 326 additions and 49 deletions

View File

@ -232,6 +232,12 @@ public class S3AsyncBlobStore extends BaseAsyncBlobStore {
*/
@Override
public ListenableFuture<String> putBlob(String container, Blob blob) {
return putBlob(container, blob, PutOptions.NONE);
}
@Override
public ListenableFuture<String> putBlob(String container, Blob blob, PutOptions overrides) {
// TODO: Make use of options overrides
PutObjectOptions options = new PutObjectOptions();
try {
AccessControlList acl = bucketAcls.getUnchecked(container);
@ -256,12 +262,6 @@ public class S3AsyncBlobStore extends BaseAsyncBlobStore {
return async.deleteObject(container, key);
}
@Override
public ListenableFuture<String> putBlob(String container, Blob blob, PutOptions options) {
// TODO implement options
return putBlob(container, blob);
}
@Override
public ListenableFuture<Boolean> createContainerInLocation(Location location, String container,
CreateContainerOptions options) {

View File

@ -232,15 +232,7 @@ public class S3BlobStore extends BaseBlobStore {
*/
@Override
public String putBlob(String container, Blob blob) {
PutObjectOptions options = new PutObjectOptions();
try {
AccessControlList acl = bucketAcls.getUnchecked(container);
if (acl != null && acl.hasPermission(GroupGranteeURI.ALL_USERS, Permission.READ))
options.withAcl(CannedAccessPolicy.PUBLIC_READ);
} catch (CacheLoader.InvalidCacheLoadException e) {
// nulls not permitted from cache loader
}
return sync.putObject(container, blob2Object.apply(blob), options);
return putBlob(container, blob, PutOptions.NONE);
}
/**
@ -252,9 +244,17 @@ public class S3BlobStore extends BaseBlobStore {
* object
*/
@Override
public String putBlob(String container, Blob blob, PutOptions options) {
// TODO implement options
return putBlob(container, blob);
public String putBlob(String container, Blob blob, PutOptions overrides) {
// TODO: Make use of options overrides
PutObjectOptions options = new PutObjectOptions();
try {
AccessControlList acl = bucketAcls.getUnchecked(container);
if (acl != null && acl.hasPermission(GroupGranteeURI.ALL_USERS, Permission.READ))
options.withAcl(CannedAccessPolicy.PUBLIC_READ);
} catch (CacheLoader.InvalidCacheLoadException e) {
// nulls not permitted from cache loader
}
return sync.putObject(container, blob2Object.apply(blob), options);
}
/**

View File

@ -28,9 +28,11 @@ import org.jclouds.http.HttpUtils;
import org.jclouds.rest.InvocationContext;
import org.jclouds.rest.internal.GeneratedHttpRequest;
import org.jclouds.s3.domain.MutableObjectMetadata;
import org.jclouds.s3.domain.ObjectMetadata;
import org.jclouds.s3.domain.internal.MutableObjectMetadataImpl;
import com.google.common.base.Function;
import org.jclouds.s3.reference.S3Headers;
/**
* @author Adrian Cole

View File

@ -34,6 +34,7 @@ import org.jclouds.s3.domain.CannedAccessPolicy;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.Multimap;
import org.jclouds.s3.domain.ObjectMetadata;
/**
* Contains options supported in the REST API for the PUT object operation.

View File

@ -34,12 +34,12 @@ public class PutOptions implements Cloneable {
public static final ImmutablePutOptions NONE = new ImmutablePutOptions(new PutOptions());
private boolean multipart;
private boolean multipart = false;
public PutOptions() {
}
PutOptions(boolean multipart) {
public PutOptions(boolean multipart) {
this.multipart = multipart;
}

View File

@ -22,6 +22,7 @@ import static org.jclouds.blobstore.attr.BlobScopes.CONTAINER;
import java.util.Map;
import org.jclouds.aws.s3.blobstore.options.AWSS3PutObjectOptions;
import org.jclouds.javax.annotation.Nullable;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
@ -54,9 +55,12 @@ import org.jclouds.s3.Bucket;
import org.jclouds.s3.S3AsyncClient;
import org.jclouds.s3.S3Client;
import org.jclouds.s3.binders.BindAsHostPrefixIfConfigured;
import org.jclouds.s3.binders.BindS3ObjectMetadataToRequest;
import org.jclouds.s3.domain.ObjectMetadata;
import org.jclouds.s3.domain.S3Object;
import org.jclouds.s3.filters.RequestAuthorizeSignature;
import org.jclouds.s3.functions.BindRegionToXmlPayload;
import org.jclouds.s3.functions.ObjectKey;
import org.jclouds.s3.functions.ReturnFalseIfBucketAlreadyOwnedByYouOrIllegalState;
import org.jclouds.s3.options.PutBucketOptions;
import org.jclouds.s3.options.PutObjectOptions;
@ -74,6 +78,7 @@ import com.google.common.util.concurrent.ListenableFuture;
@RequestFilters(RequestAuthorizeSignature.class)
@BlobScope(CONTAINER)
public interface AWSS3AsyncClient extends S3AsyncClient {
/**
* @see S3Client#putBucketInRegion
*/

View File

@ -21,30 +21,33 @@ package org.jclouds.aws.s3;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.jclouds.aws.s3.blobstore.options.AWSS3PutObjectOptions;
import org.jclouds.concurrent.Timeout;
import org.jclouds.io.Payload;
import org.jclouds.s3.S3Client;
import org.jclouds.s3.domain.ObjectMetadata;
import org.jclouds.s3.domain.S3Object;
import org.jclouds.s3.options.PutObjectOptions;
/**
* Provides access to amazon-specific S3 features
*
*
* @author Adrian Cole
* @see AWSS3AsyncClient
*/
@Timeout(duration = 90, timeUnit = TimeUnit.SECONDS)
public interface AWSS3Client extends S3Client {
/**
* This operation initiates a multipart upload and returns an upload ID. This upload ID is used
* to associate all the parts in the specific multipart upload. You specify this upload ID in
* each of your subsequent upload part requests (see Upload Part). You also include this upload
* ID in the final request to either complete or abort the multipart upload request.
*
*
* <h4>Note</h4> If you create an object using the multipart upload APIs, currently you cannot
* copy the object between regions.
*
*
*
*
* @param bucketName
* namespace of the object you are to upload
* @param objectMetadata
@ -61,8 +64,8 @@ public interface AWSS3Client extends S3Client {
* parts will be freed. However, if any part uploads are currently in progress, those part
* uploads might or might not succeed. As a result, it might be necessary to abort a given
* multipart upload multiple times in order to completely free all storage consumed by all parts.
*
*
*
*
* @param bucketName
* namespace of the object you are deleting
* @param key
@ -77,20 +80,20 @@ public interface AWSS3Client extends S3Client {
* Initiate Multipart Upload) before you can upload any part. In response to your initiate
* request. Amazon S3 returns an upload ID, a unique identifier, that you must include in your
* upload part request.
*
*
* <p/>
* Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies
* a part and also defines its position within the object being created. If you upload a new part
* using the same part number that was used with a previous part, the previously uploaded part is
* overwritten. Each part must be at least 5 MB in size, except the last part. There is no size
* limit on the last part of your multipart upload.
*
*
* <p/>
* To ensure that data is not corrupted when traversing the network, specify the Content-MD5
* header in the upload part request. Amazon S3 checks the part data against the provided MD5
* value. If they do not match, Amazon S3 returns an error.
*
*
*
*
* @param bucketName
* namespace of the object you are storing
* @param key
@ -109,7 +112,7 @@ public interface AWSS3Client extends S3Client {
String uploadPart(String bucketName, String key, int partNumber, String uploadId, Payload part);
/**
*
*
 * This operation completes a multipart upload by assembling previously uploaded parts.
* <p/>
* You first initiate the multipart upload and then upload all parts using the Upload Parts
@ -129,7 +132,7 @@ public interface AWSS3Client extends S3Client {
* <p/>
* Note that if Complete Multipart Upload fails, applications should be prepared to retry the
* failed requests.
*
*
* @param bucketName
* namespace of the object you are deleting
* @param key

View File

@ -25,9 +25,12 @@ import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import com.google.common.cache.CacheLoader;
import org.jclouds.Constants;
import org.jclouds.aws.s3.AWSS3AsyncClient;
import org.jclouds.aws.s3.AWSS3Client;
import org.jclouds.aws.s3.blobstore.options.AWSS3PutObjectOptions;
import org.jclouds.aws.s3.blobstore.options.AWSS3PutOptions;
import org.jclouds.aws.s3.blobstore.strategy.AsyncMultipartUploadStrategy;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Blob;
@ -37,6 +40,8 @@ import org.jclouds.blobstore.strategy.internal.FetchBlobMetadata;
import org.jclouds.blobstore.util.BlobUtils;
import org.jclouds.collect.Memoized;
import org.jclouds.domain.Location;
import org.jclouds.s3.S3AsyncClient;
import org.jclouds.s3.S3Client;
import org.jclouds.s3.blobstore.S3AsyncBlobStore;
import org.jclouds.s3.blobstore.functions.BlobToObject;
import org.jclouds.s3.blobstore.functions.BucketToResourceList;
@ -49,14 +54,20 @@ import org.jclouds.s3.domain.AccessControlList;
import com.google.common.base.Supplier;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ListenableFuture;
import org.jclouds.s3.domain.CannedAccessPolicy;
import org.jclouds.s3.domain.ObjectMetadata;
import static org.jclouds.s3.domain.ObjectMetadata.StorageClass.REDUCED_REDUNDANCY;
/**
*
* @author Tibor Kiss
*
* @author Tibor Kiss, Andrei Savu
*/
public class AWSS3AsyncBlobStore extends S3AsyncBlobStore {
private final Provider<AsyncMultipartUploadStrategy> multipartUploadStrategy;
private final LoadingCache<String, AccessControlList> bucketAcls;
private final BlobToObject blob2Object;
@Inject
public AWSS3AsyncBlobStore(BlobStoreContext context, BlobUtils blobUtils,
@ -71,12 +82,39 @@ public class AWSS3AsyncBlobStore extends S3AsyncBlobStore {
container2BucketListOptions, bucket2ResourceList, object2Blob, blob2ObjectGetOptions, blob2Object,
object2BlobMd, fetchBlobMetadataProvider, bucketAcls);
this.multipartUploadStrategy = multipartUploadStrategy;
this.bucketAcls = bucketAcls;
this.blob2Object = blob2Object;
}
@Override
public ListenableFuture<String> putBlob(String container, Blob blob, PutOptions options) {
// need to use a provider if the strategy object is stateful
return multipartUploadStrategy.get().execute(container, blob);
if (options.isMultipart()) {
// need to use a provider if the strategy object is stateful
return multipartUploadStrategy.get().execute(container, blob, options);
} else if (options instanceof AWSS3PutOptions &&
((AWSS3PutOptions) options).getStorageClass() == REDUCED_REDUNDANCY) {
return putBlobWithReducedRedundancy(container, blob);
} else {
return super.putBlob(container, blob, options);
}
}
private ListenableFuture<String> putBlobWithReducedRedundancy(String container, Blob blob) {
AWSS3PutObjectOptions options = new AWSS3PutObjectOptions();
try {
AccessControlList acl = bucketAcls.getUnchecked(container);
if (acl != null && acl.hasPermission(AccessControlList.GroupGranteeURI.ALL_USERS,
AccessControlList.Permission.READ)) {
options.withAcl(CannedAccessPolicy.PUBLIC_READ);
}
options.storageClass(ObjectMetadata.StorageClass.REDUCED_REDUNDANCY);
} catch (CacheLoader.InvalidCacheLoadException e) {
// nulls not permitted from cache loader
}
return S3AsyncClient.class.cast(getContext().getProviderSpecificContext().getApi())
.putObject(container, blob2Object.apply(blob), options);
}
}

View File

@ -23,7 +23,10 @@ import java.util.Set;
import javax.inject.Inject;
import javax.inject.Provider;
import com.google.common.cache.CacheLoader;
import org.jclouds.aws.s3.AWSS3Client;
import org.jclouds.aws.s3.blobstore.options.AWSS3PutObjectOptions;
import org.jclouds.aws.s3.blobstore.options.AWSS3PutOptions;
import org.jclouds.aws.s3.blobstore.strategy.MultipartUploadStrategy;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Blob;
@ -33,6 +36,7 @@ import org.jclouds.blobstore.strategy.internal.FetchBlobMetadata;
import org.jclouds.blobstore.util.BlobUtils;
import org.jclouds.collect.Memoized;
import org.jclouds.domain.Location;
import org.jclouds.s3.S3Client;
import org.jclouds.s3.blobstore.S3BlobStore;
import org.jclouds.s3.blobstore.functions.BlobToObject;
import org.jclouds.s3.blobstore.functions.BucketToResourceList;
@ -44,15 +48,22 @@ import org.jclouds.s3.domain.AccessControlList;
import com.google.common.base.Supplier;
import com.google.common.cache.LoadingCache;
import org.jclouds.s3.domain.CannedAccessPolicy;
import org.jclouds.s3.domain.ObjectMetadata;
import org.jclouds.s3.options.PutObjectOptions;
import static org.jclouds.s3.domain.ObjectMetadata.StorageClass.REDUCED_REDUNDANCY;
/**
* Proived AWS S3 specific extensions.
*
* @author Tibor Kiss
* Provide AWS S3 specific extensions.
*
* @author Tibor Kiss, Andrei Savu
*/
public class AWSS3BlobStore extends S3BlobStore {
private final Provider<MultipartUploadStrategy> multipartUploadStrategy;
private final LoadingCache<String, AccessControlList> bucketAcls;
private final BlobToObject blob2Object;
@Inject
AWSS3BlobStore(BlobStoreContext context, BlobUtils blobUtils, Supplier<Location> defaultLocation,
@ -66,11 +77,39 @@ public class AWSS3BlobStore extends S3BlobStore {
bucket2ResourceList, object2Blob, blob2ObjectGetOptions, blob2Object, object2BlobMd,
fetchBlobMetadataProvider, bucketAcls);
this.multipartUploadStrategy = multipartUploadStrategy;
this.bucketAcls = bucketAcls;
this.blob2Object = blob2Object;
}
@Override
public String putBlob(String container, Blob blob, PutOptions options) {
// need to use a provider if the strategy object is stateful
return multipartUploadStrategy.get().execute(container, blob);
if (options.isMultipart()) {
// need to use a provider if the strategy object is stateful
return multipartUploadStrategy.get().execute(container, blob, options);
} else if ((options instanceof AWSS3PutOptions) &&
(((AWSS3PutOptions) options).getStorageClass() == REDUCED_REDUNDANCY)) {
return putBlobWithReducedRedundancy(container, blob);
} else {
return super.putBlob(container, blob, options);
}
}
private String putBlobWithReducedRedundancy(String container, Blob blob) {
AWSS3PutObjectOptions options = new AWSS3PutObjectOptions();
try {
AccessControlList acl = bucketAcls.getUnchecked(container);
if (acl != null && acl.hasPermission(AccessControlList.GroupGranteeURI.ALL_USERS,
AccessControlList.Permission.READ)) {
options.withAcl(CannedAccessPolicy.PUBLIC_READ);
}
options.storageClass(ObjectMetadata.StorageClass.REDUCED_REDUNDANCY);
} catch (CacheLoader.InvalidCacheLoadException e) {
// nulls not permitted from cache loader
}
return S3Client.class.cast(getContext().getProviderSpecificContext().getApi())
.putObject(container, blob2Object.apply(blob), options);
}
}

View File

@ -28,6 +28,7 @@ import org.jclouds.aws.s3.blobstore.strategy.internal.ParallelMultipartUploadStr
import org.jclouds.aws.s3.blobstore.strategy.internal.SequentialMultipartUploadStrategy;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.internal.BlobStoreContextImpl;
import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.s3.blobstore.S3AsyncBlobStore;
import org.jclouds.s3.blobstore.S3BlobStore;
import org.jclouds.s3.blobstore.config.S3BlobStoreContextModule;

View File

@ -0,0 +1,71 @@
/**
* Licensed to jclouds, Inc. (jclouds) under one or more
* contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. jclouds licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.jclouds.aws.s3.blobstore.options;
import org.jclouds.s3.domain.CannedAccessPolicy;
import org.jclouds.s3.domain.ObjectMetadata;
import org.jclouds.s3.options.PutObjectOptions;
import org.jclouds.s3.reference.S3Headers;
/**
* Contains options supported in the AWS S3 REST API for the PUT object operation
*
* @see PutObjectOptions
* @author Andrei Savu
*/
public class AWSS3PutObjectOptions extends PutObjectOptions {

   public static class Builder {

      /**
       * @see AWSS3PutObjectOptions#storageClass
       */
      public static AWSS3PutObjectOptions storageClass(ObjectMetadata.StorageClass storageClass) {
         return new AWSS3PutObjectOptions().storageClass(storageClass);
      }

      /**
       * @see AWSS3PutObjectOptions#withAcl
       */
      public static AWSS3PutObjectOptions withAcl(CannedAccessPolicy acl) {
         return new AWSS3PutObjectOptions().withAcl(acl);
      }
   }

   // Defaults to STANDARD so that no storage-class header is emitted unless the
   // caller explicitly opts in to another class (e.g. REDUCED_REDUNDANCY).
   private ObjectMetadata.StorageClass storageClass = ObjectMetadata.StorageClass.STANDARD;

   /**
    * Selects the S3 storage class for the object being put. Any class other than
    * STANDARD is sent as the x-amz-storage-class request header.
    * <p/>
    * NOTE(review): switching back to STANDARD after a non-STANDARD class was set
    * does not clear a previously emitted header — confirm this reset path is never
    * exercised, or remove the header explicitly.
    *
    * @param storageClass the storage class to apply on put
    * @return this, for chaining
    */
   public AWSS3PutObjectOptions storageClass(ObjectMetadata.StorageClass storageClass) {
      this.storageClass = storageClass;
      if (ObjectMetadata.StorageClass.STANDARD != storageClass) {
         replaceHeader(S3Headers.STORAGE_CLASS, storageClass.toString());
      }
      return this;
   }

   /**
    * @return the storage class currently selected (STANDARD by default)
    */
   public ObjectMetadata.StorageClass getStorageClass() {
      return storageClass;
   }

   @Override
   public AWSS3PutObjectOptions withAcl(CannedAccessPolicy acl) {
      // Narrow the covariant return so builder-style chaining keeps the subtype.
      return (AWSS3PutObjectOptions) super.withAcl(acl);
   }
}

View File

@ -0,0 +1,80 @@
/**
* Licensed to jclouds, Inc. (jclouds) under one or more
* contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. jclouds licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.jclouds.aws.s3.blobstore.options;
import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.s3.domain.ObjectMetadata;
/**
* Contains AWS-S3 specific options supported in the put blob operation
*
* @author Andrei Savu
*/
public class AWSS3PutOptions extends PutOptions {

   public static class Builder {

      /**
       * @see AWSS3PutOptions#multipart()
       */
      public static AWSS3PutOptions multipart() {
         // PutOptions#multipart() returns the base type; narrow it back for chaining.
         return (AWSS3PutOptions) new AWSS3PutOptions().multipart();
      }

      /**
       * @see AWSS3PutOptions#storageClass
       */
      public static AWSS3PutOptions storageClass(ObjectMetadata.StorageClass storageClass) {
         return new AWSS3PutOptions().storageClass(storageClass);
      }
   }

   // STANDARD unless the caller explicitly requests another class (e.g. RRS).
   private ObjectMetadata.StorageClass storageClass;

   public AWSS3PutOptions() {
      this.storageClass = ObjectMetadata.StorageClass.STANDARD;
   }

   /**
    * @param multipart    whether the put should use the multipart upload API
    * @param storageClass the S3 storage class to apply on put
    */
   public AWSS3PutOptions(boolean multipart, ObjectMetadata.StorageClass storageClass) {
      super(multipart);
      this.storageClass = storageClass;
   }

   /**
    * Fluent setter for the S3 storage class to apply on put.
    *
    * @return this, for chaining
    */
   public AWSS3PutOptions storageClass(ObjectMetadata.StorageClass storageClass) {
      this.storageClass = storageClass;
      return this;
   }

   /**
    * @return the storage class currently selected (STANDARD by default)
    */
   public ObjectMetadata.StorageClass getStorageClass() {
      return storageClass;
   }

   @Override
   public AWSS3PutOptions clone() {
      return new AWSS3PutOptions(isMultipart(), storageClass);
   }

   @Override
   public String toString() {
      StringBuilder repr = new StringBuilder();
      repr.append("[multipart=").append(isMultipart());
      repr.append(" storageClass=").append(storageClass).append("]");
      return repr.toString();
   }
}

View File

@ -23,6 +23,7 @@ import org.jclouds.blobstore.domain.Blob;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.inject.ImplementedBy;
import org.jclouds.blobstore.options.PutOptions;
/**
* @see <a href="http://docs.amazonwebservices.com/AmazonS3/latest/dev/index.html?qfacts.html"
@ -32,6 +33,6 @@ import com.google.inject.ImplementedBy;
@ImplementedBy(ParallelMultipartUploadStrategy.class)
public interface AsyncMultipartUploadStrategy extends MultipartUpload {
ListenableFuture<String> execute(String container, Blob blob);
ListenableFuture<String> execute(String container, Blob blob, PutOptions options);
}

View File

@ -22,6 +22,7 @@ import org.jclouds.aws.s3.blobstore.strategy.internal.SequentialMultipartUploadS
import org.jclouds.blobstore.domain.Blob;
import com.google.inject.ImplementedBy;
import org.jclouds.blobstore.options.PutOptions;
/**
* @see <a href="http://docs.amazonwebservices.com/AmazonS3/latest/dev/index.html?qfacts.html"
@ -31,5 +32,5 @@ import com.google.inject.ImplementedBy;
@ImplementedBy(SequentialMultipartUploadStrategy.class)
public interface MultipartUploadStrategy extends MultipartUpload {
String execute(String container, Blob blob);
String execute(String container, Blob blob, PutOptions options);
}

View File

@ -45,6 +45,7 @@ import org.jclouds.aws.s3.blobstore.AWSS3AsyncBlobStore;
import org.jclouds.aws.s3.blobstore.strategy.AsyncMultipartUploadStrategy;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.internal.BlobRuntimeException;
import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.blobstore.reference.BlobStoreConstants;
import org.jclouds.concurrent.Futures;
import org.jclouds.io.Payload;
@ -153,7 +154,7 @@ public class ParallelMultipartUploadStrategy implements AsyncMultipartUploadStra
}
@Override
public ListenableFuture<String> execute(final String container, final Blob blob) {
public ListenableFuture<String> execute(final String container, final Blob blob, final PutOptions options) {
return Futures.makeListenable(
ioWorkerExecutor.submit(new Callable<String>() {
@Override
@ -240,7 +241,7 @@ public class ParallelMultipartUploadStrategy implements AsyncMultipartUploadStra
throw rtex;
}
} else {
ListenableFuture<String> futureETag = ablobstore.putBlob(container, blob);
ListenableFuture<String> futureETag = ablobstore.putBlob(container, blob, options);
return maxTime != null ?
futureETag.get(maxTime,TimeUnit.SECONDS) : futureETag.get();
}

View File

@ -31,6 +31,7 @@ import org.jclouds.aws.s3.blobstore.AWSS3BlobStore;
import org.jclouds.aws.s3.blobstore.strategy.MultipartUploadStrategy;
import org.jclouds.blobstore.KeyNotFoundException;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.blobstore.reference.BlobStoreConstants;
import org.jclouds.io.Payload;
import org.jclouds.io.PayloadSlicer;
@ -88,7 +89,7 @@ public class SequentialMultipartUploadStrategy implements MultipartUploadStrateg
}
@Override
public String execute(String container, Blob blob) {
public String execute(String container, Blob blob, PutOptions options) {
String key = blob.getMetadata().getName();
Payload payload = blob.getPayload();
MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm();
@ -125,7 +126,7 @@ public class SequentialMultipartUploadStrategy implements MultipartUploadStrateg
throw rtex;
}
} else {
return ablobstore.putBlob(container, blob);
return ablobstore.putBlob(container, blob, options);
}
}
}

View File

@ -21,8 +21,11 @@ package org.jclouds.aws.s3;
import static com.google.common.io.ByteStreams.join;
import static com.google.common.io.ByteStreams.newInputStreamSupplier;
import static com.google.common.io.ByteStreams.toByteArray;
import static org.jclouds.aws.s3.blobstore.options.AWSS3PutOptions.Builder.storageClass;
import static org.jclouds.crypto.CryptoStreams.md5;
import static org.jclouds.io.Payloads.newByteArrayPayload;
import static org.jclouds.s3.domain.ObjectMetadata.StorageClass;
import static org.jclouds.s3.options.ListBucketOptions.Builder.withPrefix;
import static org.testng.Assert.assertEquals;
import java.io.ByteArrayInputStream;
@ -32,6 +35,8 @@ import java.io.IOException;
import java.io.InputStream;
import java.util.zip.GZIPInputStream;
import org.jclouds.aws.s3.blobstore.AWSS3BlobStore;
import org.jclouds.aws.s3.blobstore.options.AWSS3PutOptions;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.KeyNotFoundException;
import org.jclouds.blobstore.domain.Blob;
@ -39,9 +44,13 @@ import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.http.BaseJettyTest;
import org.jclouds.http.apachehc.config.ApacheHCHttpCommandExecutorServiceModule;
import org.jclouds.io.Payload;
import org.jclouds.s3.S3Client;
import org.jclouds.s3.S3ClientLiveTest;
import org.jclouds.s3.domain.ListBucketResponse;
import org.jclouds.s3.domain.ObjectMetadata;
import org.jclouds.s3.domain.ObjectMetadataBuilder;
import org.jclouds.s3.domain.S3Object;
import org.jclouds.s3.options.ListBucketOptions;
import org.testng.ITestContext;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
@ -154,6 +163,29 @@ public class AWSS3ClientLiveTest extends S3ClientLiveTest {
Blob blob = blobStore.blobBuilder("const.txt")
.payload(new File("target/const.txt")).build();
blobStore.putBlob(containerName, blob, PutOptions.Builder.multipart());
} finally {
returnContainer(containerName);
}
}
public void testPutWithReducedRedundancyStorage() throws InterruptedException {
String containerName = getContainerName();
try {
String blobName = "test-rrs";
BlobStore blobStore = context.getBlobStore();
blobStore.createContainerInLocation(null, containerName);
Blob blob = blobStore.blobBuilder(blobName).payload("something").build();
blobStore.putBlob(containerName, blob,
storageClass(StorageClass.REDUCED_REDUNDANCY));
S3Client s3Client = S3Client.class.cast(context.getProviderSpecificContext().getApi());
ListBucketResponse response = s3Client.listBucket(containerName, withPrefix(blobName));
ObjectMetadata metadata = response.iterator().next();
assertEquals(metadata.getStorageClass(), StorageClass.REDUCED_REDUNDANCY);
} finally {
returnContainer(containerName);
}

View File

@ -33,6 +33,7 @@ import org.jclouds.aws.s3.blobstore.AWSS3BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.domain.MutableBlobMetadata;
import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.io.MutableContentMetadata;
import org.jclouds.io.Payload;
import org.jclouds.io.PayloadSlicer;
@ -102,7 +103,7 @@ public class SequentialMultipartUploadStrategyTest {
replay(ometa);
SequentialMultipartUploadStrategy strategy = new SequentialMultipartUploadStrategy(ablobStore, slicer);
strategy.execute(container, blob);
strategy.execute(container, blob, PutOptions.NONE);
verify(ablobStore);
verify(slicer);
@ -167,7 +168,7 @@ public class SequentialMultipartUploadStrategyTest {
SequentialMultipartUploadStrategy strategy = new SequentialMultipartUploadStrategy(ablobStore, slicer);
try {
strategy.execute(container, blob);
strategy.execute(container, blob, PutOptions.NONE);
fail("Should throw RuntimeException with TimeoutException cause!");
} catch (RuntimeException rtex) {
TimeoutException timeout = Throwables2.getFirstThrowableOfType(rtex, TimeoutException.class);