mirror of https://github.com/apache/jclouds.git
Merge remote-tracking branch 'jclouds-labs-aws-local/promote-glacier-moved' into promoted-glacier
This commit is contained in:
commit afa92c7478

@@ -0,0 +1,7 @@
AWS Glacier
======================
You can find more information about our Glacier provider in this [guide](http://jclouds.apache.org/guides/glacier/)

License
-------
Licensed under the Apache License, Version 2.0
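
A minimal sketch of wiring the promoted provider up through the standard jclouds ContextBuilder; the "glacier" id is registered by GlacierApiMetadata below, while the class name, credentials, and vault name here are placeholders.

import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;

public class GlacierQuickstart {
   public static void main(String[] args) {
      // Placeholder credentials; the "glacier" id resolves to GlacierApiMetadata.
      BlobStoreContext context = ContextBuilder.newBuilder("glacier")
            .credentials("ACCESS_KEY_ID", "SECRET_ACCESS_KEY")
            .buildView(BlobStoreContext.class);
      try {
         BlobStore store = context.blobStore();
         // Vaults map to containers in the BlobStore view.
         store.createContainerInLocation(null, "examplevault");
      } finally {
         context.close();
      }
   }
}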

@@ -0,0 +1,2 @@
Export-Package: \
  org.jclouds.glacier.*;version="${project.version}";-noimport:=true

@@ -0,0 +1,250 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements. See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
    The ASF licenses this file to You under the Apache License, Version 2.0
    (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.

-->
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.apache.jclouds</groupId>
    <artifactId>jclouds-project</artifactId>
    <version>2.5.0-SNAPSHOT</version>
    <relativePath />
  </parent>

  <!-- TODO: when out of labs, switch to org.jclouds.api -->
  <groupId>org.apache.jclouds.labs</groupId>
  <artifactId>glacier</artifactId>
  <name>Apache jclouds :: AWS :: Glacier API</name>
  <description>jclouds components to access an implementation of glacier</description>

  <properties>
    <test.aws.identity>FIXME_IDENTITY</test.aws.identity>
    <test.aws.credential>FIXME_CREDENTIAL</test.aws.credential>
    <test.glacier.endpoint>https://glacier.us-east-1.amazonaws.com</test.glacier.endpoint>
    <test.glacier.api-version>2012-06-01</test.glacier.api-version>
    <test.glacier.build-version />
    <test.glacier.identity>${test.aws.identity}</test.glacier.identity>
    <test.glacier.credential>${test.aws.credential}</test.glacier.credential>
  </properties>

  <repositories>
    <repository>
      <id>apache-snapshots</id>
      <url>https://repository.apache.org/content/repositories/snapshots</url>
      <releases>
        <enabled>false</enabled>
      </releases>
      <snapshots>
        <enabled>true</enabled>
      </snapshots>
    </repository>
  </repositories>

  <!-- For modernizer, which depends on jclouds-resources snapshot. -->
  <pluginRepositories>
    <pluginRepository>
      <id>apache-snapshots</id>
      <url>https://repository.apache.org/content/repositories/snapshots</url>
      <releases>
        <enabled>false</enabled>
      </releases>
      <snapshots>
        <enabled>true</enabled>
      </snapshots>
    </pluginRepository>
  </pluginRepositories>

  <dependencies>
    <dependency>
      <groupId>org.apache.jclouds.api</groupId>
      <artifactId>sts</artifactId>
      <version>${project.parent.version}</version>
      <type>jar</type>
    </dependency>
    <dependency>
      <groupId>org.apache.jclouds</groupId>
      <artifactId>jclouds-blobstore</artifactId>
      <version>${project.parent.version}</version>
      <type>jar</type>
    </dependency>
    <dependency>
      <groupId>org.apache.jclouds</groupId>
      <artifactId>jclouds-core</artifactId>
      <version>${project.parent.version}</version>
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.jclouds</groupId>
      <artifactId>jclouds-blobstore</artifactId>
      <version>${project.parent.version}</version>
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.jclouds.driver</groupId>
      <artifactId>jclouds-log4j</artifactId>
      <version>${project.parent.version}</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.logging.log4j</groupId>
      <artifactId>log4j-core</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>com.squareup.okhttp3</groupId>
      <artifactId>mockwebserver</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.assertj</groupId>
      <artifactId>assertj-core</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>com.google.auto.service</groupId>
      <artifactId>auto-service</artifactId>
      <optional>true</optional>
    </dependency>
  </dependencies>

  <profiles>
    <profile>
      <id>live</id>
      <build>
        <plugins>
          <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-surefire-plugin</artifactId>
            <executions>
              <execution>
                <id>integration</id>
                <phase>integration-test</phase>
                <goals>
                  <goal>test</goal>
                </goals>
                <configuration>
                  <suiteXmlFiles>
                    <suiteXmlFile>src/test/resources/testng.xml</suiteXmlFile>
                  </suiteXmlFiles>
                  <groups>live</groups>
                  <excludedGroups>livelong</excludedGroups>
                  <systemPropertyVariables>
                    <jclouds.blobstore.httpstream.url>
                      ${jclouds.blobstore.httpstream.url}
                    </jclouds.blobstore.httpstream.url>
                    <jclouds.blobstore.httpstream.md5>
                      ${jclouds.blobstore.httpstream.md5}
                    </jclouds.blobstore.httpstream.md5>
                    <test.glacier.endpoint>${test.glacier.endpoint}</test.glacier.endpoint>
                    <test.glacier.api-version>${test.glacier.api-version}</test.glacier.api-version>
                    <test.glacier.build-version>${test.glacier.build-version}</test.glacier.build-version>
                    <test.glacier.identity>${test.glacier.identity}</test.glacier.identity>
                    <test.glacier.credential>${test.glacier.credential}</test.glacier.credential>
                  </systemPropertyVariables>
                </configuration>
              </execution>
            </executions>
          </plugin>
        </plugins>
      </build>
    </profile>
    <profile>
      <id>livelong</id>
      <build>
        <plugins>
          <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-surefire-plugin</artifactId>
            <executions>
              <execution>
                <id>integration</id>
                <phase>integration-test</phase>
                <goals>
                  <goal>test</goal>
                </goals>
                <configuration>
                  <suiteXmlFiles>
                    <suiteXmlFile>src/test/resources/testng.xml</suiteXmlFile>
                  </suiteXmlFiles>
                  <groups>live</groups>
                  <excludedGroups>liveshort, setup</excludedGroups>
                  <systemPropertyVariables>
                    <jclouds.blobstore.httpstream.url>
                      ${jclouds.blobstore.httpstream.url}
                    </jclouds.blobstore.httpstream.url>
                    <jclouds.blobstore.httpstream.md5>
                      ${jclouds.blobstore.httpstream.md5}
                    </jclouds.blobstore.httpstream.md5>
                    <test.glacier.endpoint>${test.glacier.endpoint}</test.glacier.endpoint>
                    <test.glacier.api-version>${test.glacier.api-version}</test.glacier.api-version>
                    <test.glacier.build-version>${test.glacier.build-version}</test.glacier.build-version>
                    <test.glacier.identity>${test.glacier.identity}</test.glacier.identity>
                    <test.glacier.credential>${test.glacier.credential}</test.glacier.credential>
                  </systemPropertyVariables>
                </configuration>
              </execution>
            </executions>
          </plugin>
        </plugins>
      </build>
    </profile>
    <profile>
      <id>livelongsetup</id>
      <build>
        <plugins>
          <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-surefire-plugin</artifactId>
            <executions>
              <execution>
                <id>integration</id>
                <phase>integration-test</phase>
                <goals>
                  <goal>test</goal>
                </goals>
                <configuration>
                  <suiteXmlFiles>
                    <suiteXmlFile>src/test/resources/testng.xml</suiteXmlFile>
                  </suiteXmlFiles>
                  <groups>live</groups>
                  <excludedGroups>liveshort, longtest</excludedGroups>
                  <systemPropertyVariables>
                    <jclouds.blobstore.httpstream.url>
                      ${jclouds.blobstore.httpstream.url}
                    </jclouds.blobstore.httpstream.url>
                    <jclouds.blobstore.httpstream.md5>
                      ${jclouds.blobstore.httpstream.md5}
                    </jclouds.blobstore.httpstream.md5>
                    <test.glacier.endpoint>${test.glacier.endpoint}</test.glacier.endpoint>
                    <test.glacier.api-version>${test.glacier.api-version}</test.glacier.api-version>
                    <test.glacier.build-version>${test.glacier.build-version}</test.glacier.build-version>
                    <test.glacier.identity>${test.glacier.identity}</test.glacier.identity>
                    <test.glacier.credential>${test.glacier.credential}</test.glacier.credential>
                  </systemPropertyVariables>
                </configuration>
              </execution>
            </executions>
          </plugin>
        </plugins>
      </build>
    </profile>
  </profiles>
</project>

@@ -0,0 +1,95 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier;

import static org.jclouds.Constants.PROPERTY_IDEMPOTENT_METHODS;
import static org.jclouds.aws.reference.AWSConstants.PROPERTY_HEADER_TAG;
import static org.jclouds.reflect.Reflection2.typeToken;

import java.net.URI;
import java.util.Properties;

import org.jclouds.apis.ApiMetadata;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.glacier.blobstore.config.GlacierBlobStoreContextModule;
import org.jclouds.glacier.config.GlacierHttpApiModule;
import org.jclouds.glacier.config.GlacierParserModule;
import org.jclouds.glacier.reference.GlacierHeaders;
import org.jclouds.rest.internal.BaseHttpApiMetadata;

import com.google.auto.service.AutoService;
import com.google.common.collect.ImmutableSet;
import com.google.inject.Module;

/**
 * Implementation of ApiMetadata for the Amazon Glacier API.
 */
@AutoService(ApiMetadata.class)
public class GlacierApiMetadata extends BaseHttpApiMetadata {

   private static Builder builder() {
      return new Builder();
   }

   @Override
   public Builder toBuilder() {
      return builder().fromApiMetadata(this);
   }

   public GlacierApiMetadata() {
      this(builder());
   }

   protected GlacierApiMetadata(Builder builder) {
      super(builder);
   }

   public static Properties defaultProperties() {
      Properties properties = BaseHttpApiMetadata.defaultProperties();
      properties.setProperty(PROPERTY_HEADER_TAG, GlacierHeaders.DEFAULT_AMAZON_HEADERTAG);
      properties.setProperty(PROPERTY_IDEMPOTENT_METHODS, "DELETE,GET,HEAD,OPTIONS,POST,PUT");
      return properties;
   }

   public static class Builder extends BaseHttpApiMetadata.Builder<GlacierClient, Builder> {

      protected Builder() {
         super(GlacierClient.class);
         id("glacier")
               .name("Amazon Glacier API")
               .identityName("Access Key ID")
               .credentialName("Secret Access Key")
               .defaultEndpoint("https://glacier.us-east-1.amazonaws.com")
               .documentation(URI.create("http://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-api.html"))
               .version("2012-06-01")
               .defaultProperties(GlacierApiMetadata.defaultProperties())
               .view(typeToken(BlobStoreContext.class))
               .defaultModules(ImmutableSet.<Class<? extends Module>> of(GlacierHttpApiModule.class,
                     GlacierParserModule.class, GlacierBlobStoreContextModule.class));
      }

      @Override
      public GlacierApiMetadata build() {
         return new GlacierApiMetadata(this);
      }

      @Override
      protected Builder self() {
         return this;
      }
   }
}
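
Because the class is annotated with @AutoService(ApiMetadata.class), the metadata is discoverable by id at runtime through the ServiceLoader-backed registry; a small sketch (the class name here is illustrative):

import org.jclouds.apis.ApiMetadata;
import org.jclouds.apis.Apis;

public class GlacierMetadataLookup {
   public static void main(String[] args) {
      // Resolves the metadata registered above by its "glacier" id.
      ApiMetadata glacier = Apis.withId("glacier");
      System.out.println(glacier.getName() + " -> " + glacier.getDefaultEndpoint().get());
   }
}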

@@ -0,0 +1,482 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier;

import static org.jclouds.blobstore.attr.BlobScopes.CONTAINER;

import java.io.Closeable;
import java.net.URI;
import java.util.Map;

import javax.inject.Named;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;

import org.jclouds.Fallbacks.NullOnNotFoundOr404;
import org.jclouds.blobstore.attr.BlobScope;
import org.jclouds.glacier.binders.BindArchiveOutputRangeToHeaders;
import org.jclouds.glacier.binders.BindArchiveSizeToHeaders;
import org.jclouds.glacier.binders.BindContentRangeToHeaders;
import org.jclouds.glacier.binders.BindDescriptionToHeaders;
import org.jclouds.glacier.binders.BindHashesToHeaders;
import org.jclouds.glacier.binders.BindJobRequestToJsonPayload;
import org.jclouds.glacier.binders.BindMultipartTreeHashToHeaders;
import org.jclouds.glacier.binders.BindPartSizeToHeaders;
import org.jclouds.glacier.domain.ArchiveMetadataCollection;
import org.jclouds.glacier.domain.JobMetadata;
import org.jclouds.glacier.domain.JobRequest;
import org.jclouds.glacier.domain.MultipartUploadMetadata;
import org.jclouds.glacier.domain.PaginatedJobCollection;
import org.jclouds.glacier.domain.PaginatedMultipartUploadCollection;
import org.jclouds.glacier.domain.PaginatedVaultCollection;
import org.jclouds.glacier.domain.VaultMetadata;
import org.jclouds.glacier.fallbacks.FalseOnIllegalArgumentException;
import org.jclouds.glacier.filters.RequestAuthorizeSignature;
import org.jclouds.glacier.functions.GetPayloadFromHttpContent;
import org.jclouds.glacier.functions.ParseArchiveIdHeader;
import org.jclouds.glacier.functions.ParseArchiveMetadataCollectionFromHttpContent;
import org.jclouds.glacier.functions.ParseJobIdHeader;
import org.jclouds.glacier.functions.ParseJobMetadataFromHttpContent;
import org.jclouds.glacier.functions.ParseJobMetadataListFromHttpContent;
import org.jclouds.glacier.functions.ParseMultipartUploadIdHeader;
import org.jclouds.glacier.functions.ParseMultipartUploadListFromHttpContent;
import org.jclouds.glacier.functions.ParseMultipartUploadPartListFromHttpContent;
import org.jclouds.glacier.functions.ParseMultipartUploadTreeHashHeader;
import org.jclouds.glacier.functions.ParseVaultMetadataFromHttpContent;
import org.jclouds.glacier.functions.ParseVaultMetadataListFromHttpContent;
import org.jclouds.glacier.options.PaginationOptions;
import org.jclouds.glacier.predicates.validators.DescriptionValidator;
import org.jclouds.glacier.predicates.validators.PartSizeValidator;
import org.jclouds.glacier.predicates.validators.PayloadValidator;
import org.jclouds.glacier.predicates.validators.VaultNameValidator;
import org.jclouds.glacier.reference.GlacierHeaders;
import org.jclouds.glacier.util.ContentRange;
import org.jclouds.io.Payload;
import org.jclouds.rest.annotations.BinderParam;
import org.jclouds.rest.annotations.Fallback;
import org.jclouds.rest.annotations.Headers;
import org.jclouds.rest.annotations.ParamValidators;
import org.jclouds.rest.annotations.RequestFilters;
import org.jclouds.rest.annotations.ResponseParser;

import com.google.common.hash.HashCode;

/** Provides access to Amazon Glacier resources via their REST API. */
@Headers(keys = GlacierHeaders.VERSION, values = "2012-06-01")
@RequestFilters(RequestAuthorizeSignature.class)
@BlobScope(CONTAINER)
public interface GlacierClient extends Closeable {

   /**
    * Creates a new vault to store archives. An account can only create up to 1,000 vaults per region, but each vault
    * can contain an unlimited number of archives.
    *
    * @param vaultName
    *          A name for the Vault being created.
    * @return A reference to a URI pointing to the resource created.
    */
   @Named("CreateVault")
   @PUT
   @Path("/-/vaults/{vault}")
   URI createVault(@ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName);

   /**
    * Deletes a vault. A vault can only be deleted if there are no archives in it as of the last inventory computed
    * by Amazon and there have been no new uploads since then. This is very important, as Amazon only updates
    * inventories once every 24 hours.
    *
    * @param vaultName
    *          Name of the Vault being deleted.
    * @return False if the vault was not empty and therefore not deleted, true otherwise.
    */
   @Named("DeleteVault")
   @DELETE
   @Path("/-/vaults/{vault}")
   @Fallback(FalseOnIllegalArgumentException.class)
   boolean deleteVault(@ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName);

   /**
    * Retrieves the metadata for a vault. The response includes information such as the vault ARN, the creation date,
    * the number of archives contained within the vault and the total size of these archives. The number of archives
    * and the total size are based on the last inventory computed by Amazon, so the information may be outdated.
    *
    * @param vaultName
    *          Name of the Vault being described.
    * @return A VaultMetadata object containing all the information relevant to the vault if the vault exists,
    *         null otherwise.
    */
   @Named("DescribeVault")
   @GET
   @Path("/-/vaults/{vault}")
   @ResponseParser(ParseVaultMetadataFromHttpContent.class)
   @Fallback(NullOnNotFoundOr404.class)
   VaultMetadata describeVault(@ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName);

   /**
    * Lists vaults according to the specified options. By default this operation returns up to 1,000 vaults.
    *
    * @param options
    *          Options used for pagination.
    * @return A PaginatedVaultCollection object containing the list of vaults.
    */
   @Named("ListVaults")
   @GET
   @Path("/-/vaults")
   @ResponseParser(ParseVaultMetadataListFromHttpContent.class)
   PaginatedVaultCollection listVaults(PaginationOptions options);

   /**
    * @see GlacierClient#listVaults(PaginationOptions)
    */
   @Named("ListVaults")
   @GET
   @Path("/-/vaults")
   @ResponseParser(ParseVaultMetadataListFromHttpContent.class)
   PaginatedVaultCollection listVaults();

   /**
    * Stores an archive in a vault. Archives up to 4GB in size can be uploaded using this operation. Once the archive
    * is uploaded it is immutable, which means that neither the archive nor its description can be modified. Except
    * for the optional description, Glacier does not support additional metadata.
    *
    * @param vaultName
    *          Name of the Vault where the archive is being stored.
    * @param payload
    *          Payload to be uploaded.
    * @param description
    *          Description for the archive.
    * @return A String containing the Archive identifier in Amazon Glacier.
    */
   @Named("UploadArchive")
   @POST
   @Path("/-/vaults/{vault}/archives")
   @ResponseParser(ParseArchiveIdHeader.class)
   String uploadArchive(
         @ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @ParamValidators(PayloadValidator.class) @BinderParam(BindHashesToHeaders.class) Payload payload,
         @ParamValidators(DescriptionValidator.class) @BinderParam(BindDescriptionToHeaders.class) String description);

   /**
    * @see GlacierClient#uploadArchive(String, org.jclouds.io.Payload, String)
    */
   @Named("UploadArchive")
   @POST
   @Path("/-/vaults/{vault}/archives")
   @ResponseParser(ParseArchiveIdHeader.class)
   String uploadArchive(
         @ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @ParamValidators(PayloadValidator.class) @BinderParam(BindHashesToHeaders.class) Payload payload);

   /**
    * Deletes an archive from a vault. Be aware that after deleting an archive it may still be listed in the
    * inventories until Amazon computes a new inventory.
    *
    * @param vaultName
    *          Name of the Vault where the archive is stored.
    * @param archiveId
    *          Amazon Glacier archive identifier.
    * @return False if the archive was not deleted, true otherwise.
    */
   @Named("DeleteArchive")
   @DELETE
   @Path("/-/vaults/{vault}/archives/{archive}")
   boolean deleteArchive(
         @ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @PathParam("archive") String archiveId);

   /**
    * Starts a new multipart upload. Using a multipart upload you can upload archives up to 40,000GB (10,000 parts,
    * 4GB each). The part size must be a megabyte multiplied by a power of 2, and every part must be the same size
    * except the last one, which can be smaller. In addition, you don't need to know the archive size to initiate a
    * multipart upload.
    *
    * @param vaultName
    *          Name of the Vault where the archive is going to be stored.
    * @param partSizeInMB
    *          Content size for each part.
    * @param description
    *          The archive description.
    * @return The Multipart Upload Id.
    */
   @Named("InitiateMultipartUpload")
   @POST
   @Path("/-/vaults/{vault}/multipart-uploads")
   @ResponseParser(ParseMultipartUploadIdHeader.class)
   String initiateMultipartUpload(
         @ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @ParamValidators(PartSizeValidator.class) @BinderParam(BindPartSizeToHeaders.class) long partSizeInMB,
         @ParamValidators(DescriptionValidator.class) @BinderParam(BindDescriptionToHeaders.class) String description);

   /**
    * @see GlacierClient#initiateMultipartUpload(String, long, String)
    */
   @Named("InitiateMultipartUpload")
   @POST
   @Path("/-/vaults/{vault}/multipart-uploads")
   @ResponseParser(ParseMultipartUploadIdHeader.class)
   String initiateMultipartUpload(
         @ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @ParamValidators(PartSizeValidator.class) @BinderParam(BindPartSizeToHeaders.class) long partSizeInMB);

   /**
    * Uploads one of the multipart upload parts. The part size has to match the one specified in the multipart upload
    * request, and the range needs to be aligned. In addition, to ensure that the data is not corrupted, the tree hash
    * and the linear hash are checked by Glacier.
    *
    * @param vaultName
    *          Name of the Vault where the archive is going to be stored.
    * @param uploadId
    *          Multipart upload identifier.
    * @param range
    *          The content range that this part is uploading.
    * @param payload
    *          Content for this part.
    * @return Tree-hash of the payload calculated by Amazon. This hash needs to be stored to complete the multipart
    *         upload.
    */
   @Named("UploadPart")
   @PUT
   @Path("/-/vaults/{vault}/multipart-uploads/{uploadId}")
   @ResponseParser(ParseMultipartUploadTreeHashHeader.class)
   HashCode uploadPart(
         @ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @PathParam("uploadId") String uploadId,
         @BinderParam(BindContentRangeToHeaders.class) ContentRange range,
         @ParamValidators(PayloadValidator.class) @BinderParam(BindHashesToHeaders.class) Payload payload);

   /**
    * Completes the multipart upload. After uploading all the parts this operation should be called to inform Glacier.
    * Again, the checksum and the ranges of the whole archive will be computed to verify the data.
    *
    * @param vaultName
    *          Name of the Vault where the archive is going to be stored.
    * @param uploadId
    *          Multipart upload identifier.
    * @param hashes
    *          Map containing the pairs partnumber-treehash of each uploaded part.
    * @param archiveSize
    *          Size of the complete archive.
    * @return A String containing the Archive identifier in Amazon Glacier.
    */
   @Named("CompleteMultipartUpload")
   @POST
   @Path("/-/vaults/{vault}/multipart-uploads/{uploadId}")
   @ResponseParser(ParseArchiveIdHeader.class)
   String completeMultipartUpload(
         @ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @PathParam("uploadId") String uploadId,
         @BinderParam(BindMultipartTreeHashToHeaders.class) Map<Integer, HashCode> hashes,
         @BinderParam(BindArchiveSizeToHeaders.class) long archiveSize);

   /**
    * Aborts the multipart upload. Once aborted, you cannot upload any more parts to it.
    *
    * @param vaultName
    *          Name of the Vault where the archive was going to be stored.
    * @param uploadId
    *          Multipart upload identifier.
    * @return True if the multipart upload was aborted, false otherwise.
    */
   @Named("AbortMultipartUpload")
   @DELETE
   @Path("/-/vaults/{vault}/multipart-uploads/{uploadId}")
   boolean abortMultipartUpload(
         @ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @PathParam("uploadId") String uploadId);

   /**
    * Lists the multipart upload parts. You can list the parts of an ongoing multipart upload at any time. By default
    * it returns up to 1,000 uploaded parts, but you can control this using the request options.
    *
    * @param vaultName
    *          Name of the Vault where the archive is going to be stored.
    * @param uploadId
    *          Multipart upload identifier.
    * @param options
    *          Options used for pagination.
    * @return A MultipartUploadMetadata, containing an iterable part list with a marker.
    */
   @Named("ListParts")
   @GET
   @Path("/-/vaults/{vault}/multipart-uploads/{uploadId}")
   @ResponseParser(ParseMultipartUploadPartListFromHttpContent.class)
   MultipartUploadMetadata listParts(
         @ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @PathParam("uploadId") String uploadId,
         PaginationOptions options);

   /**
    * @see GlacierClient#listParts(String, String, org.jclouds.glacier.options.PaginationOptions)
    */
   @Named("ListParts")
   @GET
   @Path("/-/vaults/{vault}/multipart-uploads/{uploadId}")
   @ResponseParser(ParseMultipartUploadPartListFromHttpContent.class)
   MultipartUploadMetadata listParts(
         @ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @PathParam("uploadId") String uploadId);

   /**
    * Lists the ongoing multipart uploads in a vault. By default, this operation returns up to 1,000 multipart
    * uploads. You can control this using the request options.
    *
    * @param vaultName
    *          Name of the Vault where the archive is going to be stored.
    * @param options
    *          Options used for pagination.
    * @return A PaginatedMultipartUploadCollection, containing an iterable multipart upload list with a marker.
    */
   @Named("ListMultipartUploads")
   @GET
   @Path("/-/vaults/{vault}/multipart-uploads")
   @ResponseParser(ParseMultipartUploadListFromHttpContent.class)
   PaginatedMultipartUploadCollection listMultipartUploads(
         @ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         PaginationOptions options);

   /**
    * @see GlacierClient#listMultipartUploads(String, org.jclouds.glacier.options.PaginationOptions)
    */
   @Named("ListMultipartUploads")
   @GET
   @Path("/-/vaults/{vault}/multipart-uploads")
   @ResponseParser(ParseMultipartUploadListFromHttpContent.class)
   PaginatedMultipartUploadCollection listMultipartUploads(
         @ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName);

   /**
    * Initiates a job. The job can be an inventory retrieval or an archive retrieval. Once the job is started the
    * estimated time to complete it is ~4 hours.
    *
    * @param vaultName
    *          Name of the target Vault for the job.
    * @param job
    *          JobRequest instance with the concrete request.
    * @return The job identifier.
    */
   @Named("InitiateJob")
   @POST
   @Path("/-/vaults/{vault}/jobs")
   @ResponseParser(ParseJobIdHeader.class)
   String initiateJob(@ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @BinderParam(BindJobRequestToJsonPayload.class) JobRequest job);

   /**
    * Retrieves information about an ongoing job. Among the information you will find the initiation date, the user
    * who initiated the job, and the status message.
    *
    * @param vaultName
    *          Name of the target Vault for the job.
    * @param jobId
    *          Job identifier.
    * @return The job metadata if the job exists in the vault, null otherwise.
    */
   @Named("DescribeJob")
   @GET
   @Path("/-/vaults/{vault}/jobs/{job}")
   @ResponseParser(ParseJobMetadataFromHttpContent.class)
   @Fallback(NullOnNotFoundOr404.class)
   JobMetadata describeJob(@ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @PathParam("job") String jobId);

   /**
    * Lists the ongoing jobs and the recently finished jobs for a vault. By default this operation returns up to
    * 1,000 jobs in the response, but you can control this using the request options. Note that this operation also
    * returns the recently finished jobs and you can still download their output.
    *
    * @param vaultName
    *          Name of the target Vault.
    * @param options
    *          Options used for pagination.
    * @return A PaginatedJobCollection, containing an iterable job list with a marker.
    */
   @Named("ListJobs")
   @GET
   @Path("/-/vaults/{vault}/jobs")
   @ResponseParser(ParseJobMetadataListFromHttpContent.class)
   PaginatedJobCollection listJobs(@ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         PaginationOptions options);

   /**
    * Lists jobs.
    */
   @Named("ListJobs")
   @GET
   @Path("/-/vaults/{vault}/jobs")
   @ResponseParser(ParseJobMetadataListFromHttpContent.class)
   PaginatedJobCollection listJobs(@ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName);

   /**
    * Gets the raw job output of any kind of job. You can download the job output within the 24-hour period after
    * Glacier completes a job.
    *
    * @param vaultName
    *          Name of the target Vault for the job.
    * @param jobId
    *          Job identifier.
    * @param range
    *          The range of bytes to retrieve from the output.
    * @return The content data.
    */
   @Named("GetJobOutput")
   @GET
   @Path("/-/vaults/{vault}/jobs/{job}/output")
   @ResponseParser(GetPayloadFromHttpContent.class)
   Payload getJobOutput(@ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @PathParam("job") String jobId,
         @BinderParam(BindArchiveOutputRangeToHeaders.class) ContentRange range);

   /**
    * Downloads the output of an archive retrieval job.
    *
    * @param vaultName
    *          Name of the target Vault for the job.
    * @param jobId
    *          Job identifier.
    * @return The content data.
    */
   @Named("GetJobOutput")
   @GET
   @Path("/-/vaults/{vault}/jobs/{job}/output")
   @ResponseParser(GetPayloadFromHttpContent.class)
   Payload getJobOutput(@ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @PathParam("job") String jobId);

   /**
    * Gets the job output for the given job ID. The job must be an inventory retrieval, otherwise this operation will
    * fail. This operation will parse the job output and build a collection of ArchiveMetadata.
    *
    * @param vaultName
    *          Name of the target Vault for the job.
    * @param jobId
    *          Job identifier.
    * @return The ArchiveMetadata collection.
    */
   @Named("GetInventoryRetrievalOutput")
   @GET
   @Path("/-/vaults/{vault}/jobs/{job}/output")
   @ResponseParser(ParseArchiveMetadataCollectionFromHttpContent.class)
   ArchiveMetadataCollection getInventoryRetrievalOutput(@ParamValidators(VaultNameValidator.class) @PathParam("vault") String vaultName,
         @PathParam("job") String jobId);
}
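
A sketch of the multipart-upload flow the interface above describes (initiate, upload an aligned part, complete with the per-part tree hashes). The vault name, part size, and single 1MB part are illustrative, ContentRange.fromPartNumber is assumed from the accompanying util package, and the 0-based part-number key in the hash map is illustrative as well.

import java.util.Map;

import org.jclouds.glacier.GlacierClient;
import org.jclouds.glacier.util.ContentRange;
import org.jclouds.io.Payload;
import org.jclouds.io.Payloads;

import com.google.common.collect.ImmutableMap;
import com.google.common.hash.HashCode;

public class MultipartUploadSketch {
   public static String upload(GlacierClient client) {
      long partSizeInMB = 1;                 // must be a power-of-two number of MB
      byte[] part = new byte[1 << 20];       // a single 1MB part, for illustration only
      String uploadId = client.initiateMultipartUpload("examplevault", partSizeInMB, "example archive");
      Payload payload = Payloads.newByteArrayPayload(part);
      // ContentRange.fromPartNumber is assumed to build the aligned byte range for part 0.
      HashCode treeHash = client.uploadPart("examplevault", uploadId,
            ContentRange.fromPartNumber(0, partSizeInMB), payload);
      Map<Integer, HashCode> hashes = ImmutableMap.of(0, treeHash);
      return client.completeMultipartUpload("examplevault", uploadId, hashes, part.length);
   }
}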

@@ -0,0 +1,44 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier;

import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.glacier.domain.GlacierError;
import org.jclouds.http.HttpCommand;
import org.jclouds.http.HttpResponse;
import org.jclouds.http.HttpResponseException;

/**
 * Encapsulates a GlacierError.
 */
public class GlacierResponseException extends HttpResponseException {

   private static final long serialVersionUID = 1L;
   private final GlacierError error;

   public GlacierResponseException(HttpCommand command, HttpResponse response, GlacierError error) {
      super("request " + command.getCurrentRequest().getRequestLine() + " failed with code " + response.getStatusCode()
            + ", error: " + error.toString(), command, response);
      this.error = checkNotNull(error, "error");
   }

   public GlacierError getError() {
      return error;
   }

}
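
A sketch of how a caller might surface the parsed error around any client call; getError() exposes the GlacierError carried by the exception, and the vault name is a placeholder.

import org.jclouds.glacier.GlacierClient;
import org.jclouds.glacier.GlacierResponseException;

public class ErrorHandlingSketch {
   public static void describeOrReport(GlacierClient client, String vault) {
      try {
         client.describeVault(vault);
      } catch (GlacierResponseException e) {
         // The parsed Glacier error body travels with the exception.
         System.err.println("Glacier error: " + e.getError());
      }
   }
}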

@@ -0,0 +1,40 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.binders;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.glacier.util.ContentRange;
import org.jclouds.http.HttpRequest;
import org.jclouds.rest.Binder;

/**
 * Binds the ContentRange to the request headers.
 */
public class BindArchiveOutputRangeToHeaders implements Binder {

   @SuppressWarnings("unchecked")
   @Override
   public <R extends HttpRequest> R bindToRequest(R request, Object input) {
      checkArgument(input instanceof ContentRange, "This binder is only valid for ContentRange");
      checkNotNull(request, "request");
      ContentRange range = ContentRange.class.cast(input);
      return (R) request.toBuilder().addHeader("Range", "bytes=" + range.toString()).build();
   }

}

@@ -0,0 +1,41 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.binders;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.glacier.reference.GlacierHeaders;
import org.jclouds.http.HttpRequest;
import org.jclouds.rest.Binder;

/**
 * Binds the Archive size to the request headers.
 */
public class BindArchiveSizeToHeaders implements Binder {
   @SuppressWarnings("unchecked")
   @Override
   public <R extends HttpRequest> R bindToRequest(R request, Object input) {
      checkArgument(input instanceof Long, "This binder is only valid for long");
      checkNotNull(request, "request");
      Long archiveSize = Long.class.cast(input);
      return (R) request.toBuilder()
            .replaceHeader(GlacierHeaders.ARCHIVE_SIZE, Long.toString(archiveSize))
            .build();
   }

}

@@ -0,0 +1,40 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.binders;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.glacier.util.ContentRange;
import org.jclouds.http.HttpRequest;
import org.jclouds.rest.Binder;

import com.google.common.net.HttpHeaders;

/**
 * Binds the ContentRange to the request headers.
 */
public class BindContentRangeToHeaders implements Binder {
   @SuppressWarnings("unchecked")
   @Override
   public <R extends HttpRequest> R bindToRequest(R request, Object input) {
      checkArgument(input instanceof ContentRange, "This binder is only valid for ContentRange");
      checkNotNull(request, "request");
      ContentRange range = ContentRange.class.cast(input);
      return (R) request.toBuilder().addHeader(HttpHeaders.CONTENT_RANGE, range.buildHeader()).build();
   }
}

@@ -0,0 +1,42 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.binders;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.glacier.reference.GlacierHeaders;
import org.jclouds.http.HttpRequest;
import org.jclouds.rest.Binder;

/**
 * Binds the archive description to the request headers.
 */
public class BindDescriptionToHeaders implements Binder {

   @SuppressWarnings("unchecked")
   @Override
   public <R extends HttpRequest> R bindToRequest(R request, Object input) {
      checkNotNull(request, "request");
      if (input == null)
         return request;
      checkArgument(input instanceof String, "This binder is only valid for string");
      String description = String.class.cast(input);
      return (R) request.toBuilder().replaceHeader(GlacierHeaders.ARCHIVE_DESCRIPTION, description).build();
   }

}

@@ -0,0 +1,58 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.binders;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

import java.io.IOException;

import org.jclouds.glacier.reference.GlacierHeaders;
import org.jclouds.glacier.util.TreeHash;
import org.jclouds.http.HttpException;
import org.jclouds.http.HttpRequest;
import org.jclouds.io.Payload;
import org.jclouds.rest.Binder;

/**
 * Binds the linear hash and the tree hash of the payload to the request headers.
 */
public class BindHashesToHeaders implements Binder {

   private HttpRequest addChecksumHeaders(HttpRequest request, Payload payload) {
      try {
         TreeHash hash = TreeHash.buildTreeHashFromPayload(payload);
         request = request.toBuilder()
               .addHeader(GlacierHeaders.LINEAR_HASH, hash.getLinearHash().toString())
               .addHeader(GlacierHeaders.TREE_HASH, hash.getTreeHash().toString())
               .build();
      } catch (IOException e) {
         throw new HttpException("Error hashing the payload", e);
      }
      return request;
   }

   @SuppressWarnings("unchecked")
   @Override
   public <R extends HttpRequest> R bindToRequest(R request, Object input) {
      checkArgument(input instanceof Payload, "This binder is only valid for Payload");
      checkNotNull(request, "request");
      Payload payload = Payload.class.cast(input);
      return (R) addChecksumHeaders(request, payload);
   }

}
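
The binder above derives both headers from TreeHash; a small sketch of the same computation in isolation, using methods already referenced in this commit (the 2MB zero-filled payload is illustrative).

import java.io.IOException;

import org.jclouds.glacier.util.TreeHash;
import org.jclouds.io.Payload;
import org.jclouds.io.Payloads;

public class TreeHashSketch {
   public static void main(String[] args) throws IOException {
      Payload payload = Payloads.newByteArrayPayload(new byte[2 << 20]); // 2MB of zeros, illustrative
      TreeHash hash = TreeHash.buildTreeHashFromPayload(payload);
      System.out.println("linear: " + hash.getLinearHash());
      System.out.println("tree:   " + hash.getTreeHash());
   }
}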

@@ -0,0 +1,41 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.binders;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.glacier.domain.JobRequest;
import org.jclouds.http.HttpRequest;
import org.jclouds.json.Json;
import org.jclouds.rest.Binder;

import com.google.inject.Inject;

public class BindJobRequestToJsonPayload implements Binder {
   @Inject
   private Json json;

   @Override
   public <R extends HttpRequest> R bindToRequest(R request, Object input) {
      checkArgument(input instanceof JobRequest,
            "This binder is only valid for JobRequest");
      checkNotNull(request, "request");
      request.setPayload(json.toJson(input));
      return request;
   }
}

@@ -0,0 +1,46 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.binders;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

import java.util.Map;

import org.jclouds.glacier.reference.GlacierHeaders;
import org.jclouds.glacier.util.TreeHash;
import org.jclouds.http.HttpRequest;
import org.jclouds.rest.Binder;

import com.google.common.hash.HashCode;

/**
 * Binds the Tree hash to the request headers.
 */
public class BindMultipartTreeHashToHeaders implements Binder {
   @SuppressWarnings("unchecked")
   @Override
   public <R extends HttpRequest> R bindToRequest(R request, Object input) {
      checkArgument(input instanceof Map, "This binder is only valid for Map");
      checkNotNull(request, "request");
      Map<Integer, HashCode> map = Map.class.cast(input);
      checkArgument(map.size() != 0, "The map cannot be empty");
      return (R) request.toBuilder()
            .addHeader(GlacierHeaders.TREE_HASH, TreeHash.buildTreeHashFromMap(map).toString())
            .build();
   }
}

@@ -0,0 +1,40 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.binders;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.glacier.reference.GlacierHeaders;
import org.jclouds.http.HttpRequest;
import org.jclouds.rest.Binder;

/**
 * Binds the Part size to the request headers.
 */
public class BindPartSizeToHeaders implements Binder {
   @SuppressWarnings("unchecked")
   @Override
   public <R extends HttpRequest> R bindToRequest(R request, Object input) {
      checkArgument(input instanceof Long, "This binder is only valid for long");
      checkNotNull(request, "request");
      Long partSizeInMB = Long.class.cast(input);
      return (R) request.toBuilder()
            .replaceHeader(GlacierHeaders.PART_SIZE, Long.toString(partSizeInMB << 20))
            .build();
   }
}
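
The partSizeInMB << 20 shift above converts the part size from mebibytes to the byte count sent in the header; for example, a 4 MB part becomes 4 << 20 = 4,194,304 bytes.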
@@ -0,0 +1,419 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static org.jclouds.util.Predicates2.retry;

import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.domain.BlobAccess;
import org.jclouds.blobstore.domain.BlobMetadata;
import org.jclouds.blobstore.domain.ContainerAccess;
import org.jclouds.blobstore.domain.MultipartPart;
import org.jclouds.blobstore.domain.MultipartUpload;
import org.jclouds.blobstore.domain.MutableBlobMetadata;
import org.jclouds.blobstore.domain.PageSet;
import org.jclouds.blobstore.domain.StorageMetadata;
import org.jclouds.blobstore.domain.internal.BlobImpl;
import org.jclouds.blobstore.domain.internal.MutableBlobMetadataImpl;
import org.jclouds.blobstore.internal.BaseBlobStore;
import org.jclouds.blobstore.options.CreateContainerOptions;
import org.jclouds.blobstore.options.GetOptions;
import org.jclouds.blobstore.options.ListContainerOptions;
import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.blobstore.util.BlobUtils;
import org.jclouds.collect.Memoized;
import org.jclouds.domain.Location;
import org.jclouds.glacier.GlacierClient;
import org.jclouds.glacier.blobstore.functions.ArchiveMetadataCollectionToStorageMetadata;
import org.jclouds.glacier.blobstore.functions.ListContainerOptionsToInventoryRetrievalJobRequest;
import org.jclouds.glacier.blobstore.functions.PaginatedVaultCollectionToStorageMetadata;
import org.jclouds.glacier.blobstore.strategy.MultipartUploadStrategy;
import org.jclouds.glacier.blobstore.strategy.PollingStrategy;
import org.jclouds.glacier.domain.ArchiveRetrievalJobRequest;
import org.jclouds.glacier.util.ContentRange;
import org.jclouds.io.Payload;
import org.jclouds.io.PayloadSlicer;
import org.jclouds.javax.annotation.Nullable;

import com.google.common.base.Predicate;
import com.google.common.base.Supplier;
import com.google.common.base.Throwables;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import com.google.inject.name.Named;

@Singleton
public class GlacierBlobStore extends BaseBlobStore {
   public static final long DEFAULT_INVENTORY_UPDATE_TIME = TimeUnit.HOURS.toMillis(24);

   @Inject(optional = true)
   @Named("jclouds.glacier.inventory.update.time")
   private long inventoryUpdateTime = DEFAULT_INVENTORY_UPDATE_TIME;

   private final GlacierClient sync;
   private final Provider<MultipartUploadStrategy> multipartUploadStrategy;
   private final Provider<PollingStrategy> pollingStrategy;
   private final PaginatedVaultCollectionToStorageMetadata vaultsToContainers;
   private final ArchiveMetadataCollectionToStorageMetadata archivesToBlobs;
   private final ListContainerOptionsToInventoryRetrievalJobRequest containerOptionsToInventoryRetrieval;

   @Inject
   GlacierBlobStore(BlobStoreContext context, BlobUtils blobUtils, Supplier<Location> defaultLocation,
         @Memoized Supplier<Set<? extends Location>> locations, PayloadSlicer slicer, GlacierClient sync,
         Provider<MultipartUploadStrategy> multipartUploadStrategy,
         Provider<PollingStrategy> pollingStrategy,
         PaginatedVaultCollectionToStorageMetadata vaultsToContainers,
         ArchiveMetadataCollectionToStorageMetadata archivesToBlobs,
         ListContainerOptionsToInventoryRetrievalJobRequest containerOptionsToInventoryRetrieval) {
      super(context, blobUtils, defaultLocation, locations, slicer);
      this.containerOptionsToInventoryRetrieval = checkNotNull(containerOptionsToInventoryRetrieval,
            "containerOptionsToInventoryRetrieval");
      this.archivesToBlobs = checkNotNull(archivesToBlobs, "archivesToBlobs");
      this.pollingStrategy = checkNotNull(pollingStrategy, "pollingStrategy");
      this.vaultsToContainers = checkNotNull(vaultsToContainers, "vaultsToContainers");
      this.multipartUploadStrategy = checkNotNull(multipartUploadStrategy, "multipartUploadStrategy");
      this.sync = checkNotNull(sync, "sync");
   }

   /**
    * Deletes the container and all its blobs.
    * Inventories will be retrieved until the container is gone. Since inventories need 24 hours to be updated,
    * this operation may take days.
    *
    * @param container
    *        container name
    * @see <a href="http://aws.amazon.com/glacier/faqs/#data-inventories" />
    */
   @Override
   public void deleteContainer(String container) {
      // attempt to delete possibly-empty vault to avoid inventory retrieval
      if (!sync.deleteVault(container)) {
         deletePathAndEnsureGone(container);
      }
   }

   @Override
   protected void deletePathAndEnsureGone(String container) {
      checkState(retry(new Predicate<String>() {
         public boolean apply(String container) {
            clearContainer(container);
            return sync.deleteVault(container);
         }
      }, inventoryUpdateTime).apply(container), "%s still exists after deleting!", container);
   }

   @Override
   protected boolean deleteAndVerifyContainerGone(String container) {
      return sync.deleteVault(container);
   }

   /**
    * Lists the containers.
    *
    * @return a PageSet of StorageMetadata
    */
   @Override
   public PageSet<? extends StorageMetadata> list() {
      return vaultsToContainers.apply(sync.listVaults());
   }

   /**
    * Checks if the container exists.
    * This implementation invokes {@link GlacierClient#describeVault(String)}.
    *
    * @param container
    *        container name
    * @return true if the vault exists, false otherwise
    */
   @Override
   public boolean containerExists(String container) {
      return sync.describeVault(container) != null;
   }

   /**
    * Creates a container.
    * Location is currently ignored.
    *
    * @param location
    *        currently ignored
    * @param container
    *        container name
    * @return true if the container was created, false otherwise
    */
   @Override
   public boolean createContainerInLocation(@Nullable Location location, String container) {
      return sync.createVault(container) != null;
   }

   /**
    * Creates a container.
    * Location and options are currently ignored.
    *
    * @param location
    *        currently ignored
    * @param container
    *        container name
    * @param options
    *        currently ignored
    * @return true if the container was created, false otherwise
    */
   @Override
   public boolean createContainerInLocation(@Nullable Location location, String container,
         CreateContainerOptions options) {
      return createContainerInLocation(location, container);
   }

   @Override
   public ContainerAccess getContainerAccess(String container) {
      throw new UnsupportedOperationException("not implemented");
   }

   @Override
   public void setContainerAccess(String container, ContainerAccess access) {
      throw new UnsupportedOperationException("not implemented");
   }

   /**
    * Lists the blobs in the container.
    * An inventory will be retrieved to obtain the list. Note that this will take hours and the result may be
    * inaccurate (inventories are updated every 24 hours).
    *
    * @param container
    *        container name
    * @param listContainerOptions
    *        list options
    * @return the blob list
    * @see <a href="http://aws.amazon.com/glacier/faqs/#data-inventories" />
    */
   @Override
   public PageSet<? extends StorageMetadata> list(String container, ListContainerOptions listContainerOptions) {
      String jobId = sync.initiateJob(container, containerOptionsToInventoryRetrieval.apply(listContainerOptions));
      try {
         if (pollingStrategy.get().waitForSuccess(container, jobId)) {
            return archivesToBlobs.apply(sync.getInventoryRetrievalOutput(container, jobId));
         }
      } catch (InterruptedException e) {
         Throwables.propagate(e);
      }
      return null;
   }

   /**
    * Checks if the blob exists in the container.
    * An inventory will be retrieved to obtain the blob list and the method will iterate through it. This operation
    * will take several hours and the result may be inaccurate (inventories are updated every 24 hours).
    *
    * @param container
    *        container name
    * @param key
    *        blob key
    * @return true if the blob exists, false otherwise
    * @see <a href="http://aws.amazon.com/glacier/faqs/#data-inventories" />
    */
   @Override
   public boolean blobExists(String container, String key) {
      return blobMetadata(container, key) != null;
   }

   /**
    * Stores a blob in a container. The blob name will be ignored, since it's not supported by Glacier.
    *
    * @param container
    *        container name
    * @param blob
    *        blob to upload
    * @return the blob name
    */
   @Override
   public String putBlob(String container, Blob blob) {
      return sync.uploadArchive(container, blob.getPayload());
   }

   /**
    * Stores the blob in a container.
    *
    * @param container
    *        container name
    * @param blob
    *        blob to upload
    * @param options
    *        upload options
    * @return the blob name
    */
   @Override
   public String putBlob(String container, Blob blob, PutOptions options) {
      if (options.isMultipart()) {
         return multipartUploadStrategy.get().execute(container, blob);
      }
      return putBlob(container, blob);
   }

   /**
    * Retrieves the blob metadata.
    * An inventory will be retrieved to obtain the blob list and the method will iterate through it. This operation
    * will take several hours and the result may be inaccurate (inventories are updated every 24 hours).
    *
    * @param container
    *        container name
    * @param key
    *        blob name
    * @return null if the blob doesn't exist, the blob metadata otherwise
    * @see <a href="http://aws.amazon.com/glacier/faqs/#data-inventories" />
    */
   @Override
   public BlobMetadata blobMetadata(String container, String key) {
      PageSet<? extends StorageMetadata> blobMetadataSet = list(container, null);
      for (StorageMetadata blob : blobMetadataSet) {
         if (blob.getName().equals(key)) {
            return (BlobMetadata) blob;
         }
      }
      return null;
   }

   private static ArchiveRetrievalJobRequest buildArchiveRetrievalRequest(String key, GetOptions getOptions) {
      ArchiveRetrievalJobRequest.Builder requestBuilder = ArchiveRetrievalJobRequest.builder().archiveId(key);
      if (getOptions != null) {
         int size = getOptions.getRanges().size();
         checkArgument(size <= 1, "The number of ranges should be zero or one");
         if (size == 1) {
            requestBuilder.range(ContentRange.fromString(getOptions.getRanges().get(0)));
         }
      }
      return requestBuilder.build();
   }

   /**
    * Retrieves the blob.
    * This operation will take several hours.
    *
    * @param container
    *        container name
    * @param key
    *        blob name
    * @return the blob to retrieve, or null if the blob doesn't exist or the archive retrieval fails
    */
   @Override
   public Blob getBlob(String container, String key, GetOptions getOptions) {
      String jobId = sync.initiateJob(container, buildArchiveRetrievalRequest(key, getOptions));
      try {
         if (pollingStrategy.get().waitForSuccess(container, jobId)) {
            MutableBlobMetadata blobMetadata = new MutableBlobMetadataImpl();
            blobMetadata.setContainer(container);
            blobMetadata.setName(key);

            Blob blob = new BlobImpl(blobMetadata);
            blob.setPayload(sync.getJobOutput(container, jobId));
            return blob;
         }
      } catch (InterruptedException e) {
         Throwables.propagate(e);
      }
      return null;
   }

   /**
    * Retrieves the blob using default options.
    * This operation will take several hours.
    *
    * @param container
    *        container name
    * @param key
    *        blob name
    * @return the blob to retrieve, or null if the blob doesn't exist or the archive retrieval fails
    */
   @Override
   public Blob getBlob(String container, String key) {
      return getBlob(container, key, null);
   }

   /**
    * Deletes the blob.
    *
    * @param container
    *        container name
    * @param key
    *        blob name
    */
   @Override
   public void removeBlob(String container, String key) {
      sync.deleteArchive(container, key);
   }

   @Override
   public BlobAccess getBlobAccess(String container, String name) {
      return BlobAccess.PRIVATE;
   }

   @Override
   public void setBlobAccess(String container, String name, BlobAccess access) {
      throw new UnsupportedOperationException("not implemented");
   }

   @Override
   public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) {
      throw new UnsupportedOperationException("not yet implemented");
   }

   @Override
   public void abortMultipartUpload(MultipartUpload mpu) {
      throw new UnsupportedOperationException("not yet implemented");
   }

   @Override
   public String completeMultipartUpload(MultipartUpload mpu, List<MultipartPart> parts) {
      throw new UnsupportedOperationException("not yet implemented");
   }

   @Override
   public MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) {
      throw new UnsupportedOperationException("not yet implemented");
   }

   @Override
   public List<MultipartPart> listMultipartUpload(MultipartUpload mpu) {
      throw new UnsupportedOperationException("not yet implemented");
   }

   @Override
   public List<MultipartUpload> listMultipartUploads(String container) {
      throw new UnsupportedOperationException("not yet implemented");
   }

   @Override
   public long getMinimumMultipartPartSize() {
      throw new UnsupportedOperationException("not yet implemented");
   }

   @Override
   public long getMaximumMultipartPartSize() {
      throw new UnsupportedOperationException("not yet implemented");
   }

   @Override
   public int getMaximumNumberOfParts() {
      throw new UnsupportedOperationException("not yet implemented");
   }
}
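For context, the blob store above maps vaults to containers and archives to blobs, so it is normally consumed through the portable BlobStore view. A minimal usage sketch, assuming the api id "glacier" and caller-supplied AWS credentials (both assumptions made here for illustration, not part of this diff):

```java
import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Blob;

public class GlacierBlobStoreSketch {
   public static void main(String[] args) {
      // Hypothetical credentials; replace with real AWS keys.
      BlobStoreContext context = ContextBuilder.newBuilder("glacier")
            .credentials("accessKeyId", "secretAccessKey")
            .buildView(BlobStoreContext.class);
      try {
         BlobStore blobStore = context.getBlobStore();
         blobStore.createContainerInLocation(null, "myvault");    // creates a vault
         Blob blob = blobStore.blobBuilder("ignored-name")         // Glacier assigns the archive id
               .payload("hello glacier".getBytes())
               .build();
         String archiveId = blobStore.putBlob("myvault", blob);    // returned "name" is the archive id
         // getBlob initiates an archive-retrieval job and polls for completion; expect hours.
         Blob retrieved = blobStore.getBlob("myvault", archiveId);
      } finally {
         context.close();
      }
   }
}
```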
@@ -0,0 +1,43 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.config;

import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.attr.ConsistencyModel;
import org.jclouds.blobstore.strategy.ClearListStrategy;
import org.jclouds.glacier.blobstore.GlacierBlobStore;
import org.jclouds.glacier.blobstore.strategy.MultipartUploadStrategy;
import org.jclouds.glacier.blobstore.strategy.PollingStrategy;
import org.jclouds.glacier.blobstore.strategy.SlicingStrategy;
import org.jclouds.glacier.blobstore.strategy.internal.BasePollingStrategy;
import org.jclouds.glacier.blobstore.strategy.internal.BaseSlicingStrategy;
import org.jclouds.glacier.blobstore.strategy.internal.ClearVaultStrategy;
import org.jclouds.glacier.blobstore.strategy.internal.SequentialMultipartUploadStrategy;

import com.google.inject.AbstractModule;

public class GlacierBlobStoreContextModule extends AbstractModule {
   @Override
   protected void configure() {
      bind(ConsistencyModel.class).toInstance(ConsistencyModel.EVENTUAL);
      bind(BlobStore.class).to(GlacierBlobStore.class);
      bind(MultipartUploadStrategy.class).to(SequentialMultipartUploadStrategy.class);
      bind(SlicingStrategy.class).to(BaseSlicingStrategy.class);
      bind(ClearListStrategy.class).to(ClearVaultStrategy.class);
      bind(PollingStrategy.class).to(BasePollingStrategy.class);
   }
}
@@ -0,0 +1,52 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.functions;

import org.jclouds.blobstore.domain.MutableBlobMetadata;
import org.jclouds.blobstore.domain.PageSet;
import org.jclouds.blobstore.domain.StorageMetadata;
import org.jclouds.blobstore.domain.internal.MutableBlobMetadataImpl;
import org.jclouds.blobstore.domain.internal.PageSetImpl;
import org.jclouds.glacier.domain.ArchiveMetadata;
import org.jclouds.glacier.domain.ArchiveMetadataCollection;
import org.jclouds.io.MutableContentMetadata;
import org.jclouds.io.payloads.BaseMutableContentMetadata;

import com.google.common.base.Function;
import com.google.common.collect.Iterables;

public class ArchiveMetadataCollectionToStorageMetadata implements Function<ArchiveMetadataCollection,
      PageSet<? extends StorageMetadata>> {
   @Override
   public PageSet<? extends StorageMetadata> apply(ArchiveMetadataCollection archives) {
      return new PageSetImpl<StorageMetadata>(Iterables.transform(archives, new ArchiveMetadataToBlobMetadata()), null);
   }

   private static class ArchiveMetadataToBlobMetadata implements Function<ArchiveMetadata, MutableBlobMetadata> {
      @Override
      public MutableBlobMetadata apply(ArchiveMetadata from) {
         MutableContentMetadata contentMetadata = new BaseMutableContentMetadata();
         contentMetadata.setContentLength(from.getSize());

         MutableBlobMetadata to = new MutableBlobMetadataImpl();
         to.setName(from.getArchiveId());
         to.setCreationDate(from.getCreationDate());
         to.setContentMetadata(contentMetadata);
         return to;
      }
   }
}
@@ -0,0 +1,39 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.functions;

import org.jclouds.blobstore.options.ListContainerOptions;
import org.jclouds.glacier.domain.InventoryRetrievalJobRequest;

import com.google.common.base.Function;

public class ListContainerOptionsToInventoryRetrievalJobRequest implements Function<ListContainerOptions,
      InventoryRetrievalJobRequest> {
   @Override
   public InventoryRetrievalJobRequest apply(ListContainerOptions listContainerOptions) {
      InventoryRetrievalJobRequest.Builder builder = InventoryRetrievalJobRequest.builder();
      if (listContainerOptions != null) {
         if (listContainerOptions.getMarker() != null) {
            builder.marker(listContainerOptions.getMarker());
         }
         if (listContainerOptions.getMaxResults() != null) {
            builder.limit(listContainerOptions.getMaxResults());
         }
      }
      return builder.build();
   }
}
@@ -0,0 +1,34 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.functions;

import org.jclouds.blobstore.domain.PageSet;
import org.jclouds.blobstore.domain.StorageMetadata;
import org.jclouds.blobstore.domain.internal.PageSetImpl;
import org.jclouds.glacier.domain.PaginatedVaultCollection;

import com.google.common.base.Function;
import com.google.common.collect.Iterables;

public class PaginatedVaultCollectionToStorageMetadata implements Function<PaginatedVaultCollection,
      PageSet<? extends StorageMetadata>> {
   @Override
   public PageSet<? extends StorageMetadata> apply(PaginatedVaultCollection vaults) {
      return new PageSetImpl<StorageMetadata>(Iterables.transform(vaults, new VaultMetadataToStorageMetadata()),
            (String) vaults.nextMarker().orNull());
   }
}
@@ -0,0 +1,33 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.functions;

import org.jclouds.blobstore.domain.StorageMetadata;
import org.jclouds.blobstore.domain.StorageType;
import org.jclouds.blobstore.domain.internal.StorageMetadataImpl;
import org.jclouds.glacier.domain.VaultMetadata;

import com.google.common.base.Function;
import com.google.common.collect.ImmutableMap;

public class VaultMetadataToStorageMetadata implements Function<VaultMetadata, StorageMetadata> {
   @Override
   public StorageMetadata apply(VaultMetadata vault) {
      return new StorageMetadataImpl(StorageType.CONTAINER, vault.getVaultARN(), vault.getVaultName(), null, null, null,
            vault.getCreationDate(), vault.getLastInventoryDate(), ImmutableMap.<String, String>of());
   }
}
@@ -0,0 +1,23 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.strategy;

import org.jclouds.blobstore.domain.Blob;

public interface MultipartUploadStrategy {
   String execute(String container, Blob blob);
}
@@ -0,0 +1,51 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.strategy;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.glacier.util.ContentRange;
import org.jclouds.io.Payload;

/**
 * Represents an ordered part of data sliced from a payload.
 */
public class PayloadSlice {
   private final Payload payload;
   private final ContentRange range;
   private final int part;

   public PayloadSlice(Payload payload, ContentRange range, int part) {
      this.payload = checkNotNull(payload, "payload");
      this.range = checkNotNull(range, "range");
      checkArgument(part >= 0, "The part number cannot be negative");
      this.part = part;
   }

   public Payload getPayload() {
      return payload;
   }

   public ContentRange getRange() {
      return range;
   }

   public int getPart() {
      return part;
   }
}
@@ -0,0 +1,21 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.strategy;

public interface PollingStrategy {
   boolean waitForSuccess(String vault, String job) throws InterruptedException;
}
@@ -0,0 +1,33 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.strategy;

import org.jclouds.io.Payload;

public interface SlicingStrategy {
   int MAX_LIST_PARTS_RETURNED = 1000;
   int MAX_LIST_MPU_RETURNED = 1000;
   int MAX_NUMBER_OF_PARTS = 10000;

   long MIN_PART_SIZE = 1L << 20; // 1 MB, last part can be < 1 MB
   long MAX_PART_SIZE = 1L << 32; // 4 GB

   void startSlicing(Payload payload);
   PayloadSlice nextSlice();
   boolean hasNext();
   long getPartSizeInMB();
}
@@ -0,0 +1,78 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.strategy.internal;

import static com.google.common.base.Preconditions.checkNotNull;

import java.util.concurrent.TimeUnit;

import org.jclouds.glacier.GlacierClient;
import org.jclouds.glacier.blobstore.strategy.PollingStrategy;
import org.jclouds.glacier.domain.JobMetadata;
import org.jclouds.glacier.domain.JobStatus;

import com.google.inject.Inject;
import com.google.inject.Singleton;

/**
 * This implementation waits a fixed amount of time before it starts polling.
 */
@Singleton
public class BasePollingStrategy implements PollingStrategy {
   public static final long DEFAULT_INITIAL_WAIT = TimeUnit.HOURS.toMillis(3);
   public static final long DEFAULT_TIME_BETWEEN_POLLS = TimeUnit.MINUTES.toMillis(15);

   private final GlacierClient client;
   private final long initialWait;
   private final long timeBetweenPolls;

   public BasePollingStrategy(GlacierClient client, long initialWait, long timeBetweenPolls) {
      this.client = checkNotNull(client, "client");
      this.initialWait = initialWait;
      this.timeBetweenPolls = timeBetweenPolls;
   }

   @Inject
   public BasePollingStrategy(GlacierClient client) {
      this(client, Long.parseLong(System.getProperty("test.glacier.initial-wait",
            Long.toString(DEFAULT_INITIAL_WAIT))), DEFAULT_TIME_BETWEEN_POLLS);
   }

   private boolean inProgress(String job, String vault) {
      JobMetadata jobMetadata = client.describeJob(vault, job);
      return (jobMetadata != null) && (jobMetadata.getStatusCode() == JobStatus.IN_PROGRESS);
   }

   private boolean succeeded(String job, String vault) {
      JobMetadata jobMetadata = client.describeJob(vault, job);
      return (jobMetadata != null) && (jobMetadata.getStatusCode() == JobStatus.SUCCEEDED);
   }

   @Override
   public boolean waitForSuccess(String vault, String job) throws InterruptedException {
      // Avoid waiting if the job doesn't exist
      if (client.describeJob(vault, job) == null) {
         return false;
      }
      Thread.sleep(initialWait);
      while (inProgress(job, vault)) {
         Thread.sleep(timeBetweenPolls);
      }
      return succeeded(job, vault);
   }
}
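The default schedule above is a three-hour initial wait followed by 15-minute polls. A small, self-contained sketch of how the initial wait could be shortened (for example in tests), assuming the property is set before the strategy is constructed; the property name is the one read by the injectable constructor above, and the one-minute value is only illustrative:

```java
public class GlacierPollingTuning {
   public static void main(String[] args) {
      // Value is in milliseconds; read by BasePollingStrategy's @Inject constructor.
      System.setProperty("test.glacier.initial-wait", Long.toString(60_000L));
   }
}
```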
@@ -0,0 +1,103 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.strategy.internal;

import static com.google.common.base.Preconditions.checkNotNull;
import static java.lang.Math.sqrt;

import javax.inject.Singleton;

import org.jclouds.glacier.blobstore.strategy.PayloadSlice;
import org.jclouds.glacier.blobstore.strategy.SlicingStrategy;
import org.jclouds.glacier.util.ContentRange;
import org.jclouds.io.Payload;
import org.jclouds.io.PayloadSlicer;

import com.google.inject.Inject;
import com.google.inject.name.Named;

/**
 * This implementation slices a payload based on the (part size)/(number of parts) ratio. This ratio may be overridden.
 */
@Singleton
public class BaseSlicingStrategy implements SlicingStrategy {

   public static final double DEFAULT_RATIO = 0.32; // (part size/number of parts) ratio

   @Inject(optional = true)
   @Named("jclouds.mpu.part.ratio")
   private double ratio = DEFAULT_RATIO;

   private final PayloadSlicer slicer;
   private Payload payload;
   private volatile long partSizeInMB;
   private volatile long total;
   private volatile long copied;
   private volatile int part;

   @Inject
   public BaseSlicingStrategy(PayloadSlicer slicer) {
      this.slicer = checkNotNull(slicer, "slicer");
      this.total = 0;
      this.copied = 0;
      this.partSizeInMB = 0;
      this.part = 0;
   }

   protected long calculatePartSize(long length) {
      long lengthInMB = (length / (1L << 20)) + 1;
      double fpPartSizeInMB = sqrt(ratio * lengthInMB); // Get the part size which matches the given ratio
      long partSizeInMB = Long.highestOneBit((long) fpPartSizeInMB - 1) << 1;
      if (partSizeInMB < 1) return 1;
      else if (partSizeInMB > MAX_PART_SIZE) return MAX_PART_SIZE;
      return partSizeInMB;
   }

   public long getRemaining() {
      return total - copied;
   }

   @Override
   public void startSlicing(Payload payload) {
      this.payload = checkNotNull(payload, "payload");
      this.copied = 0;
      this.total = checkNotNull(payload.getContentMetadata().getContentLength(), "contentLength");
      this.partSizeInMB = calculatePartSize(total);
      this.part = 0;
   }

   @Override
   public PayloadSlice nextSlice() {
      checkNotNull(this.payload, "payload");
      long sliceLength = Math.min(getRemaining(), partSizeInMB << 20);
      Payload slicedPayload = slicer.slice(payload, copied, sliceLength);
      ContentRange range = ContentRange.build(copied, copied + sliceLength - 1);
      copied += sliceLength;
      part++;
      return new PayloadSlice(slicedPayload, range, part);
   }

   @Override
   public boolean hasNext() {
      return this.getRemaining() != 0;
   }

   @Override
   public long getPartSizeInMB() {
      return partSizeInMB;
   }
}
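To make the sizing arithmetic concrete, here is a standalone restatement of calculatePartSize with a worked value; the class is illustrative only, and the constants mirror the defaults defined above:

```java
import static java.lang.Math.sqrt;

// Illustrative restatement of BaseSlicingStrategy.calculatePartSize, not part of the provider.
public class PartSizeExample {
   static final double RATIO = 0.32;          // mirrors DEFAULT_RATIO
   static final long MAX_PART_SIZE = 1L << 32; // mirrors SlicingStrategy.MAX_PART_SIZE

   static long calculatePartSize(long lengthInBytes) {
      long lengthInMB = (lengthInBytes / (1L << 20)) + 1;
      double fpPartSizeInMB = sqrt(RATIO * lengthInMB);
      long partSizeInMB = Long.highestOneBit((long) fpPartSizeInMB - 1) << 1;
      if (partSizeInMB < 1) return 1;
      if (partSizeInMB > MAX_PART_SIZE) return MAX_PART_SIZE;
      return partSizeInMB;
   }

   public static void main(String[] args) {
      // For a 1 GiB payload: lengthInMB = 1025, sqrt(0.32 * 1025) ~= 18.1,
      // rounded up to the next power of two -> 32 MB parts (roughly 32 parts).
      System.out.println(calculatePartSize(1L << 30)); // prints 32
   }
}
```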
@@ -0,0 +1,66 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.strategy.internal;

import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.blobstore.options.ListContainerOptions;
import org.jclouds.blobstore.strategy.ClearListStrategy;
import org.jclouds.glacier.GlacierClient;
import org.jclouds.glacier.blobstore.strategy.PollingStrategy;
import org.jclouds.glacier.domain.ArchiveMetadata;
import org.jclouds.glacier.domain.ArchiveMetadataCollection;
import org.jclouds.glacier.domain.InventoryRetrievalJobRequest;
import org.jclouds.rest.ResourceNotFoundException;

import com.google.common.base.Throwables;
import com.google.inject.Inject;
import com.google.inject.Singleton;

/**
 * An inventory will be retrieved to obtain the blob list and the method will iterate through it deleting the blobs.
 * This operation will take several hours and the result may be inaccurate (inventories are updated every 24 hours).
 */
@Singleton
public class ClearVaultStrategy implements ClearListStrategy {
   private final GlacierClient client;
   private final PollingStrategy pollingStrategy;

   @Inject
   public ClearVaultStrategy(GlacierClient client, PollingStrategy pollingStrategy) {
      this.client = checkNotNull(client, "client");
      this.pollingStrategy = checkNotNull(pollingStrategy, "pollingStrategy");
   }

   @Override
   public void execute(String container, ListContainerOptions listContainerOptions) {
      String jobId = client.initiateJob(container, InventoryRetrievalJobRequest.builder().build());
      try {
         if (pollingStrategy.waitForSuccess(container, jobId)) {
            ArchiveMetadataCollection archives = client.getInventoryRetrievalOutput(container, jobId);
            for (ArchiveMetadata archive : archives) {
               try {
                  client.deleteArchive(container, archive.getArchiveId());
               } catch (ResourceNotFoundException ignored) {
               }
            }
         }
      } catch (InterruptedException e) {
         Throwables.propagate(e);
      }
   }
}
@@ -0,0 +1,65 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.blobstore.strategy.internal;

import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.blobstore.domain.Blob;
import org.jclouds.glacier.GlacierClient;
import org.jclouds.glacier.blobstore.strategy.MultipartUploadStrategy;
import org.jclouds.glacier.blobstore.strategy.PayloadSlice;
import org.jclouds.glacier.blobstore.strategy.SlicingStrategy;

import com.google.common.collect.ImmutableMap;
import com.google.common.hash.HashCode;
import com.google.inject.Inject;
import com.google.inject.Singleton;

/**
 * This implementation uploads the parts sequentially.
 */
@Singleton
public class SequentialMultipartUploadStrategy implements MultipartUploadStrategy {
   private final GlacierClient client;
   private final SlicingStrategy slicer;

   @Inject
   public SequentialMultipartUploadStrategy(GlacierClient client, SlicingStrategy slicer) {
      this.client = checkNotNull(client, "client");
      this.slicer = checkNotNull(slicer, "slicer");
   }

   @Override
   public String execute(String container, Blob blob) {
      slicer.startSlicing(blob.getPayload());
      String uploadId = client.initiateMultipartUpload(container, slicer.getPartSizeInMB(),
            blob.getMetadata().getName());
      try {
         ImmutableMap.Builder<Integer, HashCode> hashes = ImmutableMap.builder();
         while (slicer.hasNext()) {
            PayloadSlice slice = slicer.nextSlice();
            hashes.put(slice.getPart(),
                  client.uploadPart(container, uploadId, slice.getRange(), slice.getPayload()));
         }
         return client.completeMultipartUpload(container, uploadId, hashes.build(),
               blob.getPayload().getContentMetadata().getContentLength());
      } catch (RuntimeException exception) {
         client.abortMultipartUpload(container, uploadId);
         throw exception;
      }
   }
}
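For reference, this strategy is reached from GlacierBlobStore.putBlob when multipart options are requested. A minimal sketch, assuming a Glacier-backed BlobStore is already available (the helper class and names here are hypothetical):

```java
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.options.PutOptions;

// Hypothetical helper; "blobStore" would be the Glacier-backed BlobStore from the earlier sketch.
public final class MultipartPutExample {
   private MultipartPutExample() {
   }

   public static String putLargeArchive(BlobStore blobStore, String vault, Blob blob) {
      // multipart() routes the put through SequentialMultipartUploadStrategy, which slices the
      // payload, uploads each part, and completes (or aborts on failure) the multipart upload.
      return blobStore.putBlob(vault, blob, PutOptions.Builder.multipart());
   }
}
```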
@@ -0,0 +1,78 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.config;

import java.util.concurrent.TimeUnit;

import javax.inject.Named;

import org.jclouds.Constants;
import org.jclouds.date.DateService;
import org.jclouds.date.TimeStamp;
import org.jclouds.glacier.GlacierClient;
import org.jclouds.glacier.filters.RequestAuthorizeSignature;
import org.jclouds.glacier.handlers.ParseGlacierErrorFromJsonContent;
import org.jclouds.http.HttpErrorHandler;
import org.jclouds.http.annotation.ClientError;
import org.jclouds.http.annotation.Redirection;
import org.jclouds.http.annotation.ServerError;
import org.jclouds.rest.ConfiguresHttpApi;
import org.jclouds.rest.config.HttpApiModule;

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.inject.Provides;
import com.google.inject.Scopes;

/**
 * Configures the mappings. Installs the Object and Parser modules.
 */
@ConfiguresHttpApi
public class GlacierHttpApiModule extends HttpApiModule<GlacierClient> {

   @Override
   protected void configure() {
      super.configure();
      bind(RequestAuthorizeSignature.class).in(Scopes.SINGLETON);
   }

   @Provides
   @TimeStamp
   protected String provideTimeStamp(@TimeStamp Supplier<String> cache) {
      return cache.get();
   }

   @Provides
   @TimeStamp
   Supplier<String> provideTimeStampCache(@Named(Constants.PROPERTY_SESSION_INTERVAL) long seconds,
         final DateService dateService) {
      return Suppliers.memoizeWithExpiration(new Supplier<String>() {

         @Override
         public String get() {
            return dateService.iso8601SecondsDateFormat().replaceAll("[-:]", "");
         }
      }, seconds, TimeUnit.SECONDS);
   }

   @Override
   protected void bindErrorHandlers() {
      bind(HttpErrorHandler.class).annotatedWith(Redirection.class).to(ParseGlacierErrorFromJsonContent.class);
      bind(HttpErrorHandler.class).annotatedWith(ClientError.class).to(ParseGlacierErrorFromJsonContent.class);
      bind(HttpErrorHandler.class).annotatedWith(ServerError.class).to(ParseGlacierErrorFromJsonContent.class);
   }
}
@@ -0,0 +1,33 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.config;

import org.jclouds.json.config.GsonModule.DateAdapter;
import org.jclouds.json.config.GsonModule.Iso8601DateAdapter;

import com.google.inject.AbstractModule;

/**
 * Configures the parser mappings.
 */
public class GlacierParserModule extends AbstractModule {

   @Override
   protected void configure() {
      bind(DateAdapter.class).to(Iso8601DateAdapter.class);
   }
}
@@ -0,0 +1,103 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.domain;

import static com.google.common.base.Preconditions.checkNotNull;

import java.beans.ConstructorProperties;
import java.util.Date;

import org.jclouds.javax.annotation.Nullable;

import com.google.common.base.Objects;
import com.google.common.collect.ComparisonChain;
import com.google.common.hash.HashCode;
import com.google.gson.annotations.SerializedName;

public class ArchiveMetadata implements Comparable<ArchiveMetadata> {

   @SerializedName("ArchiveId")
   private final String archiveId;
   @SerializedName("ArchiveDescription")
   private final String description;
   @SerializedName("CreationDate")
   private final Date creationDate;
   @SerializedName("Size")
   private final long size;
   @SerializedName("SHA256TreeHash")
   private final HashCode treeHash;

   @ConstructorProperties({ "ArchiveId", "ArchiveDescription", "CreationDate", "Size", "SHA256TreeHash" })
   public ArchiveMetadata(String archiveId, @Nullable String description, Date creationDate, long size, String hashCode) {
      this.archiveId = checkNotNull(archiveId, "archiveId");
      this.description = description;
      this.creationDate = (Date) checkNotNull(creationDate, "creationDate").clone();
      this.size = size;
      this.treeHash = HashCode.fromString(checkNotNull(hashCode, "hashCode"));
   }

   public String getArchiveId() {
      return archiveId;
   }

   public String getDescription() {
      return description;
   }

   public Date getCreationDate() {
      return (Date) creationDate.clone();
   }

   public long getSize() {
      return size;
   }

   public HashCode getTreeHash() {
      return treeHash;
   }

   @Override
   public int hashCode() {
      return Objects.hashCode(this.archiveId, this.description, this.creationDate, this.size, this.treeHash);
   }

   @Override
   public boolean equals(Object obj) {
      if (obj == null)
         return false;
      if (getClass() != obj.getClass())
         return false;
      ArchiveMetadata other = (ArchiveMetadata) obj;

      return Objects.equal(this.archiveId, other.archiveId)
            && Objects.equal(this.description, other.description)
            && Objects.equal(this.creationDate, other.creationDate)
            && Objects.equal(this.treeHash, other.treeHash)
            && Objects.equal(this.size, other.size);
   }

   @Override
   public String toString() {
      return "ArchiveMetadata [archiveId=" + archiveId + ", description=" + description
            + ", creationDate=" + creationDate + ", treeHash=" + treeHash + ", size=" + size + "]";
   }

   @Override
   public int compareTo(ArchiveMetadata o) {
      return ComparisonChain.start().compare(this.archiveId, o.archiveId).result();
   }
}
@@ -0,0 +1,57 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.domain;

import static com.google.common.base.Preconditions.checkNotNull;

import java.beans.ConstructorProperties;
import java.util.Date;
import java.util.Iterator;

import com.google.common.collect.FluentIterable;
import com.google.gson.annotations.SerializedName;

public class ArchiveMetadataCollection extends FluentIterable<ArchiveMetadata> {

   @SerializedName("ArchiveList")
   private final Iterable<ArchiveMetadata> archives;
   @SerializedName("VaultARN")
   private final String vaultARN;
   @SerializedName("InventoryDate")
   private final Date inventoryDate;

   @ConstructorProperties({ "ArchiveList", "VaultARN", "InventoryDate" })
   public ArchiveMetadataCollection(Iterable<ArchiveMetadata> archives, String vaultARN, Date inventoryDate) {
      this.archives = checkNotNull(archives, "archives");
      this.vaultARN = checkNotNull(vaultARN, "vaultARN");
      this.inventoryDate = (Date) checkNotNull(inventoryDate, "inventoryDate").clone();
   }

   @Override
   public Iterator<ArchiveMetadata> iterator() {
      return archives.iterator();
   }

   public String getVaultARN() {
      return vaultARN;
   }

   public Date getInventoryDate() {
      return (Date) inventoryDate.clone();
   }
}
@ -0,0 +1,117 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.jclouds.glacier.domain;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkNotNull;
|
||||
|
||||
import java.beans.ConstructorProperties;
|
||||
|
||||
import org.jclouds.glacier.util.ContentRange;
|
||||
import org.jclouds.javax.annotation.Nullable;
|
||||
|
||||
import com.google.common.base.Objects;
|
||||
import com.google.gson.annotations.SerializedName;
|
||||
|
||||
public class ArchiveRetrievalJobRequest extends JobRequest {
|
||||
private static final String TYPE = "archive-retrieval";
|
||||
|
||||
@SerializedName("ArchiveId")
|
||||
private final String archiveId;
|
||||
@SerializedName("Description")
|
||||
private final String description;
|
||||
@SerializedName("RetrievalByteRange")
|
||||
private final ContentRange range;
|
||||
|
||||
private ArchiveRetrievalJobRequest(String archiveId, @Nullable String description, @Nullable ContentRange range) {
|
||||
super(TYPE);
|
||||
this.archiveId = checkNotNull(archiveId, "archiveId");
|
||||
this.description = description;
|
||||
this.range = range;
|
||||
}
|
||||
|
||||
@ConstructorProperties({ "ArchiveId", "Description", "RetrievalByteRange" })
|
||||
private ArchiveRetrievalJobRequest(String archiveId, @Nullable String description, @Nullable String range) {
|
||||
this(archiveId, description, range == null ? null : ContentRange.fromString(range));
|
||||
}
|
||||
|
||||
public String getDescription() {
|
||||
return description;
|
||||
}
|
||||
|
||||
public ContentRange getRange() {
|
||||
return range;
|
||||
}
|
||||
|
||||
public String getArchiveId() {
|
||||
return archiveId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hashCode(this.archiveId, this.description, this.range);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null)
|
||||
return false;
|
||||
if (getClass() != obj.getClass())
|
||||
return false;
|
||||
ArchiveRetrievalJobRequest other = (ArchiveRetrievalJobRequest) obj;
|
||||
|
||||
return Objects.equal(this.archiveId, other.archiveId) && Objects.equal(this.description, other.description)
|
||||
&& Objects.equal(this.range, other.range);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "InventoryRetrievalParameters [archiveId=" + archiveId + ", description=" + description + ", range="
|
||||
+ range + "]";
|
||||
}
|
||||
|
||||
public static Builder builder() {
|
||||
return new Builder();
|
||||
}
|
||||
|
||||
public static class Builder {
|
||||
private String archiveId;
|
||||
private String description;
|
||||
private ContentRange range;
|
||||
|
||||
Builder() {
|
||||
}
|
||||
|
||||
public Builder archiveId(String archiveId) {
|
||||
this.archiveId = archiveId;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder description(String description) {
|
||||
this.description = description;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder range(ContentRange range) {
|
||||
this.range = range;
|
||||
return this;
|
||||
}
|
||||
|
||||
public ArchiveRetrievalJobRequest build() {
|
||||
return new ArchiveRetrievalJobRequest(archiveId, description, range);
|
||||
}
|
||||
}
|
||||
}
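A minimal sketch of composing a retrieval request with the builder above. The archive id is a placeholder, and the byte-range string assumes ContentRange.fromString accepts the AWS "StartByte-EndByte" form used for RetrievalByteRange values elsewhere in these classes.

import org.jclouds.glacier.domain.ArchiveRetrievalJobRequest;
import org.jclouds.glacier.util.ContentRange;

public class RetrievalRequestExample {
   public static ArchiveRetrievalJobRequest firstMebibyte(String archiveId) {
      return ArchiveRetrievalJobRequest.builder()
            .archiveId(archiveId)                         // required
            .description("first MiB of the archive")      // optional
            .range(ContentRange.fromString("0-1048575"))  // optional byte range (assumed format)
            .build();
   }
}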
@ -0,0 +1,79 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.domain;

import java.beans.ConstructorProperties;

import org.jclouds.javax.annotation.Nullable;

import com.google.common.base.Objects;

/**
 * Defines the attributes that describe a GlacierError.
 */
public class GlacierError {

   private final String code;
   private final String message;
   private final String type;

   @ConstructorProperties({ "code", "message", "type" })
   public GlacierError(@Nullable String code, @Nullable String message, @Nullable String type) {
      this.code = code;
      this.message = message;
      this.type = type;
   }

   public String getCode() {
      return code;
   }

   public String getMessage() {
      return message;
   }

   public String getType() {
      return type;
   }

   public boolean isValid() {
      return (this.code != null) && (this.message != null) && (this.type != null);
   }

   @Override
   public int hashCode() {
      return Objects.hashCode(this.code, this.message, this.type);
   }

   @Override
   public boolean equals(Object obj) {
      if (obj == null)
         return false;
      if (getClass() != obj.getClass())
         return false;
      GlacierError other = (GlacierError) obj;

      return Objects.equal(this.code, other.code)
            && Objects.equal(this.message, other.message)
            && Objects.equal(this.type, other.type);
   }

   @Override
   public String toString() {
      return "GlacierError [code=" + this.getCode() + ", message=" + this.getMessage() + ", type=" + this.getType() + "]";
   }
}
@ -0,0 +1,133 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.domain;

import java.beans.ConstructorProperties;

import org.jclouds.javax.annotation.Nullable;

import com.google.common.base.Objects;
import com.google.gson.annotations.SerializedName;

public class InventoryRetrievalJobRequest extends JobRequest {
   private static final String TYPE = "inventory-retrieval";

   @SerializedName("Description")
   private final String description;
   @SerializedName("Format")
   private final String format;
   @SerializedName("InventoryRetrievalParameters")
   private final InventoryRetrievalParameters parameters;

   @ConstructorProperties({ "Description", "Format" })
   private InventoryRetrievalJobRequest(@Nullable String description, @Nullable String format) {
      super(TYPE);
      this.description = description;
      this.format = format;
      this.parameters = new InventoryRetrievalParameters();
   }

   public String getDescription() {
      return description;
   }

   public String getFormat() {
      return format;
   }

   public InventoryRetrievalParameters getParameters() {
      return this.parameters;
   }

   @Override
   public int hashCode() {
      return Objects.hashCode(this.description, this.format, this.parameters);
   }

   @Override
   public boolean equals(Object obj) {
      if (obj == null)
         return false;
      if (getClass() != obj.getClass())
         return false;
      InventoryRetrievalJobRequest other = (InventoryRetrievalJobRequest) obj;

      return Objects.equal(this.description, other.description) && Objects.equal(this.format, other.format)
            && Objects.equal(this.parameters, other.parameters);
   }

   @Override
   public String toString() {
      return "InventoryRetrievalJobRequest [description=" + description + ", format=" + format
            + ", parameters=" + parameters + "]";
   }

   public static Builder builder() {
      return new Builder();
   }

   public static class Builder {
      private String description;
      private String format;
      private String startDate;
      private String endDate;
      private Integer limit;
      private String marker;

      Builder() {
      }

      public Builder description(String description) {
         this.description = description;
         return this;
      }

      public Builder format(String format) {
         this.format = format;
         return this;
      }

      public Builder startDate(String startDate) {
         this.startDate = startDate;
         return this;
      }

      public Builder endDate(String endDate) {
         this.endDate = endDate;
         return this;
      }

      public Builder limit(Integer limit) {
         this.limit = limit;
         return this;
      }

      public Builder marker(String marker) {
         this.marker = marker;
         return this;
      }

      public InventoryRetrievalJobRequest build() {
         InventoryRetrievalJobRequest request = new InventoryRetrievalJobRequest(description, format);
         request.getParameters().setEndDate(endDate);
         request.getParameters().setStartDate(startDate);
         request.getParameters().setLimit(limit);
         request.getParameters().setMarker(marker);
         return request;
      }
   }
}
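A minimal sketch of the builder above; the CSV format and the limit of 10 are illustrative values that build() copies into the nested InventoryRetrievalParameters.

import org.jclouds.glacier.domain.InventoryRetrievalJobRequest;

public class InventoryRequestExample {
   public static InventoryRetrievalJobRequest smallCsvInventory() {
      return InventoryRetrievalJobRequest.builder()
            .format("CSV")   // Glacier also understands "JSON"
            .limit(10)       // copied into InventoryRetrievalParameters by build()
            .build();
   }
}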
@ -0,0 +1,90 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.domain;

import com.google.common.base.Objects;
import com.google.gson.annotations.SerializedName;

public class InventoryRetrievalParameters {
   @SerializedName("StartDate")
   private String startDate;
   @SerializedName("EndDate")
   private String endDate;
   @SerializedName("Limit")
   private Integer limit;
   @SerializedName("Marker")
   private String marker;

   public InventoryRetrievalParameters() {
   }

   public String getStartDate() {
      return startDate;
   }

   public void setStartDate(String startDate) {
      this.startDate = startDate;
   }

   public String getEndDate() {
      return endDate;
   }

   public void setEndDate(String endDate) {
      this.endDate = endDate;
   }

   public Integer getLimit() {
      return limit;
   }

   public void setLimit(Integer limit) {
      this.limit = limit;
   }

   public String getMarker() {
      return marker;
   }

   public void setMarker(String marker) {
      this.marker = marker;
   }

   @Override
   public int hashCode() {
      return Objects.hashCode(this.startDate, this.endDate, this.limit, this.marker);
   }

   @Override
   public boolean equals(Object obj) {
      if (obj == null)
         return false;
      if (getClass() != obj.getClass())
         return false;
      InventoryRetrievalParameters other = (InventoryRetrievalParameters) obj;

      return Objects.equal(this.startDate, other.startDate) && Objects.equal(this.endDate, other.endDate)
            && Objects.equal(this.limit, other.limit)
            && Objects.equal(this.marker, other.marker);
   }

   @Override
   public String toString() {
      return "InventoryRetrievalParameters [startDate=" + startDate + ", endDate=" + endDate + ", limit=" + limit
            + ", marker=" + marker + "]";
   }
}
|
|
@ -0,0 +1,187 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.jclouds.glacier.domain;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkNotNull;
|
||||
|
||||
import java.beans.ConstructorProperties;
|
||||
import java.util.Date;
|
||||
|
||||
import org.jclouds.glacier.util.ContentRange;
|
||||
import org.jclouds.javax.annotation.Nullable;
|
||||
|
||||
import com.google.common.base.Objects;
|
||||
import com.google.gson.annotations.SerializedName;
|
||||
|
||||
public class JobMetadata {
|
||||
@SerializedName("Action")
|
||||
private final String action;
|
||||
@SerializedName("ArchiveId")
|
||||
private final String archiveId;
|
||||
@SerializedName("ArchiveSizeInBytes")
|
||||
private final Long archiveSizeInBytes;
|
||||
@SerializedName("ArchiveSHA256TreeHash")
|
||||
private final String archiveSHA256TreeHash;
|
||||
@SerializedName("Completed")
|
||||
private final boolean completed;
|
||||
@SerializedName("CompletionDate")
|
||||
private final Date completionDate;
|
||||
@SerializedName("CreationDate")
|
||||
private final Date creationDate;
|
||||
@SerializedName("InventorySizeInBytes")
|
||||
private final Long inventorySizeInBytes;
|
||||
@SerializedName("JobDescription")
|
||||
private final String jobDescription;
|
||||
@SerializedName("JobId")
|
||||
private final String jobId;
|
||||
@SerializedName("RetrievalByteRange")
|
||||
private final ContentRange retrievalByteRange;
|
||||
@SerializedName("SHA256TreeHash")
|
||||
private final String sha256TreeHash;
|
||||
@SerializedName("SNSTopic")
|
||||
private final String snsTopic;
|
||||
@SerializedName("StatusCode")
|
||||
private final JobStatus statusCode;
|
||||
@SerializedName("StatusMessage")
|
||||
private final String statusMessage;
|
||||
@SerializedName("VaultARN")
|
||||
private final String vaultArn;
|
||||
@SerializedName("InventoryRetrievalParameters")
|
||||
private final InventoryRetrievalParameters parameters;
|
||||
|
||||
@ConstructorProperties({ "Action", "ArchiveId", "ArchiveSizeInBytes", "ArchiveSHA256TreeHash", "Completed",
|
||||
"CompletionDate", "CreationDate", "InventorySizeInBytes", "JobDescription", "JobId", "RetrievalByteRange",
|
||||
"SHA256TreeHash", "SNSTopic", "StatusCode", "StatusMessage", "VaultARN", "InventoryRetrievalParameters" })
|
||||
public JobMetadata(String action, @Nullable String archiveId, @Nullable Long archiveSizeInBytes,
|
||||
@Nullable String archiveSHA256TreeHash, boolean completed, @Nullable Date completionDate, Date creationDate,
|
||||
@Nullable Long inventorySizeInBytes, @Nullable String jobDescription, String jobId,
|
||||
@Nullable String retrievalByteRange, @Nullable String sha256TreeHash, @Nullable String snsTopic,
|
||||
String statusCode, @Nullable String statusMessage, String vaultArn,
|
||||
@Nullable InventoryRetrievalParameters parameters) {
|
||||
super();
|
||||
this.action = checkNotNull(action, "action");
|
||||
this.archiveId = archiveId;
|
||||
this.archiveSizeInBytes = archiveSizeInBytes;
|
||||
this.archiveSHA256TreeHash = archiveSHA256TreeHash;
|
||||
this.completed = completed;
|
||||
this.completionDate = completionDate == null ? null : (Date) completionDate.clone();
|
||||
this.creationDate = (Date) checkNotNull(creationDate, "creationDate").clone();
|
||||
this.inventorySizeInBytes = inventorySizeInBytes;
|
||||
this.jobDescription = jobDescription;
|
||||
this.jobId = checkNotNull(jobId, "jobId");
|
||||
this.retrievalByteRange = retrievalByteRange == null ? null : ContentRange.fromString(retrievalByteRange);
|
||||
this.sha256TreeHash = sha256TreeHash;
|
||||
this.snsTopic = snsTopic;
|
||||
this.statusCode = JobStatus.fromString(checkNotNull(statusCode, "statusCode"));
|
||||
this.statusMessage = statusMessage;
|
||||
this.vaultArn = checkNotNull(vaultArn, "vaultArn");
|
||||
this.parameters = parameters;
|
||||
}
|
||||
|
||||
public String getAction() {
|
||||
return action;
|
||||
}
|
||||
|
||||
public String getArchiveId() {
|
||||
return archiveId;
|
||||
}
|
||||
|
||||
public Long getArchiveSizeInBytes() {
|
||||
return archiveSizeInBytes;
|
||||
}
|
||||
|
||||
public String getArchiveSHA256TreeHash() {
|
||||
return archiveSHA256TreeHash;
|
||||
}
|
||||
|
||||
public boolean isCompleted() {
|
||||
return completed;
|
||||
}
|
||||
|
||||
public Date getCompletionDate() {
|
||||
return completionDate == null ? null : (Date) completionDate.clone();
|
||||
}
|
||||
|
||||
public Date getCreationDate() {
|
||||
return (Date) creationDate.clone();
|
||||
}
|
||||
|
||||
public Long getInventorySizeInBytes() {
|
||||
return inventorySizeInBytes;
|
||||
}
|
||||
|
||||
public String getJobDescription() {
|
||||
return jobDescription;
|
||||
}
|
||||
|
||||
public String getJobId() {
|
||||
return jobId;
|
||||
}
|
||||
|
||||
public ContentRange getRetrievalByteRange() {
|
||||
return retrievalByteRange;
|
||||
}
|
||||
|
||||
public String getSha256TreeHash() {
|
||||
return sha256TreeHash;
|
||||
}
|
||||
|
||||
public String getSnsTopic() {
|
||||
return snsTopic;
|
||||
}
|
||||
|
||||
public JobStatus getStatusCode() {
|
||||
return statusCode;
|
||||
}
|
||||
|
||||
public String getStatusMessage() {
|
||||
return statusMessage;
|
||||
}
|
||||
|
||||
public String getVaultArn() {
|
||||
return vaultArn;
|
||||
}
|
||||
|
||||
public InventoryRetrievalParameters getParameters() {
|
||||
return parameters;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hashCode(this.action, this.archiveId, this.archiveSizeInBytes, this.archiveSHA256TreeHash,
|
||||
this.completed, this.completionDate, this.creationDate, this.inventorySizeInBytes, this.jobDescription,
|
||||
this.jobId, this.retrievalByteRange, this.sha256TreeHash, this.snsTopic, this.statusCode,
|
||||
this.statusMessage, this.vaultArn, this.parameters);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null)
|
||||
return false;
|
||||
if (getClass() != obj.getClass())
|
||||
return false;
|
||||
JobMetadata other = (JobMetadata) obj;
|
||||
|
||||
return Objects.equal(this.jobId, other.jobId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "JobMetadata [jobId=" + jobId + ", statusCode=" + statusCode + ", statusMessage=" + statusMessage + "]";
|
||||
}
|
||||
}
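A minimal sketch, assuming a JobMetadata instance (for example from a describe-job response) is already in hand, showing how the status fields above are typically inspected before fetching a job's output.

import org.jclouds.glacier.domain.JobMetadata;
import org.jclouds.glacier.domain.JobStatus;

public class JobPoller {
   // Returns true once the job has finished successfully; fails fast on FAILED.
   public static boolean isReady(JobMetadata job) {
      if (job.getStatusCode() == JobStatus.FAILED) {
         throw new IllegalStateException("Job " + job.getJobId() + " failed: " + job.getStatusMessage());
      }
      return job.isCompleted() && job.getStatusCode() == JobStatus.SUCCEEDED;
   }
}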
@ -0,0 +1,35 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.domain;

import static com.google.common.base.Preconditions.checkNotNull;

import com.google.gson.annotations.SerializedName;

public abstract class JobRequest {
   @SerializedName("Type")
   protected final String type;

   public JobRequest(String type) {
      this.type = checkNotNull(type, "type");
   }

   public String getType() {
      return type;
   }
}
@ -0,0 +1,29 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.domain;

import com.google.common.base.CaseFormat;

public enum JobStatus {
   SUCCEEDED,
   FAILED,
   IN_PROGRESS;

   public static JobStatus fromString(String symbol) {
      return JobStatus.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, symbol));
   }
}
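A small illustration of the CaseFormat conversion in fromString: Glacier reports statuses in UpperCamel form, which maps onto the UPPER_UNDERSCORE enum constants.

import org.jclouds.glacier.domain.JobStatus;

public class JobStatusExample {
   public static void main(String[] args) {
      System.out.println(JobStatus.fromString("InProgress")); // IN_PROGRESS
      System.out.println(JobStatus.fromString("Succeeded"));  // SUCCEEDED
   }
}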
|
|
@ -0,0 +1,141 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.jclouds.glacier.domain;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkNotNull;
|
||||
|
||||
import java.beans.ConstructorProperties;
|
||||
import java.util.Date;
|
||||
import java.util.Iterator;
|
||||
|
||||
import org.jclouds.collect.IterableWithMarker;
|
||||
import org.jclouds.glacier.options.PaginationOptions;
|
||||
import org.jclouds.javax.annotation.Nullable;
|
||||
|
||||
import com.google.common.base.Objects;
|
||||
import com.google.common.base.Optional;
|
||||
import com.google.common.collect.ComparisonChain;
|
||||
import com.google.gson.annotations.SerializedName;
|
||||
|
||||
/**
|
||||
* Defines the attributes needed for Multipart uploads. Extends IterableWithMarker to support requesting paginated
|
||||
* multipart upload parts.
|
||||
*/
|
||||
public class MultipartUploadMetadata extends IterableWithMarker<PartMetadata> implements Comparable<MultipartUploadMetadata> {
|
||||
|
||||
@SerializedName("ArchiveDescription")
|
||||
private final String archiveDescription;
|
||||
@SerializedName("CreationDate")
|
||||
private final Date creationDate;
|
||||
@SerializedName("MultipartUploadId")
|
||||
private final String multipartUploadId;
|
||||
@SerializedName("PartSizeInBytes")
|
||||
private final long partSizeInBytes;
|
||||
@SerializedName("VaultARN")
|
||||
private final String vaultARN;
|
||||
@SerializedName("Parts")
|
||||
private final Iterable<PartMetadata> parts;
|
||||
@SerializedName("Marker")
|
||||
private final String marker;
|
||||
|
||||
@ConstructorProperties({ "ArchiveDescription", "CreationDate", "MultipartUploadId", "PartSizeInBytes", "VaultARN",
|
||||
"Parts", "Marker" })
|
||||
public MultipartUploadMetadata(@Nullable String archiveDescription, Date creationDate, String multipartUploadId,
|
||||
long partSizeInBytes, String vaultARN, @Nullable Iterable<PartMetadata> parts, @Nullable String marker) {
|
||||
super();
|
||||
this.archiveDescription = archiveDescription;
|
||||
this.creationDate = (Date) checkNotNull(creationDate, "creationDate").clone();
|
||||
this.multipartUploadId = checkNotNull(multipartUploadId, "multipartUploadId");
|
||||
this.partSizeInBytes = partSizeInBytes;
|
||||
this.vaultARN = checkNotNull(vaultARN, "vaultARN");
|
||||
this.parts = parts;
|
||||
this.marker = marker;
|
||||
}
|
||||
|
||||
public String getArchiveDescription() {
|
||||
return archiveDescription;
|
||||
}
|
||||
|
||||
public Date getCreationDate() {
|
||||
return (Date) creationDate.clone();
|
||||
}
|
||||
|
||||
public String getMultipartUploadId() {
|
||||
return multipartUploadId;
|
||||
}
|
||||
|
||||
public long getPartSizeInBytes() {
|
||||
return partSizeInBytes;
|
||||
}
|
||||
|
||||
public long getPartSizeInMB() {
|
||||
return partSizeInBytes >> 20;
|
||||
}
|
||||
|
||||
public String getVaultARN() {
|
||||
return vaultARN;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<PartMetadata> iterator() {
|
||||
return parts == null ? null : parts.iterator();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Optional<Object> nextMarker() {
|
||||
return Optional.<Object>fromNullable(marker);
|
||||
}
|
||||
|
||||
public PaginationOptions nextPaginationOptions() {
|
||||
return PaginationOptions.class.cast(nextMarker().get());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hashCode(this.archiveDescription, this.creationDate, this.multipartUploadId, this.partSizeInBytes,
|
||||
this.vaultARN, this.marker, this.parts);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null)
|
||||
return false;
|
||||
if (getClass() != obj.getClass())
|
||||
return false;
|
||||
MultipartUploadMetadata other = (MultipartUploadMetadata) obj;
|
||||
|
||||
return Objects.equal(this.archiveDescription, other.archiveDescription)
|
||||
&& Objects.equal(this.creationDate, other.creationDate)
|
||||
&& Objects.equal(this.multipartUploadId, other.multipartUploadId)
|
||||
&& Objects.equal(this.partSizeInBytes, other.partSizeInBytes)
|
||||
&& Objects.equal(this.vaultARN, other.vaultARN)
|
||||
&& Objects.equal(this.marker, other.marker)
|
||||
&& Objects.equal(this.parts, other.parts);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "MultipartUploadMetadata [archiveDescription=" + archiveDescription + ", creationDate=" + creationDate
|
||||
+ ", multipartUploadId=" + multipartUploadId + ", partSizeInBytes=" + partSizeInBytes + ", vaultARN="
|
||||
+ vaultARN + ", marker=" + marker + ", parts=" + parts + "]";
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(MultipartUploadMetadata o) {
|
||||
return ComparisonChain.start().compare(this.creationDate, o.creationDate).result();
|
||||
}
|
||||
}
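A one-line illustration of getPartSizeInMB() above: shifting right by 20 bits divides by 2^20, so a 4 MiB part size reports 4.

public class PartSizeExample {
   public static void main(String[] args) {
      long partSizeInBytes = 4L << 20;            // a 4 MiB part size
      System.out.println(partSizeInBytes >> 20);  // prints 4, matching getPartSizeInMB()
   }
}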
@ -0,0 +1,58 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.domain;

import static com.google.common.base.Preconditions.checkNotNull;

import java.beans.ConstructorProperties;
import java.util.Iterator;

import org.jclouds.collect.IterableWithMarker;
import org.jclouds.glacier.options.PaginationOptions;

import com.google.common.base.Optional;
import com.google.gson.annotations.SerializedName;

/**
 * Paginated collection used to store Job listing information.
 */
public class PaginatedJobCollection extends IterableWithMarker<JobMetadata> {
   @SerializedName("JobList")
   private final Iterable<JobMetadata> jobs;
   @SerializedName("Marker")
   private final String marker;

   @ConstructorProperties({ "JobList", "Marker" })
   public PaginatedJobCollection(Iterable<JobMetadata> jobs, String marker) {
      this.jobs = checkNotNull(jobs, "jobs");
      this.marker = marker;
   }

   @Override
   public Iterator<JobMetadata> iterator() {
      return jobs.iterator();
   }

   @Override
   public Optional<Object> nextMarker() {
      return Optional.<Object>fromNullable(marker);
   }

   public PaginationOptions nextPaginationOptions() {
      return PaginationOptions.class.cast(nextMarker().get());
   }
}
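A minimal pagination sketch. The page source interface is hypothetical (it stands in for whatever call returns a PaginatedJobCollection for a given marker); it only shows how nextMarker() drives the loop.

import org.jclouds.glacier.domain.JobMetadata;
import org.jclouds.glacier.domain.PaginatedJobCollection;

import com.google.common.base.Optional;

public class JobLister {
   // Hypothetical stand-in for the API call that produces a page of jobs for a marker.
   public interface JobPageSource {
      PaginatedJobCollection page(String marker);
   }

   public static void listAll(JobPageSource source) {
      String marker = null;
      do {
         PaginatedJobCollection page = source.page(marker);
         for (JobMetadata job : page) {
            System.out.println(job);
         }
         Optional<Object> next = page.nextMarker();
         marker = next.isPresent() ? next.get().toString() : null;
      } while (marker != null);
   }
}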
@ -0,0 +1,60 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.domain;

import static com.google.common.base.Preconditions.checkNotNull;

import java.beans.ConstructorProperties;
import java.util.Iterator;

import org.jclouds.collect.IterableWithMarker;
import org.jclouds.glacier.options.PaginationOptions;
import org.jclouds.javax.annotation.Nullable;

import com.google.common.base.Optional;
import com.google.gson.annotations.SerializedName;

/**
 * Paginated collection used to store multipart upload lists.
 */
public class PaginatedMultipartUploadCollection extends IterableWithMarker<MultipartUploadMetadata> {

   @SerializedName("UploadsList")
   private final Iterable<MultipartUploadMetadata> uploads;
   @SerializedName("Marker")
   private final String marker;

   @ConstructorProperties({ "UploadsList", "Marker" })
   public PaginatedMultipartUploadCollection(Iterable<MultipartUploadMetadata> uploads, @Nullable String marker) {
      this.uploads = checkNotNull(uploads, "uploads");
      this.marker = marker;
   }

   @Override
   public Iterator<MultipartUploadMetadata> iterator() {
      return uploads.iterator();
   }

   @Override
   public Optional<Object> nextMarker() {
      return Optional.<Object>fromNullable(marker);
   }

   public PaginationOptions nextPaginationOptions() {
      return PaginationOptions.class.cast(nextMarker().get());
   }
}
@ -0,0 +1,59 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.domain;

import static com.google.common.base.Preconditions.checkNotNull;

import java.beans.ConstructorProperties;
import java.util.Iterator;

import org.jclouds.collect.IterableWithMarker;
import org.jclouds.glacier.options.PaginationOptions;

import com.google.common.base.Optional;
import com.google.gson.annotations.SerializedName;

/**
 * Paginated collection used to store Vault listing information.
 */
public class PaginatedVaultCollection extends IterableWithMarker<VaultMetadata> {

   @SerializedName("VaultList")
   private final Iterable<VaultMetadata> vaults;
   @SerializedName("Marker")
   private final String marker;

   @ConstructorProperties({ "VaultList", "Marker" })
   public PaginatedVaultCollection(Iterable<VaultMetadata> vaults, String marker) {
      this.vaults = checkNotNull(vaults, "vaults");
      this.marker = marker;
   }

   @Override
   public Iterator<VaultMetadata> iterator() {
      return vaults.iterator();
   }

   @Override
   public Optional<Object> nextMarker() {
      return Optional.<Object>fromNullable(marker);
   }

   public PaginationOptions nextPaginationOptions() {
      return PaginationOptions.class.cast(nextMarker().get());
   }
}
@ -0,0 +1,53 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.domain;

import static com.google.common.base.Preconditions.checkNotNull;

import java.beans.ConstructorProperties;

import org.jclouds.glacier.util.ContentRange;

import com.google.common.hash.HashCode;
import com.google.gson.annotations.SerializedName;

/**
 * Defines the attributes needed for a multipart upload part.
 */
public class PartMetadata {

   @SerializedName("SHA256TreeHash")
   private final HashCode treeHash;
   @SerializedName("RangeInBytes")
   private final ContentRange range;

   @ConstructorProperties({ "SHA256TreeHash", "RangeInBytes" })
   public PartMetadata(String treeHash, String range) {
      this.treeHash = HashCode.fromString(checkNotNull(treeHash, "treeHash"));
      this.range = ContentRange.fromString(checkNotNull(range, "range"));
   }

   public ContentRange getRange() {
      return range;
   }

   public HashCode getTreeHash() {
      return treeHash;
   }
}
|
|
@ -0,0 +1,116 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.jclouds.glacier.domain;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkNotNull;
|
||||
|
||||
import java.beans.ConstructorProperties;
|
||||
import java.util.Date;
|
||||
|
||||
import org.jclouds.javax.annotation.Nullable;
|
||||
|
||||
import com.google.common.base.Objects;
|
||||
import com.google.common.collect.ComparisonChain;
|
||||
import com.google.gson.annotations.SerializedName;
|
||||
|
||||
/**
|
||||
* Defines the attributes needed to describe a vault.
|
||||
*/
|
||||
public class VaultMetadata implements Comparable<VaultMetadata> {
|
||||
|
||||
@SerializedName("VaultName")
|
||||
private final String vaultName;
|
||||
@SerializedName("VaultARN")
|
||||
private final String vaultARN;
|
||||
@SerializedName("CreationDate")
|
||||
private final Date creationDate;
|
||||
@SerializedName("LastInventoryDate")
|
||||
private final Date lastInventoryDate;
|
||||
@SerializedName("NumberOfArchives")
|
||||
private final long numberOfArchives;
|
||||
@SerializedName("SizeInBytes")
|
||||
private final long sizeInBytes;
|
||||
|
||||
@ConstructorProperties({ "VaultName", "VaultARN", "CreationDate", "LastInventoryDate", "NumberOfArchives",
|
||||
"SizeInBytes" })
|
||||
public VaultMetadata(String vaultName, String vaultARN, Date creationDate, @Nullable Date lastInventoryDate,
|
||||
long numberOfArchives, long sizeInBytes) {
|
||||
this.vaultName = checkNotNull(vaultName, "vaultName");
|
||||
this.vaultARN = checkNotNull(vaultARN, "vaultARN");
|
||||
this.creationDate = (Date) checkNotNull(creationDate, "creationDate").clone();
|
||||
this.lastInventoryDate = lastInventoryDate;
|
||||
this.numberOfArchives = numberOfArchives;
|
||||
this.sizeInBytes = sizeInBytes;
|
||||
}
|
||||
|
||||
public String getVaultName() {
|
||||
return vaultName;
|
||||
}
|
||||
|
||||
public String getVaultARN() {
|
||||
return vaultARN;
|
||||
}
|
||||
|
||||
public Date getCreationDate() {
|
||||
return (Date) creationDate.clone();
|
||||
}
|
||||
|
||||
public Date getLastInventoryDate() {
|
||||
return lastInventoryDate;
|
||||
}
|
||||
|
||||
public long getNumberOfArchives() {
|
||||
return numberOfArchives;
|
||||
}
|
||||
|
||||
public long getSizeInBytes() {
|
||||
return sizeInBytes;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hashCode(this.vaultName, this.vaultARN, this.creationDate, this.lastInventoryDate,
|
||||
this.numberOfArchives, this.sizeInBytes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null)
|
||||
return false;
|
||||
if (getClass() != obj.getClass())
|
||||
return false;
|
||||
VaultMetadata other = (VaultMetadata) obj;
|
||||
|
||||
return Objects.equal(this.vaultName, other.vaultName) && Objects.equal(this.vaultARN, other.vaultARN)
|
||||
&& Objects.equal(this.creationDate, other.creationDate)
|
||||
&& Objects.equal(this.lastInventoryDate, other.lastInventoryDate)
|
||||
&& Objects.equal(this.numberOfArchives, other.numberOfArchives)
|
||||
&& Objects.equal(this.sizeInBytes, other.sizeInBytes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "VaultMetadata [vaultName=" + vaultName + ", vaultARN=" + vaultARN + ", creationDate=" + creationDate
|
||||
+ ", lastInventoryDate=" + lastInventoryDate + ", numberOfArchives=" + numberOfArchives + ", sizeInBytes="
|
||||
+ sizeInBytes + "]";
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(VaultMetadata o) {
|
||||
return ComparisonChain.start().compare(this.vaultName, o.getVaultName()).result();
|
||||
}
|
||||
}
@ -0,0 +1,35 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.fallbacks;

import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Throwables.propagate;
import static org.jclouds.util.Throwables2.getFirstThrowableOfType;

import org.jclouds.Fallback;

/**
 * Returns false when encountering an IllegalArgumentException.
 */
public class FalseOnIllegalArgumentException implements Fallback<Boolean> {
   @Override
   public Boolean createOrPropagate(Throwable t) throws Exception {
      if (getFirstThrowableOfType(checkNotNull(t, "throwable"), IllegalArgumentException.class) != null)
         return false;
      throw propagate(t);
   }
}
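A minimal sketch of the fallback contract: an IllegalArgumentException anywhere in the cause chain yields false, anything else is rethrown.

import org.jclouds.glacier.fallbacks.FalseOnIllegalArgumentException;

public class FallbackExample {
   public static void main(String[] args) throws Exception {
      FalseOnIllegalArgumentException fallback = new FalseOnIllegalArgumentException();
      // A wrapped IllegalArgumentException resolves to false instead of propagating.
      System.out.println(fallback.createOrPropagate(
            new RuntimeException(new IllegalArgumentException("bad vault name"))));
   }
}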
@ -0,0 +1,76 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.filters;

import static com.google.common.base.Preconditions.checkNotNull;

import javax.annotation.Resource;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Provider;
import javax.inject.Singleton;

import org.jclouds.Constants;
import org.jclouds.crypto.Crypto;
import org.jclouds.date.TimeStamp;
import org.jclouds.domain.Credentials;
import org.jclouds.glacier.reference.GlacierHeaders;
import org.jclouds.glacier.util.AWSRequestSignerV4;
import org.jclouds.http.HttpException;
import org.jclouds.http.HttpRequest;
import org.jclouds.http.HttpRequestFilter;
import org.jclouds.http.HttpUtils;
import org.jclouds.logging.Logger;

import com.google.common.base.Supplier;
import com.google.common.net.HttpHeaders;

/**
 * Signs the request using the AWSRequestSignerV4.
 */
@Singleton
public class RequestAuthorizeSignature implements HttpRequestFilter {

   private final AWSRequestSignerV4 signer;

   @Resource
   @Named(Constants.LOGGER_SIGNATURE)
   Logger signatureLog = Logger.NULL;

   private final Provider<String> timeStampProvider;
   private final HttpUtils utils;

   @Inject
   public RequestAuthorizeSignature(@TimeStamp Provider<String> timeStampProvider,
         @org.jclouds.location.Provider Supplier<Credentials> creds, Crypto crypto, HttpUtils utils) {
      checkNotNull(creds, "creds");
      this.signer = new AWSRequestSignerV4(creds.get().identity, creds.get().credential, checkNotNull(crypto, "crypto"));
      this.timeStampProvider = checkNotNull(timeStampProvider, "timeStampProvider");
      this.utils = checkNotNull(utils, "utils");
   }

   @Override
   public HttpRequest filter(HttpRequest request) throws HttpException {
      request = request.toBuilder().removeHeader(HttpHeaders.DATE)
            .replaceHeader(GlacierHeaders.ALTERNATE_DATE, timeStampProvider.get())
            .replaceHeader(HttpHeaders.HOST, request.getEndpoint().getHost()).build();
      utils.logRequest(signatureLog, request, ">>");
      request = this.signer.sign(request);
      utils.logRequest(signatureLog, request, "<<");
      return request;
   }
}
@ -0,0 +1,36 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.functions;

import org.jclouds.http.HttpException;
import org.jclouds.http.HttpResponse;
import org.jclouds.io.Payload;

import com.google.common.base.Function;

/**
 * Gets the payload from the http response.
 */
public class GetPayloadFromHttpContent implements Function<HttpResponse, Payload> {

   @Override
   public Payload apply(HttpResponse from) {
      if (from.getPayload() == null)
         throw new HttpException("Did not receive payload");
      return from.getPayload();
   }
}
@ -0,0 +1,37 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.functions;

import org.jclouds.glacier.reference.GlacierHeaders;
import org.jclouds.http.HttpException;
import org.jclouds.http.HttpResponse;

import com.google.common.base.Function;

/**
 * Parses the archiveId from the HttpResponse.
 */
public class ParseArchiveIdHeader implements Function<HttpResponse, String> {

   @Override
   public String apply(HttpResponse from) {
      String id = from.getFirstHeaderOrNull(GlacierHeaders.ARCHIVE_ID);
      if (id == null)
         throw new HttpException("Did not receive ArchiveId");
      return id;
   }
}
|
|
@ -0,0 +1,32 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.jclouds.glacier.functions;
|
||||
|
||||
import org.jclouds.glacier.domain.ArchiveMetadataCollection;
|
||||
import org.jclouds.http.functions.ParseJson;
|
||||
import org.jclouds.json.Json;
|
||||
|
||||
import com.google.inject.Inject;
|
||||
import com.google.inject.TypeLiteral;
|
||||
|
||||
public class ParseArchiveMetadataCollectionFromHttpContent extends ParseJson<ArchiveMetadataCollection> {
|
||||
|
||||
@Inject
|
||||
public ParseArchiveMetadataCollectionFromHttpContent(Json json) {
|
||||
super(json, TypeLiteral.get(ArchiveMetadataCollection.class));
|
||||
}
|
||||
}
|
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.jclouds.glacier.functions;
|
||||
|
||||
import org.jclouds.glacier.reference.GlacierHeaders;
|
||||
import org.jclouds.http.HttpException;
|
||||
import org.jclouds.http.HttpResponse;
|
||||
|
||||
import com.google.common.base.Function;
|
||||
|
||||
/**
|
||||
* Parses the jobId from the HttpResponse.
|
||||
*/
|
||||
public class ParseJobIdHeader implements Function<HttpResponse, String> {
|
||||
|
||||
@Override
|
||||
public String apply(HttpResponse from) {
|
||||
String id = from.getFirstHeaderOrNull(GlacierHeaders.JOB_ID);
|
||||
if (id == null) {
|
||||
throw new HttpException("Did not receive JobId");
|
||||
}
|
||||
return id;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.jclouds.glacier.functions;
|
||||
|
||||
import org.jclouds.glacier.domain.JobMetadata;
|
||||
import org.jclouds.http.functions.ParseJson;
|
||||
import org.jclouds.json.Json;
|
||||
|
||||
import com.google.inject.Inject;
|
||||
import com.google.inject.TypeLiteral;
|
||||
|
||||
/**
|
||||
* Parses the JSON job information from the HttpResponse.
|
||||
*/
|
||||
public class ParseJobMetadataFromHttpContent extends ParseJson<JobMetadata> {
|
||||
|
||||
@Inject
|
||||
public ParseJobMetadataFromHttpContent(Json json) {
|
||||
super(json, TypeLiteral.get(JobMetadata.class));
|
||||
}
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.jclouds.glacier.functions;
|
||||
|
||||
import org.jclouds.glacier.domain.PaginatedJobCollection;
|
||||
import org.jclouds.http.functions.ParseJson;
|
||||
import org.jclouds.json.Json;
|
||||
|
||||
import com.google.inject.Inject;
|
||||
import com.google.inject.TypeLiteral;
|
||||
|
||||
/**
|
||||
* Parses the JSON job list from the HttpResponse.
|
||||
*/
|
||||
public class ParseJobMetadataListFromHttpContent extends ParseJson<PaginatedJobCollection> {
|
||||
|
||||
@Inject
|
||||
public ParseJobMetadataListFromHttpContent(Json json) {
|
||||
super(json, TypeLiteral.get(PaginatedJobCollection.class));
|
||||
}
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.jclouds.glacier.functions;
|
||||
|
||||
import org.jclouds.glacier.reference.GlacierHeaders;
|
||||
import org.jclouds.http.HttpException;
|
||||
import org.jclouds.http.HttpResponse;
|
||||
|
||||
import com.google.common.base.Function;
|
||||
|
||||
/**
|
||||
* Parses the multipart upload id from the HttpResponse.
|
||||
*/
|
||||
public class ParseMultipartUploadIdHeader implements Function<HttpResponse, String> {
|
||||
@Override
|
||||
public String apply(HttpResponse from) {
|
||||
String id = from.getFirstHeaderOrNull(GlacierHeaders.MULTIPART_UPLOAD_ID);
|
||||
if (id == null)
|
||||
throw new HttpException("Did not receive Multipart upload Id");
|
||||
return id;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.functions;

import org.jclouds.glacier.domain.PaginatedMultipartUploadCollection;
import org.jclouds.http.functions.ParseJson;
import org.jclouds.json.Json;

import com.google.inject.Inject;
import com.google.inject.TypeLiteral;

/**
 * Parses the JSON multipart upload list from the HttpResponse.
 */
public class ParseMultipartUploadListFromHttpContent extends ParseJson<PaginatedMultipartUploadCollection> {

   @Inject
   public ParseMultipartUploadListFromHttpContent(Json json) {
      super(json, TypeLiteral.get(PaginatedMultipartUploadCollection.class));
   }
}

@ -0,0 +1,35 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.functions;

import org.jclouds.glacier.domain.MultipartUploadMetadata;
import org.jclouds.http.functions.ParseJson;
import org.jclouds.json.Json;

import com.google.inject.Inject;
import com.google.inject.TypeLiteral;

/**
 * Parses the JSON multipart upload part list from the HttpResponse.
 */
public class ParseMultipartUploadPartListFromHttpContent extends ParseJson<MultipartUploadMetadata> {

   @Inject
   public ParseMultipartUploadPartListFromHttpContent(Json json) {
      super(json, TypeLiteral.get(MultipartUploadMetadata.class));
   }
}

@ -0,0 +1,37 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.functions;

import org.jclouds.glacier.reference.GlacierHeaders;
import org.jclouds.http.HttpException;
import org.jclouds.http.HttpResponse;

import com.google.common.base.Function;
import com.google.common.hash.HashCode;

/**
 * Parses the tree hash header from the HttpResponse.
 */
public class ParseMultipartUploadTreeHashHeader implements Function<HttpResponse, HashCode> {
   @Override
   public HashCode apply(HttpResponse from) {
      String treehash = from.getFirstHeaderOrNull(GlacierHeaders.TREE_HASH);
      if (treehash == null)
         throw new HttpException("Did not receive Tree hash");
      return HashCode.fromString(treehash);
   }
}

@ -0,0 +1,35 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.functions;

import org.jclouds.glacier.domain.VaultMetadata;
import org.jclouds.http.functions.ParseJson;
import org.jclouds.json.Json;

import com.google.inject.Inject;
import com.google.inject.TypeLiteral;

/**
 * Parses the JSON vault information from the HttpResponse.
 */
public class ParseVaultMetadataFromHttpContent extends ParseJson<VaultMetadata> {

   @Inject
   public ParseVaultMetadataFromHttpContent(Json json) {
      super(json, TypeLiteral.get(VaultMetadata.class));
   }
}

@ -0,0 +1,35 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.functions;

import org.jclouds.glacier.domain.PaginatedVaultCollection;
import org.jclouds.http.functions.ParseJson;
import org.jclouds.json.Json;

import com.google.inject.Inject;
import com.google.inject.TypeLiteral;

/**
 * Parses the JSON vault list from the HttpResponse.
 */
public class ParseVaultMetadataListFromHttpContent extends ParseJson<PaginatedVaultCollection> {

   @Inject
   public ParseVaultMetadataListFromHttpContent(Json json) {
      super(json, TypeLiteral.get(PaginatedVaultCollection.class));
   }
}

@ -0,0 +1,66 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.handlers;

import org.jclouds.glacier.GlacierResponseException;
import org.jclouds.glacier.domain.GlacierError;
import org.jclouds.http.HttpCommand;
import org.jclouds.http.HttpErrorHandler;
import org.jclouds.http.HttpResponse;
import org.jclouds.http.HttpResponseException;
import org.jclouds.http.functions.ParseJson;
import org.jclouds.json.Json;
import org.jclouds.rest.AuthorizationException;
import org.jclouds.rest.InsufficientResourcesException;
import org.jclouds.rest.ResourceNotFoundException;

import com.google.inject.Inject;
import com.google.inject.TypeLiteral;

/**
 * Parses a GlacierError from the JSON response content and refines it into a jclouds exception.
 */
public class ParseGlacierErrorFromJsonContent extends ParseJson<GlacierError> implements HttpErrorHandler {

   @Inject
   public ParseGlacierErrorFromJsonContent(Json json) {
      super(json, TypeLiteral.get(GlacierError.class));
   }

   private static Exception refineException(GlacierError error, Exception exception) {
      if ("AccessDeniedException".equals(error.getCode())) {
         return new AuthorizationException(error.getMessage(), exception);
      } else if ("InvalidParameterValueException".equals(error.getCode())) {
         return new IllegalArgumentException(error.getMessage(), exception);
      } else if ("LimitExceededException".equals(error.getCode())) {
         return new InsufficientResourcesException(error.getMessage(), exception);
      } else if ("ResourceNotFoundException".equals(error.getCode())) {
         return new ResourceNotFoundException(error.getMessage(), exception);
      }
      return exception;
   }

   @Override
   public void handleError(HttpCommand command, HttpResponse response) {
      GlacierError error = this.apply(response);
      Exception exception = error.isValid()
            ? refineException(error, new GlacierResponseException(command, response, error))
            : new HttpResponseException(command, response);
      command.setException(exception);
   }

}

@ -0,0 +1,58 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.options;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.http.options.BaseHttpRequestOptions;

/**
 * Pagination options used to page through collection responses.
 *
 * @see <a href="http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vaults-get.html" />
 */
public class PaginationOptions extends BaseHttpRequestOptions {

   private static final int MIN_LIMIT = 1;
   private static final int MAX_LIMIT = 1000;

   public PaginationOptions marker(String marker) {
      queryParameters.put("marker", checkNotNull(marker, "marker"));
      return this;
   }

   public PaginationOptions limit(int limit) {
      checkArgument(limit >= MIN_LIMIT, "limit must be >= " + MIN_LIMIT + " but was " + limit);
      checkArgument(limit <= MAX_LIMIT, "limit must be <= " + MAX_LIMIT + " but was " + limit);
      queryParameters.put("limit", Integer.toString(limit));
      return this;
   }

   public static class Builder {

      public static PaginationOptions marker(String marker) {
         PaginationOptions options = new PaginationOptions();
         return options.marker(marker);
      }

      public static PaginationOptions limit(int limit) {
         PaginationOptions options = new PaginationOptions();
         return options.limit(limit);
      }
   }
}
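Editorial note: the options above map directly onto the limit and marker query parameters of the Glacier listing calls. A minimal usage sketch (the marker value is illustrative only, not taken from this commit):

// Ask for at most 10 items, resuming after a marker returned by a previous listing.
PaginationOptions options = PaginationOptions.Builder.limit(10).marker("examplemarker");
// The request built with these options then carries the query string limit=10&marker=examplemarker.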
@ -0,0 +1,47 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.predicates.validators;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Strings.isNullOrEmpty;

import org.jclouds.predicates.Validator;

import com.google.common.base.CharMatcher;
import com.google.inject.Singleton;

/**
 * Validates the archive description string.
 */
@Singleton
public final class DescriptionValidator extends Validator<String> {

   private static final int MAX_DESC_LENGTH = 1024;

   private static final CharMatcher DESCRIPTION_ACCEPTABLE_RANGE = CharMatcher.inRange(' ', '~');

   @Override
   public void validate(String description) {
      if (isNullOrEmpty(description))
         return;
      checkArgument(description.length() <= MAX_DESC_LENGTH,
            "Description can't be longer than %s characters but was %s",
            MAX_DESC_LENGTH, description.length());
      checkArgument(DESCRIPTION_ACCEPTABLE_RANGE.matchesAllOf(description),
            "Description should have ASCII values between 32 and 126.");
   }
}

@ -0,0 +1,40 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.predicates.validators;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.predicates.Validator;

import com.google.inject.Singleton;

/**
 * Validates the part size parameter used when initiating multipart uploads.
 */
@Singleton
public final class PartSizeValidator extends Validator<Long> {
   private static final int MIN_PART_SIZE = 1;
   private static final int MAX_PART_SIZE = 4096;

   @Override
   public void validate(Long partSizeInMB) throws IllegalArgumentException {
      checkNotNull(partSizeInMB, "partSizeInMB");
      checkArgument(!(partSizeInMB < MIN_PART_SIZE || partSizeInMB > MAX_PART_SIZE || Long.bitCount(partSizeInMB) != 1),
            "partSizeInMB must be a power of 2 between 1 and 4096.");
   }
}
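Editorial note: the Long.bitCount(partSizeInMB) != 1 test is what enforces the power-of-two rule, so only part sizes with a single set bit (1, 2, 4, ..., 4096 MiB) pass. A quick sketch of the resulting behaviour:

PartSizeValidator validator = new PartSizeValidator();
validator.validate(1L);    // ok: 2^0
validator.validate(4096L); // ok: 2^12, the Glacier maximum
validator.validate(3L);    // throws IllegalArgumentException: not a power of 2
validator.validate(8192L); // throws IllegalArgumentException: above the 4096 limit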
@ -0,0 +1,42 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.predicates.validators;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

import org.jclouds.io.Payload;
import org.jclouds.predicates.Validator;

import com.google.inject.Singleton;

/**
 * Validates the Glacier archive payload being uploaded.
 */
@Singleton
public final class PayloadValidator extends Validator<Payload> {

   private static final long MAX_CONTENT_SIZE = 1L << 32; // 4 GiB

   @Override
   public void validate(Payload payload) {
      checkNotNull(payload, "Archive must have a payload.");
      checkNotNull(payload.getContentMetadata().getContentLength(), "Content length must be set.");
      checkArgument(payload.getContentMetadata().getContentLength() <= MAX_CONTENT_SIZE,
            "Max content size is 4 GiB but was %s", payload.getContentMetadata().getContentLength());
   }
}

@ -0,0 +1,50 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.predicates.validators;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Strings.isNullOrEmpty;

import org.jclouds.predicates.Validator;

import com.google.common.base.CharMatcher;
import com.google.inject.Singleton;

/**
 * Validates Vault names according to Amazon Vault conventions.
 *
 * @see <a href="http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-put.html" />
 */
@Singleton
public final class VaultNameValidator extends Validator<String> {

   private static final int MIN_LENGTH = 1;
   private static final int MAX_LENGTH = 255;

   private static final CharMatcher VAULT_NAME_ACCEPTABLE_RANGE = CharMatcher.inRange('a', 'z')
         .or(CharMatcher.inRange('A', 'Z'))
         .or(CharMatcher.inRange('0', '9'))
         .or(CharMatcher.anyOf("-_."));

   @Override
   public void validate(String vaultName) {
      checkArgument(!isNullOrEmpty(vaultName) && vaultName.length() <= MAX_LENGTH,
            "Can't be null or empty. Length must be %s to %s symbols.", MIN_LENGTH, MAX_LENGTH);
      checkArgument(VAULT_NAME_ACCEPTABLE_RANGE.matchesAllOf(vaultName),
            "Should contain only ASCII letters and numbers, underscores, hyphens, or periods.");
   }
}
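Editorial note: per the vault-creation documentation linked above, names are limited to 255 characters drawn from letters, digits, '-', '_' and '.'. For example (illustrative values only):

VaultNameValidator validator = new VaultNameValidator();
validator.validate("my_vault-1.backup"); // ok
validator.validate("bad name!");         // throws IllegalArgumentException: space and '!' are not allowed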
@ -0,0 +1,40 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.reference;

/**
 * Headers used by Amazon Glacier.
 */
public final class GlacierHeaders {

   public static final String DEFAULT_AMAZON_HEADERTAG = "amz";
   public static final String HEADER_PREFIX = "x-" + DEFAULT_AMAZON_HEADERTAG + "-";
   public static final String REQUEST_ID = HEADER_PREFIX + "RequestId";
   public static final String VERSION = HEADER_PREFIX + "glacier-version";
   public static final String ALTERNATE_DATE = HEADER_PREFIX + "date";
   public static final String ARCHIVE_DESCRIPTION = HEADER_PREFIX + "archive-description";
   public static final String LINEAR_HASH = HEADER_PREFIX + "content-sha256";
   public static final String TREE_HASH = HEADER_PREFIX + "sha256-tree-hash";
   public static final String ARCHIVE_ID = HEADER_PREFIX + "archive-id";
   public static final String MULTIPART_UPLOAD_ID = HEADER_PREFIX + "multipart-upload-id";
   public static final String PART_SIZE = HEADER_PREFIX + "part-size";
   public static final String ARCHIVE_SIZE = HEADER_PREFIX + "archive-size";
   public static final String JOB_ID = HEADER_PREFIX + "job-id";

   private GlacierHeaders() {
   }
}
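Editorial note: with the shared "x-amz-" prefix applied, these constants resolve to the header names used on the wire, for example:

assert GlacierHeaders.TREE_HASH.equals("x-amz-sha256-tree-hash");
assert GlacierHeaders.VERSION.equals("x-amz-glacier-version");
assert GlacierHeaders.MULTIPART_UPLOAD_ID.equals("x-amz-multipart-upload-id");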
@ -0,0 +1,202 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.util;

import static com.google.common.base.Charsets.UTF_8;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.jclouds.util.Closeables2.closeQuietly;

import java.io.IOException;
import java.util.Locale;
import java.util.Map.Entry;

import javax.crypto.Mac;

import org.jclouds.crypto.Crypto;
import org.jclouds.glacier.reference.GlacierHeaders;
import org.jclouds.http.HttpException;
import org.jclouds.http.HttpRequest;

import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;
import com.google.common.collect.Multimap;
import com.google.common.collect.SortedSetMultimap;
import com.google.common.collect.TreeMultimap;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import com.google.common.hash.HashingInputStream;
import com.google.common.io.BaseEncoding;
import com.google.common.io.ByteSource;
import com.google.common.io.ByteStreams;
import com.google.common.net.HttpHeaders;

// TODO: Query parameters, not necessary for Glacier
// TODO: Endpoint on buildCredentialScope is being read from the static string. Uncool.
/**
 * Signs requests using the AWS Signature Version 4 algorithm.
 *
 * @see <a href="http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html" />
 */
public final class AWSRequestSignerV4 {

   public static final String AUTH_TAG = "AWS4";
   public static final String HEADER_TAG = "x-amz-";
   public static final String ALGORITHM = AUTH_TAG + "-HMAC-SHA256";
   public static final String TERMINATION_STRING = "aws4_request";
   public static final String REGION = "us-east-1";
   public static final String SERVICE = "glacier";

   private final Crypto crypto;
   private final String identity;
   private final String credential;

   public AWSRequestSignerV4(String identity, String credential, Crypto crypto) {
      this.crypto = checkNotNull(crypto, "crypto");
      this.identity = checkNotNull(identity, "identity");
      this.credential = checkNotNull(credential, "credential");
   }

   private static HashCode buildHashedCanonicalRequest(String method, String endpoint, HashCode hashedPayload,
         String canonicalizedHeadersString, String signedHeaders) {
      return Hashing.sha256().newHasher()
            .putString(method, UTF_8)
            .putString("\n", UTF_8)
            .putString(endpoint, UTF_8)
            .putString("\n", UTF_8)
            .putString("\n", UTF_8)
            .putString(canonicalizedHeadersString, UTF_8)
            .putString("\n", UTF_8)
            .putString(signedHeaders, UTF_8)
            .putString("\n", UTF_8)
            .putString(hashedPayload.toString(), UTF_8)
            .hash();
   }

   private static String createStringToSign(String date, String credentialScope, HashCode hashedCanonicalRequest) {
      return ALGORITHM + "\n" + date + "\n" + credentialScope + "\n" + hashedCanonicalRequest.toString();
   }

   private static String formatDateWithoutTimestamp(String date) {
      return date.substring(0, 8);
   }

   private static String buildCredentialScope(String dateWithoutTimeStamp) {
      return dateWithoutTimeStamp + "/" + REGION + "/" + SERVICE + "/" + TERMINATION_STRING;
   }

   private static Multimap<String, String> buildCanonicalizedHeadersMap(HttpRequest request) {
      Multimap<String, String> headers = request.getHeaders();
      SortedSetMultimap<String, String> canonicalizedHeaders = TreeMultimap.create();
      for (Entry<String, String> header : headers.entries()) {
         if (header.getKey() == null)
            continue;
         String key = header.getKey().toLowerCase(Locale.getDefault());
         // Ignore any headers that are not particularly interesting.
         if (key.equalsIgnoreCase(HttpHeaders.CONTENT_TYPE) || key.equalsIgnoreCase(HttpHeaders.CONTENT_MD5)
               || key.equalsIgnoreCase(HttpHeaders.HOST) || key.startsWith(HEADER_TAG)) {
            canonicalizedHeaders.put(key, header.getValue());
         }
      }
      return canonicalizedHeaders;
   }

   private static String buildCanonicalizedHeadersString(Multimap<String, String> canonicalizedHeadersMap) {
      StringBuilder canonicalizedHeadersBuffer = new StringBuilder();
      for (Entry<String, String> header : canonicalizedHeadersMap.entries()) {
         String key = header.getKey();
         canonicalizedHeadersBuffer.append(key.toLowerCase()).append(':').append(header.getValue()).append('\n');
      }
      return canonicalizedHeadersBuffer.toString();
   }

   private static String buildSignedHeaders(Multimap<String, String> canonicalizedHeadersMap) {
      return Joiner.on(';').join(Iterables.transform(canonicalizedHeadersMap.keySet(), new Function<String, String>() {

         @Override
         public String apply(String input) {
            return input.toLowerCase();
         }
      }));
   }

   private static HashCode buildHashedPayload(HttpRequest request) {
      HashingInputStream his = null;
      try {
         his = new HashingInputStream(Hashing.sha256(),
               request.getPayload() == null ? ByteSource.empty().openStream() : request.getPayload().openStream());
         ByteStreams.copy(his, ByteStreams.nullOutputStream());
         return his.hash();
      } catch (IOException e) {
         throw new HttpException("Error signing request", e);
      } finally {
         closeQuietly(his);
      }
   }

   private static String buildAuthHeader(String accessKey, String credentialScope, String signedHeaders,
         String signature) {
      return ALGORITHM + " " + "Credential=" + accessKey + "/" + credentialScope + "," + "SignedHeaders="
            + signedHeaders + "," + "Signature=" + signature;
   }

   private byte[] hmacSha256(byte[] key, String s) {
      try {
         Mac hmacSHA256 = crypto.hmacSHA256(key);
         return hmacSHA256.doFinal(s.getBytes(UTF_8));
      } catch (Exception e) {
         throw new HttpException("Error signing request", e);
      }
   }

   private String buildSignature(String dateWithoutTimestamp, String stringToSign) {
      byte[] kSecret = (AUTH_TAG + credential).getBytes(UTF_8);
      byte[] kDate = hmacSha256(kSecret, dateWithoutTimestamp);
      byte[] kRegion = hmacSha256(kDate, REGION);
      byte[] kService = hmacSha256(kRegion, SERVICE);
      byte[] kSigning = hmacSha256(kService, TERMINATION_STRING);
      return BaseEncoding.base16().encode(hmacSha256(kSigning, stringToSign)).toLowerCase();
   }

   public HttpRequest sign(HttpRequest request) {
      // Grab the needed data to build the signature
      Multimap<String, String> canonicalizedHeadersMap = buildCanonicalizedHeadersMap(request);
      String canonicalizedHeadersString = buildCanonicalizedHeadersString(canonicalizedHeadersMap);
      String signedHeaders = buildSignedHeaders(canonicalizedHeadersMap);
      String date = request.getFirstHeaderOrNull(GlacierHeaders.ALTERNATE_DATE);
      String dateWithoutTimestamp = formatDateWithoutTimestamp(date);
      String method = request.getMethod();
      String endpoint = request.getEndpoint().getRawPath();
      String credentialScope = buildCredentialScope(dateWithoutTimestamp);
      HashCode hashedPayload = buildHashedPayload(request);

      // Task 1: Create a Canonical Request For Signature Version 4.
      HashCode hashedCanonicalRequest = buildHashedCanonicalRequest(method, endpoint, hashedPayload,
            canonicalizedHeadersString, signedHeaders);

      // Task 2: Create a String to Sign for Signature Version 4.
      String stringToSign = createStringToSign(date, credentialScope, hashedCanonicalRequest);

      // Task 3: Calculate the AWS Signature Version 4.
      String signature = buildSignature(dateWithoutTimestamp, stringToSign);

      // Sign the request
      String authHeader = buildAuthHeader(identity, credentialScope, signedHeaders, signature);
      request = request.toBuilder().replaceHeader(HttpHeaders.AUTHORIZATION, authHeader).build();
      return request;
   }
}
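Editorial note: the signing key is derived with the standard SigV4 chain (secret -> date -> region -> service -> "aws4_request") before the string to sign is HMACed. A hedged usage sketch, assuming a Crypto instance and an HttpRequest named request are already in scope (in jclouds the signer is normally wired up by Guice, and the credentials below are placeholders):

AWSRequestSignerV4 signer = new AWSRequestSignerV4("ACCESS_KEY_ID", "SECRET_KEY", crypto);
HttpRequest signed = signer.sign(request.toBuilder()
      .replaceHeader(GlacierHeaders.ALTERNATE_DATE, "20120525T002453Z") // x-amz-date must be present
      .build());
// The result carries an Authorization header of the form:
// AWS4-HMAC-SHA256 Credential=ACCESS_KEY_ID/20120525/us-east-1/glacier/aws4_request,SignedHeaders=...,Signature=...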
@ -0,0 +1,90 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.util;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Strings.isNullOrEmpty;

import com.google.common.base.Objects;

/**
 * This class represents a range of bytes.
 */
public final class ContentRange {
   private final long from;
   private final long to;

   private ContentRange(long from, long to) {
      checkArgument(from < to, "\"from\" should be lower than \"to\"");
      checkArgument(from >= 0 && to > 0, "\"from\" cannot be negative and \"to\" has to be positive");
      this.from = from;
      this.to = to;
   }

   public static ContentRange fromString(String contentRangeString) {
      checkArgument(!isNullOrEmpty(contentRangeString) && contentRangeString.matches("[0-9]+-[0-9]+"),
            "The string should be two numbers separated by a hyphen (from-to)");
      String[] strings = contentRangeString.split("-", 2);
      long from = Long.parseLong(strings[0]);
      long to = Long.parseLong(strings[1]);
      return new ContentRange(from, to);
   }

   public static ContentRange fromPartNumber(long partNumber, long partSizeInMB) {
      checkArgument(partNumber >= 0, "The part number cannot be negative");
      checkArgument(partSizeInMB > 0, "The part size has to be positive");
      long from = partNumber * (partSizeInMB << 20);
      long to = from + (partSizeInMB << 20) - 1;
      return new ContentRange(from, to);
   }

   public static ContentRange build(long from, long to) {
      return new ContentRange(from, to);
   }

   public long getFrom() {
      return from;
   }

   public long getTo() {
      return to;
   }

   public String buildHeader() {
      return "bytes " + from + "-" + to + "/*";
   }

   @Override
   public int hashCode() {
      return Objects.hashCode(this.from, this.to);
   }

   @Override
   public boolean equals(Object obj) {
      if (obj == null)
         return false;
      if (getClass() != obj.getClass())
         return false;
      ContentRange other = (ContentRange) obj;
      return Objects.equal(this.from, other.from) && Objects.equal(this.to, other.to);
   }

   @Override
   public String toString() {
      return from + "-" + to;
   }
}
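Editorial note: fromPartNumber works in whole parts of partSizeInMB mebibytes, so part n with a 4 MiB part size covers bytes [n * 4 MiB, (n + 1) * 4 MiB - 1]. A worked example:

ContentRange range = ContentRange.fromPartNumber(2, 4);
// 2 * (4 << 20) = 8388608 and 8388608 + (4 << 20) - 1 = 12582911
assert range.getFrom() == 8388608L;
assert range.getTo() == 12582911L;
assert range.buildHeader().equals("bytes 8388608-12582911/*");
assert ContentRange.fromString("8388608-12582911").equals(range);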
@ -0,0 +1,139 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier.util;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.jclouds.util.Closeables2.closeQuietly;

import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;

import org.jclouds.io.Payload;

import com.google.common.base.Objects;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableList.Builder;
import com.google.common.collect.ImmutableSortedMap;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import com.google.common.hash.HashingInputStream;
import com.google.common.io.ByteStreams;

public final class TreeHash {
   private static final int CHUNK_SIZE = 1024 * 1024;

   private final HashCode treeHash;
   private final HashCode linearHash;

   private TreeHash(HashCode treeHash, HashCode linearHash) {
      this.treeHash = checkNotNull(treeHash, "treeHash");
      this.linearHash = checkNotNull(linearHash, "linearHash");
   }

   public HashCode getLinearHash() {
      return linearHash;
   }

   public HashCode getTreeHash() {
      return treeHash;
   }

   @Override
   public int hashCode() {
      return Objects.hashCode(this.treeHash, this.linearHash);
   }

   @Override
   public boolean equals(Object obj) {
      if (obj == null)
         return false;
      if (getClass() != obj.getClass())
         return false;
      TreeHash other = (TreeHash) obj;
      return Objects.equal(this.treeHash, other.treeHash) && Objects.equal(this.linearHash, other.linearHash);
   }

   @Override
   public String toString() {
      return "TreeHash [treeHash=" + treeHash + ", linearHash=" + linearHash + "]";
   }

   private static HashCode hashList(Collection<HashCode> hashList) {
      Builder<HashCode> result = ImmutableList.builder();
      while (hashList.size() > 1) {
         // Hash pairs of values and add them to the result list.
         for (Iterator<HashCode> it = hashList.iterator(); it.hasNext();) {
            HashCode hc1 = it.next();
            if (it.hasNext()) {
               HashCode hc2 = it.next();
               result.add(Hashing.sha256().newHasher()
                     .putBytes(hc1.asBytes())
                     .putBytes(hc2.asBytes())
                     .hash());
            } else {
               result.add(hc1);
            }
         }
         hashList = result.build();
         result = ImmutableList.builder();
      }
      return hashList.iterator().next();
   }

   /**
    * Builds the linear hash and the tree hash of the payload.
    *
    * @return The calculated TreeHash.
    * @see <a href="http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html" />
    */
   public static TreeHash buildTreeHashFromPayload(Payload payload) throws IOException {
      InputStream is = null;
      try {
         is = checkNotNull(payload, "payload").openStream();
         Builder<HashCode> list = ImmutableList.builder();
         HashingInputStream linearHis = new HashingInputStream(Hashing.sha256(), is);
         while (true) {
            HashingInputStream chunkedHis = new HashingInputStream(
                  Hashing.sha256(), ByteStreams.limit(linearHis, CHUNK_SIZE));
            long count = ByteStreams.copy(chunkedHis, ByteStreams.nullOutputStream());
            if (count == 0) {
               break;
            }
            list.add(chunkedHis.hash());
         }
         // hashList() reduces the per-chunk hashes to the single root hash of the tree.
         return new TreeHash(hashList(list.build()), linearHis.hash());
      } finally {
         closeQuietly(is);
      }
   }

   /**
    * Builds a TreeHash based on a map of hashed chunks.
    *
    * @return The calculated TreeHash.
    * @see <a href="http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html" />
    */
   public static HashCode buildTreeHashFromMap(Map<Integer, HashCode> map) {
      checkArgument(!map.isEmpty(), "The map cannot be empty.");
      return hashList(ImmutableSortedMap.copyOf(map).values());
   }
}
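Editorial note: a short sketch of computing the two checksums for an upload. It assumes a jclouds Payload built elsewhere (for instance with org.jclouds.io.Payloads); the linear hash and tree hash are the values Glacier expects in the x-amz-content-sha256 and x-amz-sha256-tree-hash headers respectively:

// payload is assumed to be an org.jclouds.io.Payload created by the caller.
TreeHash checksums = TreeHash.buildTreeHashFromPayload(payload); // may throw IOException
HashCode linear = checksums.getLinearHash(); // SHA-256 of the whole payload
HashCode tree = checksums.getTreeHash();     // root of the SHA-256 tree over 1 MiB chunks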
@ -0,0 +1,29 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier;

import org.jclouds.blobstore.internal.BaseBlobStoreApiMetadataTest;
import org.testng.annotations.Test;

@Test(groups = "unit", testName = "GlacierApiMetadataTest")
public class GlacierApiMetadataTest extends BaseBlobStoreApiMetadataTest {

   public GlacierApiMetadataTest() {
      super(new GlacierApiMetadata());
   }

}

@ -0,0 +1,104 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier;

import static org.assertj.core.api.Assertions.assertThat;
import static org.jclouds.glacier.util.TestUtils.MiB;
import static org.jclouds.glacier.util.TestUtils.buildPayload;

import java.util.UUID;

import org.jclouds.apis.BaseApiLiveTest;
import org.jclouds.glacier.domain.PaginatedVaultCollection;
import org.jclouds.glacier.util.ContentRange;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableList;
import com.google.common.hash.HashCode;

/**
 * Live test for Glacier.
 */
@Test(groups = {"live", "liveshort"})
public class GlacierClientLiveTest extends BaseApiLiveTest<GlacierClient> {

   private final String VAULT_NAME1 = UUID.randomUUID().toString();
   private final String VAULT_NAME2 = UUID.randomUUID().toString();
   private final String VAULT_NAME3 = UUID.randomUUID().toString();

   public GlacierClientLiveTest() {
      this.provider = "glacier";
   }

   @Test
   public void testDeleteVaultIfEmptyOrNotFound() throws Exception {
      assertThat(api.deleteVault(UUID.randomUUID().toString())).isTrue();
   }

   @Test
   public void testDescribeNonExistentVault() throws Exception {
      assertThat(api.describeVault(UUID.randomUUID().toString())).isNull();
   }

   @Test
   public void testCreateVault() throws Exception {
      assertThat(api.createVault(VAULT_NAME1).toString()).contains("/vaults/" + VAULT_NAME1);
      assertThat(api.createVault(VAULT_NAME2).toString()).contains("/vaults/" + VAULT_NAME2);
      assertThat(api.createVault(VAULT_NAME3).toString()).contains("/vaults/" + VAULT_NAME3);
   }

   @Test(dependsOnMethods = {"testCreateVault"})
   public void testListAndDescribeVaults() throws Exception {
      PaginatedVaultCollection vaults = api.listVaults();
      assertThat(vaults).containsAll(ImmutableList.of(
            api.describeVault(VAULT_NAME1),
            api.describeVault(VAULT_NAME2),
            api.describeVault(VAULT_NAME3)));
   }

   @Test(dependsOnMethods = {"testCreateVault"})
   public void testListMultipartUploadWithEmptyList() throws Exception {
      assertThat(api.listMultipartUploads(VAULT_NAME1)).isEmpty();
   }

   @Test(dependsOnMethods = {"testListMultipartUploadWithEmptyList"})
   public void testInitiateListAndAbortMultipartUpload() throws Exception {
      long partSizeInMb = 1;
      String uploadId = api.initiateMultipartUpload(VAULT_NAME1, partSizeInMb);
      try {
         assertThat(api.listMultipartUploads(VAULT_NAME1)).extracting("multipartUploadId").contains(uploadId);

         HashCode part1 = api.uploadPart(VAULT_NAME1, uploadId,
               ContentRange.fromPartNumber(0, partSizeInMb), buildPayload(partSizeInMb * MiB));
         HashCode part2 = api.uploadPart(VAULT_NAME1, uploadId,
               ContentRange.fromPartNumber(1, partSizeInMb), buildPayload(partSizeInMb * MiB));
         assertThat(part1).isNotNull();
         assertThat(part2).isNotNull();
         assertThat(api.listParts(VAULT_NAME1, uploadId).iterator()).extracting("treeHash").containsExactly(part1, part2);
      } finally {
         assertThat(api.abortMultipartUpload(VAULT_NAME1, uploadId)).isTrue();
      }
   }

   @Test(dependsOnMethods = {"testListAndDescribeVaults", "testListMultipartUploadWithEmptyList",
         "testInitiateListAndAbortMultipartUpload"})
   public void testDeleteVaultAndArchive() throws Exception {
      assertThat(api.deleteVault(VAULT_NAME1)).isTrue();
      assertThat(api.deleteVault(VAULT_NAME2)).isTrue();
      assertThat(api.deleteVault(VAULT_NAME3)).isTrue();
   }
}

@ -0,0 +1,143 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier;

import static org.assertj.core.api.Assertions.assertThat;
import static org.jclouds.glacier.blobstore.strategy.internal.BasePollingStrategy.DEFAULT_TIME_BETWEEN_POLLS;
import static org.jclouds.glacier.util.TestUtils.MiB;
import static org.jclouds.glacier.util.TestUtils.buildData;
import static org.jclouds.glacier.util.TestUtils.buildPayload;

import java.io.IOException;
import java.io.InputStream;

import org.jclouds.apis.BaseApiLiveTest;
import org.jclouds.glacier.blobstore.strategy.internal.BasePollingStrategy;
import org.jclouds.glacier.domain.ArchiveRetrievalJobRequest;
import org.jclouds.glacier.domain.InventoryRetrievalJobRequest;
import org.jclouds.glacier.domain.JobMetadata;
import org.jclouds.glacier.domain.JobStatus;
import org.jclouds.glacier.domain.VaultMetadata;
import org.jclouds.glacier.util.ContentRange;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableMap;
import com.google.common.hash.HashCode;
import com.google.common.io.Closer;

/**
 * Long live test for Glacier.
 */
public class GlacierClientLongLiveTest extends BaseApiLiveTest<GlacierClient> {

   private static final long PART_SIZE = 1;
   private static final String VAULT_NAME = "JCLOUDS_LIVE_TESTS";
   private static final String ARCHIVE_DESCRIPTION = "test archive";

   private String archiveId = null;
   private String archiveRetrievalJob = null;
   private String inventoryRetrievalJob = null;

   public GlacierClientLongLiveTest() {
      this.provider = "glacier";
   }

   @Test(groups = {"live", "livelong", "setup"})
   public void testSetVault() throws Exception {
      api.createVault(VAULT_NAME);
      api.uploadArchive(VAULT_NAME, buildPayload(1 * MiB), ARCHIVE_DESCRIPTION);
   }

   @Test(groups = {"live", "livelong", "longtest"})
   public void testUploadArchive() {
      String archiveId = api.uploadArchive(VAULT_NAME, buildPayload(1 * MiB));
      assertThat(api.deleteArchive(VAULT_NAME, archiveId)).isTrue();
   }

   @Test(groups = {"live", "livelong", "longtest"})
   public void testCompleteMultipartUpload() {
      String uploadId = api.initiateMultipartUpload(VAULT_NAME, PART_SIZE);
      ImmutableMap.Builder<Integer, HashCode> hashes = ImmutableMap.builder();
      hashes.put(0, api.uploadPart(VAULT_NAME, uploadId, ContentRange.fromPartNumber(0, PART_SIZE),
            buildPayload(PART_SIZE * MiB)));
      hashes.put(1, api.uploadPart(VAULT_NAME, uploadId, ContentRange.fromPartNumber(1, PART_SIZE),
            buildPayload(PART_SIZE * MiB)));
      archiveId = api.completeMultipartUpload(VAULT_NAME, uploadId, hashes.build(), PART_SIZE * 2 * MiB);
      assertThat(archiveId).isNotNull();
   }

   @Test(groups = {"live", "livelong", "longtest"}, dependsOnMethods = {"testUploadArchive", "testCompleteMultipartUpload"})
   public void testInitiateJob() {
      ArchiveRetrievalJobRequest archiveRetrieval = ArchiveRetrievalJobRequest.builder().archiveId(archiveId).build();
      InventoryRetrievalJobRequest inventoryRetrieval = InventoryRetrievalJobRequest.builder().build();
      archiveRetrievalJob = api.initiateJob(VAULT_NAME, archiveRetrieval);
      inventoryRetrievalJob = api.initiateJob(VAULT_NAME, inventoryRetrieval);
      assertThat(archiveRetrievalJob).isNotNull();
      assertThat(inventoryRetrievalJob).isNotNull();
   }

   @Test(groups = {"live", "livelong", "longtest"}, dependsOnMethods = {"testInitiateJob"})
   public void testDescribeJob() {
      VaultMetadata vaultMetadata = api.describeVault(VAULT_NAME);

      JobMetadata archiveRetrievalMetadata = api.describeJob(VAULT_NAME, archiveRetrievalJob);
      assertThat(archiveRetrievalMetadata.getArchiveId()).isEqualTo(archiveId);
      assertThat(archiveRetrievalMetadata.getJobId()).isEqualTo(archiveRetrievalJob);
      assertThat(archiveRetrievalMetadata.getVaultArn()).isEqualTo(vaultMetadata.getVaultARN());

      JobMetadata inventoryRetrievalMetadata = api.describeJob(VAULT_NAME, inventoryRetrievalJob);
      assertThat(inventoryRetrievalMetadata.getJobId()).isEqualTo(inventoryRetrievalJob);
      assertThat(inventoryRetrievalMetadata.getVaultArn()).isEqualTo(vaultMetadata.getVaultARN());
   }

   @Test(groups = {"live", "livelong", "longtest"}, dependsOnMethods = {"testInitiateJob"})
   public void testListJobs() {
      assertThat(api.listJobs(VAULT_NAME)).extracting("jobId").contains(inventoryRetrievalJob, archiveRetrievalJob);
   }

   @Test(groups = {"live", "livelong", "longtest"}, dependsOnMethods = {"testInitiateJob", "testDescribeJob", "testListJobs"})
   public void testWaitForSucceed() throws InterruptedException {
      new BasePollingStrategy(api).waitForSuccess(VAULT_NAME, archiveRetrievalJob);
      new BasePollingStrategy(api, 0, DEFAULT_TIME_BETWEEN_POLLS).waitForSuccess(VAULT_NAME,
            inventoryRetrievalJob);
      assertThat(api.describeJob(VAULT_NAME, archiveRetrievalJob).getStatusCode()).isEqualTo(JobStatus.SUCCEEDED);
      assertThat(api.describeJob(VAULT_NAME, inventoryRetrievalJob).getStatusCode()).isEqualTo(JobStatus.SUCCEEDED);
   }

   @Test(groups = {"live", "livelong", "longtest"}, dependsOnMethods = {"testWaitForSucceed"})
   public void testGetJobOutput() throws IOException {
      Closer closer = Closer.create();
      try {
         InputStream inputStream = closer.register(api.getJobOutput(VAULT_NAME, archiveRetrievalJob).openStream());
         InputStream expectedInputStream = closer.register(buildData(PART_SIZE * 2 * MiB).openStream());
         assertThat(inputStream).hasContentEqualTo(expectedInputStream);
      } finally {
         closer.close();
      }
   }

   @Test(groups = {"live", "livelong", "longtest"}, dependsOnMethods = {"testWaitForSucceed"})
   public void testGetInventoryRetrievalOutput() throws InterruptedException {
      assertThat(api.getInventoryRetrievalOutput(VAULT_NAME, inventoryRetrievalJob))
            .extracting("description").contains(ARCHIVE_DESCRIPTION);
   }

   @Test(groups = {"live", "livelong", "longtest"}, dependsOnMethods = {"testGetJobOutput"})
   public void testDeleteArchive() throws Exception {
      assertThat(api.deleteArchive(VAULT_NAME, archiveId)).isTrue();
   }
}

@ -0,0 +1,540 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.glacier;

import static com.google.common.base.Charsets.UTF_8;
import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService;
import static org.assertj.core.api.Assertions.assertThat;
import static org.jclouds.Constants.PROPERTY_MAX_RETRIES;
import static org.jclouds.Constants.PROPERTY_SO_TIMEOUT;
import static org.jclouds.glacier.util.TestUtils.MiB;
import static org.jclouds.glacier.util.TestUtils.buildPayload;
import static org.jclouds.util.Strings2.urlEncode;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;

import java.io.IOException;
import java.net.URI;
import java.net.URL;
import java.util.Properties;
import java.util.Set;

import org.jclouds.ContextBuilder;
import org.jclouds.concurrent.config.ExecutorServiceModule;
import org.jclouds.glacier.domain.ArchiveMetadataCollection;
import org.jclouds.glacier.domain.ArchiveRetrievalJobRequest;
import org.jclouds.glacier.domain.InventoryRetrievalJobRequest;
import org.jclouds.glacier.domain.JobMetadata;
import org.jclouds.glacier.domain.MultipartUploadMetadata;
import org.jclouds.glacier.domain.PaginatedMultipartUploadCollection;
import org.jclouds.glacier.domain.PaginatedVaultCollection;
import org.jclouds.glacier.domain.VaultMetadata;
import org.jclouds.glacier.options.PaginationOptions;
import org.jclouds.glacier.reference.GlacierHeaders;
import org.jclouds.glacier.util.ContentRange;
import org.jclouds.http.HttpResponseException;
import org.jclouds.io.Payload;
import org.jclouds.json.Json;
import org.jclouds.json.internal.GsonWrapper;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.hash.HashCode;
import com.google.common.io.Resources;
import com.google.common.net.HttpHeaders;
import com.google.common.net.MediaType;
import com.google.gson.Gson;
import com.google.inject.Module;
import okhttp3.mockwebserver.MockResponse;
import okhttp3.mockwebserver.MockWebServer;
import okhttp3.mockwebserver.RecordedRequest;

/**
 * Mock test for Glacier.
 */
@Test(singleThreaded = true, groups = {"mock"}, testName = "GlacierClientMockTest")
public class GlacierClientMockTest {

   private static final String REQUEST_ID = "AAABZpJrTyioDC_HsOmHae8EZp_uBSJr6cnGOLKp_XJCl-Q";
   private static final String DATE = "Sun, 25 Mar 2012 12:00:00 GMT";
   private static final String HTTP = "HTTP/1.1";
   private static final String VAULT_NAME = "examplevault";
   private static final String VAULT_NAME1 = "examplevault1";
   private static final String VAULT_NAME2 = "examplevault2";
   private static final String VAULT_NAME3 = "examplevault3";
   private static final String LOCATION = "/111122223333/";
   private static final String VAULT_LOCATION = LOCATION + "vaults/" + VAULT_NAME;
   private static final String VAULT_ARN_PREFIX = "arn:aws:glacier:us-east-1:012345678901:vaults/";
   private static final String VAULT_ARN = VAULT_ARN_PREFIX + VAULT_NAME;
   private static final String VAULT_ARN1 = VAULT_ARN_PREFIX + VAULT_NAME1;
   private static final String VAULT_ARN3 = VAULT_ARN_PREFIX + VAULT_NAME3;
   private static final String ARCHIVE_ID = "NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-TjhqG6eGoOY9Z8i1_AUyUsuhPAdTqLHy8pTl5nfCFJmDl2yEZONi5L26Omw12vcs01MNGntHEQL8MBfGlqrEXAMPLEArchiveId";
   private static final String ARCHIVE_LOCATION = VAULT_LOCATION + "/archives/" + ARCHIVE_ID;
   private static final HashCode TREEHASH = HashCode.fromString("beb0fe31a1c7ca8c6c04d574ea906e3f97b31fdca7571defb5b44dca89b5af60");
   private static final String DESCRIPTION = "test description";
   private static final String MULTIPART_UPLOAD_LOCATION = VAULT_LOCATION + "/multipart-uploads/" + ARCHIVE_ID;
   private static final String MULTIPART_UPLOAD_ID = "OW2fM5iVylEpFEMM9_HpKowRapC3vn5sSL39_396UW9zLFUWVrnRHaPjUJddQ5OxSHVXjYtrN47NBZ-khxOjyEXAMPLE";
   private static final String MARKER = "xsQdFIRsfJr20CW2AbZBKpRZAFTZSJIMtL2hYf8mvp8dM0m4RUzlaqoEye6g3h3ecqB_zqwB7zLDMeSWhwo65re4C4Ev";
   private static final String JOB_ID = "HkF9p6o7yjhFx-K3CGl6fuSm6VzW9T7esGQfco8nUXVYwS0jlb5gq1JZ55yHgt5vP54ZShjoQzQVVh7vEXAMPLEjobID";
   private static final Set<Module> modules = ImmutableSet.<Module> of(new ExecutorServiceModule(newDirectExecutorService(),
         newDirectExecutorService()));

   private MockWebServer server;
   private GlacierClient client;

   private static GlacierClient getGlacierClient(URL server) {
      Properties overrides = new Properties();
      // prevent expect-100 bug http://code.google.com/p/mockwebserver/issues/detail?id=6
      overrides.setProperty(PROPERTY_SO_TIMEOUT, "0");
      overrides.setProperty(PROPERTY_MAX_RETRIES, "1");
      return ContextBuilder.newBuilder("glacier").credentials("accessKey", "secretKey").endpoint(server.toString())
            .modules(modules).overrides(overrides).buildApi(GlacierClient.class);
   }

   private static MockResponse buildBaseResponse(int responseCode) {
      MockResponse mr = new MockResponse();
      mr.setResponseCode(responseCode);
      mr.addHeader(GlacierHeaders.REQUEST_ID, REQUEST_ID);
      mr.addHeader(HttpHeaders.DATE, DATE);
      return mr;
   }

   private static String getResponseBody(String path) throws IOException {
      return Resources.toString(Resources.getResource(GlacierClientMockTest.class, path), UTF_8);
   }

   @BeforeMethod
   private void initServer() throws IOException {
      server = new MockWebServer();
      server.start();
      client = getGlacierClient(server.url("/").url());
   }

   @AfterMethod
   private void shutdownServer() {
      try {
         server.shutdown();
      } catch (IOException e) {
         // MockWebServer 2.1.0 introduces an active wait for its executor
         // termination. That active wait is a hardcoded value and throws an IOE
         // if the executor has not terminated in that timeout. It is safe to
         // ignore this exception as the functionality has been properly
         // verified.
      }
   }

   @Test
   public void testCreateVault() throws InterruptedException {
      MockResponse mr = buildBaseResponse(201);
      mr.addHeader(HttpHeaders.LOCATION, VAULT_LOCATION);
      server.enqueue(mr);

      assertThat(client.createVault(VAULT_NAME)).isEqualTo(URI.create(server.url("/") + VAULT_LOCATION.substring(1)));

      assertEquals(server.takeRequest().getRequestLine(), "PUT /-/vaults/" + VAULT_NAME + " " + HTTP);
   }

   @Test
   public void testDeleteVault() throws InterruptedException {
      server.enqueue(buildBaseResponse(204));

      assertThat(client.deleteVault(VAULT_NAME)).isTrue();

      assertEquals(server.takeRequest().getRequestLine(), "DELETE /-/vaults/" + VAULT_NAME + " " + HTTP);
   }

   @Test
   public void testDescribeVault() throws InterruptedException, IOException {
      MockResponse mr = buildBaseResponse(200);
      mr.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8);
      mr.setBody(getResponseBody("/json/describeVaultResponseBody.json"));
      mr.addHeader(HttpHeaders.CONTENT_LENGTH, mr.getBody().size());
      server.enqueue(mr);

      VaultMetadata vault = client.describeVault(VAULT_NAME);
      assertThat(vault.getVaultName()).isEqualTo(VAULT_NAME);
      assertThat(vault.getVaultARN()).isEqualTo(VAULT_ARN);
      assertThat(vault.getSizeInBytes()).isEqualTo(78088912);
      assertThat(vault.getNumberOfArchives()).isEqualTo(192);

      assertEquals(server.takeRequest().getRequestLine(), "GET /-/vaults/" + VAULT_NAME + " " + HTTP);
   }

   @Test
   public void testListVaults() throws InterruptedException, IOException {
      MockResponse mr = buildBaseResponse(200);
      mr.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8);
      mr.setBody(getResponseBody("/json/listVaultsResponseBody.json"));
      mr.addHeader(HttpHeaders.CONTENT_LENGTH, mr.getBody().size());
      server.enqueue(mr);

      assertThat(client.listVaults()).extracting("vaultName").containsExactly(VAULT_NAME1, VAULT_NAME2, VAULT_NAME3);

      assertEquals(server.takeRequest().getRequestLine(), "GET /-/vaults " + HTTP);
   }

   @Test
   public void testListVaultsWithEmptyList() throws InterruptedException, IOException {
      MockResponse mr = buildBaseResponse(200);
      mr.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8);
      mr.setBody(getResponseBody("/json/listVaultsWithEmptyListResponseBody.json"));
      mr.addHeader(HttpHeaders.CONTENT_LENGTH, mr.getBody().size());
      server.enqueue(mr);

      assertThat(client.listVaults()).isEmpty();
   }

   @Test
   public void testListVaultsWithQueryParams() throws InterruptedException, IOException {
      MockResponse mr = buildBaseResponse(200);
      mr.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8);
      mr.setBody(getResponseBody("/json/listVaultsWithQueryParamsResponseBody.json"));
      mr.addHeader(HttpHeaders.CONTENT_LENGTH, mr.getBody().size());
      server.enqueue(mr);

      PaginatedVaultCollection vaults = client.listVaults(PaginationOptions.Builder.limit(2).marker(VAULT_ARN1));
      assertThat(vaults).extracting("vaultName").containsExactly(VAULT_NAME1, VAULT_NAME2);
      assertThat(vaults.nextMarker().get()).isEqualTo(VAULT_ARN3);

      assertEquals(server.takeRequest().getRequestLine(),
            "GET /-/vaults?limit=2&marker=" + urlEncode(VAULT_ARN1, '/') + " " + HTTP);
   }

   @Test
   public void testUploadArchive() throws InterruptedException {
      MockResponse mr = buildBaseResponse(201);
      mr.addHeader(GlacierHeaders.TREE_HASH, TREEHASH);
      mr.addHeader(HttpHeaders.LOCATION, ARCHIVE_LOCATION);
      mr.addHeader(GlacierHeaders.ARCHIVE_ID, ARCHIVE_ID);
      server.enqueue(mr);

      assertThat(client.uploadArchive(VAULT_NAME, buildPayload(10), DESCRIPTION)).isEqualTo(ARCHIVE_ID);

      RecordedRequest request = server.takeRequest();
      assertEquals(request.getRequestLine(), "POST /-/vaults/" + VAULT_NAME + "/archives " + HTTP);
      assertEquals(request.getHeader(GlacierHeaders.ARCHIVE_DESCRIPTION), DESCRIPTION);
      assertNotNull(request.getHeader(GlacierHeaders.TREE_HASH));
      assertNotNull(request.getHeader(GlacierHeaders.LINEAR_HASH));
   }

   @Test
   public void testDeleteArchive() throws InterruptedException {
      MockResponse mr = buildBaseResponse(204);
      server.enqueue(mr);

      assertThat(client.deleteArchive(VAULT_NAME, ARCHIVE_ID)).isTrue();

      assertEquals(server.takeRequest().getRequestLine(), "DELETE /-/vaults/" + VAULT_NAME + "/archives/" + ARCHIVE_ID + " " + HTTP);
   }

   @Test
   public void testInitiateMultipartUpload() throws InterruptedException {
      MockResponse mr = buildBaseResponse(201);
      mr.addHeader(HttpHeaders.LOCATION, MULTIPART_UPLOAD_LOCATION);
      mr.addHeader(GlacierHeaders.MULTIPART_UPLOAD_ID, MULTIPART_UPLOAD_ID);
      server.enqueue(mr);

      assertThat(client.initiateMultipartUpload(VAULT_NAME, 4, DESCRIPTION)).isEqualTo(MULTIPART_UPLOAD_ID);

      RecordedRequest request = server.takeRequest();
      assertEquals(request.getRequestLine(), "POST /-/vaults/" + VAULT_NAME + "/multipart-uploads " + HTTP);
      assertEquals(request.getHeader(GlacierHeaders.PART_SIZE), "4194304");
      assertEquals(request.getHeader(GlacierHeaders.ARCHIVE_DESCRIPTION), DESCRIPTION);
   }

   @Test
   public void testUploadPart() throws InterruptedException {
      MockResponse mr = buildBaseResponse(204);
      mr.addHeader(GlacierHeaders.TREE_HASH, TREEHASH);
      server.enqueue(mr);

      assertThat(client.uploadPart(VAULT_NAME, MULTIPART_UPLOAD_ID, ContentRange.fromPartNumber(0, 4),
            buildPayload(4 * MiB))).isEqualTo(TREEHASH);

      RecordedRequest request = server.takeRequest();
      assertEquals(request.getRequestLine(),
            "PUT /-/vaults/" + VAULT_NAME + "/multipart-uploads/" + MULTIPART_UPLOAD_ID + " " + HTTP);
      assertEquals(request.getHeader(HttpHeaders.CONTENT_RANGE), "bytes 0-4194303/*");
      assertEquals(request.getHeader(HttpHeaders.CONTENT_LENGTH), "4194304");
   }

   // TODO: Change size to 4096 when moving to JDK 7
   @Test
   public void testUploadPartMaxSize() throws InterruptedException {
      // force the server to discard the request body
      server.setBodyLimit(0);
      MockResponse mr = buildBaseResponse(204);
      mr.addHeader(GlacierHeaders.TREE_HASH, TREEHASH);
      server.enqueue(mr);

      long size = 1024;
      ContentRange range = ContentRange.fromPartNumber(0, size);
      Payload payload = buildPayload(1);
      payload.getContentMetadata().setContentLength(size * MiB);
      try {
         /* The client.uploadPart call should throw an HttpResponseException since the payload is smaller than expected.
          * This trick makes the test way faster.
          */
         client.uploadPart(VAULT_NAME, MULTIPART_UPLOAD_ID, range, payload);
         Assert.fail();
      } catch (HttpResponseException e) {
      }

      RecordedRequest request = server.takeRequest();
      assertEquals(request.getRequestLine(), "PUT /-/vaults/" + VAULT_NAME + "/multipart-uploads/" + MULTIPART_UPLOAD_ID + " " + HTTP);
      assertEquals(request.getHeader(HttpHeaders.CONTENT_RANGE), range.buildHeader());
      assertEquals(request.getHeader(HttpHeaders.CONTENT_LENGTH), payload.getContentMetadata().getContentLength().toString());
   }

   @Test
   public void testCompleteMultipartUpload() throws IOException, InterruptedException {
      MockResponse mr = buildBaseResponse(201);
      mr.addHeader(HttpHeaders.LOCATION, ARCHIVE_LOCATION);
      mr.addHeader(GlacierHeaders.ARCHIVE_ID, ARCHIVE_ID);
      server.enqueue(mr);

      HashCode partHashcode = HashCode.fromString("9bc1b2a288b26af7257a36277ae3816a7d4f16e89c1e7e77d0a5c48bad62b360");
      ImmutableMap<Integer, HashCode> map = ImmutableMap.of(
            1, partHashcode,
            2, partHashcode,
            3, partHashcode,
            4, partHashcode);
      assertThat(client.completeMultipartUpload(VAULT_NAME, MULTIPART_UPLOAD_ID, map, 8 * MiB)).isEqualTo(ARCHIVE_ID);

      RecordedRequest request = server.takeRequest();
      assertEquals(request.getRequestLine(),
            "POST /-/vaults/" + VAULT_NAME + "/multipart-uploads/" + MULTIPART_UPLOAD_ID + " " + HTTP);
      assertEquals(request.getHeader(GlacierHeaders.TREE_HASH),
            "9491cb2ed1d4e7cd53215f4017c23ec4ad21d7050a1e6bb636c4f67e8cddb844");
      assertEquals(request.getHeader(GlacierHeaders.ARCHIVE_SIZE), "8388608");
   }

   @Test
   public void testAbortMultipartUpload() throws IOException, InterruptedException {
      MockResponse mr = buildBaseResponse(204);
      server.enqueue(mr);

      assertTrue(client.abortMultipartUpload(VAULT_NAME, MULTIPART_UPLOAD_ID));

      assertEquals(server.takeRequest().getRequestLine(),
            "DELETE /-/vaults/" + VAULT_NAME + "/multipart-uploads/" + MULTIPART_UPLOAD_ID + " " + HTTP);
   }

   @Test
   public void testListParts() throws IOException, InterruptedException {
      MockResponse mr = buildBaseResponse(200);
      mr.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8);
      mr.setBody(getResponseBody("/json/listPartsResponseBody.json"));
      mr.addHeader(HttpHeaders.CONTENT_LENGTH, mr.getBody().size());
      server.enqueue(mr);

      MultipartUploadMetadata mpu = client.listParts(VAULT_NAME, MULTIPART_UPLOAD_ID, PaginationOptions.Builder.limit(1).marker("1001"));
      assertThat(mpu.getArchiveDescription()).isEqualTo("archive description 1");
      assertThat(mpu.getMultipartUploadId()).isEqualTo(MULTIPART_UPLOAD_ID);
      assertThat(mpu.getPartSizeInBytes()).isEqualTo(4194304);
      assertThat(mpu.iterator()).extracting("treeHash").containsExactly(HashCode.fromString("01d34dabf7be316472c93b1ef80721f5d4"));
      assertThat(mpu.iterator()).extracting("range").containsExactly(ContentRange.fromString("4194304-8388607"));

      assertEquals(server.takeRequest().getRequestLine(),
            "GET /-/vaults/examplevault/multipart-uploads/" + MULTIPART_UPLOAD_ID + "?limit=1&marker=1001 " + HTTP);
   }

   @Test
   public void testListMultipartUploads() throws IOException, InterruptedException {
      MockResponse mr = buildBaseResponse(200);
      mr.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8);
      mr.setBody(getResponseBody("/json/listMultipartUploadsResponseBody.json"));
      mr.addHeader(HttpHeaders.CONTENT_LENGTH, mr.getBody().size());
      server.enqueue(mr);

      PaginatedMultipartUploadCollection mpus = client.listMultipartUploads(VAULT_NAME, PaginationOptions.Builder.limit(1).marker(MARKER));
      assertThat(mpus).extracting("archiveDescription").containsExactly("archive 2");
      assertThat(mpus).extracting("multipartUploadId").containsExactly("nPyGOnyFcx67qqX7E-0tSGiRi88hHMOwOxR-_jNyM6RjVMFfV29lFqZ3rNsSaWBugg6OP92pRtufeHdQH7ClIpSF6uJc");
      assertThat(mpus).extracting("partSizeInBytes").containsExactly(4194304L);
      assertThat(mpus).extracting("vaultARN").containsExactly(VAULT_ARN);

      assertEquals(server.takeRequest().getRequestLine(),
            "GET /-/vaults/examplevault/multipart-uploads?limit=1&marker=" + MARKER + " " + HTTP);
   }

   @Test
   public void testListMultipartUploadsWithEmptyList() throws IOException, InterruptedException {
      MockResponse mr = buildBaseResponse(200);
      mr.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8);
      mr.setBody(getResponseBody("/json/listMultipartUploadsWithEmptyListResponseBody.json"));
      mr.addHeader(HttpHeaders.CONTENT_LENGTH, mr.getBody().size());
      server.enqueue(mr);

      assertThat(client.listMultipartUploads(VAULT_NAME, PaginationOptions.Builder.limit(1).marker(MARKER))).isEmpty();

      assertEquals(server.takeRequest().getRequestLine(),
            "GET /-/vaults/examplevault/multipart-uploads?limit=1&marker=" + MARKER + " " + HTTP);
   }

   @Test
   public void testInitiateArchiveRetrievalJob() throws IOException, InterruptedException {
      MockResponse mr = buildBaseResponse(202);
      mr.addHeader(HttpHeaders.LOCATION, VAULT_LOCATION + "/jobs/" + JOB_ID);
      mr.addHeader(GlacierHeaders.JOB_ID, JOB_ID);
      server.enqueue(mr);

      ContentRange range = ContentRange.fromString("2097152-4194303");
      ArchiveRetrievalJobRequest retrieval = ArchiveRetrievalJobRequest.builder()
            .archiveId(ARCHIVE_ID)
            .description(DESCRIPTION)
            .range(range)
            .build();
      assertThat(client.initiateJob(VAULT_NAME, retrieval)).isEqualTo(JOB_ID);

      RecordedRequest request = server.takeRequest();
      Json json = new GsonWrapper(new Gson());
      ArchiveRetrievalJobRequest job = json.fromJson(request.getBody().readUtf8(), ArchiveRetrievalJobRequest.class);
      assertThat(job.getDescription()).isEqualTo(DESCRIPTION);
      assertThat(job.getRange()).isEqualTo(range);
      assertThat(job.getArchiveId()).isEqualTo(ARCHIVE_ID);
      assertThat(job.getType()).isEqualTo("archive-retrieval");

      assertEquals(request.getRequestLine(), "POST /-/vaults/" + VAULT_NAME + "/jobs " + HTTP);
   }

   @Test
   public void testInitiateInventoryRetrievalJob() throws IOException, InterruptedException {
      MockResponse mr = buildBaseResponse(202);
      mr.addHeader(HttpHeaders.LOCATION, VAULT_LOCATION + "/jobs/" + JOB_ID);
      mr.addHeader(GlacierHeaders.JOB_ID, JOB_ID);
      server.enqueue(mr);

      String marker = "vyS0t2jHQe5qbcDggIeD50chS1SXwYMrkVKo0KHiTUjEYxBGCqRLKaiySzdN7QXGVVV5XZpNVG67pCZ_uykQXFMLaxOSu2hO_-5C0AtWMDrfo7LgVOyfnveDRuOSecUo3Ueq7K0";
      int limit = 10000;
      String startDate = "2013-12-04T21:25:42Z";
      String endDate = "2013-12-05T21:25:42Z";
      String format = "CSV";
      InventoryRetrievalJobRequest job = InventoryRetrievalJobRequest.builder()
            .format(format)
            .endDate(endDate)
            .startDate(startDate)
            .limit(limit)
            .marker(marker)
            .build();
      assertThat(client.initiateJob(VAULT_NAME, job)).isEqualTo(JOB_ID);

      RecordedRequest request = server.takeRequest();
      Json json = new GsonWrapper(new Gson());
      job = json.fromJson(request.getBody().readUtf8(), InventoryRetrievalJobRequest.class);
      assertThat(job.getFormat()).isEqualTo(format);
      assertThat(job.getParameters().getMarker()).isEqualTo(marker);
      assertThat(job.getParameters().getLimit()).isEqualTo(limit);
      assertThat(job.getParameters().getStartDate()).isEqualTo(startDate);
      assertThat(job.getParameters().getEndDate()).isEqualTo(endDate);
      assertThat(job.getType()).isEqualTo("inventory-retrieval");

      assertEquals(request.getRequestLine(), "POST /-/vaults/examplevault/jobs HTTP/1.1");
   }

   @Test
   public void testDescribeJob() throws IOException, InterruptedException {
      MockResponse mr = buildBaseResponse(200);
      mr.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8);
      mr.setBody(getResponseBody("/json/describeJobResponseBody.json"));
      mr.addHeader(HttpHeaders.CONTENT_LENGTH, mr.getBody().size());
      server.enqueue(mr);

      JobMetadata job = client.describeJob(VAULT_NAME, JOB_ID);
      assertThat(job.getAction()).isEqualTo("ArchiveRetrieval");
      assertThat(job.getArchiveId()).isEqualTo(ARCHIVE_ID);
      assertThat(ContentRange.fromString("0-16777215")).isEqualTo(job.getRetrievalByteRange());
      assertThat(job.getVaultArn()).isEqualTo("arn:aws:glacier:us-east-1:012345678901:vaults/examplevault");

      assertEquals(server.takeRequest().getRequestLine(), "GET /-/vaults/" + VAULT_NAME + "/jobs/" + JOB_ID + " " + HTTP);
   }

   @Test
   public void testListJobs() throws IOException, InterruptedException {
      MockResponse mr = buildBaseResponse(200);
      mr.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8);
      mr.setBody(getResponseBody("/json/listJobsResponseBody.json"));
      mr.addHeader(HttpHeaders.CONTENT_LENGTH, mr.getBody().size());
      server.enqueue(mr);

      assertThat(client.listJobs(VAULT_NAME)).extracting("jobId").containsExactly(
            "s4MvaNHIh6mOa1f8iY4ioG2921SDPihXxh3Kv0FBX-JbNPctpRvE4c2_BifuhdGLqEhGBNGeB6Ub-JMunR9JoVa8y1hQ",
            "CQ_tf6fOR4jrJCL61Mfk6VM03oY8lmnWK93KK4gLig1UPAbZiN3UV4G_5nq4AfmJHQ_dOMLOX5k8ItFv0wCPN0oaz5dG");

      assertEquals(server.takeRequest().getRequestLine(), "GET /-/vaults/examplevault/jobs HTTP/1.1");
   }

   @Test
   public void testGetJobOutput() throws IOException, InterruptedException {
      MockResponse mr = buildBaseResponse(200);
      mr.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8);
      mr.setBody(getResponseBody("/json/getJobOutputResponseBody.json"));
      mr.addHeader(HttpHeaders.CONTENT_LENGTH, mr.getBody().size());
      server.enqueue(mr);

      Payload payload = client.getJobOutput(VAULT_NAME, JOB_ID);
      assertThat(payload.getContentMetadata().getContentType()).isEqualTo(MediaType.JSON_UTF_8.toString());
      assertThat(payload.getContentMetadata().getContentLength()).isEqualTo(mr.getBody().size());
      assertThat(payload.openStream())
            .hasContentEqualTo(Resources.getResource(GlacierClientMockTest.class,
                  "/json/getJobOutputResponseBody.json").openStream());

      assertEquals(server.takeRequest().getRequestLine(),
            "GET /-/vaults/" + VAULT_NAME + "/jobs/" + JOB_ID + "/output " + HTTP);
   }

   @Test
   public void testGetJobOutputWithContentRange() throws IOException, InterruptedException {
      MockResponse mr = buildBaseResponse(206);
      mr.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8);
      mr.setBody(getResponseBody("/json/getJobOutputResponseBody.json"));
      mr.addHeader(HttpHeaders.CONTENT_LENGTH, mr.getBody().size());
      server.enqueue(mr);

      ContentRange range = ContentRange.fromString("16-32");
      client.getJobOutput(VAULT_NAME, JOB_ID, range);

      RecordedRequest request = server.takeRequest();
      assertEquals(request.getHeader("Range"), "bytes=" + range.toString());
      assertEquals(request.getRequestLine(),
            "GET /-/vaults/" + VAULT_NAME + "/jobs/" + JOB_ID + "/output " + HTTP);
   }

   @Test
   public void testGetInventoryRetrievalOutput() throws IOException, InterruptedException {
      MockResponse mr = buildBaseResponse(200);
      mr.addHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8);
      mr.setBody(getResponseBody("/json/getJobOutputResponseBody.json"));
      mr.addHeader(HttpHeaders.CONTENT_LENGTH, mr.getBody().size());
      server.enqueue(mr);

      ArchiveMetadataCollection archives = client.getInventoryRetrievalOutput(VAULT_NAME, JOB_ID);
      assertThat(archives.getVaultARN()).isEqualTo("arn:aws:glacier:us-east-1:012345678901:vaults/examplevault");
      assertThat(archives).extracting("archiveId").containsExactly(
            "DMTmICA2n5Tdqq5BV2z7og-A20xnpAPKt3UXwWxdWsn_D6auTUrW6kwy5Qyj9xd1MCE1mBYvMQ63LWaT8yTMzMaCxB_9VBWrW4Jw4zsvg5kehAPDVKcppUD1X7b24JukOr4mMAq-oA",
            "2lHzwhKhgF2JHyvCS-ZRuF08IQLuyB4265Hs3AXj9MoAIhz7tbXAvcFeHusgU_hViO1WeCBe0N5lsYYHRyZ7rrmRkNRuYrXUs_sjl2K8ume_7mKO_0i7C-uHE1oHqaW9d37pabXrSA");
      assertThat(archives).extracting("description").containsExactly("my archive1", "my archive2");
      assertThat(archives).extracting("size").containsExactly(2140123L, 2140123L);
      assertThat(archives).extracting("treeHash").containsExactly(
            HashCode.fromString("6b9d4cf8697bd3af6aa1b590a0b27b337da5b18988dbcc619a3e608a554a1e62"),
            HashCode.fromString("7f2fe580edb35154041fa3d4b41dd6d3adaef0c85d2ff6309f1d4b520eeecda3"));
   }
}
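testUploadArchive above only checks that the tree-hash and linear-hash request headers are present. As a rough idea of how those values could be produced for a payload, here is a minimal sketch (not part of the commit) using the TreeHash utility exercised later in this commit; buildData comes from the TestUtils class also added here:

   import org.jclouds.glacier.util.TreeHash;
   import org.jclouds.io.payloads.ByteSourcePayload;

   // Compute both hashes over the same 10-byte payload the test uploads.
   TreeHash th = TreeHash.buildTreeHashFromPayload(new ByteSourcePayload(buildData(10)));
   // GlacierHeaders.TREE_HASH   -> th.getTreeHash()
   // GlacierHeaders.LINEAR_HASH -> th.getLinearHash()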

@ -0,0 +1,90 @@
package org.jclouds.glacier.blobstore.strategy.internal;

import static org.assertj.core.api.Assertions.assertThat;
import static org.jclouds.glacier.util.TestUtils.MiB;
import static org.jclouds.glacier.util.TestUtils.GiB;
import static org.jclouds.glacier.util.TestUtils.buildPayload;

import org.jclouds.glacier.blobstore.strategy.PayloadSlice;
import org.jclouds.glacier.util.ContentRange;
import org.jclouds.io.internal.BasePayloadSlicer;
import org.testng.annotations.Test;

@Test(groups = {"unit"})
public class BaseSlicingStrategyTest {
   @Test
   public void slicing100MBTest() {
      BaseSlicingStrategy slicer = new BaseSlicingStrategy(new BasePayloadSlicer());
      slicer.startSlicing(buildPayload(100 * MiB));

      long offset = 0;
      while (slicer.hasNext()) {
         PayloadSlice slice = slicer.nextSlice();
         long expectedLength = (slicer.hasNext() ? 8 : 4) * MiB;
         assertThat(slice.getPayload().getContentMetadata().getContentLength()).isEqualTo(expectedLength);
         assertThat(slice.getRange()).isEqualTo(ContentRange.build(offset, offset + expectedLength - 1));
         offset += expectedLength;
      }
   }

   @Test
   public void slicing2000MBTest() {
      BaseSlicingStrategy slicer = new BaseSlicingStrategy(new BasePayloadSlicer());
      slicer.startSlicing(buildPayload(2000 * MiB));

      long offset = 0;
      while (slicer.hasNext()) {
         PayloadSlice slice = slicer.nextSlice();
         long expectedLength = (slicer.hasNext() ? 32 : 16) * MiB;
         assertThat(slice.getPayload().getContentMetadata().getContentLength()).isEqualTo(expectedLength);
         assertThat(slice.getRange()).isEqualTo(ContentRange.build(offset, offset + expectedLength - 1));
         offset += expectedLength;
      }
   }

   @Test
   public void slicing2MBTest() {
      BaseSlicingStrategy slicer = new BaseSlicingStrategy(new BasePayloadSlicer());
      slicer.startSlicing(buildPayload(2 * MiB));

      long offset = 0;
      while (slicer.hasNext()) {
         PayloadSlice slice = slicer.nextSlice();
         long expectedLength = 1 * MiB;
         assertThat(slice.getPayload().getContentMetadata().getContentLength()).isEqualTo(expectedLength);
         assertThat(slice.getRange()).isEqualTo(ContentRange.build(offset, offset + expectedLength - 1));
         offset += expectedLength;
      }
   }

   @Test
   public void slicing40000GBTest() {
      BaseSlicingStrategy slicer = new BaseSlicingStrategy(new BasePayloadSlicer());
      slicer.startSlicing(buildPayload(40000 * GiB));

      long offset = 0;
      while (slicer.hasNext()) {
         PayloadSlice slice = slicer.nextSlice();
         long expectedLength = 4096 * MiB;
         assertThat(slice.getPayload().getContentMetadata().getContentLength()).isEqualTo(expectedLength);
         assertThat(slice.getRange()).isEqualTo(ContentRange.build(offset, offset + expectedLength - 1));
         offset += expectedLength;
      }
   }
}
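For reference, the expected part sizes in these tests are internally consistent: 100 MiB = 12 × 8 MiB + 4 MiB, 2000 MiB = 62 × 32 MiB + 16 MiB, 2 MiB = 2 × 1 MiB, and 40000 GiB = 10000 × 4096 MiB. In other words, the slicer appears to pick a power-of-two part size (up to Glacier's 4096 MiB maximum) that keeps the upload within the 10000-part limit, with a smaller final slice for any remainder.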

@ -0,0 +1,150 @@
package org.jclouds.glacier.blobstore.strategy.internal;

import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService;
import static org.assertj.core.api.Assertions.assertThat;
import static org.jclouds.Constants.PROPERTY_MAX_RETRIES;
import static org.jclouds.Constants.PROPERTY_SO_TIMEOUT;
import static org.jclouds.glacier.reference.GlacierHeaders.ARCHIVE_DESCRIPTION;
import static org.jclouds.glacier.reference.GlacierHeaders.ARCHIVE_ID;
import static org.jclouds.glacier.reference.GlacierHeaders.ARCHIVE_SIZE;
import static org.jclouds.glacier.reference.GlacierHeaders.MULTIPART_UPLOAD_ID;
import static org.jclouds.glacier.reference.GlacierHeaders.PART_SIZE;
import static org.jclouds.glacier.reference.GlacierHeaders.TREE_HASH;
import static org.jclouds.glacier.util.TestUtils.MiB;
import static org.jclouds.glacier.util.TestUtils.buildPayload;

import java.io.IOException;
import java.net.URL;
import java.util.Properties;
import java.util.Set;

import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.domain.internal.BlobBuilderImpl;
import org.jclouds.concurrent.config.ExecutorServiceModule;
import org.jclouds.glacier.GlacierClient;
import org.jclouds.http.HttpResponseException;
import org.jclouds.io.internal.BasePayloadSlicer;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableSet;
import com.google.common.hash.HashCode;
import com.google.common.net.HttpHeaders;
import com.google.inject.Module;
import okhttp3.mockwebserver.MockResponse;
import okhttp3.mockwebserver.MockWebServer;
import okhttp3.mockwebserver.RecordedRequest;

@Test(groups = {"mock"}, singleThreaded = true)
public class MultipartUploadStrategyMockTest {
   private static final Set<Module> modules = ImmutableSet.<Module> of(new ExecutorServiceModule(newDirectExecutorService(),
         newDirectExecutorService()));
   private static HashCode hash8 = HashCode.fromString("c87a460c93d4a8ffcf09a9a236cc17a486d7ed8a1a2f48e9c361c5f7ac0f1280");
   private static HashCode hash4 = HashCode.fromString("9491cb2ed1d4e7cd53215f4017c23ec4ad21d7050a1e6bb636c4f67e8cddb844");
   private static HashCode hcomp = HashCode.fromString("e196b8ae66b4e55a10c84647957c1291c84ffafa44bfdb88d87f0456e5399e46");

   MockWebServer server;
   GlacierClient client;

   private static GlacierClient getGlacierClient(URL server) {
      Properties overrides = new Properties();
      // prevent expect-100 bug http://code.google.com/p/mockwebserver/issues/detail?id=6
      overrides.setProperty(PROPERTY_SO_TIMEOUT, "0");
      overrides.setProperty(PROPERTY_MAX_RETRIES, "1");
      return ContextBuilder.newBuilder("glacier").credentials("accessKey", "secretKey").endpoint(server.toString())
            .modules(modules).overrides(overrides).buildApi(GlacierClient.class);
   }

   @BeforeMethod
   private void initServer() throws IOException {
      server = new MockWebServer();
      server.start();
      client = getGlacierClient(server.url("/").url());
   }

   @AfterMethod
   private void shutdownServer() throws IOException {
      server.shutdown();
   }

   @Test
   public void testSequentialMPU() throws IOException, InterruptedException {
      server.enqueue(new MockResponse().setResponseCode(201).addHeader(MULTIPART_UPLOAD_ID, "upload-id"));
      for (int i = 0; i < 12; i++) {
         server.enqueue(new MockResponse().setResponseCode(204).addHeader(TREE_HASH, hash8));
      }
      server.enqueue(new MockResponse().setResponseCode(204).addHeader(TREE_HASH, hash4));
      server.enqueue(new MockResponse().setResponseCode(201).addHeader(ARCHIVE_ID, "archive-id"));

      SequentialMultipartUploadStrategy strat = new SequentialMultipartUploadStrategy(client,
            new BaseSlicingStrategy(new BasePayloadSlicer()));

      assertThat(strat.execute("vault", new BlobBuilderImpl().name("test").payload(buildPayload(100 * MiB)).build()))
            .isEqualTo("archive-id");

      RecordedRequest initiate = server.takeRequest();
      assertThat(initiate.getRequestLine()).isEqualTo("POST /-/vaults/vault/multipart-uploads HTTP/1.1");
      assertThat(initiate.getHeader(ARCHIVE_DESCRIPTION)).isEqualTo("test");
      assertThat(Long.parseLong(initiate.getHeader(PART_SIZE))).isEqualTo(8 * MiB);

      RecordedRequest p1 = server.takeRequest();
      assertThat(p1.getRequestLine())
            .isEqualTo("PUT /-/vaults/vault/multipart-uploads/upload-id HTTP/1.1");
      assertThat(Long.parseLong(p1.getHeader(HttpHeaders.CONTENT_LENGTH))).isEqualTo(8388608);
      assertThat(HashCode.fromString(p1.getHeader(TREE_HASH))).isEqualTo(hash8);

      for (int i = 0; i < 11; i++) {
         server.takeRequest();
      }

      RecordedRequest p13 = server.takeRequest();
      assertThat(p13.getRequestLine())
            .isEqualTo("PUT /-/vaults/vault/multipart-uploads/upload-id HTTP/1.1");
      assertThat(HashCode.fromString(p13.getHeader(TREE_HASH))).isEqualTo(hash4);
      assertThat(Long.parseLong(p13.getHeader(HttpHeaders.CONTENT_LENGTH))).isEqualTo(4194304);

      RecordedRequest complete = server.takeRequest();
      assertThat(complete.getRequestLine()).isEqualTo("POST /-/vaults/vault/multipart-uploads/upload-id HTTP/1.1");
      assertThat(HashCode.fromString(complete.getHeader(TREE_HASH))).isEqualTo(hcomp);
      assertThat(Long.parseLong(complete.getHeader(ARCHIVE_SIZE))).isEqualTo(100 * MiB);
   }

   @Test(expectedExceptions = HttpResponseException.class)
   public void testSequentialMPUAbort() throws InterruptedException {
      server.enqueue(new MockResponse().setResponseCode(201).addHeader(MULTIPART_UPLOAD_ID, "upload-id"));
      server.enqueue(new MockResponse().setResponseCode(204).addHeader(TREE_HASH, hash8));
      server.enqueue(new MockResponse().setResponseCode(404));
      server.enqueue(new MockResponse().setResponseCode(204));

      SequentialMultipartUploadStrategy strat = new SequentialMultipartUploadStrategy(client,
            new BaseSlicingStrategy(new BasePayloadSlicer()));

      try {
         strat.execute("vault", new BlobBuilderImpl().name("test").payload(buildPayload(100 * MiB)).build());
      } finally {
         server.takeRequest();
         server.takeRequest();
         server.takeRequest();
         RecordedRequest abort = server.takeRequest();
         assertThat(abort.getRequestLine()).isEqualTo("DELETE /-/vaults/vault/multipart-uploads/upload-id HTTP/1.1");
      }
   }

}
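The three hash constants above are presumably the tree hashes of an 8 MiB slice, a 4 MiB slice, and the complete 100 MiB payload of repeated 'a' bytes. Assuming the strategy derives the complete-upload hash by folding the per-part hashes with the TreeHash helper added in this commit (and assuming 1-based part numbering), the relationship could be checked roughly like this hypothetical snippet:

   import com.google.common.collect.ImmutableMap;
   import com.google.common.hash.HashCode;
   import org.jclouds.glacier.util.TreeHash;

   // hash8, hash4 and hcomp as defined in the test above.
   ImmutableMap.Builder<Integer, HashCode> parts = ImmutableMap.<Integer, HashCode>builder();
   for (int i = 1; i <= 12; i++) {
      parts.put(i, hash8);   // twelve full 8 MiB parts
   }
   parts.put(13, hash4);     // final 4 MiB part
   // Expected to reproduce the tree hash sent on the complete call, i.e. equal hcomp.
   HashCode combined = TreeHash.buildTreeHashFromMap(parts.build());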

@ -0,0 +1,54 @@
package org.jclouds.glacier.predicates.validators;

import static com.google.common.base.Charsets.UTF_8;
import static org.jclouds.glacier.util.TestUtils.buildData;

import java.io.IOException;

import org.testng.annotations.Test;

@Test(groups = "unit", testName = "DescriptionValidatorTest")
public class DescriptionValidatorTest {

   private static final DescriptionValidator VALIDATOR = new DescriptionValidator();

   public void testValidate() throws IOException {
      VALIDATOR.validate("This is a valid description");
      VALIDATOR.validate("This_is*A#valid@Description");
      VALIDATOR.validate("This~Is~A~Valid~Description");
      VALIDATOR.validate("&Valid$Description");
      VALIDATOR.validate("");
      VALIDATOR.validate(buildData(1024).asCharSource(UTF_8).read());
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testIllegalCharacter() {
      VALIDATOR.validate(Character.toString((char) 31));
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testIllegalCharacter2() {
      VALIDATOR.validate(Character.toString((char) 127));
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testDescriptionTooLong() throws IOException {
      VALIDATOR.validate(buildData(1025).asCharSource(UTF_8).read());
   }
}

@ -0,0 +1,47 @@
package org.jclouds.glacier.predicates.validators;

import org.testng.annotations.Test;

@Test(groups = "unit", testName = "PartSizeValidatorTest")
public class PartSizeValidatorTest {
   private static final PartSizeValidator VALIDATOR = new PartSizeValidator();

   public void testValidate() {
      VALIDATOR.validate(1L);
      VALIDATOR.validate(2L);
      VALIDATOR.validate(4L);
      VALIDATOR.validate(32L);
      VALIDATOR.validate(4096L);
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testZero() {
      VALIDATOR.validate(0L);
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testTooBig() {
      VALIDATOR.validate(8192L);
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testNotPowerOfTwo() {
      VALIDATOR.validate(25L);
   }
}

@ -0,0 +1,51 @@
package org.jclouds.glacier.predicates.validators;

import static org.jclouds.glacier.util.TestUtils.GiB;
import static org.jclouds.glacier.util.TestUtils.buildPayload;

import org.jclouds.io.Payload;
import org.testng.annotations.Test;

@Test(groups = "unit", testName = "PayloadValidatorTest")
public class PayloadValidatorTest {

   private static final PayloadValidator VALIDATOR = new PayloadValidator();

   public void testValidate() {
      VALIDATOR.validate(buildPayload(10));
   }

   @Test(expectedExceptions = NullPointerException.class)
   public void testNoContentLength() {
      Payload payload = buildPayload(10);
      payload.getContentMetadata().setContentLength(null);
      VALIDATOR.validate(payload);
   }

   @Test(expectedExceptions = NullPointerException.class)
   public void testNullPayload() {
      VALIDATOR.validate(null);
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testContentLengthTooBig() {
      VALIDATOR.validate(buildPayload(5 * GiB));
   }

}

@ -0,0 +1,59 @@
package org.jclouds.glacier.predicates.validators;

import static com.google.common.base.Charsets.UTF_8;
import static org.jclouds.glacier.util.TestUtils.buildData;

import java.io.IOException;

import org.testng.annotations.Test;

@Test(groups = "unit", testName = "VaultNameValidatorTest")
public class VaultNameValidatorTest {

   private static final VaultNameValidator VALIDATOR = new VaultNameValidator();

   public void testValidate() throws IOException {
      VALIDATOR.validate("VALID_NAME");
      VALIDATOR.validate("VALID-NAME");
      VALIDATOR.validate("VALID255NAME");
      VALIDATOR.validate("255VALID-NAME");
      VALIDATOR.validate("VALID.NAME");
      VALIDATOR.validate(buildData(255).asCharSource(UTF_8).read());
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testEmptyName() {
      VALIDATOR.validate("");
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testIllegalCharacters() {
      VALIDATOR.validate("<InvalidName>");
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testIllegalCharacters2() {
      VALIDATOR.validate("INVALID,NAME");
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testNameTooLong() throws IOException {
      VALIDATOR.validate(buildData(256).asCharSource(UTF_8).read());
   }
}

@ -0,0 +1,56 @@
package org.jclouds.glacier.util;

import static org.assertj.core.api.Assertions.assertThat;

import java.security.NoSuchAlgorithmException;
import java.security.cert.CertificateException;

import org.jclouds.encryption.internal.JCECrypto;
import org.jclouds.http.HttpRequest;
import org.testng.annotations.Test;

import com.google.common.collect.Multimap;
import com.google.common.collect.TreeMultimap;

@Test(groups = "unit", testName = "AWSRequestSignerV4Test")
public class AWSRequestSignerV4Test {

   @Test
   public void testSignatureCalculation() throws NoSuchAlgorithmException, CertificateException {
      String auth = "AWS4-HMAC-SHA256 " + "Credential=AKIAIOSFODNN7EXAMPLE/20120525/us-east-1/glacier/aws4_request,"
            + "SignedHeaders=host;x-amz-date;x-amz-glacier-version,"
            + "Signature=3ce5b2f2fffac9262b4da9256f8d086b4aaf42eba5f111c21681a65a127b7c2a";
      String identity = "AKIAIOSFODNN7EXAMPLE";
      String credential = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
      AWSRequestSignerV4 signer = new AWSRequestSignerV4(identity, credential, new JCECrypto());
      HttpRequest request = signer.sign(createRequest());
      assertThat(request.getFirstHeaderOrNull("Authorization")).isEqualTo(auth);
   }

   private HttpRequest createRequest() {
      Multimap<String, String> headers = TreeMultimap.create();
      headers.put("Host", "glacier.us-east-1.amazonaws.com");
      headers.put("x-amz-date", "20120525T002453Z");
      headers.put("x-amz-glacier-version", "2012-06-01");
      HttpRequest request = HttpRequest.builder().method("PUT")
            .endpoint("https://glacier.us-east-1.amazonaws.com/-/vaults/examplevault").headers(headers).build();
      return request;
   }

}
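For context, AWS Signature Version 4 derives the signing key from the secret key through a fixed HMAC-SHA256 chain; the signer under test is expected to do the equivalent internally for the 20120525/us-east-1/glacier/aws4_request scope used above. A minimal sketch of that derivation (standard SigV4, not code from this commit):

   import static com.google.common.base.Charsets.UTF_8;

   import javax.crypto.Mac;
   import javax.crypto.spec.SecretKeySpec;

   public class SigV4KeyDerivationSketch {

      static byte[] hmacSha256(byte[] key, String data) throws Exception {
         Mac mac = Mac.getInstance("HmacSHA256");
         mac.init(new SecretKeySpec(key, "HmacSHA256"));
         return mac.doFinal(data.getBytes(UTF_8));
      }

      /** kSigning = HMAC(HMAC(HMAC(HMAC("AWS4" + secret, date), region), service), "aws4_request") */
      static byte[] signingKey(String secret, String date, String region, String service) throws Exception {
         byte[] kDate = hmacSha256(("AWS4" + secret).getBytes(UTF_8), date);
         byte[] kRegion = hmacSha256(kDate, region);
         byte[] kService = hmacSha256(kRegion, service);
         return hmacSha256(kService, "aws4_request");
      }
   }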

@ -0,0 +1,102 @@
package org.jclouds.glacier.util;

import static org.assertj.core.api.Assertions.assertThat;

import org.testng.annotations.Test;

@Test(groups = "unit", testName = "ContentRangeTest")
public class ContentRangeTest {
   @Test
   public void testContentRangeFromString() {
      ContentRange range = ContentRange.fromString("0-10");
      assertThat(range.getFrom()).isEqualTo(0);
      assertThat(range.getTo()).isEqualTo(10);

      range = ContentRange.fromString("1000-2000");
      assertThat(range.getFrom()).isEqualTo(1000);
      assertThat(range.getTo()).isEqualTo(2000);
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testContentRangeFromStringWithoutTo() {
      ContentRange.fromString("-10");
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testContentRangeFromStringWithoutFrom() {
      ContentRange.fromString("10-");
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testContentRangeFromStringWithEmptyString() {
      ContentRange.fromString("");
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testContentRangeFromStringWithNullString() {
      ContentRange.fromString(null);
   }

   @Test
   public void testContentRangeFromPartNumber() {
      ContentRange range = ContentRange.fromPartNumber(0, 4096);
      assertThat(range.getFrom()).isEqualTo(0);
      assertThat(range.getTo()).isEqualTo((4096L << 20) - 1);

      range = ContentRange.fromPartNumber(1, 4096);
      assertThat(range.getFrom()).isEqualTo(4096L << 20);
      assertThat(range.getTo()).isEqualTo(2 * (4096L << 20) - 1);

      range = ContentRange.fromPartNumber(2, 4096);
      assertThat(range.getFrom()).isEqualTo(2 * (4096L << 20));
      assertThat(range.getTo()).isEqualTo(3 * (4096L << 20) - 1);
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testContentRangeFromPartNumberWithNegativePartNumber() {
      ContentRange.fromPartNumber(-1, 4096);
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testContentRangeFromPartNumberWithZeroPartSize() {
      ContentRange.fromPartNumber(0, 0);
   }

   @Test
   public void testBuildContentRange() {
      ContentRange range = ContentRange.build(0, 4096);
      assertThat(range.getFrom()).isEqualTo(0);
      assertThat(range.getTo()).isEqualTo(4096);
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testBuildContentRangeWithTransposedValues() {
      ContentRange.build(50, 10);
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testBuildContentRangeWithNegatives() {
      ContentRange.build(-100, -50);
   }

   @Test(expectedExceptions = IllegalArgumentException.class)
   public void testBuildContentRangeWithZeroTo() {
      ContentRange.build(0, 0);
   }
}
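In other words, judging from the assertions above, fromPartNumber(partNumber, partSizeInMiB) covers the byte range

   from = partNumber * partSizeInMiB * 2^20
   to   = (partNumber + 1) * partSizeInMiB * 2^20 - 1

which is exactly the (4096L << 20) arithmetic the test spells out for a 4096 MiB part size.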

@ -0,0 +1,47 @@
package org.jclouds.glacier.util;

import static com.google.common.net.MediaType.PLAIN_TEXT_UTF_8;

import java.util.Arrays;

import org.jclouds.io.ByteSources;
import org.jclouds.io.Payload;
import org.jclouds.io.payloads.ByteSourcePayload;

import com.google.common.io.ByteSource;

public class TestUtils {
   public static final long MiB = 1L << 20;
   public static final long GiB = 1L << 30;
   public static final long TiB = 1L << 40;

   public static Payload buildPayload(long size) {
      ByteSource data = buildData(size);
      Payload payload = new ByteSourcePayload(data);
      payload.getContentMetadata().setContentType(PLAIN_TEXT_UTF_8.toString());
      payload.getContentMetadata().setContentLength(size);
      return payload;
   }

   public static ByteSource buildData(long size) {
      byte[] array = new byte[1024];
      Arrays.fill(array, (byte) 'a');
      return ByteSources.repeatingArrayByteSource(array).slice(0, size);
   }
}

@ -0,0 +1,79 @@
package org.jclouds.glacier.util;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.jclouds.glacier.util.TestUtils.MiB;
|
||||
import static org.jclouds.glacier.util.TestUtils.buildData;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.jclouds.io.payloads.ByteSourcePayload;
|
||||
import org.testng.annotations.Test;
|
||||
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.ImmutableMap.Builder;
|
||||
import com.google.common.hash.HashCode;
|
||||
|
||||
@Test(groups = "unit", testName = "TreeHasherTest")
|
||||
public class TreeHashTest {
|
||||
|
||||
@Test
|
||||
public void testTreeHasherWith1MBPayload() throws IOException {
|
||||
TreeHash th = TreeHash.buildTreeHashFromPayload(new ByteSourcePayload(buildData(1 * MiB)));
|
||||
assertThat(th.getLinearHash())
|
||||
.isEqualTo(HashCode.fromString("9bc1b2a288b26af7257a36277ae3816a7d4f16e89c1e7e77d0a5c48bad62b360"));
|
||||
assertThat(th.getTreeHash())
|
||||
.isEqualTo(HashCode.fromString("9bc1b2a288b26af7257a36277ae3816a7d4f16e89c1e7e77d0a5c48bad62b360"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTreeHasherWith2MBPayload() throws IOException {
|
||||
TreeHash th = TreeHash.buildTreeHashFromPayload(new ByteSourcePayload(buildData(2 * MiB)));
|
||||
assertThat(th.getLinearHash())
|
||||
.isEqualTo(HashCode.fromString("5256ec18f11624025905d057d6befb03d77b243511ac5f77ed5e0221ce6d84b5"));
|
||||
assertThat(th.getTreeHash())
|
||||
.isEqualTo(HashCode.fromString("560c2c9333c719cb00cfdffee3ba293db17f58743cdd1f7e4055373ae6300afa"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTreeHasherWith3MBPayload() throws IOException {
|
||||
TreeHash th = TreeHash.buildTreeHashFromPayload(new ByteSourcePayload(buildData(3 * MiB)));
|
||||
assertThat(th.getLinearHash())
|
||||
.isEqualTo(HashCode.fromString("6f850bc94ae6f7de14297c01616c36d712d22864497b28a63b81d776b035e656"));
|
||||
assertThat(th.getTreeHash())
|
||||
.isEqualTo(HashCode.fromString("70239f4f2ead7561f69d48b956b547edef52a1280a93c262c0b582190be7db17"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTreeHasherWithMoreThan3MBPayload() throws IOException {
|
||||
TreeHash th = TreeHash.buildTreeHashFromPayload(new ByteSourcePayload(buildData(3 * MiB + 512 * 1024)));
|
||||
assertThat(th.getLinearHash())
|
||||
.isEqualTo(HashCode.fromString("34c8bdd269f89a091cf17d5d23503940e0abf61c4b6544e42854b9af437f31bb"));
|
||||
assertThat(th.getTreeHash())
|
||||
.isEqualTo(HashCode.fromString("daede4eb580f914dacd5e0bdf7015c937fd615c1e6c6552d25cb04a8b7219828"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBuildTreeHashFromMap() throws IOException {
|
||||
Builder<Integer, HashCode> map = ImmutableMap.<Integer, HashCode>builder();
|
||||
map.put(2, HashCode.fromString("9bc1b2a288b26af7257a36277ae3816a7d4f16e89c1e7e77d0a5c48bad62b360"));
|
||||
map.put(1, HashCode.fromString("9bc1b2a288b26af7257a36277ae3816a7d4f16e89c1e7e77d0a5c48bad62b360"));
|
||||
HashCode treehash = TreeHash.buildTreeHashFromMap(map.build());
|
||||
assertThat(treehash).isEqualTo(HashCode.fromString("560c2c9333c719cb00cfdffee3ba293db17f58743cdd1f7e4055373ae6300afa"));
|
||||
}
|
||||
}
|
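The expected digests asserted above follow Glacier's tree-hash scheme: the payload is split into 1 MiB chunks, each chunk is hashed with SHA-256, and adjacent digests are combined pairwise (hashing the concatenation of each pair) until a single root digest remains; the linear hash is a plain SHA-256 over the whole payload. The sketch below illustrates that combination step using only the JDK; it is not the TreeHash class under test, and the class name is hypothetical.

// Illustrative sketch of the Glacier tree-hash combination step.
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class TreeHashSketch {
   private static final int CHUNK_SIZE = 1024 * 1024; // Glacier hashes fixed 1 MiB chunks

   public static byte[] treeHash(byte[] data) throws NoSuchAlgorithmException {
      MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
      // Leaf level: SHA-256 of every 1 MiB chunk (the last chunk may be shorter).
      List<byte[]> level = new ArrayList<byte[]>();
      for (int offset = 0; offset < data.length; offset += CHUNK_SIZE) {
         int end = Math.min(offset + CHUNK_SIZE, data.length);
         level.add(sha256.digest(Arrays.copyOfRange(data, offset, end)));
      }
      if (level.isEmpty()) {
         level.add(sha256.digest(new byte[0])); // empty payload: SHA-256 of zero bytes
      }
      // Combine pairwise until a single root remains; an unpaired digest is carried up unchanged.
      while (level.size() > 1) {
         List<byte[]> parents = new ArrayList<byte[]>();
         for (int i = 0; i + 1 < level.size(); i += 2) {
            sha256.update(level.get(i));
            sha256.update(level.get(i + 1));
            parents.add(sha256.digest());
         }
         if (level.size() % 2 == 1) {
            parents.add(level.get(level.size() - 1));
         }
         level = parents;
      }
      return level.get(0);
   }
}

Combining two identical 1 MiB chunk digests this way is exactly what testBuildTreeHashFromMap exercises, and it reproduces the 2 MiB tree hash asserted in testTreeHasherWith2MBPayload.
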
@@ -0,0 +1,18 @@
{
  "Action": "ArchiveRetrieval",
  "ArchiveId": "NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-TjhqG6eGoOY9Z8i1_AUyUsuhPAdTqLHy8pTl5nfCFJmDl2yEZONi5L26Omw12vcs01MNGntHEQL8MBfGlqrEXAMPLEArchiveId",
  "ArchiveSizeInBytes": 16777216,
  "ArchiveSHA256TreeHash": "beb0fe31a1c7ca8c6c04d574ea906e3f97b31fdca7571defb5b44dca89b5af60",
  "Completed": false,
  "CreationDate": "2012-05-15T17:21:39.339Z",
  "CompletionDate": "2012-05-15T17:21:43.561Z",
  "InventorySizeInBytes": null,
  "JobDescription": "My ArchiveRetrieval Job",
  "JobId": "HkF9p6o7yjhFx-K3CGl6fuSm6VzW9T7esGQfco8nUXVYwS0jlb5gq1JZ55yHgt5vP54ZShjoQzQVVh7vEXAMPLEjobID",
  "RetrievalByteRange": "0-16777215",
  "SHA256TreeHash": "beb0fe31a1c7ca8c6c04d574ea906e3f97b31fdca7571defb5b44dca89b5af60",
  "SNSTopic": "arn:aws:sns:us-east-1:012345678901:mytopic",
  "StatusCode": "InProgress",
  "StatusMessage": "Operation in progress.",
  "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault"
}

@@ -0,0 +1,8 @@
{
  "CreationDate" : "2012-02-20T17:01:45.198Z",
  "LastInventoryDate" : "2012-03-20T17:03:43.221Z",
  "NumberOfArchives" : 192,
  "SizeInBytes" : 78088912,
  "VaultARN" : "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault",
  "VaultName" : "examplevault"
}

@@ -0,0 +1,20 @@
{
  "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault",
  "InventoryDate": "2011-12-12T14:19:01Z",
  "ArchiveList": [
    {
      "ArchiveId": "DMTmICA2n5Tdqq5BV2z7og-A20xnpAPKt3UXwWxdWsn_D6auTUrW6kwy5Qyj9xd1MCE1mBYvMQ63LWaT8yTMzMaCxB_9VBWrW4Jw4zsvg5kehAPDVKcppUD1X7b24JukOr4mMAq-oA",
      "ArchiveDescription": "my archive1",
      "CreationDate": "2012-05-15T17:19:46.700Z",
      "Size": 2140123,
      "SHA256TreeHash": "6b9d4cf8697bd3af6aa1b590a0b27b337da5b18988dbcc619a3e608a554a1e62"
    },
    {
      "ArchiveId": "2lHzwhKhgF2JHyvCS-ZRuF08IQLuyB4265Hs3AXj9MoAIhz7tbXAvcFeHusgU_hViO1WeCBe0N5lsYYHRyZ7rrmRkNRuYrXUs_sjl2K8ume_7mKO_0i7C-uHE1oHqaW9d37pabXrSA",
      "ArchiveDescription": "my archive2",
      "CreationDate": "2012-05-15T17:21:39.339Z",
      "Size": 2140123,
      "SHA256TreeHash": "7f2fe580edb35154041fa3d4b41dd6d3adaef0c85d2ff6309f1d4b520eeecda3"
    }
  ]
}

@@ -0,0 +1,41 @@
{
  "JobList": [
    {
      "Action": "ArchiveRetrieval",
      "ArchiveId": "58-3KpZfcMPUznvMZNPaKyJx9wODCsWTnqcjtx2CjKZ6b-XgxEuA8yvZOYTPQfd7gWR4GRm2XR08gcnWbLV4VPV_kDWtZJKi0TFhKKVPzwrZnA4-FXuIBfViYUIVveeiBE51FO4bvg",
      "ArchiveSizeInBytes": 8388608,
      "ArchiveSHA256TreeHash": "106086b256ddf0fedf3d9e72f461d5983a2566247ebe7e1949246bc61359b4f4",
      "Completed": true,
      "CompletionDate": "2012-05-01T00:25:20.043Z",
      "CreationDate": "2012-05-01T00:25:16.344Z",
      "InventorySizeInBytes": null,
      "JobDescription": "aaabbbccc",
      "JobId": "s4MvaNHIh6mOa1f8iY4ioG2921SDPihXxh3Kv0FBX-JbNPctpRvE4c2_BifuhdGLqEhGBNGeB6Ub-JMunR9JoVa8y1hQ",
      "RetrievalByteRange": "0-8388607",
      "SHA256TreeHash": "106086b256ddf0fedf3d9e72f461d5983a2566247ebe7e1949246bc61359b4f4",
      "SNSTopic": null,
      "StatusCode": "Succeeded",
      "StatusMessage": "Succeeded",
      "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault"
    },
    {
      "Action": "ArchiveRetrieval",
      "ArchiveId": "2NVGpf83U6qB9M2u-Ihh61yoFLRDEoh7YLZWKBn80A2i1xG8uieBwGjAr4RkzOHA0E07ZjtI267R03Z-6Hxd8pyGQkBdciCSH1-Lw63Kx9qKpZbPCdU0uTW_WAdwF6lR6w8iSyKdvw",
      "ArchiveSizeInBytes": 1048576,
      "ArchiveSHA256TreeHash": "3d2ae052b2978727e0c51c0a5e32961c6a56650d1f2e4ceccab6472a5ed4a0",
      "Completed": true,
      "CompletionDate": "2012-05-01T16:59:48.444Z",
      "CreationDate": "2012-05-01T16:59:42.977Z",
      "InventorySizeInBytes": null,
      "JobDescription": "aaabbbccc",
      "JobId": "CQ_tf6fOR4jrJCL61Mfk6VM03oY8lmnWK93KK4gLig1UPAbZiN3UV4G_5nq4AfmJHQ_dOMLOX5k8ItFv0wCPN0oaz5dG",
      "RetrievalByteRange": "0-1048575",
      "SHA256TreeHash": "3d2ae052b2978727e0c51c0a5e32961c6a56650d1f2e4ceccab6472a5ed4a0",
      "SNSTopic": null,
      "StatusCode": "Succeeded",
      "StatusMessage": "Succeeded",
      "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault"
    }
  ],
  "Marker": "CQ_tf6fOR4jrJCL61Mfk6VM03oY8lmnWK93KK4gLig1UPAbZiN3UV4G_5nq4AfmJHQ_dOMLOX5k8ItFv0wCPN0oaz5dG"
}

@@ -0,0 +1,12 @@
{
  "Marker": "qt-RBst_7yO8gVIonIBsAxr2t-db0pE4s8MNeGjKjGdNpuU-cdSAcqG62guwV9r5jh5mLyFPzFEitTpNE7iQfHiu1XoV",
  "UploadsList" : [
    {
      "ArchiveDescription": "archive 2",
      "CreationDate": "2012-04-01T15:00:00.000Z",
      "MultipartUploadId": "nPyGOnyFcx67qqX7E-0tSGiRi88hHMOwOxR-_jNyM6RjVMFfV29lFqZ3rNsSaWBugg6OP92pRtufeHdQH7ClIpSF6uJc",
      "PartSizeInBytes": 4194304,
      "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault"
    }
  ]
}

@@ -0,0 +1,4 @@
{
  "Marker": "qt-RBst_7yO8gVIonIBsAxr2t-db0pE4s8MNeGjKjGdNpuU-cdSAcqG62guwV9r5jh5mLyFPzFEitTpNE7iQfHiu1XoV",
  "UploadsList" : []
}

@@ -0,0 +1,13 @@
{
  "ArchiveDescription" : "archive description 1",
  "CreationDate" : "2012-03-20T17:03:43.221Z",
  "Marker": "MfgsKHVjbQ6EldVl72bn3_n5h2TaGZQUO-Qb3B9j3TITf7WajQ",
  "MultipartUploadId" : "OW2fM5iVylEpFEMM9_HpKowRapC3vn5sSL39_396UW9zLFUWVrnRHaPjUJddQ5OxSHVXjYtrN47NBZ-khxOjyEXAMPLE",
  "PartSizeInBytes" : 4194304,
  "Parts" :
  [ {
    "RangeInBytes" : "4194304-8388607",
    "SHA256TreeHash" : "01d34dabf7be316472c93b1ef80721f5d4"
  }],
  "VaultARN" : "arn:aws:glacier:us-east-1:012345678901:vaults/demo1-vault"
}

@@ -0,0 +1,29 @@
{
  "Marker": null,
  "VaultList": [
    {
      "CreationDate": "2012-03-16T22:22:47.214Z",
      "LastInventoryDate": "2012-03-21T22:06:51.218Z",
      "NumberOfArchives": 2,
      "SizeInBytes": 12334,
      "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault1",
      "VaultName": "examplevault1"
    },
    {
      "CreationDate": "2012-03-19T22:06:51.218Z",
      "LastInventoryDate": "2012-03-21T22:06:51.218Z",
      "NumberOfArchives": 0,
      "SizeInBytes": 0,
      "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault2",
      "VaultName": "examplevault2"
    },
    {
      "CreationDate": "2012-03-19T22:06:51.218Z",
      "LastInventoryDate": "2012-03-25T12:14:31.121Z",
      "NumberOfArchives": 0,
      "SizeInBytes": 0,
      "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault3",
      "VaultName": "examplevault3"
    }
  ]
}

@@ -0,0 +1,4 @@
{
  "Marker": null,
  "VaultList": []
}

@@ -0,0 +1,21 @@
{
  "Marker": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault3",
  "VaultList": [
    {
      "CreationDate": "2012-03-16T22:22:47.214Z",
      "LastInventoryDate": "2012-03-21T22:06:51.218Z",
      "NumberOfArchives": 2,
      "SizeInBytes": 12334,
      "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault1",
      "VaultName": "examplevault1"
    },
    {
      "CreationDate": "2012-03-19T22:06:51.218Z",
      "LastInventoryDate": "2012-03-21T22:06:51.218Z",
      "NumberOfArchives": 0,
      "SizeInBytes": 0,
      "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault2",
      "VaultName": "examplevault2"
    }
  ]
}

@@ -0,0 +1,34 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE suite SYSTEM "http://testng.org/testng-1.0.dtd" >
<suite name="tests">
   <test name="livelong">
      <groups>
         <run>
            <include name="livelong" />
         </run>
      </groups>
      <classes>
         <class name="org.jclouds.glacier.GlacierClientLongLiveTest" />
      </classes>
   </test>
   <test name="mock">
      <groups>
         <run>
            <include name="mock" />
         </run>
      </groups>
      <classes>
         <class name="org.jclouds.glacier.GlacierClientMockTest" />
      </classes>
   </test>
   <test name="liveshort">
      <groups>
         <run>
            <include name="liveshort" />
         </run>
      </groups>
      <classes>
         <class name="org.jclouds.glacier.GlacierClientLiveTest" />
      </classes>
   </test>
</suite>

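A suite like the one above can also be driven programmatically through the TestNG API. The snippet below is a minimal, illustrative sketch: the suite path assumes the conventional Maven test-resources location, the class name is hypothetical, and the live groups additionally require real provider credentials supplied as system properties.

// Illustrative only: run the "mock" group from this suite via the TestNG API.
import java.util.Collections;

import org.testng.TestNG;

public class RunGlacierMockTests {
   public static void main(String[] args) {
      TestNG testng = new TestNG();
      // Assumed path; adjust to wherever the suite file lives in the module.
      testng.setTestSuites(Collections.singletonList("src/test/resources/testng.xml"));
      // Restrict the run to the "mock" group; "liveshort" and "livelong" hit the real service.
      testng.setGroups("mock");
      testng.run();
   }
}
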