Fix the code according to PR review

This commit is contained in:
zhuzhih2017 2017-03-04 18:06:32 +08:00
parent 5e44a28a42
commit 0400ebae95
57 changed files with 8520 additions and 243 deletions

View File

@ -22,6 +22,7 @@ func (c *AlicloudAccessConfig) Client() (*ecs.Client, error) {
return nil, err
}
client := ecs.NewClient(c.AlicloudAccessKey, c.AlicloudSecretKey)
client.SetBusinessInfo("Packer")
if _, err := client.DescribeRegions(); err != nil {
return nil, err
}

View File

@ -9,12 +9,6 @@ import (
"os"
)
const (
ssh_time_out = 60000000000
default_port = 22
default_comm_type = "ssh"
)
type RunConfig struct {
AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
ZoneId string `mapstructure:"zone_id"`
@ -51,18 +45,6 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
c.TemporaryKeyPairName = fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
}
if c.Comm.Type == "" {
c.Comm.Type = default_comm_type
}
if c.Comm.SSHTimeout == 0 {
c.Comm.SSHTimeout = ssh_time_out
}
if c.Comm.SSHPort == 0 {
c.Comm.SSHPort = default_port
}
// Validation
errs := c.Comm.Prepare(ctx)
if c.AlicloudSourceImage == "" {

View File

@ -5,6 +5,9 @@ import (
"github.com/mitchellh/multistep"
packerssh "github.com/mitchellh/packer/communicator/ssh"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"net"
"os"
)
type alicloudSSHHelper interface {
@ -20,6 +23,24 @@ func SSHHost(e alicloudSSHHelper, private bool) func(multistep.StateBag) (string
func SSHConfig(useAgent bool, username, password string) func(multistep.StateBag) (*ssh.ClientConfig, error) {
return func(state multistep.StateBag) (*ssh.ClientConfig, error) {
if useAgent {
authSock := os.Getenv("SSH_AUTH_SOCK")
if authSock == "" {
return nil, fmt.Errorf("SSH_AUTH_SOCK is not set")
}
sshAgent, err := net.Dial("unix", authSock)
if err != nil {
return nil, fmt.Errorf("Cannot connect to SSH Agent socket %q: %s", authSock, err)
}
return &ssh.ClientConfig{
User: username,
Auth: []ssh.AuthMethod{
ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers),
},
}, nil
}
privateKey, hasKey := state.GetOk("privateKey")
if hasKey {

View File

@ -81,7 +81,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
state.Put("ui", ui)
state.Put("networktype", b.chooseNetworkType())
var steps []multistep.Step
if b.chooseNetworkType() == VpcNet {
// Build the steps
steps = []multistep.Step{
&stepPreValidate{
@ -98,17 +98,22 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
PublicKeyFile: b.config.PublicKey,
TemporaryKeyPairName: b.config.TemporaryKeyPairName,
},
}
if b.chooseNetworkType() == VpcNet {
steps = append(steps,
&stepConfigAlicloudVPC{
VpcId: b.config.VpcId,
CidrBlock: b.config.CidrBlock,
VpcName: b.config.VpcName,
},
&stepConfigAlicloudVSwithc{
&stepConfigAlicloudVSwitch{
VSwitchId: b.config.VSwitchId,
ZoneId: b.config.ZoneId,
CidrBlock: b.config.CidrBlock,
VSwitchName: b.config.VSwitchName,
},
})
}
steps = append(steps,
&stepConfigAlicloudSecurityGroup{
SecurityGroupId: b.config.SecurityGroupId,
SecurityGroupName: b.config.SecurityGroupId,
@ -124,80 +129,18 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
InternetMaxBandwidthOut: b.config.InternetMaxBandwidthOut,
InstnaceName: b.config.InstanceName,
ZoneId: b.config.ZoneId,
},
&setpConfigAlicloudEIP{
})
if b.chooseNetworkType() == VpcNet {
steps = append(steps, &setpConfigAlicloudEIP{
AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
RegionId: b.config.AlicloudRegion,
},
&stepRunAlicloudInstance{},
&stepMountAlicloudDisk{},
&communicator.StepConnect{
Config: &b.config.RunConfig.Comm,
Host: SSHHost(
client,
b.config.SSHPrivateIp),
SSHConfig: SSHConfig(
b.config.RunConfig.Comm.SSHAgentAuth,
b.config.RunConfig.Comm.SSHUsername,
b.config.RunConfig.Comm.SSHPassword),
},
&common.StepProvision{},
&stepStopAlicloudInstance{
ForceStop: b.config.ForceStopInstance,
},
&stepDeleteAlicloudImageSnapshots{
AlicloudImageForceDeteleSnapshots: b.config.AlicloudImageForceDeteleSnapshots,
AlicloudImageForceDetele: b.config.AlicloudImageForceDetele,
AlicloudImageName: b.config.AlicloudImageName,
},
&stepCreateAlicloudImage{},
&setpRegionCopyAlicloudImage{
AlicloudImageDestinationRegions: b.config.AlicloudImageDestinationRegions,
AlicloudImageDestinationNames: b.config.AlicloudImageDestinationNames,
RegionId: b.config.AlicloudRegion,
},
&setpShareAlicloudImage{
AlicloudImageShareAccounts: b.config.AlicloudImageShareAccounts,
AlicloudImageUNShareAccounts: b.config.AlicloudImageUNShareAccounts,
RegionId: b.config.AlicloudRegion,
},
}
})
} else {
// Build the steps
steps = []multistep.Step{
&stepPreValidate{
AlicloudDestImageName: b.config.AlicloudImageName,
ForceDelete: b.config.AlicloudImageForceDetele,
},
&stepCheckAlicloudSourceImage{
SourceECSImageId: b.config.AlicloudSourceImage,
},
&StepConfigAlicloudKeyPair{
Debug: b.config.PackerDebug,
KeyPairName: b.config.SSHKeyPairName,
PrivateKeyFile: b.config.Comm.SSHPrivateKey,
PublicKeyFile: b.config.PublicKey,
TemporaryKeyPairName: b.config.TemporaryKeyPairName,
},
&stepConfigAlicloudSecurityGroup{
SecurityGroupId: b.config.SecurityGroupId,
SecurityGroupName: b.config.SecurityGroupId,
steps = append(steps, &stepConfigAlicloudPublicIP{
RegionId: b.config.AlicloudRegion,
},
&stepCreateAlicloudInstance{
IOOptimized: b.config.IOOptimized,
InstanceType: b.config.InstanceType,
UserData: b.config.UserData,
UserDataFile: b.config.UserDataFile,
RegionId: b.config.AlicloudRegion,
InternetChargeType: b.config.InternetChargeType,
InternetMaxBandwidthOut: b.config.InternetMaxBandwidthOut,
InstnaceName: b.config.InstanceName,
ZoneId: b.config.ZoneId,
},
&stepConfigAlicloudPublicIP{
RegionId: b.config.AlicloudRegion,
},
})
}
steps = append(steps,
&stepRunAlicloudInstance{},
&stepMountAlicloudDisk{},
&communicator.StepConnect{
@ -229,10 +172,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
AlicloudImageShareAccounts: b.config.AlicloudImageShareAccounts,
AlicloudImageUNShareAccounts: b.config.AlicloudImageUNShareAccounts,
RegionId: b.config.AlicloudRegion,
},
}
}
})
// Run!
b.runner = common.NewRunner(steps, b.config.PackerConfig, ui)

View File

@ -13,6 +13,7 @@ type StepConfigAlicloudKeyPair struct {
KeyPairName string
PrivateKeyFile string
PublicKeyFile string
SSHAgentAuth bool
keyName string
}
@ -38,6 +39,17 @@ func (s *StepConfigAlicloudKeyPair) Run(state multistep.StateBag) multistep.Step
return multistep.ActionContinue
}
if s.SSHAgentAuth && s.KeyPairName == "" {
ui.Say("Using SSH Agent with key pair in Alicloud Source Image")
return multistep.ActionContinue
}
if s.SSHAgentAuth && s.KeyPairName != "" {
ui.Say(fmt.Sprintf("Using SSH Agent for existing key pair %s", s.KeyPairName))
state.Put("keyPair", s.KeyPairName)
return multistep.ActionContinue
}
if s.TemporaryKeyPairName == "" {
ui.Say("Not using temporary keypair")
state.Put("keyPair", "")

View File

@ -10,7 +10,7 @@ import (
"time"
)
type stepConfigAlicloudVSwithc struct {
type stepConfigAlicloudVSwitch struct {
VSwitchId string
ZoneId string
isCreate bool
@ -18,7 +18,7 @@ type stepConfigAlicloudVSwithc struct {
VSwitchName string
}
func (s *stepConfigAlicloudVSwithc) Run(state multistep.StateBag) multistep.StepAction {
func (s *stepConfigAlicloudVSwitch) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*ecs.Client)
ui := state.Get("ui").(packer.Ui)
vpcId := state.Get("vpcid").(string)
@ -42,7 +42,7 @@ func (s *stepConfigAlicloudVSwithc) Run(state multistep.StateBag) multistep.Step
return multistep.ActionContinue
}
s.isCreate = false
message := fmt.Sprintf("The specific vswitch {%s} isn't exist.", s.VSwitchId)
message := fmt.Sprintf("The specific vswitch {%s} doesn't exist.", s.VSwitchId)
state.Put("error", errors.New(message))
ui.Say(message)
return multistep.ActionHalt
@ -52,7 +52,7 @@ func (s *stepConfigAlicloudVSwithc) Run(state multistep.StateBag) multistep.Step
zones, err := client.DescribeZones(common.Region(config.AlicloudRegion))
if err != nil {
ui.Say(fmt.Sprintf("Query avaiable zone failed: %s", err))
ui.Say(fmt.Sprintf("Query for available zones failed: %s", err))
state.Put("error", err)
return multistep.ActionHalt
}
@ -77,20 +77,20 @@ func (s *stepConfigAlicloudVSwithc) Run(state multistep.StateBag) multistep.Step
if s.ZoneId == "" {
if len(instanceTypes) > 0 {
ui.Say(fmt.Sprintf("The instance Type %s isn't avaiable in this Region."+
"\n You can either change the instance one of following %v \n"+
ui.Say(fmt.Sprintf("The instance type %s isn't available in this region."+
"\n You can either change the instance to one of following: %v \n"+
"or choose another region", config.InstanceType, instanceTypes))
state.Put("error", fmt.Errorf("The instance Type %s isn't avaiable in this Region."+
"\n You can either change the instance one of following %v \n"+
state.Put("error", fmt.Errorf("The instance type %s isn't available in this region."+
"\n You can either change the instance to one of following: %v \n"+
"or choose another region", config.InstanceType, instanceTypes))
return multistep.ActionHalt
} else {
ui.Say(fmt.Sprintf("The instance Type %s isn't avaiable in this Region."+
"\n You can change to other ragions \n", config.InstanceType))
ui.Say(fmt.Sprintf("The instance type %s isn't available in this region."+
"\n You can change to other regions \n", config.InstanceType))
state.Put("error", fmt.Errorf("The instance Type %s isn't avaiable in this Region."+
"\n You can change to other ragions \n", config.InstanceType))
state.Put("error", fmt.Errorf("The instance type %s isn't available in this region."+
"\n You can change to other regions \n", config.InstanceType))
return multistep.ActionHalt
}
}
@ -112,7 +112,7 @@ func (s *stepConfigAlicloudVSwithc) Run(state multistep.StateBag) multistep.Step
}
if err := client.WaitForVSwitchAvailable(vpcId, s.VSwitchId, ALICLOUD_DEFAULT_TIMEOUT); err != nil {
state.Put("error", err)
ui.Error(fmt.Sprintf("Error waiting vswitch avaiable timeout: %v", err))
ui.Error(fmt.Sprintf("Timeout waiting for vswitch to become avaiable: %v", err))
return multistep.ActionHalt
}
state.Put("vswitchid", vswitchId)
@ -121,7 +121,7 @@ func (s *stepConfigAlicloudVSwithc) Run(state multistep.StateBag) multistep.Step
return multistep.ActionContinue
}
func (s *stepConfigAlicloudVSwithc) Cleanup(state multistep.StateBag) {
func (s *stepConfigAlicloudVSwitch) Cleanup(state multistep.StateBag) {
if !s.isCreate {
return
}

View File

@ -31,7 +31,7 @@ func (s *stepCreateAlicloudImage) Run(state multistep.StateBag) multistep.StepAc
Description: config.AlicloudImageDescription})
if err != nil {
err := fmt.Errorf("Error create alicloud images: %s", err)
err := fmt.Errorf("Error creating alicloud images: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt

View File

@ -146,7 +146,7 @@ func (s *stepCreateAlicloudInstance) Cleanup(state multistep.StateBag) {
ui := state.Get("ui").(packer.Ui)
err := client.DeleteInstance(s.instance.InstanceId)
if err != nil {
ui.Say(fmt.Sprintf("Clean instance %s failed ", s.instance.InstanceId))
ui.Say(fmt.Sprintf("Cleaning instance: %s failed ", s.instance.InstanceId))
}
}

View File

@ -44,7 +44,7 @@ func (s *stepDeleteAlicloudImageSnapshots) Run(state multistep.StateBag) multist
if s.AlicloudImageForceDeteleSnapshots {
for _, diskDevice := range image.DiskDeviceMappings.DiskDeviceMapping {
if err := client.DeleteSnapshot(diskDevice.SnapshotId); err != nil {
err := fmt.Errorf("Delete ECS snapshots failed: %s", err)
err := fmt.Errorf("Deleting ECS snapshot failed: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt

View File

@ -42,13 +42,13 @@ func (s *stepMountAlicloudDisk) Run(state multistep.StateBag) multistep.StepActi
}
for _, disk := range disks {
if err := client.WaitForDisk(instance.RegionId, disk.DiskId, ecs.DiskStatusInUse, ALICLOUD_DEFAULT_SHORT_TIMEOUT); err != nil {
err := fmt.Errorf("Error waiting mount alicloud disks failed: %s", err)
err := fmt.Errorf("Timeout waiting for mount of alicloud disk: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
}
ui.Say("Finish mounting Disks")
ui.Say("Finish mounting disks")
return multistep.ActionContinue
}

View File

@ -38,6 +38,7 @@ import (
virtualboxovfbuilder "github.com/mitchellh/packer/builder/virtualbox/ovf"
vmwareisobuilder "github.com/mitchellh/packer/builder/vmware/iso"
vmwarevmxbuilder "github.com/mitchellh/packer/builder/vmware/vmx"
alicloudimportpostprocessor "github.com/mitchellh/packer/post-processor/alicloud-import"
amazonimportpostprocessor "github.com/mitchellh/packer/post-processor/amazon-import"
artificepostprocessor "github.com/mitchellh/packer/post-processor/artifice"
atlaspostprocessor "github.com/mitchellh/packer/post-processor/atlas"
@ -119,6 +120,7 @@ var Provisioners = map[string]packer.Provisioner{
}
var PostProcessors = map[string]packer.PostProcessor{
"alicloud-import": new(alicloudimportpostprocessor.PostProcessor),
"amazon-import": new(amazonimportpostprocessor.PostProcessor),
"artifice": new(artificepostprocessor.PostProcessor),
"atlas": new(atlaspostprocessor.PostProcessor),

View File

@ -0,0 +1,346 @@
package alicloudimport
import (
"fmt"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
packercommon "github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/denverdino/aliyungo/ram"
packerecs "github.com/mitchellh/packer/builder/alicloud/ecs"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/helper/config"
"github.com/mitchellh/packer/packer"
"github.com/mitchellh/packer/template/interpolate"
"log"
"strconv"
"strings"
"time"
)
const (
	// BuilderId is stamped on artifacts produced by this post-processor.
	BuilderId = "packer.post-processor.alicloud-import"

	// OSSSuffix is the prefix carried by OSS region identifiers
	// (e.g. "oss-cn-hangzhou"); the name "Suffix" is historical.
	OSSSuffix = "oss-"

	// Accepted source image file suffixes emitted by builders.
	RAWFileFormat = "raw"
	VHDFileFormat = "vhd"

	// BUSINESSINFO is reported to the ECS client for request attribution.
	BUSINESSINFO = "packer"

	// AliyunECSImageImportDefaultRolePolicy is the assume-role policy
	// document for the AliyunECSImageImportDefaultRole RAM role, allowing
	// the ECS service to assume it during image import.
	AliyunECSImageImportDefaultRolePolicy = `{
  "Statement": [
    {
      "Action": "sts:AssumeRole",
      "Effect": "Allow",
      "Principal": {
        "Service": [
          "ecs.aliyuncs.com"
        ]
      }
    }
  ],
  "Version": "1"
}`
)
// Config is the configuration of this post-processor.
type Config struct {
	common.PackerConfig `mapstructure:",squash"`
	packerecs.Config    `mapstructure:",squash"`

	// Variables specific to this post-processor.
	OSSBucket                       string            `mapstructure:"oss_bucket_name"`
	OSSKey                          string            `mapstructure:"oss_key_name"`
	SkipClean                       bool              `mapstructure:"skip_clean"`
	Tags                            map[string]string `mapstructure:"tags"`
	AlicloudImageName               string            `mapstructure:"image_name"`
	AlicloudImageVersion            string            `mapstructure:"image_version"`
	AlicloudImageDescription        string            `mapstructure:"image_description"`
	AlicloudImageShareAccounts      []string          `mapstructure:"image_share_account"`
	AlicloudImageDestinationRegions []string          `mapstructure:"image_copy_regions"`
	OSType                          string            `mapstructure:"image_os_type"`
	Platform                        string            `mapstructure:"image_platform"`
	Architecture                    string            `mapstructure:"image_architecture"`
	Size                            string            `mapstructure:"image_system_size"`
	Format                          string            `mapstructure:"format"`
	// NOTE(review): "Detele" misspells "Delete"; kept because the name is
	// referenced elsewhere in the package.
	AlicloudImageForceDetele bool `mapstructure:"image_force_delete"`

	ctx interpolate.Context
}
// PostProcessor imports a local RAW/VHD artifact into an Alicloud ECS image
// by uploading it to OSS and calling the ECS ImportImage API.
type PostProcessor struct {
	config            Config
	DiskDeviceMapping []ecs.DiskDeviceMapping
}
// Configure is the entry point for configuration parsing when the
// post-processor is declared in a template. It decodes raws into p.config
// and validates the required settings, returning a packer.MultiError that
// aggregates every problem found.
func (p *PostProcessor) Configure(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{
				"oss_key_name",
			},
		},
	}, raws...)
	if err != nil {
		return err
	}

	errs := new(packer.MultiError)

	// Check oss_key_name parses as a template; rendering happens later,
	// in PostProcess.
	if err = interpolate.Validate(p.config.OSSKey, &p.config.ctx); err != nil {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("Error parsing oss_key_name template: %s", err))
	}

	// Check we have alicloud access variables defined somewhere.
	errs = packer.MultiErrorAppend(errs, p.config.AlicloudAccessConfig.Prepare(&p.config.ctx)...)

	// Define all our required parameters.
	templates := map[string]*string{
		"oss_bucket_name": &p.config.OSSBucket,
	}
	// Check our required parameters are defined.
	for key, ptr := range templates {
		if *ptr == "" {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("%s must be set", key))
		}
	}

	// Anything which was flagged gets returned back up the stack.
	if len(errs.Errors) > 0 {
		return errs
	}

	// Scrub credentials out of the config before logging it.
	log.Println(common.ScrubConfig(p.config, p.config.AlicloudAccessKey, p.config.AlicloudSecretKey))
	return nil
}
// PostProcess uploads the builder's RAW/VHD artifact to OSS, imports it as
// an ECS image (bootstrapping the ECS image-import RAM role on first use),
// waits for the image to become ready, and optionally deletes the uploaded
// OSS object afterwards. It returns an artifact describing the new image.
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	var err error

	// Render this key since we didn't in the configure phase.
	p.config.OSSKey, err = interpolate.Render(p.config.OSSKey, &p.config.ctx)
	if err != nil {
		return nil, false, fmt.Errorf("Error rendering oss_key_name template: %s", err)
	}
	if p.config.OSSKey == "" {
		// Fall back to a generated, roughly unique key name.
		p.config.OSSKey = "Packer_" + strconv.Itoa(time.Now().Nanosecond())
	}
	log.Printf("Rendered oss_key_name as %s", p.config.OSSKey)

	log.Println("Looking for RAW or VHD in artifact")
	// Locate the files output from the builder.
	source := ""
	for _, path := range artifact.Files() {
		if strings.HasSuffix(path, VHDFileFormat) || strings.HasSuffix(path, RAWFileFormat) {
			source = path
			break
		}
	}
	// Hope we found something useful.
	if source == "" {
		return nil, false, fmt.Errorf("No vhd or raw file found in artifact from builder")
	}

	ecsClient, err := p.config.AlicloudAccessConfig.Client()
	if err != nil {
		return nil, false, fmt.Errorf("Failed to connect alicloud ecs %s", err)
	}
	ecsClient.SetBusinessInfo(BUSINESSINFO)

	// Refuse to clobber an existing image of the same name unless the user
	// opted into force-delete.
	images, _, err := ecsClient.DescribeImages(&ecs.DescribeImagesArgs{
		RegionId:  packercommon.Region(p.config.AlicloudRegion),
		ImageName: p.config.AlicloudImageName,
	})
	if err != nil {
		return nil, false, fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err)
	}
	if len(images) > 0 && !p.config.AlicloudImageForceDetele {
		return nil, false, fmt.Errorf("Duplicated image exists, please delete the existing images or set the 'image_force_delete' value as true")
	}

	// Set up the OSS client.
	log.Println("Creating OSS Client")
	client, err := oss.New(getEndPonit(p.config.AlicloudRegion), p.config.AlicloudAccessKey, p.config.AlicloudSecretKey)
	if err != nil {
		return nil, false, fmt.Errorf("Creating oss connection failed: %s", err)
	}
	bucket, err := queryOrCreateBucket(p.config.OSSBucket, client)
	if err != nil {
		return nil, false, fmt.Errorf("Failed to query or create bucket %s: %s", p.config.OSSBucket, err)
	}

	err = bucket.PutObjectFromFile(p.config.OSSKey, source)
	if err != nil {
		return nil, false, fmt.Errorf("Failed to upload image %s: %s", source, err)
	}

	// The duplicate is only deleted after the upload succeeded, so a failed
	// upload leaves the old image intact.
	if len(images) > 0 && p.config.AlicloudImageForceDetele {
		if err = ecsClient.DeleteImage(packercommon.Region(p.config.AlicloudRegion), images[0].ImageId); err != nil {
			return nil, false, fmt.Errorf("Delete duplicated image %s failed", images[0].ImageName)
		}
	}

	diskDeviceMapping := ecs.DiskDeviceMapping{
		Size:      p.config.Size,
		Format:    p.config.Format,
		OSSBucket: p.config.OSSBucket,
		OSSObject: p.config.OSSKey,
	}
	imageImageArgs := &ecs.ImportImageArgs{
		RegionId:     packercommon.Region(p.config.AlicloudRegion),
		ImageName:    p.config.AlicloudImageName,
		ImageVersion: p.config.AlicloudImageVersion,
		Description:  p.config.AlicloudImageDescription,
		Architecture: p.config.Architecture,
		OSType:       p.config.OSType,
		Platform:     p.config.Platform,
	}
	imageImageArgs.DiskDeviceMappings.DiskDeviceMapping = []ecs.DiskDeviceMapping{
		diskDeviceMapping,
	}

	imageId, err := ecsClient.ImportImage(imageImageArgs)
	if err != nil {
		// "NoSetRoletoECSServiceAcount" (API's own spelling) means the RAM
		// role the ECS service assumes for imports is missing or incomplete:
		// create/repair it and retry. Any other failure is fatal.
		if e, ok := err.(*packercommon.Error); ok && e.Code == "NoSetRoletoECSServiceAcount" {
			if err := p.ensureImageImportRole(); err != nil {
				return nil, false, fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err)
			}

			// The role takes a little while to propagate; retry the import.
			for i := 0; i < 10; i++ {
				imageId, err = ecsClient.ImportImage(imageImageArgs)
				if err == nil {
					break
				}
				ne, ok := err.(*packercommon.Error)
				if ok && ne.Code == "NoSetRoletoECSServiceAcount" {
					time.Sleep(5 * time.Second)
					continue
				}
				if ok && (ne.Code == "ImageIsImporting" || ne.Code == "InvalidImageName.Duplicated") {
					// An import is already in flight; fall through and wait
					// for the image to become ready.
					break
				}
				return nil, false, fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err)
			}
		} else {
			return nil, false, fmt.Errorf("Failed to start import from %s/%s: %s", getEndPonit(p.config.OSSBucket), p.config.OSSKey, err)
		}
	}

	// BUG FIX: this error was previously ignored, so a timed-out import was
	// reported as success.
	err = ecsClient.WaitForImageReady(packercommon.Region(p.config.AlicloudRegion),
		imageId, packerecs.ALICLOUD_DEFAULT_LONG_TIMEOUT)
	if err != nil {
		return nil, false, fmt.Errorf("Timeout waiting for image %s to be ready: %s", imageId, err)
	}

	// Add the reported Alicloud image ID to the artifact list.
	log.Printf("Importing created alicloud image ID %s in region %s Finished.", imageId, p.config.AlicloudRegion)
	artifact = &packerecs.Artifact{
		AlicloudImages: map[string]string{
			p.config.AlicloudRegion: imageId,
		},
		BuilderIdValue: BuilderId,
		Client:         ecsClient,
	}

	if !p.config.SkipClean {
		ui.Message(fmt.Sprintf("Deleting import source %s/%s/%s", getEndPonit(p.config.AlicloudRegion), p.config.OSSBucket, p.config.OSSKey))
		if err = bucket.DeleteObject(p.config.OSSKey); err != nil {
			return nil, false, fmt.Errorf("Failed to delete %s/%s/%s: %s", getEndPonit(p.config.AlicloudRegion), p.config.OSSBucket, p.config.OSSKey, err)
		}
	}

	return artifact, false, nil
}

// ensureImageImportRole makes sure the AliyunECSImageImportDefaultRole RAM
// role exists, has the system AliyunECSImageImportRolePolicy attached, and
// carries the assume-role policy document the ECS service expects.
func (p *PostProcessor) ensureImageImportRole() error {
	ramClient := ram.NewClient(p.config.AlicloudAccessKey, p.config.AlicloudSecretKey)
	roleResponse, err := ramClient.GetRole(ram.RoleQueryRequest{
		RoleName: "AliyunECSImageImportDefaultRole",
	})
	if err != nil {
		return err
	}

	if roleResponse.Role.RoleId == "" {
		// Role is missing entirely: create it and attach the system policy.
		if _, err = ramClient.CreateRole(ram.RoleRequest{
			RoleName:                 "AliyunECSImageImportDefaultRole",
			AssumeRolePolicyDocument: AliyunECSImageImportDefaultRolePolicy,
		}); err != nil {
			return err
		}
		if _, err := ramClient.AttachPolicyToRole(ram.AttachPolicyToRoleRequest{
			ram.PolicyRequest{
				PolicyName: "AliyunECSImageImportRolePolicy",
				PolicyType: "System",
			}, "AliyunECSImageImportDefaultRole",
		}); err != nil {
			return err
		}
		return nil
	}

	// Role exists: attach the system policy if it is not already attached,
	// then refresh the assume-role policy document.
	policyListResponse, err := ramClient.ListPoliciesForRole(ram.RoleQueryRequest{
		"AliyunECSImageImportDefaultRole",
	})
	if err != nil {
		return err
	}
	attached := false
	for _, policy := range policyListResponse.Policies.Policy {
		if policy.PolicyName == "AliyunECSImageImportRolePolicy" && policy.PolicyType == "System" {
			attached = true
			break
		}
	}
	if !attached {
		if _, err := ramClient.AttachPolicyToRole(ram.AttachPolicyToRoleRequest{
			ram.PolicyRequest{
				PolicyName: "AliyunECSImageImportRolePolicy",
				PolicyType: "System",
			}, "AliyunECSImageImportDefaultRole",
		}); err != nil {
			return err
		}
	}
	if _, err = ramClient.UpdateRole(
		ram.UpdateRoleRequest{
			RoleName:                    "AliyunECSImageImportDefaultRole",
			NewAssumeRolePolicyDocument: AliyunECSImageImportDefaultRolePolicy,
		}); err != nil {
		return err
	}
	return nil
}
// queryOrCreateBucket returns a handle for bucketName, creating the bucket
// first if it does not exist yet.
func queryOrCreateBucket(bucketName string, client *oss.Client) (*oss.Bucket, error) {
	exists, err := client.IsBucketExist(bucketName)
	if err != nil {
		return nil, err
	}
	if !exists {
		if err := client.CreateBucket(bucketName); err != nil {
			return nil, err
		}
	}
	return client.Bucket(bucketName)
}
// getEndPonit builds the HTTPS OSS endpoint URL for the given region.
// (The misspelled name is kept: it is referenced throughout this package.)
func getEndPonit(region string) string {
	return fmt.Sprintf("https://%s.aliyuncs.com", GetOSSRegion(region))
}
// GetOSSRegion normalizes region to the OSS form by ensuring it carries
// the "oss-" prefix (idempotent: an already-prefixed region is returned
// unchanged).
func GetOSSRegion(region string) string {
	if !strings.HasPrefix(region, OSSSuffix) {
		region = OSSSuffix + region
	}
	return region
}
// GetECSRegion normalizes region to the ECS form by stripping the "oss-"
// prefix if present.
//
// BUG FIX: the original called strings.TrimSuffix, which never removes
// "oss-" because OSSSuffix is a *prefix* (as its own HasPrefix guard
// showed), so prefixed regions were returned unchanged. TrimPrefix also
// makes the explicit HasPrefix check redundant: it returns the string
// unchanged when the prefix is absent.
func GetECSRegion(region string) string {
	return strings.TrimPrefix(region, OSSSuffix)
}

92
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go generated vendored Normal file
View File

@ -0,0 +1,92 @@
package oss
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"hash"
"io"
"net/http"
"sort"
"strings"
)
// headerSorter holds parallel key/value slices used by signHeader to sort
// the "x-oss-" headers lexicographically when building the signature.
type headerSorter struct {
	Keys []string
	Vals []string
}
// signHeader computes the OSS request signature and sets the Authorization
// header directly on the request.
func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
	// Find all the "x-oss-" headers in this request's header.
	temp := make(map[string]string)
	for k, v := range req.Header {
		if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
			temp[strings.ToLower(k)] = v[0]
		}
	}
	hs := newHeaderSorter(temp)

	// Sort the temp by the Ascending Order
	hs.Sort()

	// Get the CanonicalizedOSSHeaders
	canonicalizedOSSHeaders := ""
	for i := range hs.Keys {
		canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
	}

	// Give other parameters values
	date := req.Header.Get(HTTPHeaderDate)
	contentType := req.Header.Get(HTTPHeaderContentType)
	contentMd5 := req.Header.Get(HTTPHeaderContentMD5)

	// HMAC-SHA1 over the canonical string, keyed by the access key secret.
	signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(conn.config.AccessKeySecret))
	io.WriteString(h, signStr)
	signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))

	// Get the final Authorization' string
	authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + signedStr

	// Give the parameter "Authorization" value
	req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
}
// newHeaderSorter builds a headerSorter from the given header map.
// Helper for signHeader.
func newHeaderSorter(m map[string]string) *headerSorter {
	n := len(m)
	sorter := &headerSorter{
		Keys: make([]string, 0, n),
		Vals: make([]string, 0, n),
	}
	for key, val := range m {
		sorter.Keys = append(sorter.Keys, key)
		sorter.Vals = append(sorter.Vals, val)
	}
	return sorter
}
// Sort sorts the keys (and their parallel values) in ascending order.
// Helper for signHeader.
func (hs *headerSorter) Sort() {
	sort.Sort(hs)
}
// Len returns the number of headers being sorted (sort.Interface).
func (hs *headerSorter) Len() int {
	return len(hs.Vals)
}
// Less reports whether the header key at i sorts before the key at j
// (sort.Interface). Plain string comparison in Go is bytewise, so this is
// identical to the previous bytes.Compare on converted slices but avoids
// the two per-call []byte allocations.
func (hs *headerSorter) Less(i, j int) bool {
	return hs.Keys[i] < hs.Keys[j]
}
// Swap exchanges the headers at i and j, keeping keys and values in step
// (sort.Interface).
func (hs *headerSorter) Swap(i, j int) {
	hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
	hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
}

View File

@ -0,0 +1,633 @@
package oss
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/xml"
"hash"
"hash/crc64"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
)
// Bucket implements the operations of object.
type Bucket struct {
	Client     Client // client the bucket handle was obtained from
	BucketName string // name of the bucket
}
//
// PutObject creates a new object; an existing object of the same name is
// overwritten.
//
// objectKey  name of the object to upload: UTF-8 encoded, 1-1023 bytes
//            long, and must not start with "/" or "\".
// reader     io.Reader providing the object's data.
// options    attributes for the object: CacheControl, ContentDisposition,
//            ContentEncoding, Expires, ServerSideEncryption, ObjectACL,
//            Meta. See
//            https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
//
// error      nil on success, otherwise the error.
//
func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error {
	opts := addContentType(options, objectKey)

	request := &PutObjectRequest{
		ObjectKey: objectKey,
		Reader:    reader,
	}
	resp, err := bucket.DoPutObject(request, opts)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return err
}
//
// PutObjectFromFile creates a new object whose content is read from a
// local file.
//
// objectKey  name of the object to upload.
// filePath   local file whose content becomes the object's value.
// options    attributes for the object; see PutObject's options.
//
// error      nil on success, otherwise the error.
//
func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error {
	fd, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer fd.Close()

	opts := addContentType(options, filePath, objectKey)

	request := &PutObjectRequest{
		ObjectKey: objectKey,
		Reader:    fd,
	}
	resp, err := bucket.DoPutObject(request, opts)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return err
}
//
// DoPutObject uploads an object.
//
// request  the upload request.
// options  the upload options.
//
// Response  the response of the upload request.
// error     nil on success, otherwise the error.
//
func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) {
	// Default the Content-Type from the object key if not set explicitly.
	isOptSet, _, _ := isOptionSet(options, HTTPHeaderContentType)
	if !isOptSet {
		options = addContentType(options, request.ObjectKey)
	}

	listener := getProgressListener(options)

	resp, err := bucket.do("PUT", request.ObjectKey, "", "", options, request.Reader, listener)
	if err != nil {
		return nil, err
	}

	// Verify the upload with a CRC check when the client enables it.
	if bucket.getConfig().IsEnableCRC {
		err = checkCRC(resp, "DoPutObject")
		if err != nil {
			return resp, err
		}
	}

	err = checkRespCode(resp.StatusCode, []int{http.StatusOK})

	return resp, err
}
//
// GetObject downloads an object.
//
// objectKey  name of the object to download.
// options    constraints on the object: Range, IfModifiedSince,
//            IfUnmodifiedSince, IfMatch, IfNoneMatch, AcceptEncoding. See
//            https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
//
// io.ReadCloser  reader for the object data; the caller must close it
//                after reading. Only valid when error is nil.
// error          nil on success, otherwise the error.
//
func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) {
	result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
	if err != nil {
		return nil, err
	}
	return result.Response.Body, nil
}
//
// GetObjectToFile downloads an object into a local file.
//
// objectKey  name of the object to download.
// filePath   local file the object's content is written to.
// options    constraints on the object; see GetObject's options.
//
// error      nil on success, otherwise the error.
//
func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error {
	tempFilePath := filePath + TempFileSuffix

	// Read the object's content.
	result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
	if err != nil {
		return err
	}
	defer result.Response.Body.Close()

	// Create the temp file if it does not exist; truncate it if it does.
	fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
	if err != nil {
		return err
	}

	// Save the data to the file.
	_, err = io.Copy(fd, result.Response.Body)
	fd.Close()
	if err != nil {
		return err
	}

	// Compare the CRC values (skipped for range downloads, where the body
	// is only a fragment of the object).
	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
	if bucket.getConfig().IsEnableCRC && !hasRange {
		result.Response.ClientCRC = result.ClientCRC.Sum64()
		err = checkCRC(result.Response, "GetObjectToFile")
		if err != nil {
			os.Remove(tempFilePath)
			return err
		}
	}

	// Move the verified temp file into place.
	return os.Rename(tempFilePath, filePath)
}
//
// DoGetObject downloads an object.
//
// request  the download request.
// options  constraints on the object; see GetObject's options.
//
// GetObjectResult  the result of the download request.
// error            nil on success, otherwise the error.
//
func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) {
	resp, err := bucket.do("GET", request.ObjectKey, "", "", options, nil, nil)
	if err != nil {
		return nil, err
	}

	result := &GetObjectResult{
		Response: resp,
	}

	// crc: only meaningful for whole-object downloads, not range requests.
	var crcCalc hash.Hash64
	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
	if bucket.getConfig().IsEnableCRC && !hasRange {
		crcCalc = crc64.New(crcTable())
		result.ServerCRC = resp.ServerCRC
		result.ClientCRC = crcCalc
	}

	// progress: tee the body through the CRC calculator and the listener.
	listener := getProgressListener(options)

	contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
	resp.Body = ioutil.NopCloser(TeeReader(resp.Body, crcCalc, contentLen, listener, nil))

	return result, nil
}
//
// CopyObject copies an object within the same bucket.
//
// srcObjectKey   source object of the copy.
// destObjectKey  destination object of the copy.
// options        conditions on the source object (CopySourceIfMatch,
//                CopySourceIfNoneMatch, CopySourceIfModifiedSince,
//                CopySourceIfUnmodifiedSince, MetadataDirective) and
//                attributes for the destination object (CacheControl,
//                ContentDisposition, ContentEncoding, Expires,
//                ServerSideEncryption, ObjectACL, Meta). See
//                https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html
//
// error  nil on success, otherwise the error.
//
func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
	var out CopyObjectResult
	options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
	resp, err := bucket.do("PUT", destObjectKey, "", "", options, nil, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	return out, err
}
// CopyObjectTo copies an object of this bucket into another bucket.
//
// destBucketName is the destination bucket.
// destObjectKey  is the destination object key.
// srcObjectKey   is the source object key; the source bucket is Bucket.BucketName.
// options        are the copy options; see CopyObject for details.
//
// It returns nil error on success, otherwise the error.
func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) {
	return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
}
// CopyObjectFrom copies an object from another bucket into this bucket.
//
// srcBucketName is the source bucket, srcObjectKey the source object key.
// destObjectKey is the destination key; the destination bucket is
// Bucket.BucketName. options are the copy options; see CopyObject.
func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
	srcBucket, err := bucket.Client.Bucket(srcBucketName)
	if err != nil {
		return CopyObjectResult{}, err
	}
	// The copy helper is invoked on the source bucket; the receiver's name is
	// the destination.
	return srcBucket.copy(srcObjectKey, bucket.BucketName, destObjectKey, options...)
}
// copy implements a cross-bucket object copy. The receiver is the source
// bucket; destBucketName/destObjectKey name the destination.
func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, options ...Option) (CopyObjectResult, error) {
	var result CopyObjectResult
	opts := append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
	headers := make(map[string]string)
	if err := handleOptions(headers, opts); err != nil {
		return result, err
	}
	resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, "", "", headers, nil, 0, nil)
	if err != nil {
		return result, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &result)
	return result, err
}
// AppendObject uploads data in append mode.
//
// The first append to an object must use position 0; every subsequent append
// must pass the object's current length (also reported by the
// x-oss-next-append-position response header of the previous append).
//
// objectKey      is the object to append to.
// reader         supplies the content to append.
// appendPosition is the offset at which the append starts.
// options        may set the new object's attributes on the first append,
//                e.g. CacheControl, ContentDisposition, ContentEncoding,
//                Expires, ServerSideEncryption, ObjectACL.
//
// It returns the position for the next append (valid when error is nil).
func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) {
	request := &AppendObjectRequest{
		ObjectKey: objectKey,
		Reader:    reader,
		Position:  appendPosition,
	}
	result, err := bucket.DoAppendObject(request, options)
	// DoAppendObject returns a nil result on request failure; dereferencing it
	// unconditionally (as the previous code did) would panic.
	if result == nil {
		return 0, err
	}
	return result.NextPosition, err
}
// DoAppendObject performs an append-upload request.
//
// request carries the object key, reader and append position.
// options are the append-upload options.
//
// It returns an AppendObjectResult with the next append position and the
// server-side CRC (valid when error is nil).
func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) {
	params := "append&position=" + strconv.FormatInt(request.Position, 10)
	headers := make(map[string]string)
	opts := addContentType(options, request.ObjectKey)
	// The previous code called handleOptions twice and ignored its error both
	// times; call it once and propagate a failure.
	if err := handleOptions(headers, opts); err != nil {
		return nil, err
	}

	// Seed the client-side CRC with the caller-supplied running value, if any.
	var initCRC uint64
	isCRCSet, initCRCOpt, _ := isOptionSet(options, initCRC64)
	if isCRCSet {
		initCRC = initCRCOpt.(uint64)
	}

	listener := getProgressListener(options)
	resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, params, headers,
		request.Reader, initCRC, listener)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	nextPosition, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderOssNextAppendPosition), 10, 64)
	result := &AppendObjectResult{
		NextPosition: nextPosition,
		CRC:          resp.ServerCRC,
	}

	// CRC verification is only meaningful when the caller provided the running
	// CRC of the data already appended.
	if bucket.getConfig().IsEnableCRC && isCRCSet {
		if err = checkCRC(resp, "AppendObject"); err != nil {
			return result, err
		}
	}
	return result, nil
}
// DeleteObject removes the given object from the bucket.
//
// objectKey is the object to delete.
// It returns nil on success, otherwise the error.
func (bucket Bucket) DeleteObject(objectKey string) error {
	res, err := bucket.do("DELETE", objectKey, "", "", nil, nil, nil)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusNoContent})
}
// DeleteObjects removes several objects in a single request.
//
// objectKeys lists the objects to delete.
// options may include DeleteObjectsQuiet to switch to quiet mode
// (off by default).
//
// In non-quiet mode the result lists the deleted objects.
func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) {
	out := DeleteObjectsResult{}

	// Build the request XML body.
	dxml := deleteXML{}
	for _, key := range objectKeys {
		dxml.Objects = append(dxml.Objects, DeleteObject{Key: key})
	}
	quiet, _ := findOption(options, deleteObjectsQuiet, false)
	dxml.Quiet = quiet.(bool)

	bs, err := xml.Marshal(dxml)
	if err != nil {
		return out, err
	}
	buffer := bytes.NewBuffer(bs)

	// The batch-delete API requires Content-Type and Content-MD5 headers.
	options = append(options, ContentType(http.DetectContentType(buffer.Bytes())))
	sum := md5.Sum(bs)
	options = append(options, ContentMD5(base64.StdEncoding.EncodeToString(sum[:])))

	resp, err := bucket.do("POST", "", "delete&encoding-type=url", "delete", options, buffer, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	// Quiet mode returns no body worth parsing.
	if dxml.Quiet {
		return out, nil
	}
	if err = xmlUnmarshal(resp.Body, &out); err == nil {
		err = decodeDeleteObjectsResult(&out)
	}
	return out, err
}
// IsObjectExist reports whether the given object exists.
//
// It returns (true, nil) when the object exists, (false, nil) when it does
// not, and a non-nil error when the check itself failed.
func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
	res, err := bucket.ListObjects(Prefix(objectKey), MaxKeys(1))
	if err != nil {
		return false, err
	}
	exists := len(res.Objects) == 1 && res.Objects[0].Key == objectKey
	return exists, nil
}
// ListObjects lists the objects of the bucket, filtered by the given options.
//
// Supported filters: Prefix (key prefix), MaxKeys (page size), Marker (start
// after this key) and Delimiter (groups keys; with "/" this emulates folders,
// sub-"folders" being reported in CommonPrefixes). Combining Prefix, Marker
// and MaxKeys implements pagination. See sample/list_object.go for common
// usage patterns.
//
// The result's Objects field holds the matching objects (valid when error is nil).
func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
	var result ListObjectsResult

	// Keys come back URL-encoded; decodeListObjectsResult undoes this below.
	params, err := handleParams(append(options, EncodingType("url")))
	if err != nil {
		return result, err
	}

	resp, err := bucket.do("GET", "", params, "", nil, nil, nil)
	if err != nil {
		return result, err
	}
	defer resp.Body.Close()

	if err = xmlUnmarshal(resp.Body, &result); err != nil {
		return result, err
	}
	return result, decodeListObjectsResult(&result)
}
// SetObjectMeta replaces the metadata of an object.
//
// objectKey is the target object.
// options may set CacheControl, ContentDisposition, ContentEncoding,
// Expires, ServerSideEncryption and Meta.
//
// Implemented as a same-key copy with MetadataDirective(MetaReplace).
func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
	opts := append(options, MetadataDirective(MetaReplace))
	_, err := bucket.CopyObject(objectKey, objectKey, opts...)
	return err
}
// GetObjectDetailedMeta returns the full header set of an object via HEAD.
//
// objectKey is the object name.
// options may carry constraints (IfModifiedSince, IfUnmodifiedSince, IfMatch,
// IfNoneMatch); when a constraint is not met an error is returned. See
// https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html
//
// The returned http.Header is valid when error is nil.
func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) {
	res, err := bucket.do("HEAD", objectKey, "", "", options, nil, nil)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	return res.Headers, nil
}
// GetObjectMeta returns a lightweight subset of an object's metadata.
//
// Compared with GetObjectDetailedMeta it only reports basic information such
// as ETag, size (the Content-Length header) and last-modified time.
//
// objectKey is the object name; the returned http.Header is valid when
// error is nil.
func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) {
	res, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil, nil)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	return res.Headers, nil
}
// SetObjectACL sets the ACL of an object. Only the bucket owner may call it.
//
// An object ACL takes precedence over the bucket ACL: e.g. a public-read-write
// object inside a private bucket is accessible to everyone. An object that has
// never had an ACL set inherits the bucket ACL.
//
// objectKey is the object whose ACL is set.
// objectACL is one of Private, PublicRead and PublicReadWrite.
func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
	res, err := bucket.do("PUT", objectKey, "acl", "acl", []Option{ObjectACL(objectACL)}, nil, nil)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusOK})
}
// GetObjectACL returns the ACL of an object.
//
// objectKey is the object to query. The result's Acl field holds the
// permission (valid when error is nil).
func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error) {
	var result GetObjectACLResult
	res, err := bucket.do("GET", objectKey, "acl", "acl", nil, nil, nil)
	if err != nil {
		return result, err
	}
	defer res.Body.Close()
	err = xmlUnmarshal(res.Body, &result)
	return result, err
}
// do is the private request helper for object-level operations: it converts
// options to headers and issues the request through the client connection.
func (bucket Bucket) do(method, objectName, urlParams, subResource string, options []Option,
	data io.Reader, listener ProgressListener) (*Response, error) {
	headers := make(map[string]string)
	if err := handleOptions(headers, options); err != nil {
		return nil, err
	}
	return bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
		urlParams, subResource, headers, data, 0, listener)
}
// getConfig returns the configuration of the Client that owns this bucket.
func (bucket Bucket) getConfig() *Config {
	return bucket.Client.Config
}
// addContentType prepends a ContentType option derived from the first key
// whose extension maps to a MIME type; it falls back to
// "application/octet-stream" when no key matches.
func addContentType(options []Option, keys ...string) []Option {
	typ := TypeByExtension("")
	for _, key := range keys {
		if typ = TypeByExtension(key); typ != "" {
			break
		}
	}
	if typ == "" {
		typ = "application/octet-stream"
	}
	return append([]Option{ContentType(typ)}, options...)
}

View File

@ -0,0 +1,748 @@
// Package oss implements functions for access oss service.
// It has two main struct Client and Bucket.
package oss
import (
"bytes"
"encoding/xml"
"io"
"net/http"
"strings"
"time"
)
//
// Client is the entry point of the SDK. Its methods cover bucket-level
// operations such as create/delete bucket and set/get
// acl/lifecycle/referer/logging/website. Object upload and download are done
// through Bucket. Use oss.New to create a Client.
//
type (
	// Client oss client
	Client struct {
		Config *Config // OSS client configuration
		Conn   *Conn   // connection used to send HTTP requests
	}

	// ClientOption client option such as UseCname, Timeout, SecurityToken.
	ClientOption func(*Client)
)
// New creates an OSS Client.
//
// endpoint        is the datacenter access domain of the user's buckets,
//                 e.g. http://oss-cn-hangzhou.aliyuncs.com.
// accessKeyID     is the user identifier.
// accessKeySecret is the user's secret key.
// options         are optional client settings (UseCname, Timeout, ...).
//
// The returned Client is valid when error is nil.
func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) {
	// Configuration with credentials applied on top of the defaults.
	config := getDefaultOssConfig()
	config.Endpoint = endpoint
	config.AccessKeyID = accessKeyID
	config.AccessKeySecret = accessKeySecret

	// URL maker for building request URLs.
	um := &urlMaker{}
	um.Init(config.Endpoint, config.IsCname, config.IsUseProxy)

	// HTTP connection and client.
	conn := &Conn{config: config, url: um}
	client := &Client{
		config,
		conn,
	}

	// Apply the caller's options before the connection is initialized, so
	// they can still influence transport settings.
	for _, option := range options {
		option(client)
	}

	err := conn.init(config, um)
	return client, err
}
// Bucket returns a Bucket handle for the given bucket name.
//
// bucketName is the bucket to operate on; the handle is valid when error is nil.
func (client Client) Bucket(bucketName string) (*Bucket, error) {
	b := &Bucket{
		client,
		bucketName,
	}
	return b, nil
}
// CreateBucket creates a bucket.
//
// bucketName must be globally unique and immutable; it may contain lowercase
// letters, digits and dashes, must start with a lowercase letter or digit and
// be 3-255 bytes long.
// options may set the bucket ACL: ACLPrivate (default), ACLPublicRead or
// ACLPublicReadWrite.
//
// It returns nil on success, otherwise the error.
func (client Client) CreateBucket(bucketName string, options ...Option) error {
	headers := make(map[string]string)
	// The previous code ignored handleOptions' error; propagate it so invalid
	// options fail fast instead of being silently dropped.
	if err := handleOptions(headers, options); err != nil {
		return err
	}
	resp, err := client.do("PUT", bucketName, "", "", headers, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
}
// ListBuckets lists the buckets of the current user.
//
// options filter the listing: Prefix (name prefix), Marker (start after this
// name) and MaxKeys (page size, default 100). See sample program
// list_bucket.go for common usage.
//
// The result is valid when error is nil.
func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
	var result ListBucketsResult
	params, err := handleParams(options)
	if err != nil {
		return result, err
	}
	res, err := client.do("GET", "", params, "", nil, nil)
	if err != nil {
		return result, err
	}
	defer res.Body.Close()
	err = xmlUnmarshal(res.Body, &result)
	return result, err
}
// IsBucketExist reports whether the named bucket exists.
//
// It returns (true, nil) when the bucket exists, (false, nil) when it does
// not, and a non-nil error when the check itself failed.
func (client Client) IsBucketExist(bucketName string) (bool, error) {
	res, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1))
	if err != nil {
		return false, err
	}
	exists := len(res.Buckets) == 1 && res.Buckets[0].Name == bucketName
	return exists, nil
}
// DeleteBucket removes an empty bucket. A non-empty bucket must have its
// objects and uploads cleaned up first.
//
// bucketName is the bucket to delete.
func (client Client) DeleteBucket(bucketName string) error {
	res, err := client.do("DELETE", bucketName, "", "", nil, nil)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusNoContent})
}
// GetBucketLocation returns the datacenter location of a bucket.
//
// For details about endpoints and datacenters see
// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html
//
// bucketName is the bucket to query; the returned string is the location
// (valid when error is nil).
func (client Client) GetBucketLocation(bucketName string) (string, error) {
	res, err := client.do("GET", bucketName, "location", "location", nil, nil)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	var location string
	err = xmlUnmarshal(res.Body, &location)
	return location, err
}
// SetBucketACL sets the access permission of a bucket.
//
// bucketName is the bucket to change.
// bucketACL  is one of ACLPrivate, ACLPublicRead and ACLPublicReadWrite.
func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
	headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)}
	res, err := client.do("PUT", bucketName, "", "", headers, nil)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusOK})
}
// GetBucketACL returns the access permission of a bucket.
//
// bucketName is the bucket to query; the result is valid when error is nil.
func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error) {
	var result GetBucketACLResult
	res, err := client.do("GET", bucketName, "acl", "acl", nil, nil)
	if err != nil {
		return result, err
	}
	defer res.Body.Close()
	err = xmlUnmarshal(res.Body, &result)
	return result, err
}
// SetBucketLifecycle sets the lifecycle configuration of a bucket.
//
// Once lifecycle rules are enabled, OSS periodically deletes the objects
// matching them. Rules may use absolute (year/month/day) or relative (days)
// expiration; see sample/bucket_lifecycle.go for usage and
// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html
// for background.
//
// bucketName is the bucket to configure, rules the lifecycle rules to apply.
func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error {
	bs, err := xml.Marshal(lifecycleXML{Rules: convLifecycleRule(rules)})
	if err != nil {
		return err
	}
	buffer := bytes.NewBuffer(bs)
	headers := map[string]string{HTTPHeaderContentType: http.DetectContentType(buffer.Bytes())}
	res, err := client.do("PUT", bucketName, "lifecycle", "lifecycle", headers, buffer)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusOK})
}
// DeleteBucketLifecycle removes the lifecycle configuration of a bucket.
//
// bucketName is the bucket to clear.
func (client Client) DeleteBucketLifecycle(bucketName string) error {
	res, err := client.do("DELETE", bucketName, "lifecycle", "lifecycle", nil, nil)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusNoContent})
}
// GetBucketLifecycle returns the lifecycle configuration of a bucket.
//
// bucketName is the bucket to query; the result's Rules field lists the
// configured rules (valid when error is nil).
func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleResult, error) {
	var result GetBucketLifecycleResult
	res, err := client.do("GET", bucketName, "lifecycle", "lifecycle", nil, nil)
	if err != nil {
		return result, err
	}
	defer res.Body.Close()
	err = xmlUnmarshal(res.Body, &result)
	return result, err
}
// SetBucketReferer sets the referer whitelist of a bucket and whether
// requests with an empty referer are allowed (hotlink protection).
//
// For example, with a whitelist of http://www.aliyun.com only requests whose
// referer matches it may access the bucket's objects. See
// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html
//
// bucketName        is the bucket to configure.
// referers          is the whitelist; entries support the wildcards "*" and
//                   "?". See sample/bucket_referer.go for usage.
// allowEmptyReferer tells whether requests without a referer are allowed
//                   (true by default on OSS).
func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool) error {
	rxml := RefererXML{AllowEmptyReferer: allowEmptyReferer}
	if referers == nil {
		rxml.RefererList = append(rxml.RefererList, "")
	} else {
		rxml.RefererList = append(rxml.RefererList, referers...)
	}

	bs, err := xml.Marshal(rxml)
	if err != nil {
		return err
	}
	buffer := bytes.NewBuffer(bs)
	headers := map[string]string{HTTPHeaderContentType: http.DetectContentType(buffer.Bytes())}
	res, err := client.do("PUT", bucketName, "referer", "referer", headers, buffer)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusOK})
}
// GetBucketReferer returns the referer whitelist configuration of a bucket.
//
// bucketName is the bucket to query; the result is valid when error is nil.
func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult, error) {
	var result GetBucketRefererResult
	res, err := client.do("GET", bucketName, "referer", "referer", nil, nil)
	if err != nil {
		return result, err
	}
	defer res.Body.Close()
	err = xmlUnmarshal(res.Body, &result)
	return result, err
}
// SetBucketLogging enables or disables access logging for a bucket.
//
// When enabled, OSS writes hourly access-log objects for bucketName into
// targetBucket under targetPrefix. See
// https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
//
// bucketName   is the bucket whose access is logged.
// targetBucket is the bucket receiving the log objects.
// targetPrefix is the object prefix for the logs (empty logs everything).
// isEnable     turns logging on (true) or off (false).
func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string,
	isEnable bool) error {
	var bs []byte
	var err error
	if isEnable {
		lxml := LoggingXML{}
		lxml.LoggingEnabled.TargetBucket = targetBucket
		lxml.LoggingEnabled.TargetPrefix = targetPrefix
		bs, err = xml.Marshal(lxml)
	} else {
		// An empty logging element disables logging.
		bs, err = xml.Marshal(loggingXMLEmpty{})
	}
	if err != nil {
		return err
	}

	buffer := bytes.NewBuffer(bs)
	headers := map[string]string{HTTPHeaderContentType: http.DetectContentType(buffer.Bytes())}
	res, err := client.do("PUT", bucketName, "logging", "logging", headers, buffer)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusOK})
}
// DeleteBucketLogging removes the access-logging configuration of a bucket.
//
// bucketName is the bucket to clear.
func (client Client) DeleteBucketLogging(bucketName string) error {
	res, err := client.do("DELETE", bucketName, "logging", "logging", nil, nil)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusNoContent})
}
// GetBucketLogging returns the access-logging configuration of a bucket.
//
// bucketName is the bucket to query; the result is valid when error is nil.
func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult, error) {
	var result GetBucketLoggingResult
	res, err := client.do("GET", bucketName, "logging", "logging", nil, nil)
	if err != nil {
		return result, err
	}
	defer res.Body.Close()
	err = xmlUnmarshal(res.Body, &result)
	return result, err
}
// SetBucketWebsite configures static-website hosting for a bucket by setting
// its index and error documents. See
// https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
//
// bucketName    is the bucket to configure.
// indexDocument is the index document key.
// errorDocument is the error document key.
func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string) error {
	wxml := WebsiteXML{}
	wxml.IndexDocument.Suffix = indexDocument
	wxml.ErrorDocument.Key = errorDocument

	bs, err := xml.Marshal(wxml)
	if err != nil {
		return err
	}
	buffer := bytes.NewBuffer(bs)
	headers := map[string]string{HTTPHeaderContentType: http.DetectContentType(buffer.Bytes())}
	res, err := client.do("PUT", bucketName, "website", "website", headers, buffer)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusOK})
}
// DeleteBucketWebsite removes the static-website configuration of a bucket.
//
// bucketName is the bucket to clear.
func (client Client) DeleteBucketWebsite(bucketName string) error {
	res, err := client.do("DELETE", bucketName, "website", "website", nil, nil)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusNoContent})
}
// GetBucketWebsite returns the index and error documents configured for a
// bucket's static-website hosting.
//
// bucketName is the bucket to query; the result is valid when error is nil.
func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult, error) {
	var result GetBucketWebsiteResult
	res, err := client.do("GET", bucketName, "website", "website", nil, nil)
	if err != nil {
		return result, err
	}
	defer res.Body.Close()
	err = xmlUnmarshal(res.Body, &result)
	return result, err
}
// SetBucketCORS sets the cross-origin (CORS) rules of a bucket. See
// https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
// and sample/bucket_cors.go for usage.
//
// bucketName is the bucket to configure, corsRules the rules to apply.
func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) error {
	corsxml := CORSXML{}
	for _, rule := range corsRules {
		corsxml.CORSRules = append(corsxml.CORSRules, CORSRule{
			AllowedMethod: rule.AllowedMethod,
			AllowedOrigin: rule.AllowedOrigin,
			AllowedHeader: rule.AllowedHeader,
			ExposeHeader:  rule.ExposeHeader,
			MaxAgeSeconds: rule.MaxAgeSeconds,
		})
	}

	bs, err := xml.Marshal(corsxml)
	if err != nil {
		return err
	}
	buffer := bytes.NewBuffer(bs)
	headers := map[string]string{HTTPHeaderContentType: http.DetectContentType(buffer.Bytes())}
	res, err := client.do("PUT", bucketName, "cors", "cors", headers, buffer)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusOK})
}
// DeleteBucketCORS removes the CORS configuration of a bucket.
// (The previous doc comment wrongly said "Website" — this deletes CORS rules.)
//
// bucketName is the bucket to clear.
func (client Client) DeleteBucketCORS(bucketName string) error {
	res, err := client.do("DELETE", bucketName, "cors", "cors", nil, nil)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return checkRespCode(res.StatusCode, []int{http.StatusNoContent})
}
// GetBucketCORS returns the CORS configuration of a bucket.
//
// bucketName is the bucket to query; the result is valid when error is nil.
func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, error) {
	var result GetBucketCORSResult
	res, err := client.do("GET", bucketName, "cors", "cors", nil, nil)
	if err != nil {
		return result, err
	}
	defer res.Body.Close()
	err = xmlUnmarshal(res.Body, &result)
	return result, err
}
// GetBucketInfo returns general information about a bucket.
//
// bucketName is the bucket to query; the result is valid when error is nil.
func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, error) {
	var result GetBucketInfoResult
	res, err := client.do("GET", bucketName, "bucketInfo", "bucketInfo", nil, nil)
	if err != nil {
		return result, err
	}
	defer res.Body.Close()
	err = xmlUnmarshal(res.Body, &result)
	return result, err
}
// UseCname returns a ClientOption that controls whether the endpoint is
// treated as a CNAME (default false). Setting it re-initializes the URL maker.
func UseCname(isUseCname bool) ClientOption {
	return func(c *Client) {
		c.Config.IsCname = isUseCname
		c.Conn.url.Init(c.Config.Endpoint, c.Config.IsCname, c.Config.IsUseProxy)
	}
}
// Timeout returns a ClientOption that sets the HTTP timeouts.
//
// connectTimeoutSec is the connect timeout in seconds (default 10; 0 means
// no timeout). readWriteTimeout is the read/write timeout in seconds
// (default 20; 0 means no timeout); it also drives the header timeout, and
// the long-transfer timeout is ten times that value.
func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
	return func(c *Client) {
		connect := time.Second * time.Duration(connectTimeoutSec)
		readWrite := time.Second * time.Duration(readWriteTimeout)
		c.Config.HTTPTimeout.ConnectTimeout = connect
		c.Config.HTTPTimeout.ReadWriteTimeout = readWrite
		c.Config.HTTPTimeout.HeaderTimeout = readWrite
		c.Config.HTTPTimeout.LongTimeout = readWrite * 10
	}
}
// SecurityToken returns a ClientOption that sets the STS security token for
// temporary users. Leading/trailing whitespace is stripped.
func SecurityToken(token string) ClientOption {
	return func(c *Client) {
		c.Config.SecurityToken = strings.TrimSpace(token)
	}
}
// EnableMD5 returns a ClientOption that turns upload MD5 verification
// on (true) or off (false).
func EnableMD5(isEnableMD5 bool) ClientOption {
	return func(c *Client) {
		c.Config.IsEnableMD5 = isEnableMD5
	}
}
// MD5ThresholdCalcInMemory returns a ClientOption that sets the upper bound,
// in bytes, for computing upload MD5 in memory (default 16MB). Larger
// payloads use a temporary file for the MD5 computation.
func MD5ThresholdCalcInMemory(threshold int64) ClientOption {
	return func(c *Client) {
		c.Config.MD5Threshold = threshold
	}
}
// EnableCRC returns a ClientOption that turns upload CRC verification
// on (true, the default) or off (false).
func EnableCRC(isEnableCRC bool) ClientOption {
	return func(c *Client) {
		c.Config.IsEnableCRC = isEnableCRC
	}
}
// UserAgent returns a ClientOption that overrides the User-Agent string,
// whose default looks like: aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2).
func UserAgent(userAgent string) ClientOption {
	return func(c *Client) {
		c.Config.UserAgent = userAgent
	}
}
// Proxy returns a ClientOption that routes requests through the given proxy
// server ("host" or "host:port"). No proxy is used by default. Setting it
// re-initializes the URL maker.
func Proxy(proxyHost string) ClientOption {
	return func(c *Client) {
		c.Config.IsUseProxy = true
		c.Config.ProxyHost = proxyHost
		c.Conn.url.Init(c.Config.Endpoint, c.Config.IsCname, c.Config.IsUseProxy)
	}
}
// AuthProxy returns a ClientOption that routes requests through an
// authenticating proxy server. No proxy is used by default.
//
// proxyHost     is the proxy address ("host" or "host:port").
// proxyUser     is the proxy user name.
// proxyPassword is the proxy password.
func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
	return func(c *Client) {
		c.Config.IsUseProxy = true
		c.Config.ProxyHost = proxyHost
		c.Config.IsAuthProxy = true
		c.Config.ProxyUser = proxyUser
		c.Config.ProxyPassword = proxyPassword
		c.Conn.url.Init(c.Config.Endpoint, c.Config.IsCname, c.Config.IsUseProxy)
	}
}
// do is the private request helper for bucket-level operations (no object
// name, no CRC seeding, no progress listener).
func (client Client) do(method, bucketName, urlParams, subResource string,
	headers map[string]string, data io.Reader) (*Response, error) {
	return client.Conn.Do(method, bucketName, "", urlParams, subResource, headers, data, 0, nil)
}

67
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go generated vendored Normal file
View File

@ -0,0 +1,67 @@
package oss
import (
"time"
)
// HTTPTimeout groups the timeout settings applied to HTTP connections.
type HTTPTimeout struct {
	ConnectTimeout   time.Duration // dial timeout (used with net.DialTimeout)
	ReadWriteTimeout time.Duration // per-operation read/write deadline (passed to newTimeoutConn)
	HeaderTimeout    time.Duration // response-header timeout (http.Transport.ResponseHeaderTimeout)
	LongTimeout      time.Duration // extended deadline, presumably for long transfers — see newTimeoutConn
}
// Config holds the OSS client configuration.
type Config struct {
	Endpoint        string      // OSS endpoint address
	AccessKeyID     string      // access key ID
	AccessKeySecret string      // access key secret
	RetryTimes      uint        // retry count on failure (default 5)
	UserAgent       string      // SDK name/version/system information
	IsDebug         bool        // whether debug mode is enabled (default false)
	Timeout         uint        // timeout in seconds (default 60)
	SecurityToken   string      // STS token
	IsCname         bool        // whether the endpoint is a CNAME
	HTTPTimeout     HTTPTimeout // HTTP timeout settings
	IsUseProxy      bool        // whether to use a proxy server
	ProxyHost       string      // proxy server address
	IsAuthProxy     bool        // whether the proxy requires authentication
	ProxyUser       string      // proxy user name
	ProxyPassword   string      // proxy password
	IsEnableMD5     bool        // whether MD5 verification is enabled for uploads
	MD5Threshold    int64       // max bytes for in-memory MD5; larger payloads use a temp file
	IsEnableCRC     bool        // whether CRC64 verification is enabled for uploads
}
// getDefaultOssConfig returns a Config populated with the SDK defaults.
func getDefaultOssConfig() *Config {
	return &Config{
		Endpoint:        "",
		AccessKeyID:     "",
		AccessKeySecret: "",
		RetryTimes:      5,
		IsDebug:         false,
		UserAgent:       userAgent,
		Timeout:         60, // seconds
		SecurityToken:   "",
		IsCname:         false,
		HTTPTimeout: HTTPTimeout{
			ConnectTimeout:   time.Second * 30,  // 30s
			ReadWriteTimeout: time.Second * 60,  // 60s
			HeaderTimeout:    time.Second * 60,  // 60s
			LongTimeout:      time.Second * 300, // 300s
		},
		IsUseProxy:    false,
		ProxyHost:     "",
		IsAuthProxy:   false,
		ProxyUser:     "",
		ProxyPassword: "",
		MD5Threshold:  16 * 1024 * 1024, // 16MB
		IsEnableMD5:   false,
		IsEnableCRC:   true,
	}
}

437
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go generated vendored Normal file
View File

@ -0,0 +1,437 @@
package oss
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/xml"
"fmt"
"hash"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
)
// Conn wraps the HTTP connection used to talk to the OSS service.
type Conn struct {
	config *Config      // client configuration
	url    *urlMaker    // builds request URLs and canonical resources
	client *http.Client // underlying HTTP client
}
// init builds the HTTP transport — dial and read/write timeouts plus an
// optional proxy — and wires the configuration and URL maker into the
// connection.
func (conn *Conn) init(config *Config, urlMaker *urlMaker) error {
	// NOTE(review): timeouts are read from conn.config (set by the caller
	// before init), while the proxy host comes from the config parameter.
	httpTimeOut := conn.config.HTTPTimeout

	transport := &http.Transport{
		Dial: func(netw, addr string) (net.Conn, error) {
			c, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
			if err != nil {
				return nil, err
			}
			return newTimeoutConn(c, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
		},
		ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
	}

	if conn.config.IsUseProxy {
		proxyURL, err := url.Parse(config.ProxyHost)
		if err != nil {
			return err
		}
		transport.Proxy = http.ProxyURL(proxyURL)
	}

	conn.config = config
	conn.url = urlMaker
	conn.client = &http.Client{Transport: transport}
	return nil
}
// Do sends one OSS request: it builds the request URL and the canonicalized
// resource (used for signing) from bucket/object, then delegates to doRequest.
// It returns the parsed response or an error.
func (conn Conn) Do(method, bucketName, objectName, urlParams, subResource string, headers map[string]string,
	data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
	uri := conn.url.getURL(bucketName, objectName, urlParams)
	resource := conn.url.getResource(bucketName, objectName, subResource)
	return conn.doRequest(method, uri, resource, headers, data, initCRC, listener)
}
// doRequest builds the signed HTTP request (body, standard headers, proxy
// auth, signature), sends it, and publishes transfer progress events to
// listener. The statement order matters: headers must be complete before
// signHeader runs.
func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string,
	data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
	method = strings.ToUpper(method)
	// Without a proxy, keep the already-escaped path verbatim via Opaque.
	if !conn.config.IsUseProxy {
		uri.Opaque = uri.Path
	}
	req := &http.Request{
		Method:     method,
		URL:        uri,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     make(http.Header),
		Host:       uri.Host,
	}
	tracker := &readerTracker{completedBytes: 0}
	// handleBody may spool the body into a temp file (for MD5); it is our
	// responsibility to close and remove that file after the request.
	fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
	if fd != nil {
		defer func() {
			fd.Close()
			os.Remove(fd.Name())
		}()
	}
	if conn.config.IsAuthProxy {
		auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
		basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
		req.Header.Set("Proxy-Authorization", basic)
	}
	// Standard headers that participate in the signature.
	date := time.Now().UTC().Format(http.TimeFormat)
	req.Header.Set(HTTPHeaderDate, date)
	req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
	req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
	if conn.config.SecurityToken != "" {
		req.Header.Set(HTTPHeaderOssSecurityToken, conn.config.SecurityToken)
	}
	// Caller-supplied headers override/extend the defaults.
	if headers != nil {
		for k, v := range headers {
			req.Header.Set(k, v)
		}
	}
	conn.signHeader(req, canonicalizedResource)
	// transfer started
	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
	publishProgress(listener, event)
	resp, err := conn.client.Do(req)
	if err != nil {
		// transfer failed
		event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
		publishProgress(listener, event)
		return nil, err
	}
	// transfer completed
	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
	publishProgress(listener, event)
	return conn.handleResponse(resp, crc)
}
// handleBody prepares the request body: derives Content-Length from the
// concrete reader type, optionally computes Content-MD5 (spooling large or
// unknown-length bodies to a temp file), and optionally wraps the reader to
// accumulate a CRC64 and report progress. It returns the temp file (nil when
// none; the caller must close/remove it) and the running CRC hash (nil when
// CRC is disabled).
func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
	listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) {
	var file *os.File
	var crc hash.Hash64
	reader := body
	// Content-Length is only known for these concrete reader types;
	// anything else leaves ContentLength at 0.
	switch v := body.(type) {
	case *bytes.Buffer:
		req.ContentLength = int64(v.Len())
	case *bytes.Reader:
		req.ContentLength = int64(v.Len())
	case *strings.Reader:
		req.ContentLength = int64(v.Len())
	case *os.File:
		req.ContentLength = tryGetFileSize(v)
	case *io.LimitedReader:
		req.ContentLength = int64(v.N)
	}
	req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10))
	// Compute Content-MD5 unless the caller already supplied one.
	if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" {
		md5 := ""
		reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold)
		req.Header.Set(HTTPHeaderContentMD5, md5)
	}
	// Wrap with CRC64 accumulation and progress tracking.
	if reader != nil && conn.config.IsEnableCRC {
		crc = NewCRC(crcTable(), initCRC)
		reader = TeeReader(reader, crc, req.ContentLength, listener, tracker)
	}
	// http.Request.Body requires a ReadCloser; adapt plain readers.
	rc, ok := reader.(io.ReadCloser)
	if !ok && reader != nil {
		rc = ioutil.NopCloser(reader)
	}
	req.Body = rc
	return file, crc
}
func tryGetFileSize(f *os.File) int64 {
fInfo, _ := f.Stat()
return fInfo.Size()
}
// handleResponse converts a raw *http.Response into a *Response: 4xx/5xx
// bodies are decoded as OSS error XML, bodiless 3xx become errors, and 2xx
// responses carry the client/server CRC64 values for later verification.
func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) {
	var cliCRC uint64
	var srvCRC uint64
	statusCode := resp.StatusCode
	if statusCode >= 400 && statusCode <= 505 {
		// 4xx and 5xx indicate that the operation has error occurred
		var respBody []byte
		respBody, err := readResponseBody(resp)
		if err != nil {
			return nil, err
		}
		if len(respBody) == 0 {
			// no error in response body
			err = fmt.Errorf("oss: service returned without a response body (%s)", resp.Status)
		} else {
			// response contains storage service error object, unmarshal
			srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
				resp.Header.Get(HTTPHeaderOssRequestID))
			if errIn != nil {
				// BUG FIX: the old code tested err (always nil at this point),
				// so an unmarshal failure silently produced a zero-valued
				// ServiceError; surface the unmarshal error instead.
				err = errIn
			} else {
				err = srvErr
			}
		}
		return &Response{
			StatusCode: resp.StatusCode,
			Headers:    resp.Header,
			Body:       ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
		}, err
	} else if statusCode >= 300 && statusCode <= 307 {
		// oss use 3xx, but response has no body
		err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
		return &Response{
			StatusCode: resp.StatusCode,
			Headers:    resp.Header,
			Body:       resp.Body,
		}, err
	}
	if conn.config.IsEnableCRC && crc != nil {
		cliCRC = crc.Sum64()
	}
	srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64)
	// 2xx, successful
	return &Response{
		StatusCode: resp.StatusCode,
		Headers:    resp.Header,
		Body:       resp.Body,
		ClientCRC:  cliCRC,
		ServerCRC:  srvCRC,
	}, nil
}
// calcMD5 computes the base64-encoded MD5 of body. Bodies of unknown (0) or
// large (> md5Threshold) length are spooled through a temporary file so they
// can be read twice; the caller must close and remove tempFile when non-nil.
// The returned reader is positioned at the start of the data.
func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) {
	if contentLen == 0 || contentLen > md5Threshold {
		// huge or unknown-length body: use a temporary file
		tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix)
		if err != nil {
			return nil, "", nil, err
		}
		// BUG FIX: these I/O errors used to be silently ignored, which could
		// yield an MD5 of a partially-spooled body.
		if _, err = io.Copy(tempFile, body); err != nil {
			return nil, "", tempFile, err
		}
		if _, err = tempFile.Seek(0, os.SEEK_SET); err != nil {
			return nil, "", tempFile, err
		}
		h := md5.New()
		if _, err = io.Copy(h, tempFile); err != nil {
			return nil, "", tempFile, err
		}
		sum := h.Sum(nil)
		b64 = base64.StdEncoding.EncodeToString(sum[:])
		if _, err = tempFile.Seek(0, os.SEEK_SET); err != nil {
			return nil, "", tempFile, err
		}
		reader = tempFile
	} else {
		// small body: buffer in memory
		var buf []byte
		if buf, err = ioutil.ReadAll(body); err != nil {
			return nil, "", nil, err
		}
		sum := md5.Sum(buf)
		b64 = base64.StdEncoding.EncodeToString(sum[:])
		reader = bytes.NewReader(buf)
	}
	return
}
func readResponseBody(resp *http.Response) ([]byte, error) {
defer resp.Body.Close()
out, err := ioutil.ReadAll(resp.Body)
if err == io.EOF {
err = nil
}
return out, err
}
// serviceErrFromXML unmarshals an OSS error response body into a
// ServiceError, attaching the HTTP status code, request id, and raw body.
func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
	var svcErr ServiceError
	if err := xml.Unmarshal(body, &svcErr); err != nil {
		return svcErr, err
	}
	svcErr.StatusCode = statusCode
	svcErr.RequestID = requestID
	svcErr.RawMessage = string(body)
	return svcErr, nil
}
func xmlUnmarshal(body io.Reader, v interface{}) error {
data, err := ioutil.ReadAll(body)
if err != nil {
return err
}
return xml.Unmarshal(data, v)
}
// Handle http timeout
type timeoutConn struct {
conn net.Conn
timeout time.Duration
longTimeout time.Duration
}
func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn {
conn.SetReadDeadline(time.Now().Add(longTimeout))
return &timeoutConn{
conn: conn,
timeout: timeout,
longTimeout: longTimeout,
}
}
func (c *timeoutConn) Read(b []byte) (n int, err error) {
c.SetReadDeadline(time.Now().Add(c.timeout))
n, err = c.conn.Read(b)
c.SetReadDeadline(time.Now().Add(c.longTimeout))
return n, err
}
func (c *timeoutConn) Write(b []byte) (n int, err error) {
c.SetWriteDeadline(time.Now().Add(c.timeout))
n, err = c.conn.Write(b)
c.SetReadDeadline(time.Now().Add(c.longTimeout))
return n, err
}
func (c *timeoutConn) Close() error {
return c.conn.Close()
}
func (c *timeoutConn) LocalAddr() net.Addr {
return c.conn.LocalAddr()
}
func (c *timeoutConn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
func (c *timeoutConn) SetDeadline(t time.Time) error {
return c.conn.SetDeadline(t)
}
func (c *timeoutConn) SetReadDeadline(t time.Time) error {
return c.conn.SetReadDeadline(t)
}
func (c *timeoutConn) SetWriteDeadline(t time.Time) error {
return c.conn.SetWriteDeadline(t)
}
// UrlMaker - build url and resource
const (
	urlTypeCname  = 1
	urlTypeIP     = 2
	urlTypeAliyun = 3
)
// urlMaker builds request URLs and the canonicalized resource used for signing.
type urlMaker struct {
	Scheme  string // "http" or "https"
	NetLoc  string // host name or IP, with optional port
	Type    int    // urlTypeCname, urlTypeIP or urlTypeAliyun
	IsProxy bool   // whether requests go through a proxy
}
// Init parses the endpoint into scheme + netloc and classifies it as an IP,
// CNAME, or standard aliyun endpoint.
func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
	switch {
	case strings.HasPrefix(endpoint, "http://"):
		um.Scheme = "http"
		um.NetLoc = strings.TrimPrefix(endpoint, "http://")
	case strings.HasPrefix(endpoint, "https://"):
		um.Scheme = "https"
		um.NetLoc = strings.TrimPrefix(endpoint, "https://")
	default:
		// no scheme given: assume plain http
		um.Scheme = "http"
		um.NetLoc = endpoint
	}
	// Strip an optional port before testing whether the host is an IP.
	host, _, err := net.SplitHostPort(um.NetLoc)
	if err != nil {
		host = um.NetLoc
	}
	switch {
	case net.ParseIP(host) != nil:
		um.Type = urlTypeIP
	case isCname:
		um.Type = urlTypeCname
	default:
		um.Type = urlTypeAliyun
	}
	um.IsProxy = isProxy
}
// getURL builds the request URL for the given bucket/object/query params.
func (um urlMaker) getURL(bucket, object, params string) *url.URL {
	// When not behind a proxy, escape the object name ourselves.
	if !um.IsProxy {
		object = url.QueryEscape(object)
	}
	var host, path string
	switch {
	case um.Type == urlTypeCname:
		// CNAME endpoints already address the bucket
		host = um.NetLoc
		path = "/" + object
	case um.Type == urlTypeIP && bucket == "":
		host = um.NetLoc
		path = "/"
	case um.Type == urlTypeIP:
		// IP endpoints carry the bucket in the path
		host = um.NetLoc
		path = fmt.Sprintf("/%s/%s", bucket, object)
	case bucket == "":
		host = um.NetLoc
		path = "/"
	default:
		// standard aliyun endpoint: bucket becomes a subdomain
		host = bucket + "." + um.NetLoc
		path = "/" + object
	}
	return &url.URL{
		Scheme:   um.Scheme,
		Host:     host,
		Path:     path,
		RawQuery: params,
	}
}
// getResource builds the canonicalized resource string used when signing.
func (um urlMaker) getResource(bucketName, objectName, subResource string) string {
	if subResource != "" {
		subResource = "?" + subResource
	}
	if bucketName == "" {
		return fmt.Sprintf("/%s%s", bucketName, subResource)
	}
	return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
}

View File

@ -0,0 +1,89 @@
package oss
import "os"
// ACLType defines the access control level of a bucket or an object.
type ACLType string
const (
	// ACLPrivate private read and private write
	ACLPrivate ACLType = "private"
	// ACLPublicRead public read, private write
	ACLPublicRead ACLType = "public-read"
	// ACLPublicReadWrite public read and public write
	ACLPublicReadWrite ACLType = "public-read-write"
	// ACLDefault default permission; valid for objects only, not buckets
	ACLDefault ACLType = "default"
)
// MetadataDirectiveType controls whether an object COPY reuses the source
// object's metadata or takes caller-supplied metadata.
type MetadataDirectiveType string
const (
	// MetaCopy the destination object uses the source object's metadata
	MetaCopy MetadataDirectiveType = "COPY"
	// MetaReplace the destination object uses the caller-supplied metadata
	MetaReplace MetadataDirectiveType = "REPLACE"
)
// HTTP header names used by the OSS REST protocol (standard headers plus
// the X-Oss-* extension headers).
const (
	HTTPHeaderAcceptEncoding string = "Accept-Encoding"
	HTTPHeaderAuthorization         = "Authorization"
	HTTPHeaderCacheControl          = "Cache-Control"
	HTTPHeaderContentDisposition    = "Content-Disposition"
	HTTPHeaderContentEncoding       = "Content-Encoding"
	HTTPHeaderContentLength         = "Content-Length"
	HTTPHeaderContentMD5            = "Content-MD5"
	HTTPHeaderContentType           = "Content-Type"
	HTTPHeaderContentLanguage       = "Content-Language"
	HTTPHeaderDate                  = "Date"
	HTTPHeaderEtag                  = "ETag"
	HTTPHeaderExpires               = "Expires"
	HTTPHeaderHost                  = "Host"
	HTTPHeaderLastModified          = "Last-Modified"
	HTTPHeaderRange                 = "Range"
	HTTPHeaderLocation              = "Location"
	HTTPHeaderOrigin                = "Origin"
	HTTPHeaderServer                = "Server"
	HTTPHeaderUserAgent             = "User-Agent"
	HTTPHeaderIfModifiedSince       = "If-Modified-Since"
	HTTPHeaderIfUnmodifiedSince     = "If-Unmodified-Since"
	HTTPHeaderIfMatch               = "If-Match"
	HTTPHeaderIfNoneMatch           = "If-None-Match"
	HTTPHeaderOssACL                         = "X-Oss-Acl"
	HTTPHeaderOssMetaPrefix                  = "X-Oss-Meta-"
	HTTPHeaderOssObjectACL                   = "X-Oss-Object-Acl"
	HTTPHeaderOssSecurityToken               = "X-Oss-Security-Token"
	HTTPHeaderOssServerSideEncryption        = "X-Oss-Server-Side-Encryption"
	HTTPHeaderOssCopySource                  = "X-Oss-Copy-Source"
	HTTPHeaderOssCopySourceRange             = "X-Oss-Copy-Source-Range"
	HTTPHeaderOssCopySourceIfMatch           = "X-Oss-Copy-Source-If-Match"
	HTTPHeaderOssCopySourceIfNoneMatch       = "X-Oss-Copy-Source-If-None-Match"
	HTTPHeaderOssCopySourceIfModifiedSince   = "X-Oss-Copy-Source-If-Modified-Since"
	HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since"
	HTTPHeaderOssMetadataDirective           = "X-Oss-Metadata-Directive"
	HTTPHeaderOssNextAppendPosition          = "X-Oss-Next-Append-Position"
	HTTPHeaderOssRequestID                   = "X-Oss-Request-Id"
	HTTPHeaderOssCRC64                       = "X-Oss-Hash-Crc64ecma"
)
// Other SDK-wide constants.
const (
	MaxPartSize = 5 * 1024 * 1024 * 1024 // maximum multipart part size: 5GB
	MinPartSize = 100 * 1024             // minimum multipart part size: 100KB
	FilePermMode = os.FileMode(0664) // default permission for newly created files
	TempFilePrefix = "oss-go-temp-" // prefix for temporary files
	TempFileSuffix = ".temp"        // suffix for temporary files
	CheckpointFileSuffix = ".cp" // suffix for checkpoint files
	Version = "1.3.0" // Go SDK version
)

44
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
package oss
import (
"hash"
"hash/crc64"
)
// digest represents the partial evaluation of a checksum.
type digest struct {
crc uint64
tab *crc64.Table
}
// NewCRC creates a new hash.Hash64 computing the CRC-64 checksum
// using the polynomial represented by the Table.
func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
// Size returns the number of bytes Sum will return.
func (d *digest) Size() int { return crc64.Size }
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int { return 1 }
// Reset resets the Hash to its initial state.
func (d *digest) Reset() { d.crc = 0 }
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (d *digest) Write(p []byte) (n int, err error) {
d.crc = crc64.Update(d.crc, d.tab, p)
return len(p), nil
}
// Sum64 returns crc64 value.
func (d *digest) Sum64() uint64 { return d.crc }
// Sum returns hash value.
func (d *digest) Sum(in []byte) []byte {
s := d.Sum64()
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}

View File

@ -0,0 +1,464 @@
package oss
import (
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"io"
"io/ioutil"
"os"
"strconv"
)
//
// DownloadFile downloads an object to a local file in parts (multipart download).
//
// objectKey    the object key to download.
// filePath     the local file path the object is written to.
// partSize     size in bytes of each download part, e.g. 100 * 1024 for 100KB parts.
// options      constraints on the object; see GetObject.
//
// error        nil on success, otherwise the error that occurred.
//
func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
	if partSize < 1 || partSize > MaxPartSize {
		return errors.New("oss: part size invalid range (1, 5GB]")
	}
	cpConf, err := getCpConfig(options, filePath)
	if err != nil {
		return err
	}
	routines := getRoutines(options)
	// Use the checkpoint-resumable path when checkpointing is enabled.
	if cpConf.IsEnable {
		return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines)
	}
	return bucket.downloadFile(objectKey, filePath, partSize, options, routines)
}
// ----- concurrent download without checkpoint -----
// downloadWorkerArg bundles the parameters shared by download worker goroutines.
type downloadWorkerArg struct {
	bucket   *Bucket
	key      string
	filePath string
	options  []Option
	hook     downloadPartHook
}
// downloadPartHook is a hook invoked before each part download; used for testing.
type downloadPartHook func(part downloadPart) error
var downloadPartHooker downloadPartHook = defaultDownloadPartHook
// defaultDownloadPartHook is the no-op production hook.
func defaultDownloadPartHook(part downloadPart) error {
	return nil
}
// defaultDownloadProgressListener suppresses any ProgressListener supplied in
// GetObject's options for the per-part requests (overall progress is reported
// by the coordinating download function instead).
type defaultDownloadProgressListener struct {
}
// ProgressChanged silently discards per-part progress events.
func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) {
}
// downloadWorker is a worker goroutine: it downloads the parts received on
// jobs, writes each into arg.filePath at its offset, and reports on
// results/failed. It exits when jobs is closed, on the first error, or when
// die is closed.
func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) {
	for part := range jobs {
		if err := arg.hook(part); err != nil {
			failed <- err
			break
		}
		// Append Range/Progress AFTER the caller's options so they take
		// precedence. BUG FIX: the old code used make([]Option, n+2) and then
		// appended, which left n+2 nil Options at the front of the slice.
		opts := make([]Option, 0, len(arg.options)+2)
		opts = append(opts, arg.options...)
		opts = append(opts, Range(part.Start, part.End), Progress(&defaultDownloadProgressListener{}))
		rd, err := arg.bucket.GetObject(arg.key, opts...)
		if err != nil {
			failed <- err
			break
		}
		select {
		case <-die:
			rd.Close()
			return
		default:
		}
		// BUG FIX: closing via a helper instead of defer-in-loop — the old
		// defers accumulated until the worker exited, leaking file handles
		// for long part lists.
		err = writeDownloadPart(arg.filePath, part, rd)
		rd.Close()
		if err != nil {
			failed <- err
			break
		}
		results <- part
	}
}
// writeDownloadPart copies one downloaded part into filePath at the part's offset.
func writeDownloadPart(filePath string, part downloadPart, rd io.Reader) error {
	fd, err := os.OpenFile(filePath, os.O_WRONLY, FilePermMode)
	if err != nil {
		return err
	}
	defer fd.Close()
	if _, err := fd.Seek(part.Start, os.SEEK_SET); err != nil {
		return err
	}
	_, err = io.Copy(fd, rd)
	return err
}
// downloadScheduler feeds every part into jobs and closes the channel when done.
func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
	defer close(jobs)
	for i := range parts {
		jobs <- parts[i]
	}
}
// downloadPart describes one part of a multipart download.
type downloadPart struct {
	Index int   // part sequence number, starting at 0
	Start int64 // offset of the part's first byte
	End   int64 // offset of the part's last byte (inclusive)
}
// getDownloadParts splits the remote object into partSize-byte download parts,
// using the object's Content-Length from its metadata.
func getDownloadParts(bucket *Bucket, objectKey string, partSize int64) ([]downloadPart, error) {
	meta, err := bucket.GetObjectDetailedMeta(objectKey)
	if err != nil {
		return nil, err
	}
	parts := []downloadPart{}
	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
	if err != nil {
		return nil, err
	}
	// part is a value type, so appending copies it on every iteration
	part := downloadPart{}
	i := 0
	for offset := int64(0); offset < objectSize; offset += partSize {
		part.Index = i
		part.Start = offset
		part.End = GetPartEnd(offset, objectSize, partSize)
		parts = append(parts, part)
		i++
	}
	return parts, nil
}
// getObjectBytes sums the total byte count covered by parts.
func getObjectBytes(parts []downloadPart) int64 {
	var total int64
	for i := range parts {
		total += parts[i].End - parts[i].Start + 1
	}
	return total
}
// downloadFile performs a concurrent multipart download without checkpointing:
// it writes parts into filePath+TempFileSuffix and renames on completion.
func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
	tempFilePath := filePath + TempFileSuffix
	listener := getProgressListener(options)
	// Create the temp file if absent; do not truncate — each part writer
	// seeks to its own range and rewrites it.
	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
	if err != nil {
		return err
	}
	fd.Close()
	// split the object into parts
	parts, err := getDownloadParts(&bucket, objectKey, partSize)
	if err != nil {
		return err
	}
	jobs := make(chan downloadPart, len(parts))
	results := make(chan downloadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)
	var completedBytes int64
	totalBytes := getObjectBytes(parts)
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
	publishProgress(listener, event)
	// start the worker goroutines
	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker}
	for w := 1; w <= routines; w++ {
		go downloadWorker(w, arg, jobs, results, failed, die)
	}
	// feed parts to the workers concurrently
	go downloadScheduler(jobs, parts)
	// wait until every part finishes, or bail out on the first failure
	completed := 0
	ps := make([]downloadPart, len(parts))
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			ps[part.Index] = part
			completedBytes += (part.End - part.Start + 1)
			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
			publishProgress(listener, event)
		case err := <-failed:
			// signal the remaining workers to stop
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
			publishProgress(listener, event)
			return err
		}
		if completed >= len(parts) {
			break
		}
	}
	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
	publishProgress(listener, event)
	return os.Rename(tempFilePath, filePath)
}
// ----- concurrent download with checkpoint (resumable) -----
const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
// downloadCheckpoint is the state persisted to disk for a resumable download.
type downloadCheckpoint struct {
	Magic    string         // magic marker identifying a download checkpoint
	MD5      string         // MD5 of the checkpoint content (integrity check)
	FilePath string         // local destination file
	Object   string         // object key
	ObjStat  objectStat     // snapshot of the remote object's metadata
	Parts    []downloadPart // all parts
	PartStat []bool         // whether each part has completed
}
// objectStat snapshots the remote object's metadata so a stale checkpoint
// can be detected when the object changes.
type objectStat struct {
	Size         int64  // size in bytes
	LastModified string // last-modified time
	Etag         string // etag
}
// isValid reports whether the checkpoint can be resumed: its magic and
// content MD5 must match, and the remote object must be unchanged.
func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) {
	// Verify the checkpoint's magic and MD5 (computed over the struct with
	// the MD5 field blanked, matching dump).
	cpb := cp
	cpb.MD5 = ""
	js, _ := json.Marshal(cpb)
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])
	if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
		return false, nil
	}
	// Confirm the object has not been updated since the checkpoint was taken.
	meta, err := bucket.GetObjectDetailedMeta(objectKey)
	if err != nil {
		return false, err
	}
	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
	if err != nil {
		return false, err
	}
	// Compare size / last-modified / etag against the snapshot.
	if cp.ObjStat.Size != objectSize ||
		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
		return false, nil
	}
	return true, nil
}
// load reads the JSON checkpoint state from filePath into cp.
func (cp *downloadCheckpoint) load(filePath string) error {
	contents, err := ioutil.ReadFile(filePath)
	if err != nil {
		return err
	}
	return json.Unmarshal(contents, cp)
}
// dump serializes the checkpoint to filePath, embedding an MD5 of the
// MD5-less serialization so isValid can later detect corruption.
func (cp *downloadCheckpoint) dump(filePath string) error {
	bcp := *cp
	// compute the MD5 over the struct with the MD5 field blanked
	bcp.MD5 = ""
	js, err := json.Marshal(bcp)
	if err != nil {
		return err
	}
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])
	bcp.MD5 = b64
	// re-serialize with the MD5 filled in
	js, err = json.Marshal(bcp)
	if err != nil {
		return err
	}
	// write to disk
	return ioutil.WriteFile(filePath, js, FilePermMode)
}
// todoParts returns the parts that have not finished downloading yet.
func (cp downloadCheckpoint) todoParts() []downloadPart {
	remaining := []downloadPart{}
	for i, done := range cp.PartStat {
		if !done {
			remaining = append(remaining, cp.Parts[i])
		}
	}
	return remaining
}
// getCompletedBytes sums the byte count of all finished parts.
func (cp downloadCheckpoint) getCompletedBytes() int64 {
	var total int64
	for i := range cp.Parts {
		if cp.PartStat[i] {
			total += cp.Parts[i].End - cp.Parts[i].Start + 1
		}
	}
	return total
}
// prepare initializes the checkpoint for a fresh download task: it stamps the
// checkpoint identity, snapshots the remote object's metadata (so isValid can
// detect later changes), and splits the object into parts.
func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string, partSize int64) error {
	// checkpoint identity
	cp.Magic = downloadCpMagic
	cp.FilePath = filePath
	cp.Object = objectKey
	// snapshot the object's size / last-modified / etag
	meta, err := bucket.GetObjectDetailedMeta(objectKey)
	if err != nil {
		return err
	}
	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
	if err != nil {
		return err
	}
	cp.ObjStat.Size = objectSize
	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
	// split into parts
	cp.Parts, err = getDownloadParts(bucket, objectKey, partSize)
	if err != nil {
		return err
	}
	// make already zeroes a []bool, so the previous explicit
	// false-initialization loop was redundant and has been removed
	cp.PartStat = make([]bool, len(cp.Parts))
	return nil
}
// complete finalizes a resumable download: it removes the checkpoint file
// (best effort — the Remove error is deliberately ignored) and moves the
// temporary download file to its final destination.
func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
	os.Remove(cpFilePath)
	return os.Rename(downFilepath, cp.FilePath)
}
// downloadFileWithCp performs a concurrent multipart download that can be
// resumed via a checkpoint file.
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
	tempFilePath := filePath + TempFileSuffix
	listener := getProgressListener(options)
	// Load checkpoint data; discard the file when it cannot be read.
	dcp := downloadCheckpoint{}
	err := dcp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}
	// When loading failed or the data is stale, reinitialize the download.
	valid, err := dcp.isValid(&bucket, objectKey)
	if err != nil || !valid {
		if err = dcp.prepare(&bucket, objectKey, filePath, partSize); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}
	// Create the temp file if absent; do not truncate — part writers seek
	// and rewrite only their own ranges.
	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
	if err != nil {
		return err
	}
	fd.Close()
	// parts still to download
	parts := dcp.todoParts()
	jobs := make(chan downloadPart, len(parts))
	results := make(chan downloadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)
	completedBytes := dcp.getCompletedBytes()
	event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size)
	publishProgress(listener, event)
	// start the worker goroutines
	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker}
	for w := 1; w <= routines; w++ {
		go downloadWorker(w, arg, jobs, results, failed, die)
	}
	// feed parts to the workers concurrently
	go downloadScheduler(jobs, parts)
	// wait until every remaining part finishes, or bail out on the first failure
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			dcp.PartStat[part.Index] = true
			// persist progress after every part so a crash can resume here
			dcp.dump(cpFilePath)
			completedBytes += (part.End - part.Start + 1)
			event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size)
			publishProgress(listener, event)
			return err
		}
		if completed >= len(parts) {
			break
		}
	}
	event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size)
	publishProgress(listener, event)
	return dcp.complete(cpFilePath, tempFilePath)
}

View File

@ -0,0 +1,82 @@
package oss
import (
"encoding/xml"
"fmt"
"net/http"
"strings"
)
// ServiceError contains fields of the error response from Oss Service REST API.
type ServiceError struct {
	XMLName    xml.Name `xml:"Error"`
	Code       string   `xml:"Code"`      // error code returned by OSS
	Message    string   `xml:"Message"`   // detailed error message from OSS
	RequestID  string   `xml:"RequestId"` // UUID uniquely identifying the request
	HostID     string   `xml:"HostId"`    // identifies the OSS cluster accessed
	RawMessage string   // raw response body returned by OSS
	StatusCode int      // HTTP status code
}
// Error implements the error interface.
func (e ServiceError) Error() string {
	return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s",
		e.StatusCode, e.Code, e.Message, e.RequestID)
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
	allowed []int // the HTTP status codes the caller expected
	got     int   // the status code OSS actually returned
}
// Error implements the error interface.
func (e UnexpectedStatusCodeError) Error() string {
	text := func(code int) string { return fmt.Sprintf("%d %s", code, http.StatusText(code)) }
	expected := make([]string, 0, len(e.allowed))
	for _, code := range e.allowed {
		expected = append(expected, text(code))
	}
	return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
		text(e.got), strings.Join(expected, " or "))
}
// Got is the actual status code returned by oss.
func (e UnexpectedStatusCodeError) Got() int {
	return e.got
}
// checkRespCode returns UnexpectedStatusError if the given response code is not
// one of the allowed status codes; otherwise nil.
func checkRespCode(respCode int, allowed []int) error {
	for _, code := range allowed {
		if code == respCode {
			return nil
		}
	}
	return UnexpectedStatusCodeError{allowed, respCode}
}
// CRCCheckError is returned when crc check is inconsistent between client and server
type CRCCheckError struct {
	clientCRC uint64 // CRC64 computed by the client
	serverCRC uint64 // CRC64 computed by the server
	operation string // the upload operation: PutObject/AppendObject/UploadPart, etc.
	requestID string // request id of the failing operation
}
// Error implements the error interface.
func (e CRCCheckError) Error() string {
	return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
		e.operation, e.clientCRC, e.serverCRC, e.requestID)
}
// checkCRC verifies client/server CRC64 agreement for operation; it returns
// nil when the server sent no CRC header or when the values match.
func checkCRC(resp *Response, operation string) error {
	serverSum := resp.Headers.Get(HTTPHeaderOssCRC64)
	if serverSum == "" || resp.ClientCRC == resp.ServerCRC {
		return nil
	}
	return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)}
}

245
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go generated vendored Normal file
View File

@ -0,0 +1,245 @@
package oss
import (
"mime"
"path"
"strings"
)
var extToMimeType = map[string]string{
".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
".apk": "application/vnd.android.package-archive",
".hqx": "application/mac-binhex40",
".cpt": "application/mac-compactpro",
".doc": "application/msword",
".ogg": "application/ogg",
".pdf": "application/pdf",
".rtf": "text/rtf",
".mif": "application/vnd.mif",
".xls": "application/vnd.ms-excel",
".ppt": "application/vnd.ms-powerpoint",
".odc": "application/vnd.oasis.opendocument.chart",
".odb": "application/vnd.oasis.opendocument.database",
".odf": "application/vnd.oasis.opendocument.formula",
".odg": "application/vnd.oasis.opendocument.graphics",
".otg": "application/vnd.oasis.opendocument.graphics-template",
".odi": "application/vnd.oasis.opendocument.image",
".odp": "application/vnd.oasis.opendocument.presentation",
".otp": "application/vnd.oasis.opendocument.presentation-template",
".ods": "application/vnd.oasis.opendocument.spreadsheet",
".ots": "application/vnd.oasis.opendocument.spreadsheet-template",
".odt": "application/vnd.oasis.opendocument.text",
".odm": "application/vnd.oasis.opendocument.text-master",
".ott": "application/vnd.oasis.opendocument.text-template",
".oth": "application/vnd.oasis.opendocument.text-web",
".sxw": "application/vnd.sun.xml.writer",
".stw": "application/vnd.sun.xml.writer.template",
".sxc": "application/vnd.sun.xml.calc",
".stc": "application/vnd.sun.xml.calc.template",
".sxd": "application/vnd.sun.xml.draw",
".std": "application/vnd.sun.xml.draw.template",
".sxi": "application/vnd.sun.xml.impress",
".sti": "application/vnd.sun.xml.impress.template",
".sxg": "application/vnd.sun.xml.writer.global",
".sxm": "application/vnd.sun.xml.math",
".sis": "application/vnd.symbian.install",
".wbxml": "application/vnd.wap.wbxml",
".wmlc": "application/vnd.wap.wmlc",
".wmlsc": "application/vnd.wap.wmlscriptc",
".bcpio": "application/x-bcpio",
".torrent": "application/x-bittorrent",
".bz2": "application/x-bzip2",
".vcd": "application/x-cdlink",
".pgn": "application/x-chess-pgn",
".cpio": "application/x-cpio",
".csh": "application/x-csh",
".dvi": "application/x-dvi",
".spl": "application/x-futuresplash",
".gtar": "application/x-gtar",
".hdf": "application/x-hdf",
".jar": "application/x-java-archive",
".jnlp": "application/x-java-jnlp-file",
".js": "application/x-javascript",
".ksp": "application/x-kspread",
".chrt": "application/x-kchart",
".kil": "application/x-killustrator",
".latex": "application/x-latex",
".rpm": "application/x-rpm",
".sh": "application/x-sh",
".shar": "application/x-shar",
".swf": "application/x-shockwave-flash",
".sit": "application/x-stuffit",
".sv4cpio": "application/x-sv4cpio",
".sv4crc": "application/x-sv4crc",
".tar": "application/x-tar",
".tcl": "application/x-tcl",
".tex": "application/x-tex",
".man": "application/x-troff-man",
".me": "application/x-troff-me",
".ms": "application/x-troff-ms",
".ustar": "application/x-ustar",
".src": "application/x-wais-source",
".zip": "application/zip",
".m3u": "audio/x-mpegurl",
".ra": "audio/x-pn-realaudio",
".wav": "audio/x-wav",
".wma": "audio/x-ms-wma",
".wax": "audio/x-ms-wax",
".pdb": "chemical/x-pdb",
".xyz": "chemical/x-xyz",
".bmp": "image/bmp",
".gif": "image/gif",
".ief": "image/ief",
".png": "image/png",
".wbmp": "image/vnd.wap.wbmp",
".ras": "image/x-cmu-raster",
".pnm": "image/x-portable-anymap",
".pbm": "image/x-portable-bitmap",
".pgm": "image/x-portable-graymap",
".ppm": "image/x-portable-pixmap",
".rgb": "image/x-rgb",
".xbm": "image/x-xbitmap",
".xpm": "image/x-xpixmap",
".xwd": "image/x-xwindowdump",
".css": "text/css",
".rtx": "text/richtext",
".tsv": "text/tab-separated-values",
".jad": "text/vnd.sun.j2me.app-descriptor",
".wml": "text/vnd.wap.wml",
".wmls": "text/vnd.wap.wmlscript",
".etx": "text/x-setext",
".mxu": "video/vnd.mpegurl",
".flv": "video/x-flv",
".wm": "video/x-ms-wm",
".wmv": "video/x-ms-wmv",
".wmx": "video/x-ms-wmx",
".wvx": "video/x-ms-wvx",
".avi": "video/x-msvideo",
".movie": "video/x-sgi-movie",
".ice": "x-conference/x-cooltalk",
".3gp": "video/3gpp",
".ai": "application/postscript",
".aif": "audio/x-aiff",
".aifc": "audio/x-aiff",
".aiff": "audio/x-aiff",
".asc": "text/plain",
".atom": "application/atom+xml",
".au": "audio/basic",
".bin": "application/octet-stream",
".cdf": "application/x-netcdf",
".cgm": "image/cgm",
".class": "application/octet-stream",
".dcr": "application/x-director",
".dif": "video/x-dv",
".dir": "application/x-director",
".djv": "image/vnd.djvu",
".djvu": "image/vnd.djvu",
".dll": "application/octet-stream",
".dmg": "application/octet-stream",
".dms": "application/octet-stream",
".dtd": "application/xml-dtd",
".dv": "video/x-dv",
".dxr": "application/x-director",
".eps": "application/postscript",
".exe": "application/octet-stream",
".ez": "application/andrew-inset",
".gram": "application/srgs",
".grxml": "application/srgs+xml",
".gz": "application/x-gzip",
".htm": "text/html",
".html": "text/html",
".ico": "image/x-icon",
".ics": "text/calendar",
".ifb": "text/calendar",
".iges": "model/iges",
".igs": "model/iges",
".jp2": "image/jp2",
".jpe": "image/jpeg",
".jpeg": "image/jpeg",
".jpg": "image/jpeg",
".kar": "audio/midi",
".lha": "application/octet-stream",
".lzh": "application/octet-stream",
".m4a": "audio/mp4a-latm",
".m4p": "audio/mp4a-latm",
".m4u": "video/vnd.mpegurl",
".m4v": "video/x-m4v",
".mac": "image/x-macpaint",
".mathml": "application/mathml+xml",
".mesh": "model/mesh",
".mid": "audio/midi",
".midi": "audio/midi",
".mov": "video/quicktime",
".mp2": "audio/mpeg",
".mp3": "audio/mpeg",
".mp4": "video/mp4",
".mpe": "video/mpeg",
".mpeg": "video/mpeg",
".mpg": "video/mpeg",
".mpga": "audio/mpeg",
".msh": "model/mesh",
".nc": "application/x-netcdf",
".oda": "application/oda",
".ogv": "video/ogv",
".pct": "image/pict",
".pic": "image/pict",
".pict": "image/pict",
".pnt": "image/x-macpaint",
".pntg": "image/x-macpaint",
".ps": "application/postscript",
".qt": "video/quicktime",
".qti": "image/x-quicktime",
".qtif": "image/x-quicktime",
".ram": "audio/x-pn-realaudio",
".rdf": "application/rdf+xml",
".rm": "application/vnd.rn-realmedia",
".roff": "application/x-troff",
".sgm": "text/sgml",
".sgml": "text/sgml",
".silo": "model/mesh",
".skd": "application/x-koan",
".skm": "application/x-koan",
".skp": "application/x-koan",
".skt": "application/x-koan",
".smi": "application/smil",
".smil": "application/smil",
".snd": "audio/basic",
".so": "application/octet-stream",
".svg": "image/svg+xml",
".t": "application/x-troff",
".texi": "application/x-texinfo",
".texinfo": "application/x-texinfo",
".tif": "image/tiff",
".tiff": "image/tiff",
".tr": "application/x-troff",
".txt": "text/plain",
".vrml": "model/vrml",
".vxml": "application/voicexml+xml",
".webm": "video/webm",
".wrl": "model/vrml",
".xht": "application/xhtml+xml",
".xhtml": "application/xhtml+xml",
".xml": "application/xml",
".xsl": "application/xml",
".xslt": "application/xslt+xml",
".xul": "application/vnd.mozilla.xul+xml",
}
// TypeByExtension returns the MIME type associated with the extension of
// filePath. It first consults the platform MIME database and falls back to
// the package-level extToMimeType table for extensions the OS does not know.
// Used to derive the ContentType option for uploads.
func TypeByExtension(filePath string) string {
	ext := path.Ext(filePath)
	if typ := mime.TypeByExtension(ext); typ != "" {
		return typ
	}
	return extToMimeType[strings.ToLower(ext)]
}

View File

@ -0,0 +1,60 @@
package oss
import (
"hash"
"io"
"net/http"
)
// Response wraps an HTTP response from OSS together with the CRC64
// checksums used for end-to-end data integrity checking.
type Response struct {
	StatusCode int           // HTTP status code
	Headers    http.Header   // HTTP response headers
	Body       io.ReadCloser // response body; the caller is responsible for closing it
	ClientCRC  uint64        // CRC64 computed on the client side
	ServerCRC  uint64        // CRC64 reported by the server
}

// PutObjectRequest is the input of DoPutObject.
type PutObjectRequest struct {
	ObjectKey string    // object name
	Reader    io.Reader // data source of the object
}

// GetObjectRequest is the input of DoGetObject.
type GetObjectRequest struct {
	ObjectKey string // object name
}

// GetObjectResult is the result of DoGetObject.
type GetObjectResult struct {
	Response  *Response   // raw HTTP response
	ClientCRC hash.Hash64 // running CRC64 of the data read by the client
	ServerCRC uint64      // CRC64 reported by the server
}

// AppendObjectRequest is the input of DoAppendObject.
type AppendObjectRequest struct {
	ObjectKey string    // object name
	Reader    io.Reader // data to append
	Position  int64     // offset at which the append starts
}

// AppendObjectResult is the result of DoAppendObject.
type AppendObjectResult struct {
	NextPosition int64  // position for the next append
	CRC          uint64 // CRC64 checksum
}

// UploadPartRequest is the input of DoUploadPart.
type UploadPartRequest struct {
	InitResult *InitiateMultipartUploadResult // result of InitiateMultipartUpload
	Reader     io.Reader                      // data source of the part
	PartSize   int64                          // size of the part in bytes
	PartNumber int                            // part number, in [1, 10000]
}

// UploadPartResult is the result of DoUploadPart.
type UploadPartResult struct {
	Part UploadPart // uploaded part (part number and ETag)
}

View File

@ -0,0 +1,461 @@
package oss
import (
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"io/ioutil"
"os"
"path/filepath"
"strconv"
)
//
// CopyFile copies a (possibly large) object in parts, concurrently.
//
// srcBucketName    source bucket name.
// srcObjectKey     source object name.
// destObjectKey    destination object name; the destination bucket is Bucket.BucketName.
// partSize         size in bytes of each copied part, e.g. 100 * 1024 for 100KB parts.
// options          attributes of the destination object; see InitiateMultipartUpload.
//
// error    nil on success, otherwise the error that occurred.
//
func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
	destBucketName := bucket.BucketName
	// Reject part sizes outside the range accepted for multipart copy.
	if partSize < MinPartSize || partSize > MaxPartSize {
		return errors.New("oss: part size invalid range (1024KB, 5GB]")
	}
	// Checkpoint (resumable copy) configuration, taken from the options.
	cpConf, err := getCpConfig(options, filepath.Base(destObjectKey))
	if err != nil {
		return err
	}
	// Number of concurrent copy routines, taken from the options.
	routines := getRoutines(options)
	if cpConf.IsEnable {
		// Resumable copy driven by a checkpoint file.
		return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
			partSize, options, cpConf.FilePath, routines)
	}
	// Plain concurrent copy without checkpoint support.
	return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
		partSize, options, routines)
}
// ----- concurrent copy without checkpoint -----

// copyWorkerArg bundles the parameters shared by all copy worker goroutines.
type copyWorkerArg struct {
	bucket        *Bucket                       // destination bucket handle
	imur          InitiateMultipartUploadResult // multipart upload to copy into
	srcBucketName string                        // source bucket name
	srcObjectKey  string                        // source object name
	options       []Option                      // options forwarded to UploadPartCopy
	hook          copyPartHook                  // hook invoked before each part copy (testing)
}

// copyPartHook is a per-part hook used by tests to inject failures.
type copyPartHook func(part copyPart) error

var copyPartHooker copyPartHook = defaultCopyPartHook

// defaultCopyPartHook is the no-op production hook.
func defaultCopyPartHook(part copyPart) error {
	return nil
}
// copyWorker copies the parts received on jobs by calling UploadPartCopy,
// sending successful results on results and the first error on failed.
// It exits when jobs is closed, after an error, or once die is closed.
func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
	for chunk := range jobs {
		if err := arg.hook(chunk); err != nil {
			failed <- err
			break
		}
		chunkSize := chunk.End - chunk.Start + 1
		part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey,
			chunk.Start, chunkSize, chunk.Number, arg.options...)
		if err != nil {
			failed <- err
			break
		}
		// Bail out without publishing if the copy has been aborted.
		select {
		case <-die:
			return
		default:
		}
		results <- part
	}
}
// copyScheduler feeds every part onto the jobs channel, then closes it so
// the workers terminate once the queue is drained.
func copyScheduler(jobs chan copyPart, parts []copyPart) {
	for i := range parts {
		jobs <- parts[i]
	}
	close(jobs)
}
// copyPart describes one slice of the source object.
type copyPart struct {
	Number int   // part number, in [1, 10000]
	Start  int64 // inclusive start offset of the part
	End    int64 // inclusive end offset of the part
}
// getCopyParts splits the source object into partSize-sized slices based on
// its Content-Length.
func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart, error) {
	meta, err := bucket.GetObjectDetailedMeta(objectKey)
	if err != nil {
		return nil, err
	}
	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
	if err != nil {
		return nil, err
	}
	parts := []copyPart{}
	number := 1
	for offset := int64(0); offset < objectSize; offset += partSize {
		parts = append(parts, copyPart{
			Number: number,
			Start:  offset,
			End:    GetPartEnd(offset, objectSize, partSize),
		})
		number++
	}
	return parts, nil
}
// getSrcObjectBytes sums the sizes of all parts, i.e. the size of the
// source object.
func getSrcObjectBytes(parts []copyPart) int64 {
	var total int64
	for i := range parts {
		total += parts[i].End - parts[i].Start + 1
	}
	return total
}
// copyFile performs the concurrent multipart copy without checkpoint
// support: it splits the source object, copies the parts with `routines`
// worker goroutines, and commits the multipart upload on the destination.
func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
	partSize int64, options []Option, routines int) error {
	// The original discarded these errors (the second `err` silently
	// overwrote the first and neither was checked); fail fast instead.
	descBucket, err := bucket.Client.Bucket(destBucketName)
	if err != nil {
		return err
	}
	srcBucket, err := bucket.Client.Bucket(srcBucketName)
	if err != nil {
		return err
	}
	listener := getProgressListener(options)
	// Split the source object into parts.
	parts, err := getCopyParts(srcBucket, srcObjectKey, partSize)
	if err != nil {
		return err
	}
	// Initiate the multipart upload on the destination.
	imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
	if err != nil {
		return err
	}
	jobs := make(chan copyPart, len(parts))
	results := make(chan UploadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)
	var completedBytes int64
	totalBytes := getSrcObjectBytes(parts)
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
	publishProgress(listener, event)
	// Start the worker goroutines.
	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
	for w := 1; w <= routines; w++ {
		go copyWorker(w, arg, jobs, results, failed, die)
	}
	// Feed the parts to the workers.
	go copyScheduler(jobs, parts)
	// Wait until every part is copied, or abort on the first error.
	completed := 0
	ups := make([]UploadPart, len(parts))
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			ups[part.PartNumber-1] = part
			completedBytes += (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			descBucket.AbortMultipartUpload(imur)
			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
			publishProgress(listener, event)
			return err
		}
		if completed >= len(parts) {
			break
		}
	}
	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
	publishProgress(listener, event)
	// Commit the multipart upload; abort it on failure.
	_, err = descBucket.CompleteMultipartUpload(imur, ups)
	if err != nil {
		bucket.AbortMultipartUpload(imur)
		return err
	}
	return nil
}
// ----- concurrent copy with checkpoint (resumable) -----

// copyCpMagic identifies a copy-checkpoint file.
const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"

// copyCheckpoint is the state persisted to the checkpoint file so that an
// interrupted copy can be resumed.
type copyCheckpoint struct {
	Magic          string       // magic constant identifying a copy checkpoint
	MD5            string       // MD5 of the checkpoint content (excluding this field)
	SrcBucketName  string       // source bucket
	SrcObjectKey   string       // source object
	DestBucketName string       // destination bucket
	DestObjectKey  string       // destination object
	CopyID         string       // upload ID of the multipart copy
	ObjStat        objectStat   // source object stat, used to detect changes
	Parts          []copyPart   // all parts
	CopyParts      []UploadPart // results of the successfully copied parts
	PartStat       []bool       // whether each part has completed
}
// isValid reports whether the checkpoint can be used to resume the copy:
// the magic and content MD5 must match, and the source object must not have
// been modified since the checkpoint was written.
func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) {
	// Verify the magic and the MD5 of the checkpoint content (computed with
	// the MD5 field blanked out, matching dump).
	cpb := cp
	cpb.MD5 = ""
	js, _ := json.Marshal(cpb)
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])
	// BUG FIX: compare against copyCpMagic — prepare() writes copyCpMagic,
	// but the original compared against downloadCpMagic, which invalidated
	// every copy checkpoint and defeated resuming.
	if cp.Magic != copyCpMagic || b64 != cp.MD5 {
		return false, nil
	}
	// Make sure the source object has not been updated.
	meta, err := bucket.GetObjectDetailedMeta(objectKey)
	if err != nil {
		return false, err
	}
	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
	if err != nil {
		return false, err
	}
	// Compare the object's size, last-modified time and etag.
	if cp.ObjStat.Size != objectSize ||
		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
		return false, nil
	}
	return true, nil
}
// load reads and deserializes a checkpoint from filePath.
func (cp *copyCheckpoint) load(filePath string) error {
	contents, err := ioutil.ReadFile(filePath)
	if err != nil {
		return err
	}
	return json.Unmarshal(contents, cp)
}
// update records a finished part in the checkpoint.
func (cp *copyCheckpoint) update(part UploadPart) {
	idx := part.PartNumber - 1
	cp.CopyParts[idx] = part
	cp.PartStat[idx] = true
}
// dump serializes the checkpoint, embeds the MD5 of its content, and writes
// the result to filePath.
func (cp *copyCheckpoint) dump(filePath string) error {
	bcp := *cp
	// The MD5 is computed over the JSON with the MD5 field blanked out.
	bcp.MD5 = ""
	js, err := json.Marshal(bcp)
	if err != nil {
		return err
	}
	sum := md5.Sum(js)
	bcp.MD5 = base64.StdEncoding.EncodeToString(sum[:])
	// Serialize again, now including the checksum.
	if js, err = json.Marshal(bcp); err != nil {
		return err
	}
	return ioutil.WriteFile(filePath, js, FilePermMode)
}
// todoParts returns the parts that have not been copied yet.
func (cp copyCheckpoint) todoParts() []copyPart {
	todo := []copyPart{}
	for i := range cp.PartStat {
		if !cp.PartStat[i] {
			todo = append(todo, cp.Parts[i])
		}
	}
	return todo
}
// getCompletedBytes returns the number of bytes already copied.
func (cp copyCheckpoint) getCompletedBytes() int64 {
	var done int64
	for i := range cp.Parts {
		if cp.PartStat[i] {
			done += cp.Parts[i].End - cp.Parts[i].Start + 1
		}
	}
	return done
}
// prepare initializes a fresh checkpoint: it records the source/destination
// identities, stats the source object, splits it into parts and starts the
// multipart upload on the destination.
func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
	partSize int64, options []Option) error {
	// Checkpoint identity.
	cp.Magic = copyCpMagic
	cp.SrcBucketName = srcBucket.BucketName
	cp.SrcObjectKey = srcObjectKey
	cp.DestBucketName = destBucket.BucketName
	cp.DestObjectKey = destObjectKey
	// Source object stat, used later by isValid to detect modification.
	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey)
	if err != nil {
		return err
	}
	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
	if err != nil {
		return err
	}
	cp.ObjStat.Size = objectSize
	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
	// Split the source object into parts; none is completed yet.
	cp.Parts, err = getCopyParts(srcBucket, srcObjectKey, partSize)
	if err != nil {
		return err
	}
	cp.PartStat = make([]bool, len(cp.Parts))
	for i := range cp.PartStat {
		cp.PartStat[i] = false
	}
	cp.CopyParts = make([]UploadPart, len(cp.Parts))
	// Start the multipart upload on the destination and remember its ID.
	imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
	if err != nil {
		return err
	}
	cp.CopyID = imur.UploadID
	return nil
}
// complete commits the multipart copy and removes the checkpoint file.
func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string) error {
	imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
		Key: cp.DestObjectKey, UploadID: cp.CopyID}
	_, err := bucket.CompleteMultipartUpload(imur, parts)
	if err != nil {
		return err
	}
	// Best effort: a stale checkpoint file is harmless, so the error is ignored.
	os.Remove(cpFilePath)
	return err
}
// copyFileWithCp performs the concurrent multipart copy with checkpoint
// (resume) support, persisting progress to the file at cpFilePath.
func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
	partSize int64, options []Option, cpFilePath string, routines int) error {
	// The original discarded these errors (the second `err` silently
	// overwrote the first and neither was checked); fail fast instead.
	descBucket, err := bucket.Client.Bucket(destBucketName)
	if err != nil {
		return err
	}
	srcBucket, err := bucket.Client.Bucket(srcBucketName)
	if err != nil {
		return err
	}
	listener := getProgressListener(options)
	// Load the checkpoint data; an unreadable file is discarded.
	ccp := copyCheckpoint{}
	err = ccp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}
	// If loading failed or the data is stale, start a fresh copy.
	valid, err := ccp.isValid(srcBucket, srcObjectKey)
	if err != nil || !valid {
		if err = ccp.prepare(srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}
	// Parts still left to copy.
	parts := ccp.todoParts()
	imur := InitiateMultipartUploadResult{
		Bucket:   destBucketName,
		Key:      destObjectKey,
		UploadID: ccp.CopyID}
	jobs := make(chan copyPart, len(parts))
	results := make(chan UploadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)
	completedBytes := ccp.getCompletedBytes()
	event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size)
	publishProgress(listener, event)
	// Start the worker goroutines.
	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
	for w := 1; w <= routines; w++ {
		go copyWorker(w, arg, jobs, results, failed, die)
	}
	// Feed the remaining parts to the workers.
	go copyScheduler(jobs, parts)
	// Wait for the parts, persisting the checkpoint after each one.
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			ccp.update(part)
			ccp.dump(cpFilePath)
			completedBytes += (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
			event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size)
			publishProgress(listener, event)
			return err
		}
		if completed >= len(parts) {
			break
		}
	}
	event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size)
	publishProgress(listener, event)
	return ccp.complete(descBucket, ccp.CopyParts, cpFilePath)
}

View File

@ -0,0 +1,281 @@
package oss
import (
"bytes"
"encoding/xml"
"io"
"net/http"
"os"
"sort"
"strconv"
)
//
// InitiateMultipartUpload starts a multipart upload task.
//
// objectKey    object name.
// options      object attributes, e.g. CacheControl, ContentDisposition,
//              ContentEncoding, Expires, ServerSideEncryption, Meta; see
//              https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
//
// InitiateMultipartUploadResult    result used by the subsequent
//                                  UploadPartFromFile, UploadPartCopy etc.;
//                                  valid when error is nil.
// error    nil on success, otherwise the error that occurred.
//
func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
	var imur InitiateMultipartUploadResult
	// Derive a Content-Type option from the object key when none was given.
	opts := addContentType(options, objectKey)
	resp, err := bucket.do("POST", objectKey, "uploads", "uploads", opts, nil, nil)
	if err != nil {
		return imur, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &imur)
	return imur, err
}
//
// UploadPart uploads one part of a multipart upload.
//
// After InitiateMultipartUpload, data can be uploaded part by part under the
// returned upload ID. Each part carries a part number in [1, 10000] that both
// identifies the part and fixes its position in the final object; uploading
// new data under an existing part number overwrites that part. Every part
// except the last must be at least 100KB; the last part has no size limit.
//
// imur          result of a successful InitiateMultipartUpload.
// reader        io.Reader supplying the part's data.
// partSize      size of this part in bytes.
// partNumber    part number in [1, 10000]; out-of-range values make OSS
//               return InvalidArgument.
//
// UploadPart    PartNumber (the number passed in) and ETag (MD5 of the
//               uploaded data); valid when error is nil.
// error         nil on success, otherwise the error that occurred.
//
func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
	partSize int64, partNumber int, options ...Option) (UploadPart, error) {
	request := &UploadPartRequest{
		InitResult: &imur,
		Reader:     reader,
		PartSize:   partSize,
		PartNumber: partNumber,
	}
	result, err := bucket.DoUploadPart(request, options)
	return result.Part, err
}
//
// UploadPartFromFile uploads one part of a multipart upload from a local file.
//
// imur             result of a successful InitiateMultipartUpload.
// filePath         local file to read the part from.
// startPosition    offset in the file where this part starts.
// partSize         size of this part in bytes.
// partNumber       part number in [1, 10000].
//
// UploadPart    PartNumber and ETag of the uploaded part; valid when error is nil.
// error         nil on success, otherwise the error that occurred.
//
func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
	var part = UploadPart{}
	fd, err := os.Open(filePath)
	if err != nil {
		return part, err
	}
	defer fd.Close()
	// Position the file at the start of the part. The original ignored the
	// Seek error, which could silently upload data from the wrong offset.
	if _, err := fd.Seek(startPosition, os.SEEK_SET); err != nil {
		return part, err
	}
	request := &UploadPartRequest{
		InitResult: &imur,
		Reader:     fd,
		PartSize:   partSize,
		PartNumber: partNumber,
	}
	result, err := bucket.DoUploadPart(request, options)
	return result.Part, err
}
//
// DoUploadPart uploads one part.
//
// request    the upload-part request.
//
// UploadPartResult    result of the upload-part request.
// error               nil on success, otherwise the error that occurred.
//
func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
	listener := getProgressListener(options)
	params := "partNumber=" + strconv.Itoa(request.PartNumber) + "&uploadId=" + request.InitResult.UploadID
	opts := []Option{ContentLength(request.PartSize)}
	// Limit the body to exactly PartSize bytes of the supplied reader.
	resp, err := bucket.do("PUT", request.InitResult.Key, params, params, opts,
		&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
	if err != nil {
		return &UploadPartResult{}, err
	}
	defer resp.Body.Close()
	part := UploadPart{
		ETag:       resp.Headers.Get(HTTPHeaderEtag),
		PartNumber: request.PartNumber,
	}
	// Verify the CRC64 checksum when the client has CRC checking enabled.
	if bucket.getConfig().IsEnableCRC {
		err = checkCRC(resp, "DoUploadPart")
		if err != nil {
			return &UploadPartResult{part}, err
		}
	}
	return &UploadPartResult{part}, nil
}
//
// UploadPartCopy copies one part from an existing object.
//
// imur             result of a successful InitiateMultipartUpload.
// srcBucketName    source bucket name.
// srcObjectKey     source object name.
// startPosition    offset in the source object where this part starts.
// partSize         size of this part in bytes.
// partNumber       part number in [1, 10000]; out-of-range values make OSS
//                  return InvalidArgument.
// options          preconditions on the source object; the copy only happens
//                  when they hold: CopySourceIfMatch, CopySourceIfNoneMatch,
//                  CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince; see
//                  https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
//
// UploadPart    PartNumber and ETag of the copied part; valid when error is nil.
// error         nil on success, otherwise the error that occurred.
//
func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
	var out UploadPartCopyResult
	var part UploadPart
	opts := []Option{CopySource(srcBucketName, srcObjectKey),
		CopySourceRange(startPosition, partSize)}
	opts = append(opts, options...)
	params := "partNumber=" + strconv.Itoa(partNumber) + "&uploadId=" + imur.UploadID
	resp, err := bucket.do("PUT", imur.Key, params, params, opts, nil, nil)
	if err != nil {
		return part, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &out)
	if err != nil {
		return part, err
	}
	part.ETag = out.ETag
	part.PartNumber = partNumber
	return part, nil
}
//
// CompleteMultipartUpload commits a multipart upload task.
//
// imur     result of InitiateMultipartUpload.
// parts    array of the values returned by UploadPart/UploadPartFromFile/UploadPartCopy.
//
// CompleteMultipartUploadResult    result of the commit; valid when error is nil.
// error    nil on success, otherwise the error that occurred.
//
func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
	parts []UploadPart) (CompleteMultipartUploadResult, error) {
	var out CompleteMultipartUploadResult
	// Sort the parts by part number before marshaling the request body.
	sort.Sort(uploadParts(parts))
	cxml := completeMultipartUploadXML{}
	cxml.Part = parts
	bs, err := xml.Marshal(cxml)
	if err != nil {
		return out, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)
	params := "uploadId=" + imur.UploadID
	resp, err := bucket.do("POST", imur.Key, params, params, nil, buffer, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &out)
	return out, err
}
//
// AbortMultipartUpload aborts a multipart upload task.
//
// imur    result of InitiateMultipartUpload.
//
// error    nil on success, otherwise the error that occurred.
//
func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) error {
	params := "uploadId=" + imur.UploadID
	resp, err := bucket.do("DELETE", imur.Key, params, params, nil, nil, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// A successful abort answers 204 No Content.
	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
//
// ListUploadedParts lists the parts already uploaded for a multipart upload.
//
// imur    result of InitiateMultipartUpload.
//
// ListUploadedPartsResult    UploadedParts holds the already uploaded/copied
//                            parts; valid when error is nil.
// error    nil on success, otherwise the error that occurred.
//
func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult) (ListUploadedPartsResult, error) {
	var out ListUploadedPartsResult
	params := "uploadId=" + imur.UploadID
	resp, err := bucket.do("GET", imur.Key, params, params, nil, nil, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &out)
	return out, err
}
//
// ListMultipartUploads lists all multipart upload tasks that have not been
// completed yet.
//
// options    listing filters: Prefix (object-key prefix), KeyMarker (start
//            position), MaxUploads (maximum number, default 1000), Delimiter
//            (character used to group object names).
//
// ListMultipartUploadResult    result of the listing; valid when error is nil.
// error    nil on success, otherwise the error that occurred.
//
func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
	var out ListMultipartUploadResult
	// Ask the server to URL-encode keys; they are decoded again below.
	options = append(options, EncodingType("url"))
	params, err := handleParams(options)
	if err != nil {
		return out, err
	}
	resp, err := bucket.do("GET", "", "uploads&"+params, "uploads", nil, nil, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &out)
	if err != nil {
		return out, err
	}
	// Decode the URL-encoded fields back into plain strings.
	err = decodeListMultipartUploadResult(&out)
	return out, err
}

View File

@ -0,0 +1,351 @@
package oss
import (
"bytes"
"fmt"
"net/http"
"net/url"
"sort"
"strconv"
"time"
)
// optionType classifies where an option's value is applied.
type optionType string

const (
	optionParam optionType = "HTTPParameter" // URL query parameter
	optionHTTP  optionType = "HTTPHeader"    // HTTP header
	optionArg   optionType = "FuncArgument"  // SDK-internal function argument
)

// Keys of the SDK-internal (FuncArgument) options.
const (
	deleteObjectsQuiet = "delete-objects-quiet"
	routineNum         = "x-routine-num"
	checkpointConfig   = "x-cp-config"
	initCRC64          = "init-crc64"
	progressListener   = "x-progress-listener"
)

type (
	// optionValue is a recorded option value plus where it applies.
	optionValue struct {
		Value interface{}
		Type  optionType
	}

	// Option sets an HTTP parameter, an HTTP header or an SDK argument
	// on a request.
	Option func(map[string]optionValue) error
)
// ACL is an option to set the X-Oss-Acl header.
func ACL(acl ACLType) Option {
	return setHeader(HTTPHeaderOssACL, string(acl))
}

// ContentType is an option to set the Content-Type header.
func ContentType(value string) Option {
	return setHeader(HTTPHeaderContentType, value)
}

// ContentLength is an option to set the Content-Length header.
func ContentLength(length int64) Option {
	return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10))
}

// CacheControl is an option to set the Cache-Control header.
func CacheControl(value string) Option {
	return setHeader(HTTPHeaderCacheControl, value)
}

// ContentDisposition is an option to set the Content-Disposition header.
func ContentDisposition(value string) Option {
	return setHeader(HTTPHeaderContentDisposition, value)
}

// ContentEncoding is an option to set the Content-Encoding header.
func ContentEncoding(value string) Option {
	return setHeader(HTTPHeaderContentEncoding, value)
}

// ContentMD5 is an option to set the Content-MD5 header.
func ContentMD5(value string) Option {
	return setHeader(HTTPHeaderContentMD5, value)
}

// Expires is an option to set the Expires header.
func Expires(t time.Time) Option {
	return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat))
}

// Meta is an option to set a user metadata header (X-Oss-Meta- prefix plus key).
func Meta(key, value string) Option {
	return setHeader(HTTPHeaderOssMetaPrefix+key, value)
}

// Range is an option to set the Range header, covering bytes [start, end].
func Range(start, end int64) Option {
	return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end))
}

// AcceptEncoding is an option to set the Accept-Encoding header.
func AcceptEncoding(value string) Option {
	return setHeader(HTTPHeaderAcceptEncoding, value)
}

// IfModifiedSince is an option to set the If-Modified-Since header.
func IfModifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat))
}

// IfUnmodifiedSince is an option to set the If-Unmodified-Since header.
func IfUnmodifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat))
}

// IfMatch is an option to set the If-Match header.
func IfMatch(value string) Option {
	return setHeader(HTTPHeaderIfMatch, value)
}

// IfNoneMatch is an option to set the If-None-Match header.
func IfNoneMatch(value string) Option {
	return setHeader(HTTPHeaderIfNoneMatch, value)
}

// CopySource is an option to set the X-Oss-Copy-Source header.
func CopySource(sourceBucket, sourceObject string) Option {
	return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject)
}

// CopySourceRange is an option to set the X-Oss-Copy-Source-Range header.
func CopySourceRange(startPosition, partSize int64) Option {
	val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" +
		strconv.FormatInt((startPosition+partSize-1), 10)
	return setHeader(HTTPHeaderOssCopySourceRange, val)
}

// CopySourceIfMatch is an option to set the X-Oss-Copy-Source-If-Match header.
func CopySourceIfMatch(value string) Option {
	return setHeader(HTTPHeaderOssCopySourceIfMatch, value)
}

// CopySourceIfNoneMatch is an option to set the X-Oss-Copy-Source-If-None-Match header.
func CopySourceIfNoneMatch(value string) Option {
	return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value)
}

// CopySourceIfModifiedSince is an option to set the X-Oss-Copy-Source-If-Modified-Since header.
func CopySourceIfModifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat))
}

// CopySourceIfUnmodifiedSince is an option to set the X-Oss-Copy-Source-If-Unmodified-Since header.
func CopySourceIfUnmodifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat))
}

// MetadataDirective is an option to set the X-Oss-Metadata-Directive header.
func MetadataDirective(directive MetadataDirectiveType) Option {
	return setHeader(HTTPHeaderOssMetadataDirective, string(directive))
}

// ServerSideEncryption is an option to set the X-Oss-Server-Side-Encryption header.
func ServerSideEncryption(value string) Option {
	return setHeader(HTTPHeaderOssServerSideEncryption, value)
}

// ObjectACL is an option to set the X-Oss-Object-Acl header.
func ObjectACL(acl ACLType) Option {
	return setHeader(HTTPHeaderOssObjectACL, string(acl))
}

// Origin is an option to set the Origin header.
func Origin(value string) Option {
	return setHeader(HTTPHeaderOrigin, value)
}

// Delimiter is an option to set the delimiter parameter.
func Delimiter(value string) Option {
	return addParam("delimiter", value)
}

// Marker is an option to set the marker parameter.
func Marker(value string) Option {
	return addParam("marker", value)
}

// MaxKeys is an option to set the max-keys parameter.
func MaxKeys(value int) Option {
	return addParam("max-keys", strconv.Itoa(value))
}

// Prefix is an option to set the prefix parameter.
func Prefix(value string) Option {
	return addParam("prefix", value)
}

// EncodingType is an option to set the encoding-type parameter.
func EncodingType(value string) Option {
	return addParam("encoding-type", value)
}

// MaxUploads is an option to set the max-uploads parameter.
func MaxUploads(value int) Option {
	return addParam("max-uploads", strconv.Itoa(value))
}

// KeyMarker is an option to set the key-marker parameter.
func KeyMarker(value string) Option {
	return addParam("key-marker", value)
}

// UploadIDMarker is an option to set the upload-id-marker parameter.
func UploadIDMarker(value string) Option {
	return addParam("upload-id-marker", value)
}

// DeleteObjectsQuiet is an option that switches DeleteObjects between quiet
// (true) and verbose (false) mode; verbose is the default.
func DeleteObjectsQuiet(isQuiet bool) Option {
	return addArg(deleteObjectsQuiet, isQuiet)
}

// cpConfig is the checkpoint (resumable transfer) configuration: whether it
// is enabled and the path of the checkpoint file.
type cpConfig struct {
	IsEnable bool
	FilePath string
}

// Checkpoint is an option that enables/disables checkpointing for
// DownloadFile/UploadFile and sets the checkpoint file path.
func Checkpoint(isEnable bool, filePath string) Option {
	return addArg(checkpointConfig, &cpConfig{isEnable, filePath})
}

// Routines is an option to set the number of concurrent routines used by
// DownloadFile/UploadFile.
func Routines(n int) Option {
	return addArg(routineNum, n)
}

// InitCRC is an option to set the initial CRC value for AppendObject CRC
// verification.
func InitCRC(initCRC uint64) Option {
	return addArg(initCRC64, initCRC)
}

// Progress is an option to set the transfer progress listener.
func Progress(listener ProgressListener) Option {
	return addArg(progressListener, listener)
}
// setHeader returns an Option that records value as an HTTP header.
// A nil value makes the option a no-op.
func setHeader(key string, value interface{}) Option {
	return func(params map[string]optionValue) error {
		if value == nil {
			return nil
		}
		params[key] = optionValue{value, optionHTTP}
		return nil
	}
}

// addParam returns an Option that records value as a URL query parameter.
// A nil value makes the option a no-op.
func addParam(key string, value interface{}) Option {
	return func(params map[string]optionValue) error {
		if value == nil {
			return nil
		}
		params[key] = optionValue{value, optionParam}
		return nil
	}
}

// addArg returns an Option that records value as an SDK-internal argument.
// A nil value makes the option a no-op.
func addArg(key string, value interface{}) Option {
	return func(params map[string]optionValue) error {
		if value == nil {
			return nil
		}
		params[key] = optionValue{value, optionArg}
		return nil
	}
}
// handleOptions evaluates options and copies every HTTP-header option into
// headers.
func handleOptions(headers map[string]string, options []Option) error {
	params := map[string]optionValue{}
	for _, opt := range options {
		if opt == nil {
			continue
		}
		if err := opt(params); err != nil {
			return err
		}
	}
	for key, val := range params {
		if val.Type == optionHTTP {
			headers[key] = val.Value.(string)
		}
	}
	return nil
}
// handleParams evaluates options and serializes the URL-parameter options
// into a sorted, URL-encoded query string.
func handleParams(options []Option) (string, error) {
	// Evaluate the options into the shared map.
	params := map[string]optionValue{}
	for _, opt := range options {
		if opt == nil {
			continue
		}
		if err := opt(params); err != nil {
			return "", err
		}
	}
	// Collect the parameter keys and sort for a deterministic query string.
	keys := make([]string, 0, len(params))
	for k, v := range params {
		if v.Type == optionParam {
			keys = append(keys, k)
		}
	}
	sort.Strings(keys)
	// Serialize as key=value pairs joined by '&'.
	var buf bytes.Buffer
	for _, k := range keys {
		if buf.Len() > 0 {
			buf.WriteByte('&')
		}
		buf.WriteString(url.QueryEscape(k))
		buf.WriteByte('=')
		buf.WriteString(url.QueryEscape(params[k].Value.(string)))
	}
	return buf.String(), nil
}
// findOption evaluates options and returns the value recorded under param,
// or defaultVal when no such option was supplied.
func findOption(options []Option, param string, defaultVal interface{}) (interface{}, error) {
	params := map[string]optionValue{}
	for _, opt := range options {
		if opt == nil {
			continue
		}
		if err := opt(params); err != nil {
			return nil, err
		}
	}
	if v, ok := params[param]; ok {
		return v.Value, nil
	}
	return defaultVal, nil
}
// isOptionSet reports whether the option named by the string parameter was
// supplied, returning its recorded value when present.
func isOptionSet(options []Option, option string) (bool, interface{}, error) {
	params := map[string]optionValue{}
	// The original loop variable was also named "option", shadowing the
	// string parameter; it only worked because the shadow ended with the
	// loop. Use a distinct name.
	for _, opt := range options {
		if opt != nil {
			if err := opt(params); err != nil {
				return false, nil, err
			}
		}
	}
	if val, ok := params[option]; ok {
		return true, val.Value, nil
	}
	return false, nil, nil
}

View File

@ -0,0 +1,105 @@
package oss
import "io"
// ProgressEventType is the kind of a transfer progress event.
type ProgressEventType int

const (
	// TransferStartedEvent transfer started; TotalBytes is set
	TransferStartedEvent ProgressEventType = 1 + iota
	// TransferDataEvent data transferred; ConsumedBytes and TotalBytes are set
	TransferDataEvent
	// TransferCompletedEvent transfer completed
	TransferCompletedEvent
	// TransferFailedEvent transfer encountered an error
	TransferFailedEvent
)

// ProgressEvent describes the state of a transfer at one point in time.
type ProgressEvent struct {
	ConsumedBytes int64             // bytes transferred so far
	TotalBytes    int64             // total bytes to transfer
	EventType     ProgressEventType // kind of event
}

// ProgressListener receives transfer progress change notifications.
type ProgressListener interface {
	ProgressChanged(event *ProgressEvent)
}
// -------------------- private --------------------

// newProgressEvent builds a ProgressEvent of the given type and byte counts.
func newProgressEvent(eventType ProgressEventType, consumed, total int64) *ProgressEvent {
	event := &ProgressEvent{}
	event.EventType = eventType
	event.ConsumedBytes = consumed
	event.TotalBytes = total
	return event
}
// publishProgress forwards event to listener; either may be nil, in which
// case nothing happens.
func publishProgress(listener ProgressListener, event *ProgressEvent) {
	if listener == nil || event == nil {
		return
	}
	listener.ProgressChanged(event)
}
// readerTracker exposes the number of bytes a teeReader has consumed.
type readerTracker struct {
	completedBytes int64 // bytes read so far
}

// teeReader mirrors io.TeeReader while also publishing progress events and
// tracking the consumed byte count.
type teeReader struct {
	reader        io.Reader        // underlying data source
	writer        io.Writer        // optional sink for every byte read (may be nil)
	listener      ProgressListener // optional progress listener (may be nil)
	consumedBytes int64            // bytes read so far
	totalBytes    int64            // expected total, for progress reporting
	tracker       *readerTracker   // optional external consumption tracker (may be nil)
}
// TeeReader returns a Reader that writes to writer what it reads from reader.
// All reads from reader performed through it are matched with corresponding
// writes to writer. There is no internal buffering - the write must complete
// before the read completes. Any error encountered while writing is reported
// as a read error. Progress is published to listener and the running byte
// count is mirrored into tracker; writer, listener and tracker may be nil.
func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.Reader {
	return &teeReader{
		reader:        reader,
		writer:        writer,
		listener:      listener,
		consumedBytes: 0,
		totalBytes:    totalBytes,
		tracker:       tracker,
	}
}
// Read reads from the underlying reader, forwards the data to the writer
// (if any), publishes progress and updates the tracker. It follows the
// stdlib io.TeeReader pattern: a write error is returned as the read error.
func (t *teeReader) Read(p []byte) (n int, err error) {
	n, err = t.reader.Read(p)
	// A genuine read failure (anything but EOF) is reported as a failed transfer.
	if err != nil && err != io.EOF {
		event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes)
		publishProgress(t.listener, event)
	}
	if n > 0 {
		t.consumedBytes += int64(n)
		// Mirror the data into the writer; the inner n/err deliberately
		// shadow the outer ones (same shape as stdlib io.TeeReader).
		if t.writer != nil {
			if n, err := t.writer.Write(p[:n]); err != nil {
				return n, err
			}
		}
		// Publish a data progress event.
		if t.listener != nil {
			event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes)
			publishProgress(t.listener, event)
		}
		// Expose the running total to the external tracker.
		if t.tracker != nil {
			t.tracker.completedBytes = t.consumedBytes
		}
	}
	return
}

442
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go generated vendored Normal file
View File

@ -0,0 +1,442 @@
package oss
import (
"encoding/xml"
"net/url"
"time"
)
// ListBucketsResult ListBuckets请求返回的结果
type ListBucketsResult struct {
XMLName xml.Name `xml:"ListAllMyBucketsResult"`
Prefix string `xml:"Prefix"` // 本次查询结果的前缀
Marker string `xml:"Marker"` // 标明查询的起点,未全部返回时有此节点
MaxKeys int `xml:"MaxKeys"` // 返回结果的最大数目,未全部返回时有此节点
IsTruncated bool `xml:"IsTruncated"` // 所有的结果是否已经全部返回
NextMarker string `xml:"NextMarker"` // 表示下一次查询的起点
Owner Owner `xml:"Owner"` // 拥有者信息
Buckets []BucketProperties `xml:"Buckets>Bucket"` // Bucket列表
}
// BucketProperties Bucket信息
type BucketProperties struct {
XMLName xml.Name `xml:"Bucket"`
Name string `xml:"Name"` // Bucket名称
Location string `xml:"Location"` // Bucket所在的数据中心
CreationDate time.Time `xml:"CreationDate"` // Bucket创建时间
}
// GetBucketACLResult GetBucketACL请求返回的结果
type GetBucketACLResult struct {
XMLName xml.Name `xml:"AccessControlPolicy"`
ACL string `xml:"AccessControlList>Grant"` // Bucket权限
Owner Owner `xml:"Owner"` // Bucket拥有者信息
}
// LifecycleConfiguration is the bucket's lifecycle configuration.
type LifecycleConfiguration struct {
	XMLName xml.Name        `xml:"LifecycleConfiguration"`
	Rules   []LifecycleRule `xml:"Rule"`
}

// LifecycleRule is a single lifecycle rule.
type LifecycleRule struct {
	XMLName    xml.Name            `xml:"Rule"`
	ID         string              `xml:"ID"`         // unique ID of the rule
	Prefix     string              `xml:"Prefix"`     // object key prefix the rule applies to
	Status     string              `xml:"Status"`     // whether the rule is in effect ("Enabled"/"Disabled")
	Expiration LifecycleExpiration `xml:"Expiration"` // expiration attribute of the rule
}

// LifecycleExpiration describes when objects matched by a rule expire.
type LifecycleExpiration struct {
	XMLName xml.Name  `xml:"Expiration"`
	Days    int       `xml:"Days,omitempty"` // days after last modification before expiry
	Date    time.Time `xml:"Date,omitempty"` // absolute date on which the rule takes effect
}

// lifecycleXML is the wire form of LifecycleConfiguration (Date as string).
type lifecycleXML struct {
	XMLName xml.Name        `xml:"LifecycleConfiguration"`
	Rules   []lifecycleRule `xml:"Rule"`
}

type lifecycleRule struct {
	XMLName    xml.Name            `xml:"Rule"`
	ID         string              `xml:"ID"`
	Prefix     string              `xml:"Prefix"`
	Status     string              `xml:"Status"`
	Expiration lifecycleExpiration `xml:"Expiration"`
}

type lifecycleExpiration struct {
	XMLName xml.Name `xml:"Expiration"`
	Days    int      `xml:"Days,omitempty"`
	Date    string   `xml:"Date,omitempty"`
}
const expirationDateFormat = "2006-01-02T15:04:05.000Z"
// convLifecycleRule converts public LifecycleRule values to their wire
// representation. An explicit Date takes precedence over Days and is
// serialized using expirationDateFormat.
func convLifecycleRule(rules []LifecycleRule) []lifecycleRule {
	// Pre-size: exactly one output rule per input rule.
	rs := make([]lifecycleRule, 0, len(rules))
	for _, rule := range rules {
		r := lifecycleRule{
			ID:     rule.ID,
			Prefix: rule.Prefix,
			Status: rule.Status,
		}
		if rule.Expiration.Date.IsZero() {
			r.Expiration.Days = rule.Expiration.Days
		} else {
			r.Expiration.Date = rule.Expiration.Date.Format(expirationDateFormat)
		}
		rs = append(rs, r)
	}
	return rs
}
// BuildLifecycleRuleByDays builds a lifecycle rule that expires objects a
// fixed number of days after their last modification. status true enables
// the rule.
func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
	statusStr := "Disabled"
	if status {
		statusStr = "Enabled"
	}
	return LifecycleRule{
		ID:         id,
		Prefix:     prefix,
		Status:     statusStr,
		Expiration: LifecycleExpiration{Days: days},
	}
}
// BuildLifecycleRuleByDate builds a lifecycle rule that expires objects on
// the given UTC calendar date. status true enables the rule.
func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule {
	statusStr := "Disabled"
	if status {
		statusStr = "Enabled"
	}
	when := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
	return LifecycleRule{
		ID:         id,
		Prefix:     prefix,
		Status:     statusStr,
		Expiration: LifecycleExpiration{Date: when},
	}
}
// GetBucketLifecycleResult GetBucketLifecycle请求请求结果
type GetBucketLifecycleResult LifecycleConfiguration
// RefererXML Referer配置
type RefererXML struct {
XMLName xml.Name `xml:"RefererConfiguration"`
AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // 是否允许referer字段为空的请求访问
RefererList []string `xml:"RefererList>Referer"` // referer访问白名单
}
// GetBucketRefererResult GetBucketReferer请教返回结果
type GetBucketRefererResult RefererXML
// LoggingXML Logging配置
type LoggingXML struct {
XMLName xml.Name `xml:"BucketLoggingStatus"`
LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // 访问日志信息容器
}
type loggingXMLEmpty struct {
XMLName xml.Name `xml:"BucketLoggingStatus"`
}
// LoggingEnabled 访问日志信息容器
type LoggingEnabled struct {
XMLName xml.Name `xml:"LoggingEnabled"`
TargetBucket string `xml:"TargetBucket"` //存放访问日志的Bucket
TargetPrefix string `xml:"TargetPrefix"` //保存访问日志的文件前缀
}
// GetBucketLoggingResult GetBucketLogging请求返回结果
type GetBucketLoggingResult LoggingXML
// WebsiteXML Website配置
type WebsiteXML struct {
XMLName xml.Name `xml:"WebsiteConfiguration"`
IndexDocument IndexDocument `xml:"IndexDocument"` // 目录URL时添加的索引文件
ErrorDocument ErrorDocument `xml:"ErrorDocument"` // 404错误时使用的文件
}
// IndexDocument 目录URL时添加的索引文件
type IndexDocument struct {
XMLName xml.Name `xml:"IndexDocument"`
Suffix string `xml:"Suffix"` // 目录URL时添加的索引文件名
}
// ErrorDocument 404错误时使用的文件
type ErrorDocument struct {
XMLName xml.Name `xml:"ErrorDocument"`
Key string `xml:"Key"` // 404错误时使用的文件名
}
// GetBucketWebsiteResult GetBucketWebsite请求返回结果
type GetBucketWebsiteResult WebsiteXML
// CORSXML CORS配置
type CORSXML struct {
XMLName xml.Name `xml:"CORSConfiguration"`
CORSRules []CORSRule `xml:"CORSRule"` // CORS规则列表
}
// CORSRule CORS规则
type CORSRule struct {
XMLName xml.Name `xml:"CORSRule"`
AllowedOrigin []string `xml:"AllowedOrigin"` // 允许的来源,默认通配符"*"
AllowedMethod []string `xml:"AllowedMethod"` // 允许的方法
AllowedHeader []string `xml:"AllowedHeader"` // 允许的请求头
ExposeHeader []string `xml:"ExposeHeader"` // 允许的响应头
MaxAgeSeconds int `xml:"MaxAgeSeconds"` // 最大的缓存时间
}
// GetBucketCORSResult GetBucketCORS请求返回的结果
type GetBucketCORSResult CORSXML
// GetBucketInfoResult GetBucketInfo请求返回结果
type GetBucketInfoResult struct {
XMLName xml.Name `xml:"BucketInfo"`
BucketInfo BucketInfo `xml:"Bucket"`
}
// BucketInfo Bucket信息
type BucketInfo struct {
XMLName xml.Name `xml:"Bucket"`
Name string `xml:"Name"` // Bucket名称
Location string `xml:"Location"` // Bucket所在的数据中心
CreationDate time.Time `xml:"CreationDate"` // Bucket创建时间
ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket访问的外网域名
IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket访问的内网域名
ACL string `xml:"AccessControlList>Grant"` // Bucket权限
Owner Owner `xml:"Owner"` // Bucket拥有者信息
}
// ListObjectsResult is the result of a ListObjects request.
type ListObjectsResult struct {
	XMLName        xml.Name           `xml:"ListBucketResult"`
	Prefix         string             `xml:"Prefix"`                // prefix this query started from
	Marker         string             `xml:"Marker"`                // starting point of this query
	MaxKeys        int                `xml:"MaxKeys"`               // maximum number of results requested
	Delimiter      string             `xml:"Delimiter"`             // character used to group object names
	IsTruncated    bool               `xml:"IsTruncated"`           // whether all results were returned
	NextMarker     string             `xml:"NextMarker"`            // starting point of the next query
	Objects        []ObjectProperties `xml:"Contents"`              // returned objects
	CommonPrefixes []string           `xml:"CommonPrefixes>Prefix"` // keys grouped by shared prefix up to the delimiter
}

// ObjectProperties describes one stored object.
type ObjectProperties struct {
	XMLName      xml.Name  `xml:"Contents"`
	Key          string    `xml:"Key"`          // object key
	Type         string    `xml:"Type"`         // object type
	Size         int64     `xml:"Size"`         // object length in bytes
	ETag         string    `xml:"ETag"`         // identifies the object's content
	Owner        Owner     `xml:"Owner"`        // owner information container
	LastModified time.Time `xml:"LastModified"` // last modification time
	StorageClass string    `xml:"StorageClass"` // storage class (currently only Standard)
}

// Owner is the owner of a bucket/object.
type Owner struct {
	XMLName     xml.Name `xml:"Owner"`
	ID          string   `xml:"ID"`          // user ID
	DisplayName string   `xml:"DisplayName"` // owner display name
}
// CopyObjectResult CopyObject请求返回的结果
type CopyObjectResult struct {
XMLName xml.Name `xml:"CopyObjectResult"`
LastModified time.Time `xml:"LastModified"` // 新Object最后更新时间
ETag string `xml:"ETag"` // 新Object的ETag值
}
// GetObjectACLResult GetObjectACL请求返回的结果
type GetObjectACLResult GetBucketACLResult
type deleteXML struct {
XMLName xml.Name `xml:"Delete"`
Objects []DeleteObject `xml:"Object"` // 删除的所有Object
Quiet bool `xml:"Quiet"` // 安静响应模式
}
// DeleteObject 删除的Object
type DeleteObject struct {
XMLName xml.Name `xml:"Object"`
Key string `xml:"Key"` // Object名称
}
// DeleteObjectsResult DeleteObjects请求返回结果
type DeleteObjectsResult struct {
XMLName xml.Name `xml:"DeleteResult"`
DeletedObjects []string `xml:"Deleted>Key"` // 删除的Object列表
}
// InitiateMultipartUploadResult InitiateMultipartUpload请求返回结果
type InitiateMultipartUploadResult struct {
XMLName xml.Name `xml:"InitiateMultipartUploadResult"`
Bucket string `xml:"Bucket"` // Bucket名称
Key string `xml:"Key"` // 上传Object名称
UploadID string `xml:"UploadId"` // 生成的UploadId
}
// UploadPart 上传/拷贝的分片
type UploadPart struct {
XMLName xml.Name `xml:"Part"`
PartNumber int `xml:"PartNumber"` // Part编号
ETag string `xml:"ETag"` // ETag缓存码
}
type uploadParts []UploadPart
func (slice uploadParts) Len() int {
return len(slice)
}
func (slice uploadParts) Less(i, j int) bool {
return slice[i].PartNumber < slice[j].PartNumber
}
func (slice uploadParts) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
}
// UploadPartCopyResult 拷贝分片请求返回的结果
type UploadPartCopyResult struct {
XMLName xml.Name `xml:"CopyPartResult"`
LastModified time.Time `xml:"LastModified"` // 最后修改时间
ETag string `xml:"ETag"` // ETag
}
type completeMultipartUploadXML struct {
XMLName xml.Name `xml:"CompleteMultipartUpload"`
Part []UploadPart `xml:"Part"`
}
// CompleteMultipartUploadResult 提交分片上传任务返回结果
type CompleteMultipartUploadResult struct {
XMLName xml.Name `xml:"CompleteMultipartUploadResult"`
Location string `xml:"Location"` // Object的URL
Bucket string `xml:"Bucket"` // Bucket名称
ETag string `xml:"ETag"` // Object的ETag
Key string `xml:"Key"` // Object的名字
}
// ListUploadedPartsResult ListUploadedParts请求返回结果
type ListUploadedPartsResult struct {
XMLName xml.Name `xml:"ListPartsResult"`
Bucket string `xml:"Bucket"` // Bucket名称
Key string `xml:"Key"` // Object名称
UploadID string `xml:"UploadId"` // 上传Id
NextPartNumberMarker string `xml:"NextPartNumberMarker"` // 下一个Part的位置
MaxParts int `xml:"MaxParts"` // 最大Part个数
IsTruncated bool `xml:"IsTruncated"` // 是否完全上传完成
UploadedParts []UploadedPart `xml:"Part"` // 已完成的Part
}
// UploadedPart 该任务已经上传的分片
type UploadedPart struct {
XMLName xml.Name `xml:"Part"`
PartNumber int `xml:"PartNumber"` // Part编号
LastModified time.Time `xml:"LastModified"` // 最后一次修改时间
ETag string `xml:"ETag"` // ETag缓存码
Size int `xml:"Size"` // Part大小
}
// ListMultipartUploadResult ListMultipartUpload请求返回结果
type ListMultipartUploadResult struct {
XMLName xml.Name `xml:"ListMultipartUploadsResult"`
Bucket string `xml:"Bucket"` // Bucket名称
Delimiter string `xml:"Delimiter"` // 分组分割符
Prefix string `xml:"Prefix"` // 筛选前缀
KeyMarker string `xml:"KeyMarker"` // 起始Object位置
UploadIDMarker string `xml:"UploadIdMarker"` // 起始UploadId位置
NextKeyMarker string `xml:"NextKeyMarker"` // 如果没有全部返回标明接下去的KeyMarker位置
NextUploadIDMarker string `xml:"NextUploadIdMarker"` // 如果没有全部返回标明接下去的UploadId位置
MaxUploads int `xml:"MaxUploads"` // 返回最大Upload数目
IsTruncated bool `xml:"IsTruncated"` // 是否完全返回
Uploads []UncompletedUpload `xml:"Upload"` // 未完成上传的MultipartUpload
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // 所有名字包含指定的前缀且第一次出现delimiter字符之间的object作为一组的分组结果
}
// UncompletedUpload 未完成的Upload任务
type UncompletedUpload struct {
XMLName xml.Name `xml:"Upload"`
Key string `xml:"Key"` // Object名称
UploadID string `xml:"UploadId"` // 对应UploadId
Initiated time.Time `xml:"Initiated"` // 初始化时间格式2012-02-23T04:18:23.000Z
}
// decodeDeleteObjectsResult URL-decodes every deleted key in the result,
// stopping at the first decode error.
func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
	for i := range result.DeletedObjects {
		key, err := url.QueryUnescape(result.DeletedObjects[i])
		if err != nil {
			return err
		}
		result.DeletedObjects[i] = key
	}
	return nil
}
// decodeListObjectsResult URL-decodes every URL-encoded field of a
// ListObjects response in place: the query-echo fields, the object keys
// and the common prefixes. It stops at the first decode error.
// (Refactored from six copy-pasted unescape sequences into loops.)
func decodeListObjectsResult(result *ListObjectsResult) error {
	// Same decode order as before: Prefix, Marker, Delimiter, NextMarker.
	fields := []*string{&result.Prefix, &result.Marker, &result.Delimiter, &result.NextMarker}
	for _, f := range fields {
		v, err := url.QueryUnescape(*f)
		if err != nil {
			return err
		}
		*f = v
	}
	for i := range result.Objects {
		key, err := url.QueryUnescape(result.Objects[i].Key)
		if err != nil {
			return err
		}
		result.Objects[i].Key = key
	}
	for i := range result.CommonPrefixes {
		p, err := url.QueryUnescape(result.CommonPrefixes[i])
		if err != nil {
			return err
		}
		result.CommonPrefixes[i] = p
	}
	return nil
}
// decodeListMultipartUploadResult URL-decodes every URL-encoded field of a
// ListMultipartUploads response in place: the query-echo fields, the
// pending upload keys and the common prefixes. It stops at the first
// decode error. (Refactored from copy-pasted unescape sequences.)
func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
	// Same decode order as before: Prefix, Delimiter, KeyMarker, NextKeyMarker.
	fields := []*string{&result.Prefix, &result.Delimiter, &result.KeyMarker, &result.NextKeyMarker}
	for _, f := range fields {
		v, err := url.QueryUnescape(*f)
		if err != nil {
			return err
		}
		*f = v
	}
	for i := range result.Uploads {
		key, err := url.QueryUnescape(result.Uploads[i].Key)
		if err != nil {
			return err
		}
		result.Uploads[i].Key = key
	}
	for i := range result.CommonPrefixes {
		p, err := url.QueryUnescape(result.CommonPrefixes[i])
		if err != nil {
			return err
		}
		result.CommonPrefixes[i] = p
	}
	return nil
}

View File

@ -0,0 +1,485 @@
package oss
import (
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"io/ioutil"
"os"
"time"
)
//
// UploadFile uploads a file with multipart upload.
//
// objectKey   destination object key (name).
// filePath    local file to upload.
// partSize    size of each part in bytes, e.g. 100*1024 for 100KB parts.
// options     object properties applied at upload time; see InitiateMultipartUpload.
//
// error       nil on success, otherwise the error information.
//
func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
	// NOTE(review): the message claims the valid range is (1024KB, 5GB];
	// the actual bounds are MinPartSize/MaxPartSize — confirm they agree.
	if partSize < MinPartSize || partSize > MaxPartSize {
		return errors.New("oss: part size invalid range (1024KB, 5GB]")
	}
	// Checkpoint configuration (may enable resumable upload).
	cpConf, err := getCpConfig(options, filePath)
	if err != nil {
		return err
	}
	// Number of concurrent upload workers.
	routines := getRoutines(options)
	if cpConf.IsEnable {
		// Resumable (checkpoint-backed) concurrent upload.
		return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines)
	}
	// Plain concurrent upload without checkpoint support.
	return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
}
// ----- 并发无断点的上传 -----
// getCpConfig extracts the checkpoint configuration from options. When the
// checkpoint is enabled without an explicit file path, the path defaults to
// the uploaded file's path plus CheckpointFileSuffix.
func getCpConfig(options []Option, filePath string) (*cpConfig, error) {
	cfg := &cpConfig{}
	opt, err := findOption(options, checkpointConfig, nil)
	if opt == nil || err != nil {
		return cfg, err
	}
	cfg = opt.(*cpConfig)
	if cfg.IsEnable && cfg.FilePath == "" {
		cfg.FilePath = filePath + CheckpointFileSuffix
	}
	return cfg, nil
}
// getRoutines extracts the worker-count option, defaulting to 1 when unset
// and clamping the value into [1, 100].
func getRoutines(options []Option) int {
	opt, err := findOption(options, routineNum, nil)
	if err != nil || opt == nil {
		return 1
	}
	n := opt.(int)
	switch {
	case n < 1:
		return 1
	case n > 100:
		return 100
	default:
		return n
	}
}
// getProgressListener extracts the progress callback, or nil when unset.
func getProgressListener(options []Option) ProgressListener {
	set, listener, _ := isOptionSet(options, progressListener)
	if set {
		return listener.(ProgressListener)
	}
	return nil
}
// uploadPartHook is a test seam invoked before each part upload.
type uploadPartHook func(id int, chunk FileChunk) error

var uploadPartHooker uploadPartHook = defaultUploadPart

// defaultUploadPart is the production hook: a no-op.
func defaultUploadPart(id int, chunk FileChunk) error {
	return nil
}

// workerArg bundles the per-upload context shared by all worker goroutines.
type workerArg struct {
	bucket   *Bucket
	filePath string
	imur     InitiateMultipartUploadResult
	hook     uploadPartHook
}
// worker is the body of an upload goroutine: it consumes chunks from jobs,
// uploads each as one part, and sends finished parts on results. On any
// error it reports on failed and stops; a close of die aborts it after the
// in-flight upload finishes.
func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
	for chunk := range jobs {
		// Test hook (no-op in production).
		if err := arg.hook(id, chunk); err != nil {
			failed <- err
			break
		}
		part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number)
		if err != nil {
			failed <- err
			break
		}
		// Drop the result if the job was aborted while uploading.
		select {
		case <-die:
			return
		default:
		}
		results <- part
	}
}
// scheduler feeds every chunk into the jobs channel, then closes it so the
// workers' range loops terminate.
func scheduler(jobs chan FileChunk, chunks []FileChunk) {
	for i := range chunks {
		jobs <- chunks[i]
	}
	close(jobs)
}
// getTotalBytes sums the sizes of all chunks.
func getTotalBytes(chunks []FileChunk) int64 {
	var total int64
	for i := range chunks {
		total += chunks[i].Size
	}
	return total
}
// uploadFile performs a concurrent multipart upload without checkpoint
// (resume) support: it splits the file, uploads parts with `routines`
// worker goroutines, publishes progress events, and completes the upload.
// On any part failure the whole multipart task is aborted.
func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
	listener := getProgressListener(options)

	chunks, err := SplitFileByPartSize(filePath, partSize)
	if err != nil {
		return err
	}

	// Initiate the multipart upload task.
	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
	if err != nil {
		return err
	}

	jobs := make(chan FileChunk, len(chunks))
	results := make(chan UploadPart, len(chunks))
	failed := make(chan error)
	die := make(chan bool)

	var completedBytes int64
	totalBytes := getTotalBytes(chunks)
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
	publishProgress(listener, event)

	// Start the upload workers.
	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
	for w := 1; w <= routines; w++ {
		go worker(w, arg, jobs, results, failed, die)
	}

	// Feed the chunks to the workers.
	go scheduler(jobs, chunks)

	// Wait until every part has uploaded, or abort on the first failure.
	completed := 0
	parts := make([]UploadPart, len(chunks))
	for completed < len(chunks) {
		select {
		case part := <-results:
			completed++
			parts[part.PartNumber-1] = part
			completedBytes += chunks[part.PartNumber-1].Size
			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
			publishProgress(listener, event)
			bucket.AbortMultipartUpload(imur)
			return err
		}
	}

	// FIX: this previously published TransferStartedEvent a second time;
	// the transfer is finished here, so publish TransferCompletedEvent
	// (consistent with uploadFileWithCp).
	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
	publishProgress(listener, event)

	// Commit the parts; abort the multipart task on failure.
	if _, err := bucket.CompleteMultipartUpload(imur, parts); err != nil {
		bucket.AbortMultipartUpload(imur)
		return err
	}
	return nil
}
// ----- concurrent upload with checkpoint (resume) support -----

// uploadCpMagic identifies a checkpoint file written by this SDK.
const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"

// uploadCheckpoint is the persisted state of a resumable upload.
type uploadCheckpoint struct {
	Magic     string   // magic marker (uploadCpMagic)
	MD5       string   // MD5 of the checkpoint content itself
	FilePath  string   // local file being uploaded
	FileStat  cpStat   // snapshot of the local file's state
	ObjectKey string   // destination object key
	UploadID  string   // multipart upload ID
	Parts     []cpPart // all parts of the local file
}

// cpStat snapshots the local file so later changes can be detected.
type cpStat struct {
	Size         int64     // file size in bytes
	LastModified time.Time // local file's last modification time
	MD5          string    // local file's MD5
}

// cpPart pairs a file chunk with its upload state.
type cpPart struct {
	Chunk       FileChunk  // the chunk to upload
	Part        UploadPart // metadata of the uploaded part
	IsCompleted bool       // whether this part finished uploading
}
// isValid reports whether the checkpoint can be resumed: the checkpoint's
// magic and self-MD5 must verify, and the local file must be unchanged
// (same size, mtime and MD5) since the checkpoint was written.
func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
	// Verify the checkpoint's own integrity: recompute MD5 over the
	// checkpoint with the MD5 field blanked, then compare.
	cpb := cp
	cpb.MD5 = ""
	js, _ := json.Marshal(cpb)
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])
	if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
		return false, nil
	}
	// Confirm the local file was not modified since the checkpoint.
	fd, err := os.Open(filePath)
	if err != nil {
		return false, err
	}
	defer fd.Close()
	st, err := fd.Stat()
	if err != nil {
		return false, err
	}
	md, err := calcFileMD5(filePath)
	if err != nil {
		return false, err
	}
	// Compare size / last-modified / MD5 (MD5 is currently always ""
	// because calcFileMD5 is a stub).
	if cp.FileStat.Size != st.Size() ||
		cp.FileStat.LastModified != st.ModTime() ||
		cp.FileStat.MD5 != md {
		return false, nil
	}
	return true, nil
}
// load restores the checkpoint from the given file.
func (cp *uploadCheckpoint) load(filePath string) error {
	data, err := ioutil.ReadFile(filePath)
	if err != nil {
		return err
	}
	return json.Unmarshal(data, cp)
}
// dump persists the checkpoint to filePath, embedding an MD5 of its own
// content so load/isValid can later detect corruption.
func (cp *uploadCheckpoint) dump(filePath string) error {
	bcp := *cp
	// Compute MD5 over the checkpoint with the MD5 field blanked.
	bcp.MD5 = ""
	js, err := json.Marshal(bcp)
	if err != nil {
		return err
	}
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])
	bcp.MD5 = b64
	// Serialize again, this time including the MD5.
	js, err = json.Marshal(bcp)
	if err != nil {
		return err
	}
	// Write to disk.
	return ioutil.WriteFile(filePath, js, FilePermMode)
}
// updatePart records a finished part in the checkpoint (Parts is indexed
// by PartNumber-1).
func (cp *uploadCheckpoint) updatePart(part UploadPart) {
	cp.Parts[part.PartNumber-1].Part = part
	cp.Parts[part.PartNumber-1].IsCompleted = true
}
// todoParts returns the chunks that still need to be uploaded.
func (cp *uploadCheckpoint) todoParts() []FileChunk {
	pending := []FileChunk{}
	for i := range cp.Parts {
		if !cp.Parts[i].IsCompleted {
			pending = append(pending, cp.Parts[i].Chunk)
		}
	}
	return pending
}
// allParts returns the UploadPart record of every chunk, in order.
func (cp *uploadCheckpoint) allParts() []UploadPart {
	ps := make([]UploadPart, 0, len(cp.Parts))
	for i := range cp.Parts {
		ps = append(ps, cp.Parts[i].Part)
	}
	return ps
}
// getCompletedBytes sums the sizes of the already-uploaded chunks.
func (cp *uploadCheckpoint) getCompletedBytes() int64 {
	var done int64
	for i := range cp.Parts {
		if cp.Parts[i].IsCompleted {
			done += cp.Parts[i].Chunk.Size
		}
	}
	return done
}
// calcFileMD5 computes the MD5 of a local file.
// NOTE(review): this is a stub — it always returns "", nil, so the MD5
// fields in the checkpoint are effectively unused; confirm against the
// upstream SDK before relying on them.
func calcFileMD5(filePath string) (string, error) {
	return "", nil
}
// prepare initializes a fresh checkpoint for an upload: it snapshots the
// local file's stats, splits it into chunks of partSize, and initiates a
// multipart upload task with OSS, storing the returned upload ID.
func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
	// Checkpoint identity.
	cp.Magic = uploadCpMagic
	cp.FilePath = filePath
	cp.ObjectKey = objectKey
	// Snapshot the local file's state for later validation.
	fd, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer fd.Close()
	st, err := fd.Stat()
	if err != nil {
		return err
	}
	cp.FileStat.Size = st.Size()
	cp.FileStat.LastModified = st.ModTime()
	md, err := calcFileMD5(filePath)
	if err != nil {
		return err
	}
	cp.FileStat.MD5 = md
	// Pre-compute the chunk list; every part starts as not completed.
	parts, err := SplitFileByPartSize(filePath, partSize)
	if err != nil {
		return err
	}
	cp.Parts = make([]cpPart, len(parts))
	for i, part := range parts {
		cp.Parts[i].Chunk = part
		cp.Parts[i].IsCompleted = false
	}
	// Initiate the multipart upload task and remember its ID.
	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
	if err != nil {
		return err
	}
	cp.UploadID = imur.UploadID
	return nil
}
// complete commits the multipart upload recorded in the checkpoint and
// then removes the checkpoint file (best effort).
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string) error {
	imur := InitiateMultipartUploadResult{
		Bucket:   bucket.BucketName,
		Key:      cp.ObjectKey,
		UploadID: cp.UploadID,
	}
	if _, err := bucket.CompleteMultipartUpload(imur, parts); err != nil {
		return err
	}
	os.Remove(cpFilePath)
	return nil
}
// uploadFileWithCp performs a concurrent multipart upload with checkpoint
// (resume) support: it reloads a previous checkpoint when valid, uploads
// only the remaining parts, persists the checkpoint after each finished
// part, then completes the upload and deletes the checkpoint file.
func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
	listener := getProgressListener(options)
	// Load the checkpoint; an unreadable file is discarded.
	ucp := uploadCheckpoint{}
	err := ucp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}
	// On load failure or stale/invalid data, start a fresh upload task.
	valid, err := ucp.isValid(filePath)
	if err != nil || !valid {
		if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}
	// Only the not-yet-completed chunks are uploaded.
	chunks := ucp.todoParts()
	imur := InitiateMultipartUploadResult{
		Bucket:   bucket.BucketName,
		Key:      objectKey,
		UploadID: ucp.UploadID}
	jobs := make(chan FileChunk, len(chunks))
	results := make(chan UploadPart, len(chunks))
	failed := make(chan error)
	die := make(chan bool)
	completedBytes := ucp.getCompletedBytes()
	event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size)
	publishProgress(listener, event)
	// Start the upload workers.
	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
	for w := 1; w <= routines; w++ {
		go worker(w, arg, jobs, results, failed, die)
	}
	// Feed the remaining chunks to the workers.
	go scheduler(jobs, chunks)
	// Wait for the remaining parts; persist the checkpoint after each one
	// so a crash can resume from here.
	completed := 0
	for completed < len(chunks) {
		select {
		case part := <-results:
			completed++
			ucp.updatePart(part)
			ucp.dump(cpFilePath)
			completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
			event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size)
			publishProgress(listener, event)
			return err
		}
		if completed >= len(chunks) {
			break
		}
	}
	event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size)
	publishProgress(listener, event)
	// Commit the upload and remove the checkpoint file.
	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath)
	return err
}

165
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go generated vendored Normal file
View File

@ -0,0 +1,165 @@
package oss
import (
"bytes"
"errors"
"fmt"
"hash/crc64"
"net/http"
"os"
"os/exec"
"runtime"
"time"
)
// userAgent is the default User-Agent for SDK requests; it embeds the SDK
// version, OS name/release, machine type and Go runtime version.
var userAgent = func() string {
	sys := getSysInfo()
	return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
		sys.release, sys.machine, runtime.Version())
}()

// sysInfo holds host identification used in the User-Agent string.
type sysInfo struct {
	name    string // OS name, e.g. windows/Linux
	release string // OS release, e.g. 2.6.32-220.23.2.ali1089.el5.x86_64
	machine string // machine type, e.g. amd64/x86_64
}
// getSysInfo reports the OS name, kernel release and machine type. It
// starts from runtime constants and refines each field with `uname` output
// when that command is available (Unix-like systems).
func getSysInfo() sysInfo {
	info := sysInfo{
		name:    runtime.GOOS,
		release: "-",
		machine: runtime.GOARCH,
	}
	if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil {
		info.name = string(bytes.TrimSpace(out))
	}
	if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil {
		info.release = string(bytes.TrimSpace(out))
	}
	if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil {
		info.machine = string(bytes.TrimSpace(out))
	}
	return info
}
// GetNowSec returns the current Unix time: the number of seconds elapsed
// since January 1, 1970 UTC.
func GetNowSec() int64 {
	return time.Now().Unix()
}
// GetNowNanoSec returns the current Unix time in nanoseconds since
// January 1, 1970 UTC. The result is undefined if the Unix time in
// nanoseconds cannot be represented by an int64 (note that calling
// UnixNano on the zero Time is likewise undefined).
func GetNowNanoSec() int64 {
	return time.Now().UnixNano()
}
// GetNowGMT returns the current time formatted for HTTP headers, e.g.
// "Mon, 02 Jan 2006 15:04:05 GMT".
func GetNowGMT() string {
	return time.Now().UTC().Format(http.TimeFormat)
}
// FileChunk is one piece of a file split for multipart upload.
type FileChunk struct {
	Number int   // chunk number (1-based part number)
	Offset int64 // offset of the chunk within the file
	Size   int64 // chunk size in bytes
}
// SplitFileByPartNum splits a file into chunkNum equally sized chunks;
// the last chunk absorbs the remainder bytes. chunkNum must be in
// [1, 10000] and must not exceed the file size in bytes.
func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
	if chunkNum <= 0 || chunkNum > 10000 {
		// FIX: use the same "oss:" prefix as the other error below for
		// consistent error reporting within this function.
		return nil, errors.New("oss: chunkNum invalid")
	}

	file, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	stat, err := file.Stat()
	if err != nil {
		return nil, err
	}
	if int64(chunkNum) > stat.Size() {
		return nil, errors.New("oss: chunkNum invalid")
	}

	var chunks []FileChunk
	var chunk = FileChunk{}
	chunkN := int64(chunkNum)
	for i := int64(0); i < chunkN; i++ {
		chunk.Number = int(i + 1)
		chunk.Offset = i * (stat.Size() / chunkN)
		if i == chunkN-1 {
			// The last chunk takes the remainder bytes as well.
			chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN
		} else {
			chunk.Size = stat.Size() / chunkN
		}
		chunks = append(chunks, chunk)
	}
	return chunks, nil
}
// SplitFileByPartSize splits a file into chunks of chunkSize bytes; a
// trailing smaller chunk carries any remainder. The number of full chunks
// must stay below 10000.
func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
	if chunkSize <= 0 {
		return nil, errors.New("chunkSize invalid")
	}
	file, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	stat, err := file.Stat()
	if err != nil {
		return nil, err
	}
	var chunkN = stat.Size() / chunkSize
	if chunkN >= 10000 {
		return nil, errors.New("Too many parts, please increase part size.")
	}
	// Full-size chunks.
	var chunks []FileChunk
	var chunk = FileChunk{}
	for i := int64(0); i < chunkN; i++ {
		chunk.Number = int(i + 1)
		chunk.Offset = i * chunkSize
		chunk.Size = chunkSize
		chunks = append(chunks, chunk)
	}
	// Trailing partial chunk when the size is not an exact multiple.
	if stat.Size()%chunkSize > 0 {
		chunk.Number = len(chunks) + 1
		chunk.Offset = int64(len(chunks)) * chunkSize
		chunk.Size = stat.Size() % chunkSize
		chunks = append(chunks, chunk)
	}
	return chunks, nil
}
// GetPartEnd returns the inclusive end offset of a part that starts at
// begin with nominal size per, clamped to the total length.
func GetPartEnd(begin int64, total int64, per int64) int64 {
	end := begin + per - 1
	if end >= total {
		return total - 1
	}
	return end
}
// crcTable returns the crc64.Table constructed from the ECMA polynomial.
// Note this is a func value: callers invoke crcTable() to obtain the table.
var crcTable = func() *crc64.Table {
	return crc64.MakeTable(crc64.ECMA)
}

View File

@ -7,8 +7,8 @@ import (
"log"
"net/http"
"net/url"
"time"
"strings"
"time"
"github.com/denverdino/aliyungo/util"
)
@ -21,6 +21,9 @@ type Client struct {
httpClient *http.Client
endpoint string
version string
serviceCode string
regionID Region
businessInfo string
}
// NewClient creates a new instance of ECS client
@ -33,6 +36,26 @@ func (client *Client) Init(endpoint, version, accessKeyId, accessKeySecret strin
client.version = version
}
// NewInit initializes the client like Init and additionally resolves the
// service endpoint for the given region/service code via the location
// service (with a fallback to the bundled endpoints file).
func (client *Client) NewInit(endpoint, version, accessKeyId, accessKeySecret, serviceCode string, regionID Region) {
	client.Init(endpoint, version, accessKeyId, accessKeySecret)
	client.serviceCode = serviceCode
	client.regionID = regionID
	client.setEndpointByLocation(regionID, serviceCode, accessKeyId, accessKeySecret)
}
// setEndpointByLocation overrides the client endpoint with the one
// advertised by the location service for (region, serviceCode). If the
// lookup returns nothing it falls back to the local endpoints file, and
// keeps the current endpoint when both sources are empty.
func (client *Client) setEndpointByLocation(region Region, serviceCode, accessKeyId, accessKeySecret string) {
	locationClient := NewLocationClient(accessKeyId, accessKeySecret)
	ep := locationClient.DescribeOpenAPIEndpoint(region, serviceCode)
	if ep == "" {
		ep = loadEndpointFromFile(region, serviceCode)
	}

	if ep != "" {
		client.endpoint = ep
	}
}
// SetEndpoint sets custom endpoint
func (client *Client) SetEndpoint(endpoint string) {
client.endpoint = endpoint
@ -43,6 +66,15 @@ func (client *Client) SetVersion(version string) {
client.version = version
}
func (client *Client) SetRegionID(regionID Region) {
client.regionID = regionID
}
//SetServiceCode sets serviceCode
func (client *Client) SetServiceCode(serviceCode string) {
client.serviceCode = serviceCode
}
// SetAccessKeyId sets new AccessKeyId
func (client *Client) SetAccessKeyId(id string) {
client.AccessKeyId = id
@ -58,6 +90,15 @@ func (client *Client) SetDebug(debug bool) {
client.debug = debug
}
// SetBusinessInfo sets business info to log the request/response message.
// The value is appended to the X-SDK-Client header; a "/" separator is
// prepended when missing. An empty string leaves the stored value intact.
func (client *Client) SetBusinessInfo(businessInfo string) {
	if strings.HasPrefix(businessInfo, "/") {
		client.businessInfo = businessInfo
	} else if businessInfo != "" {
		client.businessInfo = "/" + businessInfo
	}
}
// Invoke sends the raw HTTP request for ECS services
func (client *Client) Invoke(action string, args interface{}, response interface{}) error {
@ -80,7 +121,7 @@ func (client *Client) Invoke(action string, args interface{}, response interface
}
// TODO move to util and add build val flag
httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version)
httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo)
t0 := time.Now()
httpResp, err := client.httpClient.Do(httpReq)
@ -152,7 +193,7 @@ func (client *Client) InvokeByAnyMethod(method, action, path string, args interf
httpReq, err = http.NewRequest(method, requestURL, nil)
} else {
//fmt.Println(client.endpoint + path)
httpReq, err = http.NewRequest(method, client.endpoint + path, strings.NewReader(data.Encode()))
httpReq, err = http.NewRequest(method, client.endpoint+path, strings.NewReader(data.Encode()))
httpReq.Header.Set("Content-Type", "application/x-www-form-urlencoded")
}
@ -161,7 +202,7 @@ func (client *Client) InvokeByAnyMethod(method, action, path string, args interf
}
// TODO move to util and add build val flag
httpReq.Header.Set("X-SDK-Client", `AliyunGO/` + Version)
httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo)
t0 := time.Now()
httpResp, err := client.httpClient.Do(httpReq)

View File

@ -0,0 +1,118 @@
package common
import (
"encoding/xml"
"fmt"
"io/ioutil"
"os"
"strings"
)
const (
// LocationDefaultEndpoint is the default API endpoint of Location services
locationDefaultEndpoint = "https://location.aliyuncs.com"
locationAPIVersion = "2015-06-12"
HTTP_PROTOCOL = "http"
HTTPS_PROTOCOL = "https"
)
var (
endpoints = make(map[Region]map[string]string)
)
//init endpoints from file
func init() {
}
// NewLocationClient creates a client for the Aliyun location service. The
// endpoint may be overridden via the LOCATION_ENDPOINT environment
// variable.
func NewLocationClient(accessKeyId, accessKeySecret string) *Client {
	endpoint := os.Getenv("LOCATION_ENDPOINT")
	if endpoint == "" {
		endpoint = locationDefaultEndpoint
	}

	c := &Client{}
	c.Init(endpoint, locationAPIVersion, accessKeyId, accessKeySecret)
	return c
}
// DescribeEndpoint queries the location service for a product endpoint.
func (client *Client) DescribeEndpoint(args *DescribeEndpointArgs) (*DescribeEndpointResponse, error) {
	response := &DescribeEndpointResponse{}
	if err := client.Invoke("DescribeEndpoint", args, response); err != nil {
		return nil, err
	}
	return response, nil
}
// getProductRegionEndpoint returns the cached endpoint for the given
// region and service code, or "" when nothing is cached.
func getProductRegionEndpoint(region Region, serviceCode string) string {
	services, ok := endpoints[region]
	if !ok {
		return ""
	}
	return services[serviceCode] // missing key yields ""
}
// setProductRegionEndpoint caches the endpoint for (region, serviceCode).
// FIX: this previously replaced the whole per-region map, dropping the
// cached endpoints of every other service in the same region; it now only
// upserts the single entry.
// NOTE(review): the endpoints map is not lock-protected; concurrent
// clients may race — confirm whether a mutex is needed.
func setProductRegionEndpoint(region Region, serviceCode string, endpoint string) {
	if _, ok := endpoints[region]; !ok {
		endpoints[region] = make(map[string]string)
	}
	endpoints[region][serviceCode] = endpoint
}
// DescribeOpenAPIEndpoint resolves the "openAPI"-type endpoint for a
// product in a region, consulting the in-process cache first. The https
// scheme is used when the service advertises it, otherwise http. Returns
// "" when the lookup fails.
func (client *Client) DescribeOpenAPIEndpoint(region Region, serviceCode string) string {
	// Cache hit?
	if endpoint := getProductRegionEndpoint(region, serviceCode); endpoint != "" {
		return endpoint
	}

	defaultProtocols := HTTP_PROTOCOL

	args := &DescribeEndpointArgs{
		Id:          region,
		ServiceCode: serviceCode,
		Type:        "openAPI",
	}

	endpoint, err := client.DescribeEndpoint(args)
	if err != nil || endpoint.Endpoint == "" {
		return ""
	}

	// Prefer https when the service supports it.
	for _, protocol := range endpoint.Protocols.Protocols {
		if strings.ToLower(protocol) == HTTPS_PROTOCOL {
			defaultProtocols = HTTPS_PROTOCOL
			break
		}
	}

	ep := fmt.Sprintf("%s://%s", defaultProtocols, endpoint.Endpoint)

	// Cache for subsequent lookups.
	setProductRegionEndpoint(region, serviceCode, ep)
	return ep
}
// loadEndpointFromFile looks up a product's endpoint for a region in a
// local ./endpoints.xml file (relative to the working directory). It
// returns "" when the file is missing, malformed, or has no matching
// entry; the https scheme is always used for the returned URL.
func loadEndpointFromFile(region Region, serviceCode string) string {
	data, err := ioutil.ReadFile("./endpoints.xml")
	if err != nil {
		return ""
	}
	var endpoints Endpoints
	err = xml.Unmarshal(data, &endpoints)
	if err != nil {
		return ""
	}
	for _, endpoint := range endpoints.Endpoint {
		if endpoint.RegionIds.RegionId == string(region) {
			for _, product := range endpoint.Products.Product {
				if strings.ToLower(product.ProductName) == serviceCode {
					return fmt.Sprintf("%s://%s", HTTPS_PROTOCOL, product.DomainName)
				}
			}
		}
	}
	return ""
}

File diff suppressed because it is too large Load Diff

View File

@ -13,3 +13,77 @@ const (
PrePaid = InstanceChargeType("PrePaid")
PostPaid = InstanceChargeType("PostPaid")
)
// DescribeEndpointArgs are the arguments of the location service's
// DescribeEndpoint API.
type DescribeEndpointArgs struct {
	Id          Region
	ServiceCode string
	Type        string
}

// EndpointItem is one endpoint entry returned by the location service.
type EndpointItem struct {
	Protocols struct {
		Protocols []string
	}
	Type      string
	Namespace string
	Id        Region
	// NOTE(review): "SerivceCode" is misspelled; the field name must match
	// the API response for unmarshalling — do not rename without checking
	// the wire format.
	SerivceCode string
	Endpoint    string
}

// DescribeEndpointResponse is the DescribeEndpoint API response.
type DescribeEndpointResponse struct {
	Response
	EndpointItem
}
type NetType string
const (
Internet = NetType("Internet")
Intranet = NetType("Intranet")
)
type TimeType string
const (
Hour = TimeType("Hour")
Day = TimeType("Day")
Month = TimeType("Month")
Year = TimeType("Year")
)
type NetworkType string
const (
Classic = NetworkType("Classic")
VPC = NetworkType("VPC")
)
type BusinessInfo struct {
Pack string `json:"pack,omitempty"`
ActivityId string `json:"activityId,omitempty"`
}
//xml
type Endpoints struct {
Endpoint []Endpoint `xml:"Endpoint"`
}
type Endpoint struct {
Name string `xml:"name,attr"`
RegionIds RegionIds `xml:"RegionIds"`
Products Products `xml:"Products"`
}
type RegionIds struct {
RegionId string `xml:"RegionId"`
}
type Products struct {
Product []Product `xml:"Product"`
}
type Product struct {
ProductName string `xml:"ProductName"`
DomainName string `xml:"DomainName"`
}

View File

@ -1,8 +1,9 @@
package ecs
import (
"github.com/denverdino/aliyungo/common"
"os"
"github.com/denverdino/aliyungo/common"
)
// Interval for checking status in WaitForXXX method
@ -19,6 +20,8 @@ const (
// ECSDefaultEndpoint is the default API endpoint of ECS services
ECSDefaultEndpoint = "https://ecs-cn-hangzhou.aliyuncs.com"
ECSAPIVersion = "2014-05-26"
ECSServiceCode = "ecs"
)
// NewClient creates a new instance of ECS client
@ -30,6 +33,21 @@ func NewClient(accessKeyId, accessKeySecret string) *Client {
return NewClientWithEndpoint(endpoint, accessKeyId, accessKeySecret)
}
// NewECSClient creates an ECS client bound to a region. The endpoint is
// taken from the ECS_ENDPOINT environment variable when set, otherwise
// ECSDefaultEndpoint is used.
func NewECSClient(accessKeyId, accessKeySecret string, regionID common.Region) *Client {
	endpoint := os.Getenv("ECS_ENDPOINT")
	if endpoint == "" {
		endpoint = ECSDefaultEndpoint
	}
	return NewClientWithRegion(endpoint, accessKeyId, accessKeySecret, regionID)
}

// NewClientWithRegion creates an ECS client with an explicit endpoint and
// region, registering the ECS service code for endpoint resolution.
func NewClientWithRegion(endpoint string, accessKeyId, accessKeySecret string, regionID common.Region) *Client {
	client := &Client{}
	client.NewInit(endpoint, ECSAPIVersion, accessKeyId, accessKeySecret, ECSServiceCode, regionID)
	return client
}
func NewClientWithEndpoint(endpoint string, accessKeyId, accessKeySecret string) *Client {
client := &Client{}
client.Init(endpoint, ECSAPIVersion, accessKeyId, accessKeySecret)

View File

@ -65,6 +65,10 @@ type DiskDeviceMapping struct {
//Why Size Field is string-type.
Size string
Device string
//For import images
Format string
OSSBucket string
OSSObject string
}
//
@ -228,6 +232,38 @@ func (client *Client) CopyImage(args *CopyImageArgs) (string, error) {
return response.ImageId, nil
}
// ImportImageArgs represents the arguments to import an image from OSS.
// The OSS source is described per disk via DiskDeviceMappings (Format,
// OSSBucket, OSSObject fields of each DiskDeviceMapping).
type ImportImageArgs struct {
	RegionId           common.Region
	ImageName          string
	ImageVersion       string
	Description        string
	ClientToken        string
	Architecture       string
	OSType             string
	Platform           string
	DiskDeviceMappings struct {
		DiskDeviceMapping []DiskDeviceMapping
	}
}
// ImportImage imports an image into ECS from the OSS objects described by
// args and returns the new image's ID.
//
// Bug fix: the response was previously decoded into a CopyImageResponse
// and passed to Invoke as a pointer-to-pointer. It is now decoded into the
// matching ImportImageResponse (which also carries ImportTaskId) and
// passed as a plain pointer.
func (client *Client) ImportImage(args *ImportImageArgs) (string, error) {
	response := &ImportImageResponse{}
	if err := client.Invoke("ImportImage", args, response); err != nil {
		return "", err
	}
	return response.ImageId, nil
}
// ImportImageResponse is the ImportImage API response: the ID of the image
// being created plus the ID of the asynchronous import task.
type ImportImageResponse struct {
	common.Response
	RegionId     common.Region
	ImageId      string
	ImportTaskId string
}
// Default timeout value for WaitForImageReady method
const ImageDefaultTimeout = 120

View File

@ -339,6 +339,7 @@ func (client *Client) WaitForInstanceAsyn(instanceId string, status InstanceStat
if e.ErrorResponse.Code != "InvalidInstanceId.NotFound" {
return err
}
time.Sleep(DefaultWaitForInterval * time.Second)
continue
}
if instance.Status == status {
@ -564,8 +565,8 @@ type InstanceIdSets struct {
}
type BusinessInfo struct {
Pack string `json:"pack,omitempty" yaml:"pack,omitempty"`
ActivityId string `json:"activityId,omitempty" yaml:"activityId,omitempty"`
Pack string `json:"pack,omitempty"`
ActivityId string `json:"activityId,omitempty"`
}
func (client *Client) RunInstances(args *RunInstanceArgs) (instanceIdSet []string, err error) {

View File

@ -0,0 +1,193 @@
package ecs
import (
"github.com/denverdino/aliyungo/common"
)
// BandwidthPackageType describes one bandwidth package to attach to a NAT
// gateway: number of public IPs, bandwidth, and zone.
type BandwidthPackageType struct {
	IpCount   int
	Bandwidth int
	Zone      string
}

// CreateNatGatewayArgs are the request parameters for CreateNatGateway.
type CreateNatGatewayArgs struct {
	RegionId         common.Region
	VpcId            string
	Spec             string
	BandwidthPackage []BandwidthPackageType
	Name             string
	Description      string
	ClientToken      string
}

// ForwardTableIdType wraps the list of forward-table IDs.
type ForwardTableIdType struct {
	ForwardTableId []string
}

// BandwidthPackageIdType wraps the list of bandwidth-package IDs.
type BandwidthPackageIdType struct {
	BandwidthPackageId []string
}

// CreateNatGatewayResponse carries the IDs of the created NAT gateway and
// its associated forward tables and bandwidth packages.
type CreateNatGatewayResponse struct {
	common.Response
	NatGatewayId        string
	ForwardTableIds     ForwardTableIdType
	BandwidthPackageIds BandwidthPackageIdType
}

// CreateNatGateway creates a NAT gateway in the VPC given by args.VpcId.
//
// NOTE(review): the doc link below was copied from the CreateVpc API and
// likely points at the wrong page — confirm.
// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vpc&createvpc
func (client *Client) CreateNatGateway(args *CreateNatGatewayArgs) (resp *CreateNatGatewayResponse, err error) {
	response := CreateNatGatewayResponse{}
	err = client.Invoke("CreateNatGateway", args, &response)
	if err != nil {
		return nil, err
	}
	// err is necessarily nil here.
	return &response, err
}
// NatGatewaySetType is one NAT gateway record returned by
// DescribeNatGateways.
type NatGatewaySetType struct {
	BusinessStatus      string
	Description         string
	BandwidthPackageIds BandwidthPackageIdType
	ForwardTableIds     ForwardTableIdType
	InstanceChargeType  string
	Name                string
	NatGatewayId        string
	RegionId            common.Region
	Spec                string
	Status              string
	VpcId               string
}

// DescribeNatGatewayResponse is the paginated DescribeNatGateways response.
type DescribeNatGatewayResponse struct {
	common.Response
	common.PaginationResult
	NatGateways struct {
		NatGateway []NatGatewaySetType
	}
}

// DescribeNatGatewaysArgs are the filters and paging parameters for
// DescribeNatGateways.
type DescribeNatGatewaysArgs struct {
	RegionId     common.Region
	NatGatewayId string
	VpcId        string
	common.Pagination
}

// DescribeNatGateways lists NAT gateways matching args, returning one page
// of results plus pagination state.
func (client *Client) DescribeNatGateways(args *DescribeNatGatewaysArgs) (natGateways []NatGatewaySetType,
	pagination *common.PaginationResult, err error) {
	// Validate() presumably normalizes the embedded paging defaults — see
	// common.Pagination.
	args.Validate()
	response := DescribeNatGatewayResponse{}
	err = client.Invoke("DescribeNatGateways", args, &response)
	if err == nil {
		return response.NatGateways.NatGateway, &response.PaginationResult, nil
	}
	return nil, nil, err
}
// ModifyNatGatewayAttributeArgs are the parameters for renaming or
// re-describing a NAT gateway.
type ModifyNatGatewayAttributeArgs struct {
	RegionId     common.Region
	NatGatewayId string
	Name         string
	Description  string
}

// ModifyNatGatewayAttributeResponse is an empty acknowledgement response.
type ModifyNatGatewayAttributeResponse struct {
	common.Response
}

// ModifyNatGatewayAttribute updates a NAT gateway's name and description.
func (client *Client) ModifyNatGatewayAttribute(args *ModifyNatGatewayAttributeArgs) error {
	response := ModifyNatGatewayAttributeResponse{}
	return client.Invoke("ModifyNatGatewayAttribute", args, &response)
}

// ModifyNatGatewaySpecArgs are the parameters for resizing a NAT gateway.
type ModifyNatGatewaySpecArgs struct {
	RegionId     common.Region
	NatGatewayId string
	Spec         NatGatewaySpec
}

// ModifyNatGatewaySpec changes a NAT gateway's spec (Small/Middle/Large).
// The empty acknowledgement type is shared with ModifyNatGatewayAttribute.
func (client *Client) ModifyNatGatewaySpec(args *ModifyNatGatewaySpecArgs) error {
	response := ModifyNatGatewayAttributeResponse{}
	return client.Invoke("ModifyNatGatewaySpec", args, &response)
}

// DeleteNatGatewayArgs identify the NAT gateway to delete.
type DeleteNatGatewayArgs struct {
	RegionId     common.Region
	NatGatewayId string
}

// DeleteNatGatewayResponse is an empty acknowledgement response.
type DeleteNatGatewayResponse struct {
	common.Response
}

// DeleteNatGateway deletes the NAT gateway named by args.
func (client *Client) DeleteNatGateway(args *DeleteNatGatewayArgs) error {
	response := DeleteNatGatewayResponse{}
	err := client.Invoke("DeleteNatGateway", args, &response)
	return err
}
// DescribeBandwidthPackagesArgs filter bandwidth packages by their own ID
// and/or the NAT gateway they belong to.
type DescribeBandwidthPackagesArgs struct {
	RegionId           common.Region
	BandwidthPackageId string
	NatGatewayId       string
}

// DescribeBandwidthPackageType is one bandwidth package record.
// NOTE(review): Bandwidth and IpCount are declared as strings — presumably
// mirroring the wire format; confirm before changing.
type DescribeBandwidthPackageType struct {
	Bandwidth          string
	BandwidthPackageId string
	IpCount            string
}

// DescribeBandwidthPackagesResponse wraps the list of matching packages.
type DescribeBandwidthPackagesResponse struct {
	common.Response
	BandwidthPackages struct {
		BandwidthPackage []DescribeBandwidthPackageType
	}
}

// DescribeBandwidthPackages lists the bandwidth packages matching args.
func (client *Client) DescribeBandwidthPackages(args *DescribeBandwidthPackagesArgs) ([]DescribeBandwidthPackageType, error) {
	response := &DescribeBandwidthPackagesResponse{}
	err := client.Invoke("DescribeBandwidthPackages", args, response)
	if err != nil {
		return nil, err
	}
	return response.BandwidthPackages.BandwidthPackage, err
}

// DeleteBandwidthPackageArgs identify the bandwidth package to delete.
type DeleteBandwidthPackageArgs struct {
	RegionId           common.Region
	BandwidthPackageId string
}

// DeleteBandwidthPackageResponse is an empty acknowledgement response.
type DeleteBandwidthPackageResponse struct {
	common.Response
}

// DeleteBandwidthPackage deletes the given bandwidth package.
func (client *Client) DeleteBandwidthPackage(args *DeleteBandwidthPackageArgs) error {
	response := DeleteBandwidthPackageResponse{}
	err := client.Invoke("DeleteBandwidthPackage", args, &response)
	return err
}

// DescribeSnatTableEntriesArgs are the (currently unused) parameters for
// DescribeSnatTableEntries.
type DescribeSnatTableEntriesArgs struct {
	RegionId common.Region
}

// DescribeSnatTableEntries is a stub; the API call is not implemented yet.
func (client *Client) DescribeSnatTableEntries(args *DescribeSnatTableEntriesArgs) {
}

// NatGatewaySpec is the size class of a NAT gateway.
type NatGatewaySpec string

const (
	NatGatewaySmallSpec  = NatGatewaySpec("Small")
	NatGatewayMiddleSpec = NatGatewaySpec("Middle")
	NatGatewayLargeSpec  = NatGatewaySpec("Large")
)

78
vendor/github.com/denverdino/aliyungo/ram/account.go generated vendored Normal file
View File

@ -0,0 +1,78 @@
package ram
// UserRequest wraps a User record as the CreateUser request body.
type UserRequest struct {
	User
}

// UserResponse is the response envelope carrying a single User.
type UserResponse struct {
	RamCommonResponse
	User User
}

// UpdateUserRequest names an existing user plus the new attribute values
// to apply (the New* fields replace the corresponding attributes).
type UpdateUserRequest struct {
	UserName       string
	NewUserName    string
	NewDisplayName string
	NewMobilePhone string
	NewEmail       string
	NewComments    string
}

// ListUserRequest carries pagination parameters for ListUsers.
type ListUserRequest struct {
	Marker   string
	MaxItems int8
}

// ListUserResponse is one page of users plus continuation state.
type ListUserResponse struct {
	RamCommonResponse
	IsTruncated bool
	Marker      string
	Users       struct {
		User []User
	}
}
// CreateUser creates a new RAM user and returns the created user record.
func (client *RamClient) CreateUser(user UserRequest) (UserResponse, error) {
	var resp UserResponse
	if err := client.Invoke("CreateUser", user, &resp); err != nil {
		return UserResponse{}, err
	}
	return resp, nil
}
// GetUser fetches an existing RAM user by name.
//
// Bug fix: on failure the error from Invoke was discarded and a nil error
// was returned alongside an empty response; the error is now propagated.
func (client *RamClient) GetUser(userQuery UserQueryRequest) (UserResponse, error) {
	var userResponse UserResponse
	err := client.Invoke("GetUser", userQuery, &userResponse)
	if err != nil {
		return UserResponse{}, err
	}
	return userResponse, nil
}
// UpdateUser modifies an existing RAM user's attributes and returns the
// updated record.
func (client *RamClient) UpdateUser(newUser UpdateUserRequest) (UserResponse, error) {
	var userResponse UserResponse
	err := client.Invoke("UpdateUser", newUser, &userResponse)
	if err != nil {
		return UserResponse{}, err
	}
	return userResponse, nil
}

// DeleteUser removes the named RAM user.
func (client *RamClient) DeleteUser(userQuery UserQueryRequest) (RamCommonResponse, error) {
	var commonResp RamCommonResponse
	err := client.Invoke("DeleteUser", userQuery, &commonResp)
	if err != nil {
		return RamCommonResponse{}, err
	}
	return commonResp, nil
}

// ListUsers returns one page of RAM users according to listParams.
func (client *RamClient) ListUsers(listParams ListUserRequest) (ListUserResponse, error) {
	var userList ListUserResponse
	err := client.Invoke("ListUsers", listParams, &userList)
	if err != nil {
		return ListUserResponse{}, err
	}
	return userList, nil
}

63
vendor/github.com/denverdino/aliyungo/ram/ak.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
package ram
/*
Access-key management APIs:

	CreateAccessKey()
	UpdateAccessKey()
	DeleteAccessKey()
	ListAccessKeys()
*/

// State is the activation state of an access key; see the Active/Inactive
// constants in types.go.
type State string

// AccessKeyResponse is the response envelope carrying one access key.
type AccessKeyResponse struct {
	RamCommonResponse
	AccessKey AccessKey
}

// UpdateAccessKeyRequest names an access key (and its owning user) and the
// status to set; it is also the request body for DeleteAccessKey.
type UpdateAccessKeyRequest struct {
	UserAccessKeyId string
	Status          State
	UserName        string
}

// AccessKeyListResponse lists a user's access keys.
type AccessKeyListResponse struct {
	RamCommonResponse
	AccessKeys struct {
		AccessKey []AccessKey
	}
}

// CreateAccessKey creates a new access key for the given user.
func (client *RamClient) CreateAccessKey(userQuery UserQueryRequest) (AccessKeyResponse, error) {
	var accesskeyResp AccessKeyResponse
	err := client.Invoke("CreateAccessKey", userQuery, &accesskeyResp)
	if err != nil {
		return AccessKeyResponse{}, err
	}
	return accesskeyResp, nil
}

// UpdateAccessKey sets the status of an access key.
func (client *RamClient) UpdateAccessKey(accessKeyRequest UpdateAccessKeyRequest) (RamCommonResponse, error) {
	var commonResp RamCommonResponse
	err := client.Invoke("UpdateAccessKey", accessKeyRequest, &commonResp)
	if err != nil {
		return RamCommonResponse{}, err
	}
	return commonResp, nil
}

// DeleteAccessKey removes an access key.
func (client *RamClient) DeleteAccessKey(accessKeyRequest UpdateAccessKeyRequest) (RamCommonResponse, error) {
	var commonResp RamCommonResponse
	err := client.Invoke("DeleteAccessKey", accessKeyRequest, &commonResp)
	if err != nil {
		return RamCommonResponse{}, err
	}
	return commonResp, nil
}

// ListAccessKeys lists the access keys of the given user.
func (client *RamClient) ListAccessKeys(userQuery UserQueryRequest) (AccessKeyListResponse, error) {
	var accessKeyListResp AccessKeyListResponse
	err := client.Invoke("ListAccessKeys", userQuery, &accessKeyListResp)
	if err != nil {
		return AccessKeyListResponse{}, err
	}
	return accessKeyListResp, nil
}

79
vendor/github.com/denverdino/aliyungo/ram/api.go generated vendored Normal file
View File

@ -0,0 +1,79 @@
package ram
/*
ringtail 2016/1/19
All RAM apis provided
*/
// RamClientInterface enumerates every RAM API the client exposes. Methods
// declared without parameters or results are placeholders for calls not
// implemented yet (their stubs do nothing).
type RamClientInterface interface {
	// User management.
	CreateUser(user UserRequest) (UserResponse, error)
	GetUser(userQuery UserQueryRequest) (UserResponse, error)
	UpdateUser(newUser UpdateUserRequest) (UserResponse, error)
	DeleteUser(userQuery UserQueryRequest) (RamCommonResponse, error)
	ListUsers(listParams ListUserRequest) (ListUserResponse, error)

	// TODO: console login-profile management.
	CreateLoginProfile()
	GetLoginProfile()
	DeleteLoginProfile()
	UpdateLoginProfile()

	// Access-key management.
	CreateAccessKey(userQuery UserQueryRequest) (AccessKeyResponse, error)
	UpdateAccessKey(accessKeyRequest UpdateAccessKeyRequest) (RamCommonResponse, error)
	DeleteAccessKey(accessKeyRequest UpdateAccessKeyRequest) (RamCommonResponse, error)
	ListAccessKeys(userQuery UserQueryRequest) (AccessKeyListResponse, error)

	// TODO: multi-factor authentication devices.
	CreateVirtualMFADevices()
	ListVirtualMFADevices()
	DeleteVirtualMFADevices()
	BindMFADevice()
	GetUserMFAInfo()

	// TODO: group management.
	CreateGroup()
	GetGroup()
	UpdateGroup()
	ListGroup()
	DeleteGroup()
	AddUserToGroup()
	RemoveUserFromGroup()
	ListGroupsForUser()
	ListUsersForGroup()

	// Role management.
	CreateRole(role RoleRequest) (RoleResponse, error)
	GetRole(roleQuery RoleQueryRequest) (RoleResponse, error)
	UpdateRole(newRole UpdateRoleRequest) (RoleResponse, error)
	ListRoles() (ListRoleResponse, error)
	DeleteRole(roleQuery RoleQueryRequest) (RamCommonResponse, error)

	// Policy management (implemented).
	CreatePolicy(policyReq PolicyRequest) (PolicyResponse, error)
	GetPolicy(policyReq PolicyRequest) (PolicyResponse, error)
	DeletePolicy(policyReq PolicyRequest) (RamCommonResponse, error)
	ListPolicies(policyQuery PolicyQueryRequest) (PolicyQueryResponse, error)
	ListPoliciesForUser(userQuery UserQueryRequest) (PolicyListResponse, error)

	// Policy versions and attachments (partially implemented).
	CreatePolicyVersion(policyReq PolicyRequest) (PolicyVersionResponse, error)
	GetPolicyVersion(policyReq PolicyRequest) (PolicyVersionResponse, error)
	DeletePolicyVersion(policyReq PolicyRequest) (RamCommonResponse, error)
	ListPolicyVersions(policyReq PolicyRequest) (PolicyVersionResponse, error)
	AttachPolicyToUser(attachPolicyRequest AttachPolicyRequest) (RamCommonResponse, error)
	DetachPolicyFromUser(attachPolicyRequest AttachPolicyRequest) (RamCommonResponse, error)
	// NOTE(review): "ListEnities" is misspelled; kept because renaming an
	// interface method would break implementers and callers.
	ListEnitiesForPolicy()
	SetDefaultPolicyVersion()
	ListPoliciesForGroup()

	AttachPolicyToRole(attachPolicyRequest AttachPolicyToRoleRequest) (RamCommonResponse, error)
	DetachPolicyFromRole(attachPolicyRequest AttachPolicyToRoleRequest) (RamCommonResponse, error)
	ListPoliciesForRole(roleQuery RoleQueryRequest) (PolicyListResponse, error)

	// TODO: account security settings (stubs return empty responses).
	SetAccountAlias(accountAlias AccountAlias) (RamCommonResponse, error)
	GetAccountAlias() (AccountAliasResponse, error)
	ClearAccountAlias() (RamCommonResponse, error)
	SetPasswordPolicy(passwordPolicy PasswordPolicyRequest) (PasswordPolicyResponse, error)
	GetPasswordPolicy(accountAlias AccountAlias) (PasswordPolicyResponse, error)
}

30
vendor/github.com/denverdino/aliyungo/ram/client.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
package ram
import (
"github.com/denverdino/aliyungo/common"
"os"
)
const (
	// RAMDefaultEndpoint is the default API endpoint of RAM services.
	RAMDefaultEndpoint = "https://ram.aliyuncs.com"
	RAMAPIVersion      = "2015-05-01"
)

// RamClient implements RamClientInterface on top of the shared common.Client.
type RamClient struct {
	common.Client
}

// NewClient creates a RAM client. The endpoint is taken from the
// RAM_ENDPOINT environment variable when set, otherwise RAMDefaultEndpoint.
func NewClient(accessKeyId string, accessKeySecret string) RamClientInterface {
	endpoint := os.Getenv("RAM_ENDPOINT")
	if endpoint == "" {
		endpoint = RAMDefaultEndpoint
	}
	return NewClientWithEndpoint(endpoint, accessKeyId, accessKeySecret)
}

// NewClientWithEndpoint creates a RAM client against an explicit endpoint.
func NewClientWithEndpoint(endpoint string, accessKeyId string, accessKeySecret string) RamClientInterface {
	client := &RamClient{}
	client.Init(endpoint, RAMAPIVersion, accessKeyId, accessKeySecret)
	return client
}

4
vendor/github.com/denverdino/aliyungo/ram/error.go generated vendored Normal file
View File

@ -0,0 +1,4 @@
package ram
//common errors
var ()

11
vendor/github.com/denverdino/aliyungo/ram/group.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
package ram
// The RAM group APIs below are stubs (not implemented yet); they exist so
// that RamClient satisfies RamClientInterface.
func (client *RamClient) CreateGroup()         {}
func (client *RamClient) GetGroup()            {}
func (client *RamClient) UpdateGroup()         {}
func (client *RamClient) ListGroup()           {}
func (client *RamClient) DeleteGroup()         {}
func (client *RamClient) AddUserToGroup()      {}
func (client *RamClient) RemoveUserFromGroup() {}
func (client *RamClient) ListGroupsForUser()   {}
func (client *RamClient) ListUsersForGroup()   {}

11
vendor/github.com/denverdino/aliyungo/ram/mfa.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
package ram
// The MFA APIs below are stubs (not implemented yet); they exist so that
// RamClient satisfies RamClientInterface.
func (client *RamClient) CreateVirtualMFADevices() {}
func (client *RamClient) ListVirtualMFADevices()   {}
func (client *RamClient) DeleteVirtualMFADevices() {}
func (client *RamClient) BindMFADevice()           {}
func (client *RamClient) GetUserMFAInfo()          {}

195
vendor/github.com/denverdino/aliyungo/ram/policy.go generated vendored Normal file
View File

@ -0,0 +1,195 @@
package ram
// PolicyRequest is the shared request body for the policy CRUD and
// policy-version calls; only the fields relevant to each call are set.
type PolicyRequest struct {
	PolicyName     string
	PolicyType     string
	Description    string
	PolicyDocument string
	SetAsDefault   string
	VersionId      string
}

// PolicyListResponse lists the policies attached to a user or role.
type PolicyListResponse struct {
	RamCommonResponse
	Policies struct {
		Policy []Policy
	}
}

// PolicyResponse carries a single policy record.
type PolicyResponse struct {
	RamCommonResponse
	Policy Policy
}

// PolicyQueryRequest filters and paginates ListPolicies.
type PolicyQueryRequest struct {
	PolicyType string
	Marker     string
	MaxItems   int8
}

// PolicyQueryResponse is one page of policies plus continuation state.
type PolicyQueryResponse struct {
	IsTruncated bool
	Marker      string
	Policies    struct {
		Policy []Policy
	}
}

// PolicyVersionResponse describes one version of a policy document.
type PolicyVersionResponse struct {
	RamCommonResponse
	IsDefaultVersion bool
	VersionId        string
	CreateDate       string
	PolicyDocument   string
}

// AttachPolicyRequest binds a policy to a user.
type AttachPolicyRequest struct {
	PolicyRequest
	UserName string
}

// AttachPolicyToRoleRequest binds a policy to a role.
type AttachPolicyToRoleRequest struct {
	PolicyRequest
	RoleName string
}
// CreatePolicy creates a new RAM policy.
func (client *RamClient) CreatePolicy(policyReq PolicyRequest) (PolicyResponse, error) {
	var resp PolicyResponse
	err := client.Invoke("CreatePolicy", policyReq, &resp)
	if err != nil {
		return PolicyResponse{}, err
	}
	return resp, nil
}

// GetPolicy fetches a policy by name/type.
func (client *RamClient) GetPolicy(policyReq PolicyRequest) (PolicyResponse, error) {
	var resp PolicyResponse
	err := client.Invoke("GetPolicy", policyReq, &resp)
	if err != nil {
		return PolicyResponse{}, err
	}
	return resp, nil
}

// DeletePolicy removes a policy.
func (client *RamClient) DeletePolicy(policyReq PolicyRequest) (RamCommonResponse, error) {
	var resp RamCommonResponse
	err := client.Invoke("DeletePolicy", policyReq, &resp)
	if err != nil {
		return RamCommonResponse{}, err
	}
	return resp, nil
}

// ListPolicies returns one page of policies matching policyQuery.
func (client *RamClient) ListPolicies(policyQuery PolicyQueryRequest) (PolicyQueryResponse, error) {
	var resp PolicyQueryResponse
	err := client.Invoke("ListPolicies", policyQuery, &resp)
	if err != nil {
		return PolicyQueryResponse{}, err
	}
	return resp, nil
}

// CreatePolicyVersion adds a new version to an existing policy.
func (client *RamClient) CreatePolicyVersion(policyReq PolicyRequest) (PolicyVersionResponse, error) {
	var resp PolicyVersionResponse
	err := client.Invoke("CreatePolicyVersion", policyReq, &resp)
	if err != nil {
		return PolicyVersionResponse{}, err
	}
	return resp, nil
}

// GetPolicyVersion fetches one version of a policy.
func (client *RamClient) GetPolicyVersion(policyReq PolicyRequest) (PolicyVersionResponse, error) {
	var resp PolicyVersionResponse
	err := client.Invoke("GetPolicyVersion", policyReq, &resp)
	if err != nil {
		return PolicyVersionResponse{}, err
	}
	return resp, nil
}

// DeletePolicyVersion removes one version of a policy.
func (client *RamClient) DeletePolicyVersion(policyReq PolicyRequest) (RamCommonResponse, error) {
	var resp RamCommonResponse
	err := client.Invoke("DeletePolicyVersion", policyReq, &resp)
	if err != nil {
		return RamCommonResponse{}, err
	}
	return resp, nil
}

// ListPolicyVersions lists the versions of a policy.
func (client *RamClient) ListPolicyVersions(policyReq PolicyRequest) (PolicyVersionResponse, error) {
	var resp PolicyVersionResponse
	err := client.Invoke("ListPolicyVersions", policyReq, &resp)
	if err != nil {
		return PolicyVersionResponse{}, err
	}
	return resp, nil
}

// SetDefaultPolicyVersion is a stub; not implemented yet.
func (client *RamClient) SetDefaultPolicyVersion() {}

// AttachPolicyToUser binds a policy to a user.
func (client *RamClient) AttachPolicyToUser(attachPolicyRequest AttachPolicyRequest) (RamCommonResponse, error) {
	var resp RamCommonResponse
	err := client.Invoke("AttachPolicyToUser", attachPolicyRequest, &resp)
	if err != nil {
		return RamCommonResponse{}, err
	}
	return resp, nil
}

// DetachPolicyFromUser unbinds a policy from a user.
func (client *RamClient) DetachPolicyFromUser(attachPolicyRequest AttachPolicyRequest) (RamCommonResponse, error) {
	var resp RamCommonResponse
	err := client.Invoke("DetachPolicyFromUser", attachPolicyRequest, &resp)
	if err != nil {
		return RamCommonResponse{}, err
	}
	return resp, nil
}

// ListEnitiesForPolicy is a stub; not implemented yet. (Name misspelled;
// kept for compatibility with RamClientInterface.)
func (client *RamClient) ListEnitiesForPolicy() {}

// ListPoliciesForUser lists the policies attached to a user.
func (client *RamClient) ListPoliciesForUser(userQuery UserQueryRequest) (PolicyListResponse, error) {
	var resp PolicyListResponse
	err := client.Invoke("ListPoliciesForUser", userQuery, &resp)
	if err != nil {
		return PolicyListResponse{}, err
	}
	return resp, nil
}

//
// Role related
//

// AttachPolicyToRole binds a policy to a role.
func (client *RamClient) AttachPolicyToRole(attachPolicyRequest AttachPolicyToRoleRequest) (RamCommonResponse, error) {
	var resp RamCommonResponse
	err := client.Invoke("AttachPolicyToRole", attachPolicyRequest, &resp)
	if err != nil {
		return RamCommonResponse{}, err
	}
	return resp, nil
}

// DetachPolicyFromRole unbinds a policy from a role.
func (client *RamClient) DetachPolicyFromRole(attachPolicyRequest AttachPolicyToRoleRequest) (RamCommonResponse, error) {
	var resp RamCommonResponse
	err := client.Invoke("DetachPolicyFromRole", attachPolicyRequest, &resp)
	if err != nil {
		return RamCommonResponse{}, err
	}
	return resp, nil
}

// ListPoliciesForRole lists the policies attached to a role.
func (client *RamClient) ListPoliciesForRole(roleQuery RoleQueryRequest) (PolicyListResponse, error) {
	var resp PolicyListResponse
	err := client.Invoke("ListPoliciesForRole", roleQuery, &resp)
	if err != nil {
		return PolicyListResponse{}, err
	}
	return resp, nil
}

//
// Group related
//

// ListPoliciesForGroup is a stub; not implemented yet.
func (client *RamClient) ListPoliciesForGroup() {}

24
vendor/github.com/denverdino/aliyungo/ram/profile.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
package ram
/*
Console login-profile APIs — stubs, not implemented yet:

	CreateLoginProfile()
	GetLoginProfile()
	DeleteLoginProfile()
	UpdateLoginProfile()
*/

func (client *RamClient) CreateLoginProfile() {
}

func (client *RamClient) GetLoginProfile() {
}

func (client *RamClient) DeleteLoginProfile() {
}

func (client *RamClient) UpdateLoginProfile() {
}

73
vendor/github.com/denverdino/aliyungo/ram/role.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
package ram
// RoleRequest is the CreateRole request body; the trust policy document is
// passed as a serialized string.
type RoleRequest struct {
	RoleName                 string
	AssumeRolePolicyDocument string
	Description              string
}

// RoleResponse is the response envelope carrying a single Role.
type RoleResponse struct {
	RamCommonResponse
	Role Role
}

// RoleQueryRequest names the role a call operates on.
type RoleQueryRequest struct {
	RoleName string
}

// UpdateRoleRequest names a role and the new trust policy to install.
type UpdateRoleRequest struct {
	RoleName                    string
	NewAssumeRolePolicyDocument string
}

// ListRoleResponse is the list of all roles in the account.
type ListRoleResponse struct {
	RamCommonResponse
	Roles struct {
		Role []Role
	}
}
// CreateRole creates a RAM role and returns the created role record.
func (client *RamClient) CreateRole(role RoleRequest) (RoleResponse, error) {
	var resp RoleResponse
	if err := client.Invoke("CreateRole", role, &resp); err != nil {
		return RoleResponse{}, err
	}
	return resp, nil
}
// GetRole fetches an existing RAM role by name.
//
// Bug fix: on failure the error from Invoke was discarded and a nil error
// was returned alongside an empty response; the error is now propagated.
func (client *RamClient) GetRole(roleQuery RoleQueryRequest) (RoleResponse, error) {
	var roleResponse RoleResponse
	err := client.Invoke("GetRole", roleQuery, &roleResponse)
	if err != nil {
		return RoleResponse{}, err
	}
	return roleResponse, nil
}
// UpdateRole installs a new trust policy document on a role and returns
// the updated record.
func (client *RamClient) UpdateRole(newRole UpdateRoleRequest) (RoleResponse, error) {
	var roleResponse RoleResponse
	err := client.Invoke("UpdateRole", newRole, &roleResponse)
	if err != nil {
		return RoleResponse{}, err
	}
	return roleResponse, nil
}

// ListRoles lists all roles in the account (no filter parameters).
func (client *RamClient) ListRoles() (ListRoleResponse, error) {
	var roleList ListRoleResponse
	err := client.Invoke("ListRoles", struct{}{}, &roleList)
	if err != nil {
		return ListRoleResponse{}, err
	}
	return roleList, nil
}

// DeleteRole removes the named role.
func (client *RamClient) DeleteRole(roleQuery RoleQueryRequest) (RamCommonResponse, error) {
	var commonResp RamCommonResponse
	err := client.Invoke("DeleteRole", roleQuery, &commonResp)
	if err != nil {
		return RamCommonResponse{}, err
	}
	return commonResp, nil
}

40
vendor/github.com/denverdino/aliyungo/ram/security.go generated vendored Normal file
View File

@ -0,0 +1,40 @@
package ram
//TODO implement ram api about security
/*
SetAccountAlias()
GetAccountAlias()
ClearAccountAlias()
SetPasswordPolicy()
GetPasswordPolicy()
*/
// AccountAliasResponse carries the account's alias.
type AccountAliasResponse struct {
	RamCommonResponse
	AccountAlias string
}

// PasswordPolicyResponse carries the account's password policy.
type PasswordPolicyResponse struct {
	RamCommonResponse
	PasswordPolicy
}

// PasswordPolicyRequest carries a password policy to apply.
type PasswordPolicyRequest struct {
	PasswordPolicy
}

// SetAccountAlias is a stub; the API call is not implemented yet.
func (client *RamClient) SetAccountAlias(accountalias AccountAlias) (RamCommonResponse, error) {
	return RamCommonResponse{}, nil
}

// GetAccountAlias is a stub; the API call is not implemented yet.
func (client *RamClient) GetAccountAlias() (AccountAliasResponse, error) {
	return AccountAliasResponse{}, nil
}

// ClearAccountAlias is a stub; the API call is not implemented yet.
func (client *RamClient) ClearAccountAlias() (RamCommonResponse, error) {
	return RamCommonResponse{}, nil
}

// SetPasswordPolicy is a stub; the API call is not implemented yet.
func (client *RamClient) SetPasswordPolicy(passwordPolicy PasswordPolicyRequest) (PasswordPolicyResponse, error) {
	return PasswordPolicyResponse{}, nil
}

// GetPasswordPolicy is a stub; the API call is not implemented yet.
func (client *RamClient) GetPasswordPolicy(accountAlias AccountAlias) (PasswordPolicyResponse, error) {
	return PasswordPolicyResponse{}, nil
}

126
vendor/github.com/denverdino/aliyungo/ram/types.go generated vendored Normal file
View File

@ -0,0 +1,126 @@
package ram
import (
"github.com/denverdino/aliyungo/common"
)
/*
All common struct
*/
const (
	// Activation states for access keys (see the State type in ak.go).
	Active   State = "Active"
	Inactive State = "Inactive"
)

/*
AccountAlias is the alias of a cloud account.

	Type:        string
	Required:    yes
	Description: alias of the specified cloud account; length 3-63 characters
	Constraint:  ^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$
*/
type AccountAlias string

// UserQueryRequest names the user a call operates on.
type UserQueryRequest struct {
	UserName string
}

// User is a RAM user record; date fields are represented as strings.
type User struct {
	UserId        string
	UserName      string
	DisplayName   string
	MobilePhone   string
	Email         string
	Comments      string
	CreateDate    string
	UpdateDate    string
	LastLoginDate string
}

// LoginProfile is a placeholder for the console login-profile record.
type LoginProfile struct {
}

// MFADevice is a placeholder for an MFA device record.
type MFADevice struct {
}

// VirtualMFADevice is a placeholder for a virtual MFA device record.
type VirtualMFADevice struct {
}

// AccessKey is one API credential pair with its activation state.
type AccessKey struct {
	AccessKeyId     string
	AccessKeySecret string
	Status          State
	CreateDate      string
}

// Group is a placeholder for a RAM group record.
type Group struct {
}

// Role is a RAM role record, including its serialized trust policy.
type Role struct {
	RoleId                   string
	RoleName                 string
	Arn                      string
	Description              string
	AssumeRolePolicyDocument string
	CreateDate               string
	UpdateDate               string
}

// Policy is a RAM policy record.
type Policy struct {
	PolicyName      string
	PolicyType      string
	Description     string
	DefaultVersion  string
	CreateDate      string
	UpdateDate      string
	AttachmentCount int64
}

// PolicyDocument is a parsed policy: a version plus its statements.
type PolicyDocument struct {
	Statement []PolicyItem
	Version   string
}

// PolicyItem is one statement of a policy document.
type PolicyItem struct {
	Action   string
	Effect   string
	Resource string
}

// AssumeRolePolicyDocument is a parsed role trust policy.
type AssumeRolePolicyDocument struct {
	Statement []AssumeRolePolicyItem
	Version   string
}

// AssumeRolePolicyItem is one statement of a role trust policy.
type AssumeRolePolicyItem struct {
	Action    string
	Effect    string
	Principal AssumeRolePolicyPrincpal
}

// AssumeRolePolicyPrincpal lists the RAM principals allowed to assume a
// role. NOTE(review): "Princpal" is misspelled; kept for compatibility.
type AssumeRolePolicyPrincpal struct {
	RAM []string
}

/*
PasswordPolicy mirrors the JSON password-policy document, e.g.:

	"PasswordPolicy": {
	    "MinimumPasswordLength": 12,
	    "RequireLowercaseCharacters": true,
	    "RequireUppercaseCharacters": true,
	    "RequireNumbers": true,
	    "RequireSymbols": true
	}
*/
type PasswordPolicy struct {
	MinimumPasswordLength      int8
	RequireLowercaseCharacters bool
	RequireUppercaseCharacters bool
	RequireNumbers             bool
	RequireSymbols             bool
}

// RamCommonResponse is the base response embedded in every RAM response.
type RamCommonResponse struct {
	common.Response
}

View File

@ -1,8 +1,9 @@
package slb
import (
"github.com/denverdino/aliyungo/common"
"os"
"github.com/denverdino/aliyungo/common"
)
type Client struct {
@ -13,6 +14,8 @@ const (
// SLBDefaultEndpoint is the default API endpoint of SLB services
SLBDefaultEndpoint = "https://slb.aliyuncs.com"
SLBAPIVersion = "2014-05-15"
SLBServiceCode = "slb"
)
// NewClient creates a new instance of ECS client
@ -29,3 +32,18 @@ func NewClientWithEndpoint(endpoint string, accessKeyId, accessKeySecret string)
client.Init(endpoint, SLBAPIVersion, accessKeyId, accessKeySecret)
return client
}
func NewSLBClient(accessKeyId, accessKeySecret string, regionID common.Region) *Client {
endpoint := os.Getenv("SLB_ENDPOINT")
if endpoint == "" {
endpoint = SLBDefaultEndpoint
}
return NewClientWithRegion(endpoint, accessKeyId, accessKeySecret, regionID)
}
func NewClientWithRegion(endpoint string, accessKeyId, accessKeySecret string, regionID common.Region) *Client {
client := &Client{}
client.NewInit(endpoint, SLBAPIVersion, accessKeyId, accessKeySecret, SLBServiceCode, regionID)
return client
}

View File

@ -150,7 +150,7 @@ type TCPListenerType struct {
HealthCheckConnectPort int
HealthyThreshold int
UnhealthyThreshold int
HealthCheckTimeout int
HealthCheckConnectTimeout int
HealthCheckInterval int
HealthCheckHttpCode HealthCheckHttpCodeType
VServerGroupId string
@ -177,7 +177,7 @@ type UDPListenerType struct {
HealthCheckConnectPort int
HealthyThreshold int
UnhealthyThreshold int
HealthCheckTimeout int
HealthCheckConnectTimeout int
HealthCheckInterval int
VServerGroupId string
}

126
vendor/github.com/denverdino/aliyungo/slb/rules.go generated vendored Normal file
View File

@ -0,0 +1,126 @@
package slb
import "github.com/denverdino/aliyungo/common"
// CreateRulesResponse is an empty acknowledgement response.
type CreateRulesResponse struct {
	common.Response
}

// CreateRulesArgs target a listener on a load balancer; RuleList is a
// serialized description of the rules to create.
type CreateRulesArgs struct {
	RegionId       common.Region
	LoadBalancerId string
	ListenerPort   int
	RuleList       string
}

// Rule is one forwarding rule: traffic matching Domain/Url is forwarded
// to the virtual server group VServerGroupId.
type Rule struct {
	RuleId         string
	RuleName       string
	Domain         string
	Url            string `json:",omitempty"`
	VServerGroupId string
}
// CreateRules adds forwarding rules to the listener named by args.
//
// You can read doc at https://help.aliyun.com/document_detail/35226.html?spm=5176.doc35226.6.671.625Omh
func (client *Client) CreateRules(args *CreateRulesArgs) error {
	response := CreateRulesResponse{}
	// The original returned err on both paths; a direct return is
	// equivalent.
	return client.Invoke("CreateRules", args, &response)
}
// DeleteRulesArgs name the rules to delete; RuleIds is a serialized list
// of rule IDs.
type DeleteRulesArgs struct {
	RegionId common.Region
	RuleIds  string
}

// DeleteRulesResponse is an empty acknowledgement response.
type DeleteRulesResponse struct {
	common.Response
}

// DeleteRules deletes forwarding rules.
//
// You can read doc at https://help.aliyun.com/document_detail/35227.html?spm=5176.doc35226.6.672.6iNBtR
func (client *Client) DeleteRules(args *DeleteRulesArgs) error {
	response := DeleteRulesResponse{}
	err := client.Invoke("DeleteRules", args, &response)
	if err != nil {
		return err
	}
	// err is necessarily nil here.
	return err
}

// SetRuleArgs repoint one rule at a different virtual server group.
type SetRuleArgs struct {
	RegionId       common.Region
	RuleId         string
	VServerGroupId string
}

// SetRuleResponse is an empty acknowledgement response.
type SetRuleResponse struct {
	common.Response
}

// SetRule modifies a forwarding rule.
//
// You can read doc at https://help.aliyun.com/document_detail/35228.html?spm=5176.doc35227.6.673.rq40a9
func (client *Client) SetRule(args *SetRuleArgs) error {
	response := SetRuleResponse{}
	err := client.Invoke("SetRule", args, &response)
	if err != nil {
		return err
	}
	// err is necessarily nil here.
	return err
}
// DescribeRuleAttributeArgs name the rule to describe.
type DescribeRuleAttributeArgs struct {
	RegionId common.Region
	RuleId   string
}

// DescribeRuleAttributeResponse is one rule plus the load balancer and
// listener it is attached to.
type DescribeRuleAttributeResponse struct {
	common.Response
	LoadBalancerId string
	ListenerPort   int
	Rule
}

// DescribeRuleAttribute fetches one forwarding rule by ID.
//
// You can read doc at https://help.aliyun.com/document_detail/35229.html?spm=5176.doc35226.6.674.DRJeKJ
func (client *Client) DescribeRuleAttribute(args *DescribeRuleAttributeArgs) (*DescribeRuleAttributeResponse, error) {
	response := &DescribeRuleAttributeResponse{}
	err := client.Invoke("DescribeRuleAttribute", args, response)
	if err != nil {
		return nil, err
	}
	return response, nil
}

// DescribeRulesArgs name the listener whose rules should be listed.
type DescribeRulesArgs struct {
	RegionId       common.Region
	LoadBalancerId string
	ListenerPort   int
}

// DescribeRulesResponse is the list of rules on a listener.
type DescribeRulesResponse struct {
	common.Response
	Rules struct {
		Rule []Rule
	}
}

// DescribeRules lists the forwarding rules of a listener.
//
// You can read doc at https://help.aliyun.com/document_detail/35229.html?spm=5176.doc35226.6.674.DRJeKJ
func (client *Client) DescribeRules(args *DescribeRulesArgs) (*DescribeRulesResponse, error) {
	response := &DescribeRulesResponse{}
	err := client.Invoke("DescribeRules", args, response)
	if err != nil {
		return nil, err
	}
	return response, nil
}

BIN
vendor/github.com/denverdino/aliyungo/slb/slb.test generated vendored Executable file

Binary file not shown.

60
vendor/vendor.json vendored
View File

@ -8,30 +8,6 @@
"revision": "5678f03fc801525df794f953aa82f5ad7555a2ef",
"revisionTime": "2016-08-11T22:04:02Z"
},
{
"checksumSHA1": "e6yzSIwLwJV0tb2YQupLL0FO1BM=",
"path": "github.com/denverdino/aliyungo/common",
"revision": "5b4f72bfcf17a2e6ec8238708bd161a2413e97e6",
"revisionTime": "2017-02-01T08:56:36Z"
},
{
"checksumSHA1": "YLooG/WAFF61eBUxg/R5cpfi5DE=",
"path": "github.com/denverdino/aliyungo/ecs",
"revision": "019da4915136cd9fed94064a0604321d9bc09245",
"revisionTime": "2017-02-23T07:38:06Z"
},
{
"checksumSHA1": "QlA7zv05k7HWeR3tg4uHqIlFcg8=",
"path": "github.com/denverdino/aliyungo/slb",
"revision": "5b4f72bfcf17a2e6ec8238708bd161a2413e97e6",
"revisionTime": "2017-02-01T08:56:36Z"
},
{
"checksumSHA1": "Lp0KtT7ycgq31ox3Uzhpxyw0U+Y=",
"path": "github.com/denverdino/aliyungo/util",
"revision": "5b4f72bfcf17a2e6ec8238708bd161a2413e97e6",
"revisionTime": "2017-02-01T08:56:36Z"
},
{
"checksumSHA1": "/WG++Jij8INZ80tER+FAiIDMmws=",
"comment": "v3.1.0-beta",
@ -97,6 +73,12 @@
"path": "github.com/Azure/go-ntlmssp",
"revision": "e0b63eb299a769ea4b04dadfe530f6074b277afb"
},
{
"checksumSHA1": "HttiPj314X1a0i2Jen1p6lRH/vE=",
"path": "github.com/aliyun/aliyun-oss-go-sdk/oss",
"revision": "e6dbea820a9f304b43d3b357dd48ced23b86df72",
"revisionTime": "2017-01-13T02:27:42Z"
},
{
"checksumSHA1": "gc98KRYAAiw4g1FrSTsuggSNv8k=",
"path": "github.com/approvals/go-approval-tests",
@ -330,6 +312,36 @@
"path": "github.com/davecgh/go-spew/spew",
"revision": "6d212800a42e8ab5c146b8ace3490ee17e5225f9"
},
{
"checksumSHA1": "InnjAEH008xuFzlslJn2IaG7McM=",
"path": "github.com/denverdino/aliyungo/common",
"revision": "4102657a3f4885557c5e19dee53decb1fbf49d2a",
"revisionTime": "2017-03-09T13:03:54Z"
},
{
"checksumSHA1": "yhWqkta/EcCfgSy1ZV7T/CR6VTo=",
"path": "github.com/denverdino/aliyungo/ecs",
"revision": "4102657a3f4885557c5e19dee53decb1fbf49d2a",
"revisionTime": "2017-03-09T13:03:54Z"
},
{
"checksumSHA1": "40PbgN4Emct6AIiJbhPoEdd+vIA=",
"path": "github.com/denverdino/aliyungo/ram",
"revision": "4102657a3f4885557c5e19dee53decb1fbf49d2a",
"revisionTime": "2017-03-09T13:03:54Z"
},
{
"checksumSHA1": "ufG04XwbDZklJlmRINMh4DErUiI=",
"path": "github.com/denverdino/aliyungo/slb",
"revision": "4102657a3f4885557c5e19dee53decb1fbf49d2a",
"revisionTime": "2017-03-09T13:03:54Z"
},
{
"checksumSHA1": "Lp0KtT7ycgq31ox3Uzhpxyw0U+Y=",
"path": "github.com/denverdino/aliyungo/util",
"revision": "4102657a3f4885557c5e19dee53decb1fbf49d2a",
"revisionTime": "2017-03-09T13:03:54Z"
},
{
"checksumSHA1": "D37uI+U+FYvTJIdG2TTozXe7i7U=",
"comment": "v3.0.0",

View File

@ -9,7 +9,7 @@ page_title: Alicloud Image Builder
Type: `alicloud-ecs`
Alicloud Packer builder plugin provide the capability to build customized images based on the existing base images.
Alicloud Packer builder plugin provides the capability to build customized images based on an existing base image.
## Configuration Reference

View File

@ -35,7 +35,7 @@
<li>
<h4>Builders</h4>
</li>
<li><a href="/docs/builders/alicloud.html">Alicloud Image</a></li>
<li><a href="/docs/builders/alicloud-ecs.html">Alicloud Image</a></li>
<li><a href="/docs/builders/amazon.html">Amazon EC2 (AMI)</a></li>
<li><a href="/docs/builders/azure-arm.html">Azure Resource Manager</a></li>
<li><a href="/docs/builders/cloudstack.html">CloudStack</a></li>