packer-cn/builder/tencentcloud/cvm/step_run_instance.go
ZhiQiang Fan 7235a4a775 feature: support data disks for tencentcloud builder
Data disks are an important feature for instances and images; this
patch implements their basic functionality. More work needs to be
done to make the feature friendly to users; the docs point out the
current limitations.

Also update dependencies because this feature requires new code in
the tencentcloud Go SDK.
2019-06-30 00:05:38 +08:00

package cvm

import (
	"context"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"log"
	"time"

	"github.com/hashicorp/packer/common/retry"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
	cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312"
)
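
// stepRunInstance launches the temporary CVM instance that later steps
// provision and capture into an image. The exported fields are populated by
// the builder from the user's configuration; instanceId records the instance
// created by Run so that Cleanup can terminate it.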
type stepRunInstance struct {
	InstanceType             string
	UserData                 string
	UserDataFile             string
	instanceId               string
	ZoneId                   string
	InstanceName             string
	DiskType                 string
	DiskSize                 int64
	HostName                 string
	InternetMaxBandwidthOut  int64
	AssociatePublicIpAddress bool
	Tags                     map[string]string
	DataDisks                []tencentCloudDataDisk
}
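
// Run builds a RunInstances request from the step configuration and the
// resources prepared by earlier steps (source image, VPC, subnet, security
// group), launches the instance, waits for it to reach RUNNING, and stores
// the resulting instance description in the state bag under "instance".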
func (s *stepRunInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("cvm_client").(*cvm.Client)
	config := state.Get("config").(*Config)
	ui := state.Get("ui").(packer.Ui)
	source_image := state.Get("source_image").(*cvm.Image)
	vpc_id := state.Get("vpc_id").(string)
	subnet_id := state.Get("subnet_id").(string)
	security_group_id := state.Get("security_group_id").(string)

	password := config.Comm.SSHPassword
	if password == "" && config.Comm.WinRMPassword != "" {
		password = config.Comm.WinRMPassword
	}

	userData, err := s.getUserData(state)
	if err != nil {
		err := fmt.Errorf("get user_data failed: %s", err.Error())
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
ui.Say("Creating Instance.")
// config RunInstances parameters
POSTPAID_BY_HOUR := "POSTPAID_BY_HOUR"
req := cvm.NewRunInstancesRequest()
if s.ZoneId != "" {
req.Placement = &cvm.Placement{
Zone: &s.ZoneId,
}
}
req.ImageId = source_image.ImageId
req.InstanceChargeType = &POSTPAID_BY_HOUR
req.InstanceType = &s.InstanceType
// TODO: Add check for system disk size, it should be larger than image system disk size.
req.SystemDisk = &cvm.SystemDisk{
DiskType: &s.DiskType,
DiskSize: &s.DiskSize,
}
	// A system disk snapshot is always present, so if the image also has data
	// disks the snapshot set length will be larger than 1.
	if source_image.SnapshotSet != nil && len(source_image.SnapshotSet) > 1 {
		ui.Say("Using the source image's data disk snapshots; ignoring user data disk settings.")
		var dataDisks []*cvm.DataDisk
		for _, snapshot := range source_image.SnapshotSet {
			if *snapshot.DiskUsage == "DATA_DISK" {
				var dataDisk cvm.DataDisk
				// FIXME: Currently we have no way to get the original disk type
				// from data disk snapshots, we don't allow the user to overwrite
				// snapshot settings, and we cannot guarantee that a certain
				// hard-coded type is not sold out, so use the system disk type
				// as a workaround.
				//
				// Eventually, we need to allow the user to overwrite snapshot
				// disk settings.
				dataDisk.DiskType = &s.DiskType
				dataDisk.DiskSize = snapshot.DiskSize
				dataDisk.SnapshotId = snapshot.SnapshotId
				dataDisks = append(dataDisks, &dataDisk)
			}
		}
		req.DataDisks = dataDisks
	} else {
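		// No data disk snapshots in the source image: attach the data disks
		// configured by the user for this build.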
		var dataDisks []*cvm.DataDisk
		for _, disk := range s.DataDisks {
			// Copy the loop variable so each disk gets its own pointers.
			disk := disk
			var dataDisk cvm.DataDisk
			dataDisk.DiskType = &disk.DiskType
			dataDisk.DiskSize = &disk.DiskSize
			if disk.SnapshotId != "" {
				dataDisk.SnapshotId = &disk.SnapshotId
			}
			dataDisks = append(dataDisks, &dataDisk)
		}
		req.DataDisks = dataDisks
	}
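
	// Place the instance in the VPC and subnet prepared by earlier steps.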
	req.VirtualPrivateCloud = &cvm.VirtualPrivateCloud{
		VpcId:    &vpc_id,
		SubnetId: &subnet_id,
	}
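
	// Only attach a public IP when requested; outbound bandwidth is then
	// billed by traffic.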
	TRAFFIC_POSTPAID_BY_HOUR := "TRAFFIC_POSTPAID_BY_HOUR"
	if s.AssociatePublicIpAddress {
		req.InternetAccessible = &cvm.InternetAccessible{
			InternetChargeType:      &TRAFFIC_POSTPAID_BY_HOUR,
			InternetMaxBandwidthOut: &s.InternetMaxBandwidthOut,
		}
	}
	req.InstanceName = &s.InstanceName

	loginSettings := cvm.LoginSettings{}
	if password != "" {
		loginSettings.Password = &password
	}
	if config.Comm.SSHKeyPairName != "" {
		loginSettings.KeyIds = []*string{&config.Comm.SSHKeyPairName}
	}
	req.LoginSettings = &loginSettings

	req.SecurityGroupIds = []*string{&security_group_id}
	req.ClientToken = &s.InstanceName
	req.HostName = &s.HostName
	req.UserData = &userData
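
	// Convert the user-supplied tag map into the tag specification that
	// RunInstances expects.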
	var tags []*cvm.Tag
	for k, v := range s.Tags {
		// Copy the loop variables so each tag gets its own key/value pointers.
		k, v := k, v
		tags = append(tags, &cvm.Tag{
			Key:   &k,
			Value: &v,
		})
	}
	resourceType := "instance"
	if len(tags) > 0 {
		req.TagSpecification = []*cvm.TagSpecification{
			&cvm.TagSpecification{
				ResourceType: &resourceType,
				Tags:         tags,
			},
		}
	}
	resp, err := client.RunInstances(req)
	if err != nil {
		err := fmt.Errorf("create instance failed: %s", err.Error())
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	if len(resp.Response.InstanceIdSet) != 1 {
		err := fmt.Errorf("create instance failed: %d instance(s) created", len(resp.Response.InstanceIdSet))
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
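
	// Record the new instance ID, wait for it to reach RUNNING (up to 1800
	// seconds), and then fetch its full description for later steps.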
	s.instanceId = *resp.Response.InstanceIdSet[0]

	err = WaitForInstance(client, s.instanceId, "RUNNING", 1800)
	if err != nil {
		err := fmt.Errorf("wait instance launch failed: %s", err.Error())
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	describeReq := cvm.NewDescribeInstancesRequest()
	describeReq.InstanceIds = []*string{&s.instanceId}
	describeResp, err := client.DescribeInstances(describeReq)
	if err != nil {
		err := fmt.Errorf("describe instance failed: %s", err.Error())
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	state.Put("instance", describeResp.Response.InstanceSet[0])

	return multistep.ActionContinue
}
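
// getUserData returns the instance user data as a base64-encoded string.
// An inline UserData value takes precedence; otherwise the contents of
// UserDataFile are read and encoded.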
func (s *stepRunInstance) getUserData(state multistep.StateBag) (string, error) {
	userData := s.UserData
	if userData == "" && s.UserDataFile != "" {
		data, err := ioutil.ReadFile(s.UserDataFile)
		if err != nil {
			return "", err
		}
		userData = string(data)
	}
	userData = base64.StdEncoding.EncodeToString([]byte(userData))
	log.Printf("user_data: %s", userData)

	return userData, nil
}
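
// Cleanup terminates the instance created by Run, if any, retrying the
// TerminateInstances call for up to 60 attempts.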
func (s *stepRunInstance) Cleanup(state multistep.StateBag) {
	if s.instanceId == "" {
		return
	}
	MessageClean(state, "Instance")
	client := state.Get("cvm_client").(*cvm.Client)
	ui := state.Get("ui").(packer.Ui)

	req := cvm.NewTerminateInstancesRequest()
	req.InstanceIds = []*string{&s.instanceId}

	ctx := context.TODO()
	err := retry.Config{
		Tries: 60,
		RetryDelay: (&retry.Backoff{
			InitialBackoff: 5 * time.Second,
			MaxBackoff:     5 * time.Second,
			Multiplier:     2,
		}).Linear,
	}.Run(ctx, func(ctx context.Context) error {
		_, err := client.TerminateInstances(req)
		return err
	})
	if err != nil {
		ui.Error(fmt.Sprintf("terminate instance(%s) failed: %s", s.instanceId, err.Error()))
	}
}