packer-cn/post-processor/digitalocean-import/post-processor.go


//go:generate mapstructure-to-hcl2 -type Config

package digitaloceanimport

import (
	"context"
	"fmt"
	"log"
	"os"
	"strings"
	"time"

	"golang.org/x/oauth2"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/digitalocean/godo"

	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/packer/builder/digitalocean"
	"github.com/hashicorp/packer/common"
	"github.com/hashicorp/packer/helper/config"
	"github.com/hashicorp/packer/packer"
	"github.com/hashicorp/packer/template/interpolate"
)

const BuilderId = "packer.post-processor.digitalocean-import"
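
// Config holds the options for this post-processor; the mapstructure tags
// below are the keys used in a Packer template. api_token, spaces_key,
// spaces_secret, spaces_region, space_name, image_name, and image_regions
// are required (see Configure). As an illustrative sketch only, assuming the
// post-processor is registered under the name "digitalocean-import" and using
// placeholder values, a JSON template stanza could look like:
//
//	{
//	  "type": "digitalocean-import",
//	  "api_token": "{{user `token`}}",
//	  "spaces_key": "{{user `key`}}",
//	  "spaces_secret": "{{user `secret`}}",
//	  "spaces_region": "nyc3",
//	  "space_name": "import-bucket",
//	  "image_name": "custom-image-{{timestamp}}",
//	  "image_regions": ["nyc3"]
//	}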
type Config struct {
	common.PackerConfig `mapstructure:",squash"`

	APIToken     string        `mapstructure:"api_token"`
	SpacesKey    string        `mapstructure:"spaces_key"`
	SpacesSecret string        `mapstructure:"spaces_secret"`
	SpacesRegion string        `mapstructure:"spaces_region"`
	SpaceName    string        `mapstructure:"space_name"`
	ObjectName   string        `mapstructure:"space_object_name"`
	SkipClean    bool          `mapstructure:"skip_clean"`
	Tags         []string      `mapstructure:"image_tags"`
	Name         string        `mapstructure:"image_name"`
	Description  string        `mapstructure:"image_description"`
	Distribution string        `mapstructure:"image_distribution"`
	ImageRegions []string      `mapstructure:"image_regions"`
	Timeout      time.Duration `mapstructure:"timeout"`

	ctx interpolate.Context
}
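
// PostProcessor implements packer.PostProcessor for the DigitalOcean custom
// image import workflow.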
type PostProcessor struct {
	config Config
}
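
// apiTokenSource satisfies oauth2.TokenSource and supplies the static
// DigitalOcean API token to the godo client.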
type apiTokenSource struct {
	AccessToken string
}
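
// logger adapts a *log.Logger to the aws.Logger interface so the AWS SDK can
// log the Spaces (S3-compatible) requests made below.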
type logger struct {
	logger *log.Logger
}

func (t *apiTokenSource) Token() (*oauth2.Token, error) {
	return &oauth2.Token{
		AccessToken: t.AccessToken,
	}, nil
}

func (l logger) Log(args ...interface{}) {
	l.logger.Println(args...)
}
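
// ConfigSpec returns the generated HCL2 object spec for Config, which lets
// Packer decode HCL2 templates for this post-processor.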
func (p *PostProcessor) ConfigSpec() hcldec.ObjectSpec { return p.config.FlatMapstructure().HCL2Spec() }
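
// Configure decodes and validates the post-processor configuration, filling in
// defaults from the DIGITALOCEAN_* environment variables, a timestamped
// space_object_name, an "Unknown" distribution, and a 20 minute timeout.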
func (p *PostProcessor) Configure(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{"space_object_name"},
		},
	}, raws...)
	if err != nil {
		return err
	}

	if p.config.SpacesKey == "" {
		p.config.SpacesKey = os.Getenv("DIGITALOCEAN_SPACES_ACCESS_KEY")
	}
	if p.config.SpacesSecret == "" {
		p.config.SpacesSecret = os.Getenv("DIGITALOCEAN_SPACES_SECRET_KEY")
	}
	if p.config.APIToken == "" {
		p.config.APIToken = os.Getenv("DIGITALOCEAN_API_TOKEN")
	}
	if p.config.ObjectName == "" {
		p.config.ObjectName = "packer-import-{{timestamp}}"
	}
	if p.config.Distribution == "" {
		p.config.Distribution = "Unknown"
	}
	if p.config.Timeout == 0 {
		p.config.Timeout = 20 * time.Minute
	}

	errs := new(packer.MultiError)
	if err = interpolate.Validate(p.config.ObjectName, &p.config.ctx); err != nil {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("Error parsing space_object_name template: %s", err))
	}

	requiredArgs := map[string]*string{
		"api_token":     &p.config.APIToken,
		"spaces_key":    &p.config.SpacesKey,
		"spaces_secret": &p.config.SpacesSecret,
		"spaces_region": &p.config.SpacesRegion,
		"space_name":    &p.config.SpaceName,
		"image_name":    &p.config.Name,
	}
	for key, ptr := range requiredArgs {
		if *ptr == "" {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("%s must be set", key))
		}
	}

	if len(p.config.ImageRegions) == 0 {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("image_regions must be set"))
	}

	if len(errs.Errors) > 0 {
		return errs
	}

	packer.LogSecretFilter.Set(p.config.SpacesKey, p.config.SpacesSecret, p.config.APIToken)
	log.Println(p.config)
	return nil
}
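
// PostProcess uploads the built image to the configured Space, imports it as a
// DigitalOcean custom image, waits for it to become available, optionally
// distributes it to additional regions, and (unless skip_clean is set) deletes
// the uploaded object from Spaces.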
func (p *PostProcessor) PostProcess(ctx context.Context, ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) {
	var err error

	generatedData := artifact.State("generated_data")
	if generatedData == nil {
		// Make sure it's not a nil map so we can assign to it later.
		generatedData = make(map[string]interface{})
	}
	p.config.ctx.Data = generatedData

	p.config.ObjectName, err = interpolate.Render(p.config.ObjectName, &p.config.ctx)
	if err != nil {
		return nil, false, false, fmt.Errorf("Error rendering space_object_name template: %s", err)
	}
	log.Printf("Rendered space_object_name as %s", p.config.ObjectName)

	source := ""
	artifacts := artifact.Files()
	log.Println("Looking for image in artifact")
	if len(artifacts) > 1 {
		validSuffix := []string{"raw", "img", "qcow2", "vhdx", "vdi", "vmdk", "tar.bz2", "tar.xz", "tar.gz"}
		for _, path := range artifact.Files() {
			for _, suffix := range validSuffix {
				if strings.HasSuffix(path, suffix) {
					source = path
					break
				}
			}
			if source != "" {
				break
			}
		}
	} else {
		source = artifact.Files()[0]
	}

	if source == "" {
		return nil, false, false, fmt.Errorf("Image file not found")
	}

	spacesCreds := credentials.NewStaticCredentials(p.config.SpacesKey, p.config.SpacesSecret, "")
	spacesEndpoint := fmt.Sprintf("https://%s.digitaloceanspaces.com", p.config.SpacesRegion)
	spacesConfig := &aws.Config{
		Credentials: spacesCreds,
		Endpoint:    aws.String(spacesEndpoint),
		Region:      aws.String(p.config.SpacesRegion),
		LogLevel:    aws.LogLevel(aws.LogDebugWithSigning),
		Logger: &logger{
			logger: log.New(os.Stderr, "", log.LstdFlags),
		},
	}
	sess, err := session.NewSession(spacesConfig)
	if err != nil {
		return nil, false, false, err
	}

	ui.Message(fmt.Sprintf("Uploading %s to spaces://%s/%s", source, p.config.SpaceName, p.config.ObjectName))
	err = uploadImageToSpaces(source, p, sess)
	if err != nil {
		return nil, false, false, err
	}
	ui.Message(fmt.Sprintf("Completed upload of %s to spaces://%s/%s", source, p.config.SpaceName, p.config.ObjectName))

	client := godo.NewClient(oauth2.NewClient(context.Background(), &apiTokenSource{
		AccessToken: p.config.APIToken,
	}))

	ui.Message(fmt.Sprintf("Started import of spaces://%s/%s", p.config.SpaceName, p.config.ObjectName))
	image, err := importImageFromSpaces(p, client)
	if err != nil {
		return nil, false, false, err
	}

	ui.Message(fmt.Sprintf("Waiting for import of image %s to complete (may take a while)", p.config.Name))
	err = waitUntilImageAvailable(client, image.ID, p.config.Timeout)
	if err != nil {
		return nil, false, false, fmt.Errorf("Import of image %s failed with error: %s", p.config.Name, err)
	}
	ui.Message(fmt.Sprintf("Import of image %s complete", p.config.Name))

	if len(p.config.ImageRegions) > 1 {
		// The image already exists in the first listed region, so only the remaining
		// regions need a transfer. Slicing (rather than swapping elements in place)
		// leaves p.config.ImageRegions intact for the artifact's RegionNames below.
		regions := p.config.ImageRegions[1:]
ui.Message(fmt.Sprintf("Distributing image %s to additional regions: %v", p.config.Name, regions))
err = distributeImageToRegions(client, image.ID, regions, p.config.Timeout)
2018-11-22 21:33:34 -05:00
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, err
2018-11-22 21:33:34 -05:00
}
}
log.Printf("Adding created image ID %v to output artifacts", image.ID)
artifact = &digitalocean.Artifact{
SnapshotName: image.Name,
SnapshotId: image.ID,
RegionNames: p.config.ImageRegions,
Client: client,
}
if !p.config.SkipClean {
ui.Message(fmt.Sprintf("Deleting import source spaces://%s/%s", p.config.SpaceName, p.config.ObjectName))
err = deleteImageFromSpaces(p, sess)
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, err
2018-11-22 21:33:34 -05:00
}
}
2019-04-02 19:51:58 -04:00
return artifact, false, false, nil
2018-11-22 21:33:34 -05:00
}
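
// uploadImageToSpaces uploads the local image file to the configured Space
// using the S3-compatible API.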
func uploadImageToSpaces(source string, p *PostProcessor, s *session.Session) (err error) {
	file, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("Failed to open %s: %s", source, err)
	}
	defer file.Close()

	uploader := s3manager.NewUploader(s)
	_, err = uploader.Upload(&s3manager.UploadInput{
		Body:   file,
		Bucket: &p.config.SpaceName,
		Key:    &p.config.ObjectName,
		ACL:    aws.String("public-read"),
	})
	if err != nil {
		return fmt.Errorf("Failed to upload %s: %s", source, err)
	}

	return nil
}
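
// importImageFromSpaces creates a DigitalOcean custom image from the uploaded
// object's public Spaces URL.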
func importImageFromSpaces(p *PostProcessor, client *godo.Client) (image *godo.Image, err error) {
	log.Printf("Importing custom image from spaces://%s/%s", p.config.SpaceName, p.config.ObjectName)

	url := fmt.Sprintf("https://%s.%s.digitaloceanspaces.com/%s", p.config.SpaceName, p.config.SpacesRegion, p.config.ObjectName)
	createRequest := &godo.CustomImageCreateRequest{
		Name:         p.config.Name,
		Url:          url,
		Region:       p.config.ImageRegions[0],
		Distribution: p.config.Distribution,
		Description:  p.config.Description,
		Tags:         p.config.Tags,
	}

	image, _, err = client.Images.Create(context.TODO(), createRequest)
	if err != nil {
		return image, fmt.Errorf("Failed to import from spaces://%s/%s: %s", p.config.SpaceName, p.config.ObjectName, err)
	}

	return image, nil
}
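
// waitUntilImageAvailable polls the image by ID until its status is
// "available", an error is reported, or the timeout elapses.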
func waitUntilImageAvailable(client *godo.Client, imageId int, timeout time.Duration) (err error) {
	done := make(chan struct{})
	defer close(done)

	result := make(chan error, 1)
	go func() {
		attempts := 0
		for {
			attempts += 1

			log.Printf("Waiting for image to become available... (attempt: %d)", attempts)
			image, _, err := client.Images.GetByID(context.TODO(), imageId)
			if err != nil {
				result <- err
				return
			}

			if image.Status == "available" {
				result <- nil
				return
			}

			if image.ErrorMessage != "" {
				result <- fmt.Errorf("%v", image.ErrorMessage)
				return
			}

			time.Sleep(3 * time.Second)

			select {
			case <-done:
				return
			default:
			}
		}
	}()

	log.Printf("Waiting for up to %d seconds for image to become available", timeout/time.Second)
	select {
	case err := <-result:
		return err
	case <-time.After(timeout):
		return fmt.Errorf("Timeout while waiting for image to become available")
	}
}
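
// distributeImageToRegions transfers the image to each of the given regions,
// waiting for every transfer action to complete.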
func distributeImageToRegions(client *godo.Client, imageId int, regions []string, timeout time.Duration) (err error) {
	for _, region := range regions {
		transferRequest := &godo.ActionRequest{
			"type":   "transfer",
			"region": region,
		}
		log.Printf("Transferring image to %s", region)
		action, _, err := client.ImageActions.Transfer(context.TODO(), imageId, transferRequest)
		if err != nil {
			return fmt.Errorf("Error transferring image: %s", err)
		}

		if err := digitalocean.WaitForImageState(godo.ActionCompleted, imageId, action.ID, client, timeout); err != nil {
			return fmt.Errorf("Error transferring image: %s", err)
		}
	}

	return nil
}
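
// deleteImageFromSpaces removes the uploaded image object from the Space.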
func deleteImageFromSpaces(p *PostProcessor, s *session.Session) (err error) {
	s3conn := s3.New(s)
	_, err = s3conn.DeleteObject(&s3.DeleteObjectInput{
		Bucket: &p.config.SpaceName,
		Key:    &p.config.ObjectName,
	})
	if err != nil {
		return fmt.Errorf("Failed to delete spaces://%s/%s: %s", p.config.SpaceName, p.config.ObjectName, err)
	}

	return nil
}