2015-11-22 18:32:03 -05:00
|
|
|
package amazonimport
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2015-11-23 18:08:31 -05:00
|
|
|
"log"
|
2016-02-12 02:53:40 -05:00
|
|
|
"os"
|
|
|
|
"strings"
|
2015-11-22 18:32:03 -05:00
|
|
|
|
2015-11-23 20:23:19 -05:00
|
|
|
"github.com/aws/aws-sdk-go/aws"
|
2015-11-22 18:32:03 -05:00
|
|
|
"github.com/aws/aws-sdk-go/aws/session"
|
|
|
|
"github.com/aws/aws-sdk-go/service/ec2"
|
2015-11-23 18:08:31 -05:00
|
|
|
"github.com/aws/aws-sdk-go/service/s3"
|
2016-02-12 02:53:40 -05:00
|
|
|
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
2015-11-22 18:32:03 -05:00
|
|
|
awscommon "github.com/mitchellh/packer/builder/amazon/common"
|
|
|
|
"github.com/mitchellh/packer/common"
|
|
|
|
"github.com/mitchellh/packer/helper/config"
|
|
|
|
"github.com/mitchellh/packer/packer"
|
|
|
|
"github.com/mitchellh/packer/template/interpolate"
|
|
|
|
)
|
|
|
|
|
// BuilderId uniquely identifies artifacts produced by this post-processor.
const BuilderId = "packer.post-processor.amazon-import"
|
|
|
|
|
// Configuration of this post processor
type Config struct {
	common.PackerConfig    `mapstructure:",squash"`
	awscommon.AccessConfig `mapstructure:",squash"`

	// Variables specific to this post processor
	S3Bucket  string            `mapstructure:"s3_bucket_name"` // destination bucket for the OVA upload (required)
	S3Key     string            `mapstructure:"s3_key_name"`    // object key; defaulted to "packer-import-{{timestamp}}.ova" in Configure
	SkipClean bool              `mapstructure:"skip_clean"`     // when true, the uploaded OVA is left in S3 after import
	Tags      map[string]string `mapstructure:"tags"`           // tags applied to the imported AMI and its snapshots
	Name      string            `mapstructure:"ami_name"`       // optional AMI name; triggers a copy-and-deregister rename in PostProcess

	// ctx is the interpolation context used when validating and rendering
	// the s3_key_name template.
	ctx interpolate.Context
}
|
|
|
|
|
|
|
|
// PostProcessor uploads a builder's OVA artifact to S3 and imports it into
// EC2 as an AMI.
type PostProcessor struct {
	// config holds the parsed configuration, populated by Configure.
	config Config
}
|
|
|
|
|
|
|
|
// Entry point for configuration parisng when we've defined
|
|
|
|
func (p *PostProcessor) Configure(raws ...interface{}) error {
|
|
|
|
p.config.ctx.Funcs = awscommon.TemplateFuncs
|
|
|
|
err := config.Decode(&p.config, &config.DecodeOpts{
|
2016-02-12 02:53:40 -05:00
|
|
|
Interpolate: true,
|
|
|
|
InterpolateContext: &p.config.ctx,
|
|
|
|
InterpolateFilter: &interpolate.RenderFilter{
|
2015-11-25 20:02:15 -05:00
|
|
|
Exclude: []string{
|
|
|
|
"s3_key_name",
|
|
|
|
},
|
2015-11-22 18:32:03 -05:00
|
|
|
},
|
|
|
|
}, raws...)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set defaults
|
2015-11-24 16:06:35 -05:00
|
|
|
if p.config.S3Key == "" {
|
2015-11-25 20:02:15 -05:00
|
|
|
p.config.S3Key = "packer-import-{{timestamp}}.ova"
|
2015-11-22 18:32:03 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
errs := new(packer.MultiError)
|
|
|
|
|
2015-11-25 20:02:15 -05:00
|
|
|
// Check and render s3_key_name
|
|
|
|
if err = interpolate.Validate(p.config.S3Key, &p.config.ctx); err != nil {
|
|
|
|
errs = packer.MultiErrorAppend(
|
|
|
|
errs, fmt.Errorf("Error parsing s3_key_name template: %s", err))
|
|
|
|
}
|
|
|
|
|
2015-11-22 18:32:03 -05:00
|
|
|
// Check we have AWS access variables defined somewhere
|
|
|
|
errs = packer.MultiErrorAppend(errs, p.config.AccessConfig.Prepare(&p.config.ctx)...)
|
|
|
|
|
|
|
|
// define all our required paramaters
|
|
|
|
templates := map[string]*string{
|
2016-02-12 02:53:40 -05:00
|
|
|
"s3_bucket_name": &p.config.S3Bucket,
|
2015-11-22 18:32:03 -05:00
|
|
|
}
|
|
|
|
// Check out required params are defined
|
|
|
|
for key, ptr := range templates {
|
|
|
|
if *ptr == "" {
|
|
|
|
errs = packer.MultiErrorAppend(
|
|
|
|
errs, fmt.Errorf("%s must be set", key))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Anything which flagged return back up the stack
|
|
|
|
if len(errs.Errors) > 0 {
|
|
|
|
return errs
|
|
|
|
}
|
|
|
|
|
2016-03-10 19:52:16 -05:00
|
|
|
log.Println(common.ScrubConfig(p.config, p.config.AccessKey, p.config.SecretKey))
|
2015-11-22 18:32:03 -05:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// PostProcess uploads the OVA produced by the builder to S3, runs the EC2
// image-import task on it, optionally copies the imported AMI to apply an
// ami_name, applies any configured tags to the AMI and its snapshots, and
// returns an artifact describing the created AMI. The boolean return
// (keep-input-artifact) is always false.
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	var err error

	// Build the AWS client configuration (region, credentials).
	config, err := p.config.Config()
	if err != nil {
		return nil, false, err
	}

	// Render this key since we didn't in the configure phase — deferred so
	// {{timestamp}} reflects the time of the run, not of config parsing.
	p.config.S3Key, err = interpolate.Render(p.config.S3Key, &p.config.ctx)
	if err != nil {
		return nil, false, fmt.Errorf("Error rendering s3_key_name template: %s", err)
	}
	log.Printf("Rendered s3_key_name as %s", p.config.S3Key)

	log.Println("Looking for OVA in artifact")
	// Locate the files output from the builder; the first *.ova wins.
	source := ""
	for _, path := range artifact.Files() {
		if strings.HasSuffix(path, ".ova") {
			source = path
			break
		}
	}

	// Hope we found something useful
	if source == "" {
		return nil, false, fmt.Errorf("No OVA file found in artifact from builder")
	}

	// Set up the AWS session
	log.Println("Creating AWS session")
	session := session.New(config)

	// open the source file
	log.Printf("Opening file %s to upload", source)
	file, err := os.Open(source)
	if err != nil {
		return nil, false, fmt.Errorf("Failed to open %s: %s", source, err)
	}

	ui.Message(fmt.Sprintf("Uploading %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key))

	// Copy the OVA file into the S3 bucket specified
	uploader := s3manager.NewUploader(session)
	_, err = uploader.Upload(&s3manager.UploadInput{
		Body:   file,
		Bucket: &p.config.S3Bucket,
		Key:    &p.config.S3Key,
	})
	if err != nil {
		// NOTE(review): file is never closed on this error path — the
		// handle leaks until process exit.
		return nil, false, fmt.Errorf("Failed to upload %s: %s", source, err)
	}

	// May as well stop holding this open now
	file.Close()

	ui.Message(fmt.Sprintf("Completed upload of %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key))

	// Call EC2 image import process
	log.Printf("Calling EC2 to import from s3://%s/%s", p.config.S3Bucket, p.config.S3Key)

	ec2conn := ec2.New(session)
	import_start, err := ec2conn.ImportImage(&ec2.ImportImageInput{
		DiskContainers: []*ec2.ImageDiskContainer{
			{
				UserBucket: &ec2.UserBucket{
					S3Bucket: &p.config.S3Bucket,
					S3Key:    &p.config.S3Key,
				},
			},
		},
	})

	if err != nil {
		return nil, false, fmt.Errorf("Failed to start import from s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err)
	}

	ui.Message(fmt.Sprintf("Started import of s3://%s/%s, task id %s", p.config.S3Bucket, p.config.S3Key, *import_start.ImportTaskId))

	// Wait for import process to complete, this takes a while
	ui.Message(fmt.Sprintf("Waiting for task %s to complete (may take a while)", *import_start.ImportTaskId))

	stateChange := awscommon.StateChangeConf{
		Pending: []string{"pending", "active"},
		Refresh: awscommon.ImportImageRefreshFunc(ec2conn, *import_start.ImportTaskId),
		Target:  "completed",
	}

	// Actually do the wait for state change
	// We ignore errors out of this and check job state in AWS API
	awscommon.WaitForState(&stateChange)

	// Retrieve what the outcome was for the import task
	import_result, err := ec2conn.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{
		ImportTaskIds: []*string{
			import_start.ImportTaskId,
		},
	})

	if err != nil {
		return nil, false, fmt.Errorf("Failed to find import task %s: %s", *import_start.ImportTaskId, err)
	}

	// Check it was actually completed
	if *import_result.ImportImageTasks[0].Status != "completed" {
		// The most useful error message is from the job itself
		return nil, false, fmt.Errorf("Import task %s failed: %s", *import_start.ImportTaskId, *import_result.ImportImageTasks[0].StatusMessage)
	}

	ui.Message(fmt.Sprintf("Import task %s complete", *import_start.ImportTaskId))

	// Pull AMI ID out of the completed job
	createdami := *import_result.ImportImageTasks[0].ImageId

	// When ami_name is set, "rename" by copying the imported AMI to a new
	// AMI bearing the name, then deregistering the original.
	if p.config.Name != "" {

		ui.Message(fmt.Sprintf("Starting rename of AMI (%s)", createdami))

		resp, err := ec2conn.CopyImage(&ec2.CopyImageInput{
			Name:          &p.config.Name,
			SourceImageId: &createdami,
			SourceRegion:  config.Region,
		})

		if err != nil {
			return nil, false, fmt.Errorf("Error Copying AMI (%s): %s", createdami, err)
		}

		ui.Message(fmt.Sprintf("Waiting for AMI rename to complete (may take a while)"))

		stateChange := awscommon.StateChangeConf{
			Pending: []string{"pending"},
			Target:  "available",
			Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *resp.ImageId),
		}

		if _, err := awscommon.WaitForState(&stateChange); err != nil {
			return nil, false, fmt.Errorf("Error waiting for AMI (%s): %s", *resp.ImageId, err)
		}

		// Remove the original, unnamed AMI.
		// NOTE(review): the error return of DeregisterImage is discarded,
		// so the `err` tested just below is the (already-nil) CopyImage
		// error and this error branch can never fire — likely a bug.
		ec2conn.DeregisterImage(&ec2.DeregisterImageInput{
			ImageId: &createdami,
		})

		if err != nil {
			return nil, false, fmt.Errorf("Error deregistering existing AMI: %s", err)
		}

		ui.Message(fmt.Sprintf("AMI rename completed"))

		createdami = *resp.ImageId
	}

	// If we have tags, then apply them now to both the AMI and snaps
	// created by the import
	if len(p.config.Tags) > 0 {
		var ec2Tags []*ec2.Tag

		log.Printf("Repacking tags into AWS format")

		for key, value := range p.config.Tags {
			ui.Message(fmt.Sprintf("Adding tag \"%s\": \"%s\"", key, value))
			ec2Tags = append(ec2Tags, &ec2.Tag{
				Key:   aws.String(key),
				Value: aws.String(value),
			})
		}

		// The AMI itself is always tagged; its snapshots are appended below.
		resourceIds := []*string{&createdami}

		log.Printf("Getting details of %s", createdami)

		imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
			ImageIds: resourceIds,
		})

		if err != nil {
			return nil, false, fmt.Errorf("Failed to retrieve details for AMI %s: %s", createdami, err)
		}

		if len(imageResp.Images) == 0 {
			return nil, false, fmt.Errorf("AMI %s has no images", createdami)
		}

		image := imageResp.Images[0]

		log.Printf("Walking block device mappings for %s to find snapshots", createdami)

		for _, device := range image.BlockDeviceMappings {
			if device.Ebs != nil && device.Ebs.SnapshotId != nil {
				ui.Message(fmt.Sprintf("Tagging snapshot %s", *device.Ebs.SnapshotId))
				resourceIds = append(resourceIds, device.Ebs.SnapshotId)
			}
		}

		ui.Message(fmt.Sprintf("Tagging AMI %s", createdami))

		_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
			Resources: resourceIds,
			Tags:      ec2Tags,
		})

		if err != nil {
			return nil, false, fmt.Errorf("Failed to add tags to resources %#v: %s", resourceIds, err)
		}

	}

	// Add the reported AMI ID to the artifact list
	log.Printf("Adding created AMI ID %s in region %s to output artifacts", createdami, *config.Region)
	artifact = &awscommon.Artifact{
		Amis: map[string]string{
			*config.Region: createdami,
		},
		BuilderIdValue: BuilderId,
		Conn:           ec2conn,
	}

	// Remove the uploaded OVA from S3 unless the user opted to keep it.
	if !p.config.SkipClean {
		ui.Message(fmt.Sprintf("Deleting import source s3://%s/%s", p.config.S3Bucket, p.config.S3Key))
		s3conn := s3.New(session)
		_, err = s3conn.DeleteObject(&s3.DeleteObjectInput{
			Bucket: &p.config.S3Bucket,
			Key:    &p.config.S3Key,
		})
		if err != nil {
			return nil, false, fmt.Errorf("Failed to delete s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err)
		}
	}

	return artifact, false, nil
}
|