From 439d1b17887f5e034b78f14caec8d8d644f72c97 Mon Sep 17 00:00:00 2001 From: David Zanetti Date: Mon, 23 Nov 2015 12:32:03 +1300 Subject: [PATCH 1/8] First cut at amazon ova importer --- .../amazon-import/post-processor.go | 164 ++++++++++++++++++ 1 file changed, 164 insertions(+) create mode 100644 post-processor/amazon-import/post-processor.go diff --git a/post-processor/amazon-import/post-processor.go b/post-processor/amazon-import/post-processor.go new file mode 100644 index 000000000..955209938 --- /dev/null +++ b/post-processor/amazon-import/post-processor.go @@ -0,0 +1,164 @@ +package amazonimport + +import ( + "fmt" + "strings" + "os" + + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +// This is bad, it should be pulled out into a common folder across +// both builders and post-processors + awscommon "github.com/mitchellh/packer/builder/amazon/common" + "github.com/mitchellh/packer/common" + "github.com/mitchellh/packer/helper/config" + "github.com/mitchellh/packer/packer" + "github.com/mitchellh/packer/template/interpolate" +) + +// We accept the output from vmware or vmware-esx +var builtins = map[string]string{ + "mitchellh.vmware": "amazon-ova", + "mitchellh.vmware-esx": "amazon-ova", +} + +// Configuration of this post processor +type Config struct { + common.PackerConfig `mapstructure:",squash"` + awscommon.AccessConfig `mapstructure:",squash"` + +// Variables specific to this post processor + S3Bucket string `mapstructure:"s3_bucket_name"` + S3Key string `mapstructure:"s3_key_name"` + ImportTaskDesc string `mapstructure:"import_task_desc"` + ImportDiskDesc string `mapstructure:"import_disk_desc"` + + ctx interpolate.Context +} + +type PostProcessor struct { + config Config +} + +// Entry point for configuration parisng when we've defined +func (p *PostProcessor) Configure(raws ...interface{}) error { + p.config.ctx.Funcs = awscommon.TemplateFuncs + err := 
config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateContext: &p.config.ctx, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{}, + }, + }, raws...) + if err != nil { + return err + } + + // Set defaults + if p.config.ImportTaskDesc == "" { + p.config.ImportTaskDesc = "packer-amazon-ova task" + } + if p.config.ImportDiskDesc == "" { + p.config.ImportDiskDesc = "packer-amazon-ova disk" + } + + errs := new(packer.MultiError) + + // Check we have AWS access variables defined somewhere + errs = packer.MultiErrorAppend(errs, p.config.AccessConfig.Prepare(&p.config.ctx)...) + + // define all our required paramaters + templates := map[string]*string{ + "s3_bucket_name": &p.config.S3Bucket, + "s3_key_name": &p.config.S3Key, + } + // Check out required params are defined + for key, ptr := range templates { + if *ptr == "" { + errs = packer.MultiErrorAppend( + errs, fmt.Errorf("%s must be set", key)) + } + } + + // Anything which flagged return back up the stack + if len(errs.Errors) > 0 { + return errs + } + + return nil +} + +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { + var err error + + config, err := p.config.Config() + if err != nil { + return nil, false, err + } + // Confirm we're dealing with the result of a builder we like + if _, ok := builtins[artifact.BuilderId()]; !ok { + return nil, false, fmt.Errorf("Artifact type %s is not supported by this post-processor", artifact.BuilderId()) + } + + // Locate the files output from the builder + source := "" + for _, path := range artifact.Files() { + if strings.HasSuffix(path, ".ova") { + source = path + break + } + } + + // Hope we found something useful + if source == "" { + return nil, false, fmt.Errorf("OVA file not found") + } + + // Set up the AWS session + session := session.New(config) + + // open the source file + file, err := os.Open(source) + if err != nil { + return nil, false, fmt.Errorf("Failed to 
open %s: %s", source, err) + } + + ui.Message(fmt.Sprintf("Uploading %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key)) + + // Copy the OVA file into the S3 bucket specified + uploader := s3manager.NewUploader(session) + _, err = uploader.Upload(&s3manager.UploadInput{ + Body: file, + Bucket: &p.config.S3Bucket, + Key: &p.config.S3Key, + }) + if err != nil { + return nil, false, fmt.Errorf("Failed to upload %s: %s", source, err) + } + + ui.Message(fmt.Sprintf("Completed upload of %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key)) + + // Call EC2 image import process + ec2conn := ec2.New(session) + impres, err := ec2conn.ImportImage(&ec2.ImportImageInput{ + Description: &p.config.ImportTaskDesc, + DiskContainers: []*ec2.ImageDiskContainer{ + { + Description: &p.config.ImportDiskDesc, + UserBucket: &ec2.UserBucket{ + S3Bucket: &p.config.S3Bucket, + S3Key: &p.config.S3Key, + }, + }, + }, + }) + + if err != nil { + return nil, false, fmt.Errorf("Failed to start import from s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err) + } + + ui.Message(fmt.Sprintf("Started import of s3://%s/%s, task id %s", p.config.S3Bucket, p.config.S3Key, *impres.ImportTaskId)) + + return artifact, false, nil +} From 418da1699169c2b0fb2078bbea70e98a15d99c3f Mon Sep 17 00:00:00 2001 From: David Zanetti Date: Mon, 23 Nov 2015 15:55:09 +1300 Subject: [PATCH 2/8] Track the import task and report AMIs created from it --- builder/amazon/common/state.go | 29 ++++++++++++ .../amazon-import/post-processor.go | 47 +++++++++++++++++-- 2 files changed, 72 insertions(+), 4 deletions(-) diff --git a/builder/amazon/common/state.go b/builder/amazon/common/state.go index 6c9de3eb2..080b69ee9 100644 --- a/builder/amazon/common/state.go +++ b/builder/amazon/common/state.go @@ -8,6 +8,7 @@ import ( "os" "strconv" "time" + "strings" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" @@ -128,6 +129,34 @@ func SpotRequestStateRefreshFunc(conn *ec2.EC2, 
spotRequestId string) StateRefre } } +func ImportImageRefreshFunc(conn *ec2.EC2, importTaskId string) StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{ + ImportTaskIds: []*string{ + &importTaskId, + }, + }, + ) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && strings.HasPrefix(ec2err.Code(),"InvalidConversionTaskId") { + resp = nil + } else if isTransientNetworkError(err) { + resp = nil + } else { + log.Printf("Error on ImportImageRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil || len(resp.ImportImageTasks) == 0 { + return nil, "", nil + } + + i := resp.ImportImageTasks[0] + return i, *i.Status, nil + } +} + // WaitForState watches an object and waits for it to achieve a certain // state. func WaitForState(conf *StateChangeConf) (i interface{}, err error) { diff --git a/post-processor/amazon-import/post-processor.go b/post-processor/amazon-import/post-processor.go index 955209938..102106a74 100644 --- a/post-processor/amazon-import/post-processor.go +++ b/post-processor/amazon-import/post-processor.go @@ -17,10 +17,12 @@ import ( "github.com/mitchellh/packer/template/interpolate" ) +const BuilderId = "packer.post-processor.amazon-import" + // We accept the output from vmware or vmware-esx var builtins = map[string]string{ - "mitchellh.vmware": "amazon-ova", - "mitchellh.vmware-esx": "amazon-ova", + "mitchellh.vmware": "amazon-import", + "mitchellh.vmware-esx": "amazon-import", } // Configuration of this post processor @@ -141,7 +143,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac // Call EC2 image import process ec2conn := ec2.New(session) - impres, err := ec2conn.ImportImage(&ec2.ImportImageInput{ + import_start, err := ec2conn.ImportImage(&ec2.ImportImageInput{ Description: &p.config.ImportTaskDesc, DiskContainers: []*ec2.ImageDiskContainer{ { @@ -158,7 +160,44 @@ func (p *PostProcessor) 
PostProcess(ui packer.Ui, artifact packer.Artifact) (pac return nil, false, fmt.Errorf("Failed to start import from s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err) } - ui.Message(fmt.Sprintf("Started import of s3://%s/%s, task id %s", p.config.S3Bucket, p.config.S3Key, *impres.ImportTaskId)) + ui.Message(fmt.Sprintf("Started import of s3://%s/%s, task id %s", p.config.S3Bucket, p.config.S3Key, *import_start.ImportTaskId)) + + // Wait for import process to complete, this takess a while + ui.Message(fmt.Sprintf("Waiting for task %s to complete (may take a while)", *import_start.ImportTaskId)) + + stateChange := awscommon.StateChangeConf{ + Pending: []string{"pending","active"}, + Refresh: awscommon.ImportImageRefreshFunc(ec2conn, *import_start.ImportTaskId), + Target: "completed", + } + _, err = awscommon.WaitForState(&stateChange) + + if err != nil { + return nil, false, fmt.Errorf("Import task %s failed: %s", *import_start.ImportTaskId, err) + } + + // Extract the AMI ID and return this as the artifact of the + // post processor + import_result, err := ec2conn.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{ + ImportTaskIds: []*string{ + import_start.ImportTaskId, + }, + }) + + if err != nil { + return nil, false, fmt.Errorf("API error for import task id %s: %s", *import_start.ImportTaskId, err) + } + + // Add the discvered AMI ID to the artifact list + artifact = &awscommon.Artifact{ + Amis: map[string]string{ + *config.Region: *import_result.ImportImageTasks[0].ImageId, + }, + BuilderIdValue: BuilderId, + Conn: ec2conn, + } return artifact, false, nil } + + From 95b3ea50ae6f8c0cdece174a07b8d610422ca000 Mon Sep 17 00:00:00 2001 From: David Zanetti Date: Tue, 24 Nov 2015 12:08:31 +1300 Subject: [PATCH 3/8] Add more useful messages and clean up after ourselves - S3 object uploaded removed after import (with disable option) - Indicate to user when import is complete - Close the source file uploaded after upload is done - Each step of import 
process logs a debug message --- .../amazon-import/post-processor.go | 35 +++++++++++++++---- 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/post-processor/amazon-import/post-processor.go b/post-processor/amazon-import/post-processor.go index 102106a74..1794b7bf8 100644 --- a/post-processor/amazon-import/post-processor.go +++ b/post-processor/amazon-import/post-processor.go @@ -4,10 +4,12 @@ import ( "fmt" "strings" "os" + "log" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/aws/aws-sdk-go/service/s3" // This is bad, it should be pulled out into a common folder across // both builders and post-processors awscommon "github.com/mitchellh/packer/builder/amazon/common" @@ -33,6 +35,7 @@ type Config struct { // Variables specific to this post processor S3Bucket string `mapstructure:"s3_bucket_name"` S3Key string `mapstructure:"s3_key_name"` + SkipClean bool `mapstructure:"skip_clean"` ImportTaskDesc string `mapstructure:"import_task_desc"` ImportDiskDesc string `mapstructure:"import_disk_desc"` @@ -103,6 +106,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac return nil, false, fmt.Errorf("Artifact type %s is not supported by this post-processor", artifact.BuilderId()) } + log.Println("Looking for OVA in artifact...") // Locate the files output from the builder source := "" for _, path := range artifact.Files() { @@ -118,9 +122,11 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac } // Set up the AWS session + log.Println("Creating AWS session...") session := session.New(config) // open the source file + log.Printf("Opening file %s to upload...", source) file, err := os.Open(source) if err != nil { return nil, false, fmt.Errorf("Failed to open %s: %s", source, err) @@ -139,8 +145,12 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac return nil, false, 
fmt.Errorf("Failed to upload %s: %s", source, err) } + // May as well stop holding this open now + file.Close() + ui.Message(fmt.Sprintf("Completed upload of %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key)) + log.Printf("Calling EC2 to import from s3://%s/%s", p.config.S3Bucket, p.config.S3Key) // Call EC2 image import process ec2conn := ec2.New(session) import_start, err := ec2conn.ImportImage(&ec2.ImportImageInput{ @@ -170,14 +180,14 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac Refresh: awscommon.ImportImageRefreshFunc(ec2conn, *import_start.ImportTaskId), Target: "completed", } + // Actually do the wait for state change _, err = awscommon.WaitForState(&stateChange) if err != nil { return nil, false, fmt.Errorf("Import task %s failed: %s", *import_start.ImportTaskId, err) } - // Extract the AMI ID and return this as the artifact of the - // post processor + // Extract the AMI ID from the completed import task import_result, err := ec2conn.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{ ImportTaskIds: []*string{ import_start.ImportTaskId, @@ -185,10 +195,13 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac }) if err != nil { - return nil, false, fmt.Errorf("API error for import task id %s: %s", *import_start.ImportTaskId, err) + return nil, false, fmt.Errorf("Failed to find import task %s: %s", *import_start.ImportTaskId, err) } - // Add the discvered AMI ID to the artifact list + ui.Message(fmt.Sprintf("Import task %s complete", *import_start.ImportTaskId)) + + // Add the reported AMI ID to the artifact list + log.Printf("Adding created AMI ID %s in region %s to output artifacts", *import_result.ImportImageTasks[0].ImageId, *config.Region) artifact = &awscommon.Artifact{ Amis: map[string]string{ *config.Region: *import_result.ImportImageTasks[0].ImageId, @@ -197,7 +210,17 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac Conn: 
ec2conn, } + if !p.config.SkipClean { + ui.Message(fmt.Sprintf("Deleting import source s3://%s/%s", p.config.S3Bucket, p.config.S3Key)) + s3conn := s3.New(session) + _, err = s3conn.DeleteObject(&s3.DeleteObjectInput{ + Bucket: &p.config.S3Bucket, + Key: &p.config.S3Key, + }) + if err != nil { + return nil, false, fmt.Errorf("Failed to delete s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err) + } + } + return artifact, false, nil } - - From 258fd7c6b6fdcf852694271409b7085e4df1e950 Mon Sep 17 00:00:00 2001 From: David Zanetti Date: Tue, 24 Nov 2015 12:19:20 +1300 Subject: [PATCH 4/8] First cut at documentation for post-processor amazon-import --- .../amazon-import.html.markdown | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 website/source/docs/post-processors/amazon-import.html.markdown diff --git a/website/source/docs/post-processors/amazon-import.html.markdown b/website/source/docs/post-processors/amazon-import.html.markdown new file mode 100644 index 000000000..e2699701a --- /dev/null +++ b/website/source/docs/post-processors/amazon-import.html.markdown @@ -0,0 +1,40 @@ +--- +description: | + The Packer Amazon Import post-processor takes an OVA artifact from the VMware builder and + imports it to an AMI available to Amazon Web Services EC2. +layout: docs +page_title: 'Amazon Import Post-Processor' +... + +# Amazon Import Post-Processor + +Type: `amazon-import` + +The Packer Amazon Import post-processor takes an OVA artifact from the VMware builder and imports it to an AMI available to Amazon Web Services EC2. + +\~> This post-processor is for advanced users. Please ensure you read the ["prerequisites for import"](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html) before using this post-processor. You are strongly recommended to understand what behaviour is expected from an AMI before using this post-processor. + +## How Does it Work? 
+ +The import process operates by copying the OVA to an S3 bucket, and calling an import task in EC2 on the OVA file. Once completed, an AMI is returned containing the converted virtual machine. + +The import process itself run by AWS includes modifications to the image uploaded, to allow it to boot and operate in the AWS EC2 environment. However, not all modifications required to make the machine run well in EC2 are performed. Take care around console output from the machine, as debugging can be very difficult without it. + +Further information about the import process can be found in AWS's ["EC2 Import/Export Instance documentation"](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instances_of_your_vm.html). + +## Configuration + +There are some configuration options available for the post-processor. They are +segmented below into two categories: required and optional parameters. Within +each category, the available configuration keys are alphabetized. + +Required: + +- `s3_bucket` (string) - The name of the bucket where the OVA file will be copied to for import. + +- `s3_key` (string) - The name of the key where the OVA file will be copied to for import. + +Optional: + +- `skip_clean` (boolean) - Whether we should skip removing the OVA file uploaded to S3 after the import process has completed. "true" means that we should leave it in the S3 bucket, "false" means to clean it out. Defaults to "false". 
+ From 8c04f1646bb59de46780344d3c64fb4f1d592795 Mon Sep 17 00:00:00 2001 From: David Zanetti Date: Tue, 24 Nov 2015 13:54:05 +1300 Subject: [PATCH 5/8] Expand and update amazon-import documentation - Add example of usage - Expand on how it works - Correctly reflect the hard requirement for AWS access keys and region - Add tag support documentation --- .../amazon-import.html.markdown | 50 ++++++++++++++++--- 1 file changed, 43 insertions(+), 7 deletions(-) diff --git a/website/source/docs/post-processors/amazon-import.html.markdown b/website/source/docs/post-processors/amazon-import.html.markdown index e2699701a..ae9e6ea87 100644 --- a/website/source/docs/post-processors/amazon-import.html.markdown +++ b/website/source/docs/post-processors/amazon-import.html.markdown @@ -12,29 +12,65 @@ Type: `amazon-import` The Packer Amazon Import post-processor takes an OVA artifact from the VMware builder and imports it to an AMI available to Amazon Web Services EC2. -\~> This post-processor is for advanced users. Please ensure you read the ["prerequisites for import"](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html) before using this post-processor. You are strongly recommended to understand what behaviour is expected from an AMI before using this post-processor. +\~> This post-processor is for advanced users. It depends on specific IAM roles inside AWS and is best used with images that operate with the EC2 configuration model (eg, cloud-init for Linux systems). Please ensure you read the [prerequisites for import](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html) before using this post-processor. ## How Does it Work? The import process operates by copying the OVA to an S3 bucket, and calling an import task in EC2 on the OVA file. Once completed, an AMI is returned containing the converted virtual machine. 
-The import process itself run by AWS includes modifications to the image uploaded, to allow it to boot and operate in the AWS EC2 environment. However, not all modifications required to make the machine run well in EC2 are performed. Take care around console output from the machine, as debugging can be very difficult without it. +The import process itself run by AWS includes modifications to the image uploaded, to allow it to boot and operate in the AWS EC2 environment. However, not all modifications required to make the machine run well in EC2 are performed. Take care around console output from the machine, as debugging can be very difficult without it. You may also want to include tools suitable for instances in EC2 such as cloud-init for Linux. -Further information about the import process can be found in AWS's ["EC2 Import/Export Instance documentation"](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instances_of_your_vm.html). +Further information about the import process can be found in AWS's [EC2 Import/Export Instance documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instances_of_your_vm.html). ## Configuration There are some configuration options available for the post-processor. They are -segmented below into two categories: required and optional parameters. Within -each category, the available configuration keys are alphabetized. +segmented below into three categories: required and optional parameters. +Within each category, the available configuration keys are alphabetized. Required: -- `s3_bucket` (string) - The name of the bucket where the OVA file will be copied to for import. +- `access_key` (string) - The access key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) -- `s3_key` (string) - The name of the key where the OVA file will be copied to for import. 
+- `region` (string) - The name of the region, such as "us-east-1" in which to upload the OVA file to S3 and create the AMI. A list of valid regions can be obtained with AWS CLI tools or by consulting the AWS website. + +- `s3_bucket_name` (string) - The name of the S3 bucket where the OVA file will be copied to for import. This bucket must exist when the post-processor is run. + +- `s3_key_name` (string) - The name of the key in `s3_bucket` where the OVA file will be copied to for import. This key will be removed after import, unless `skip_clean` is true. + +- `secret_key` (string) - The secret key used to communicate with AWS. [Learn + how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) Optional: - `skip_clean` (boolean) - Whether we should skip removing the OVA file uploaded to S3 after the import process has completed. "true" means that we should leave it in the S3 bucket, "false" means to clean it out. Defaults to "false". +- `tags` (object of key/value strings) - Tags applied to the created AMI and + relevant snapshots. + +## Basic Example + +Here is a basic example. This assumes that the builder has produced an OVA artifact for us to work with. + +``` {.javascript} +{ + "type": "amazon-import", + "access_key": "YOUR KEY HERE", + "secret_key": "YOUR SECRET KEY HERE", + "region": "us-east-1", + "s3_bucket_name": "importbucket", + "s3_key_name": "import.ova", + "tags": { + "Description": "packer amazon-import {{timestamp}}" + } +} +``` + +> **Note:** Packer can also read the access key and secret access key from +environmental variables. See the configuration reference in the section above +for more information on what environmental variables Packer will look for. + +This will locate the OVA created by the builder, upload it into the S3 bucket called "importbucket" which must exist when the post-process runs, call the OVA file "import.ova" in that bucket, and then kick off an import process into an AMI. 
The region used for both the S3 upload and the AMI import will be "us-east-1". + +Once created, both the AMI and the snapshots associated with it would be tagged with a key called "Description" and a value of "packer amazon-import" with the timestamp appended. From 274630bd9c238a32f18e3a7e7ca6ce8291e6a668 Mon Sep 17 00:00:00 2001 From: David Zanetti Date: Tue, 24 Nov 2015 14:23:19 +1300 Subject: [PATCH 6/8] Support adding tags to AMI and snapshots created by amazon-import - Tags follow the same approach as the amazon-ebs builder - Clean up some debug messages - Improve readability by pulling out AMI id into separate variable Note: this duplicates the tag creation code in builder/amazon/common/step_create_tags.go. Maybe this should be a multistep post-processor instead, and we re-use steps from the builder. --- .../amazon-import/post-processor.go | 69 +++++++++++++++++-- 1 file changed, 64 insertions(+), 5 deletions(-) diff --git a/post-processor/amazon-import/post-processor.go b/post-processor/amazon-import/post-processor.go index 1794b7bf8..45ff58e5d 100644 --- a/post-processor/amazon-import/post-processor.go +++ b/post-processor/amazon-import/post-processor.go @@ -4,10 +4,12 @@ import ( "fmt" "strings" "os" + "log" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/s3/s3manager" @@ -38,6 +39,7 @@ type Config struct { SkipClean bool `mapstructure:"skip_clean"` ImportTaskDesc string `mapstructure:"import_task_desc"` ImportDiskDesc string `mapstructure:"import_disk_desc"` + Tags map[string]string `mapstructure:"tags"` ctx interpolate.Context } @@ -106,7 +108,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac return nil, false, fmt.Errorf("Artifact type %s is not supported by this post-processor", artifact.BuilderId()) } - log.Println("Looking for OVA in artifact...") + log.Println("Looking for OVA in artifact") // Locate the files output from the builder 
source := "" for _, path := range artifact.Files() { @@ -122,11 +124,11 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac } // Set up the AWS session - log.Println("Creating AWS session...") + log.Println("Creating AWS session") session := session.New(config) // open the source file - log.Printf("Opening file %s to upload...", source) + log.Printf("Opening file %s to upload", source) file, err := os.Open(source) if err != nil { return nil, false, fmt.Errorf("Failed to open %s: %s", source, err) @@ -200,11 +202,68 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Message(fmt.Sprintf("Import task %s complete", *import_start.ImportTaskId)) + createdami := *import_result.ImportImageTasks[0].ImageId + + // If we have tags, then apply them now to both the AMI and snaps + // created by the import + if len(p.config.Tags) > 0 { + var ec2Tags []*ec2.Tag; + + log.Printf("Repacking tags into AWS format") + + for key, value := range p.config.Tags { + ui.Message(fmt.Sprintf("Adding tag \"%s\": \"%s\"", key, value)) + ec2Tags = append(ec2Tags, &ec2.Tag{ + Key: aws.String(key), + Value: aws.String(value), + }) + } + + resourceIds := []*string{&createdami} + + log.Printf("Getting details of %s", createdami) + + imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ + ImageIds: resourceIds, + }) + + if err != nil { + return nil, false, fmt.Errorf("Failed to retrieve details for AMI %s: %s", createdami, err) + } + + if len(imageResp.Images) == 0 { + return nil, false, fmt.Errorf("AMI %s has no images", createdami) + } + + image := imageResp.Images[0] + + log.Printf("Walking block device mappings for %s to find snapshots", createdami) + + for _, device := range image.BlockDeviceMappings { + if device.Ebs != nil && device.Ebs.SnapshotId != nil { + ui.Message(fmt.Sprintf("Tagging snapshot %s", *device.Ebs.SnapshotId)) + resourceIds = append(resourceIds, device.Ebs.SnapshotId) + } + } + + 
ui.Message(fmt.Sprintf("Tagging AMI %s", createdami)) + + _, err = ec2conn.CreateTags(&ec2.CreateTagsInput{ + Resources: resourceIds, + Tags: ec2Tags, + }) + + if err != nil { + return nil, false, fmt.Errorf("Failed to add tags to resources %#v: %s", resourceIds, err) + } + + } + + // Add the reported AMI ID to the artifact list - log.Printf("Adding created AMI ID %s in region %s to output artifacts", *import_result.ImportImageTasks[0].ImageId, *config.Region) + log.Printf("Adding created AMI ID %s in region %s to output artifacts", createdami, *config.Region) artifact = &awscommon.Artifact{ Amis: map[string]string{ - *config.Region: *import_result.ImportImageTasks[0].ImageId, + *config.Region: createdami, }, BuilderIdValue: BuilderId, Conn: ec2conn, From 873dc89478041df7f83229631e05b8321a796c77 Mon Sep 17 00:00:00 2001 From: David Zanetti Date: Wed, 25 Nov 2015 10:06:35 +1300 Subject: [PATCH 7/8] Accept any OVA. Fix names for tasks/import/copy. Update docs. - s3_key_name is now optional, default is equivalent to "packer-import-{{timestamp}}" - Remove restriction on builder used, anything producing an OVA is okay - Fix task and ova description passed to import API call, correctly adds timestamp to both - Documentation updated - Remove VMware-specific text - Mark s3_key_name as optional - Remove s3_key_name from example now it's optional - Explain the import process more clearly in example --- .../amazon-import/post-processor.go | 20 ++++++------------- .../amazon-import.html.markdown | 19 +++++++++--------- 2 files changed, 16 insertions(+), 23 deletions(-) diff --git a/post-processor/amazon-import/post-processor.go b/post-processor/amazon-import/post-processor.go index 45ff58e5d..1b4e1d384 100644 --- a/post-processor/amazon-import/post-processor.go +++ b/post-processor/amazon-import/post-processor.go @@ -22,12 +22,6 @@ import ( const BuilderId = "packer.post-processor.amazon-import" -// We accept the output from vmware or vmware-esx -var builtins = 
map[string]string{ - "mitchellh.vmware": "amazon-import", - "mitchellh.vmware-esx": "amazon-import", -} - // Configuration of this post processor type Config struct { common.PackerConfig `mapstructure:",squash"` @@ -64,10 +58,13 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { // Set defaults if p.config.ImportTaskDesc == "" { - p.config.ImportTaskDesc = "packer-amazon-ova task" + p.config.ImportTaskDesc = fmt.Sprintf("packer-import-%d", interpolate.InitTime.Unix()) } if p.config.ImportDiskDesc == "" { - p.config.ImportDiskDesc = "packer-amazon-ova disk" + p.config.ImportDiskDesc = fmt.Sprintf("packer-import-ova-%d", interpolate.InitTime.Unix()) + } + if p.config.S3Key == "" { + p.config.S3Key = fmt.Sprintf("packer-import-%d.ova", interpolate.InitTime.Unix()) } errs := new(packer.MultiError) @@ -78,7 +75,6 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { // define all our required paramaters templates := map[string]*string{ "s3_bucket_name": &p.config.S3Bucket, - "s3_key_name": &p.config.S3Key, } // Check out required params are defined for key, ptr := range templates { @@ -103,10 +99,6 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac if err != nil { return nil, false, err } - // Confirm we're dealing with the result of a builder we like - if _, ok := builtins[artifact.BuilderId()]; !ok { - return nil, false, fmt.Errorf("Artifact type %s is not supported by this post-processor", artifact.BuilderId()) - } log.Println("Looking for OVA in artifact") // Locate the files output from the builder @@ -120,7 +112,7 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac // Hope we found something useful if source == "" { - return nil, false, fmt.Errorf("OVA file not found") + return nil, false, fmt.Errorf("No OVA file found in artifact from builder") } // Set up the AWS session diff --git a/website/source/docs/post-processors/amazon-import.html.markdown 
b/website/source/docs/post-processors/amazon-import.html.markdown index ae9e6ea87..bf927179b 100644 --- a/website/source/docs/post-processors/amazon-import.html.markdown +++ b/website/source/docs/post-processors/amazon-import.html.markdown @@ -1,6 +1,6 @@ --- description: | - The Packer Amazon Import post-processor takes an OVA artifact from the VMware builder and + The Packer Amazon Import post-processor takes an OVA artifact from various builders and imports it to an AMI available to Amazon Web Services EC2. layout: docs page_title: 'Amazon Import Post-Processor' @@ -10,13 +10,13 @@ page_title: 'Amazon Import Post-Processor' Type: `amazon-import` -The Packer Amazon Import post-processor takes an OVA artifact from the VMware builder and imports it to an AMI available to Amazon Web Services EC2. +The Packer Amazon Import post-processor takes an OVA artifact from various builder and imports it to an AMI available to Amazon Web Services EC2. \~> This post-processor is for advanced users. It depends on specific IAM roles inside AWS and is best used with images that operate with the EC2 configuration model (eg, cloud-init for Linux systems). Please ensure you read the [prerequisites for import](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html) before using this post-processor. ## How Does it Work? -The import process operates by copying the OVA to an S3 bucket, and calling an import task in EC2 on the OVA file. Once completed, an AMI is returned containing the converted virtual machine. +The import process operates making a temporary copy of the OVA to an S3 bucket, and calling an import task in EC2 on the OVA file. Once completed, an AMI is returned containing the converted virtual machine. The temporary OVA copy in S3 can be discarded after the import is complete. The import process itself run by AWS includes modifications to the image uploaded, to allow it to boot and operate in the AWS EC2 environment. 
However, not all modifications required to make the machine run well in EC2 are performed. Take care around console output from the machine, as debugging can be very difficult without it. You may also want to include tools suitable for instances in EC2 such as cloud-init for Linux. @@ -37,13 +37,13 @@ Required: - `s3_bucket_name` (string) - The name of the S3 bucket where the OVA file will be copied to for import. This bucket must exist when the post-processor is run. -- `s3_key_name` (string) - The name of the key in `s3_bucket` where the OVA file will be copied to for import. This key will be removed after import, unless `skip_clean` is true. - - `secret_key` (string) - The secret key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) Optional: +- `s3_key_name` (string) - The name of the key in `s3_bucket_name` where the OVA file will be copied to for import. If not specified, this will default to "packer-import-{{timestamp}}.ova". This key (ie, the uploaded OVA) will be removed after import, unless `skip_clean` is true. + - `skip_clean` (boolean) - Whether we should skip removing the OVA file uploaded to S3 after the import process has completed. "true" means that we should leave it in the S3 bucket, "false" means to clean it out. Defaults to "false". - `tags` (object of key/value strings) - Tags applied to the created AMI and @@ -51,7 +51,7 @@ Optional: ## Basic Example -Here is a basic example. This assumes that the builder has produced an OVA artifact for us to work with. +Here is a basic example. This assumes that the builder has produced an OVA artifact for us to work with, and IAM roles for import exist in the AWS account being imported into. ``` {.javascript} { @@ -60,7 +60,6 @@ Here is a basic example. 
This assumes that the builder has produced an OVA artif "secret_key": "YOUR SECRET KEY HERE", "region": "us-east-1", "s3_bucket_name": "importbucket", - "s3_key_name": "import.ova", "tags": { "Description": "packer amazon-import {{timestamp}}" } @@ -71,6 +70,8 @@ Here is a basic example. This assumes that the builder has produced an OVA artif environmental variables. See the configuration reference in the section above for more information on what environmental variables Packer will look for. -This will locate the OVA created by the builder, upload it into the S3 bucket called "importbucket" which must exist when the post-process runs, call the OVA file "import.ova" in that bucket, and then kick off an import process into an AMI. The region used for both the S3 upload and the AMI import will be "us-east-1". +This will take the OVA generated by a builder and upload it to S3. In this case, an existing bucket called "importbucket" in the "us-east-1" region will be where the copy is placed. The key name of the copy will be a default name generated by packer. -Once created, both the AMI and the snapshots associated with it would be tagged with a key called "Description" and a value of "packer amazon-import" with the timestamp appended. +Once uploaded, the import process will start, creating an AMI in the "us-east-1" region with a "Description" tag applied to both the AMI and the snapshots associated with it. Note: the import process does not allow you to name the AMI, the name is automatically generated by AWS. + +After tagging is completed, the OVA uploaded to S3 will be removed. From a3911ad47c2187fa6e5d707ef7a59a9cce794626 Mon Sep 17 00:00:00 2001 From: David Zanetti Date: Thu, 26 Nov 2015 14:02:15 +1300 Subject: [PATCH 8/8] Use interpolation on default s3_key_name. Report import errors from AWS. 
- Import errors were not very useful messages, obtain the import status from the import task as AWS reports it - Interpolate s3_key_name as per PR comments (rather than hard-code the generated parts of the default value) - Remove descriptions on AWS import job, they are optional anyway. --- .../amazon-import/post-processor.go | 49 ++++++++++++------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/post-processor/amazon-import/post-processor.go b/post-processor/amazon-import/post-processor.go index 1b4e1d384..80352af94 100644 --- a/post-processor/amazon-import/post-processor.go +++ b/post-processor/amazon-import/post-processor.go @@ -31,8 +31,6 @@ type Config struct { S3Bucket string `mapstructure:"s3_bucket_name"` S3Key string `mapstructure:"s3_key_name"` SkipClean bool `mapstructure:"skip_clean"` - ImportTaskDesc string `mapstructure:"import_task_desc"` - ImportDiskDesc string `mapstructure:"import_disk_desc"` Tags map[string]string `mapstructure:"tags"` ctx interpolate.Context @@ -49,7 +47,9 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { Interpolate: true, InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ - Exclude: []string{}, + Exclude: []string{ + "s3_key_name", + }, }, }, raws...) 
if err != nil { @@ -57,18 +57,18 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } // Set defaults - if p.config.ImportTaskDesc == "" { - p.config.ImportTaskDesc = fmt.Sprintf("packer-import-%d", interpolate.InitTime.Unix()) - } - if p.config.ImportDiskDesc == "" { - p.config.ImportDiskDesc = fmt.Sprintf("packer-import-ova-%d", interpolate.InitTime.Unix()) - } if p.config.S3Key == "" { - p.config.S3Key = fmt.Sprintf("packer-import-%d.ova", interpolate.InitTime.Unix()) + p.config.S3Key = "packer-import-{{timestamp}}.ova" } errs := new(packer.MultiError) + // Check and render s3_key_name + if err = interpolate.Validate(p.config.S3Key, &p.config.ctx); err != nil { + errs = packer.MultiErrorAppend( + errs, fmt.Errorf("Error parsing s3_key_name template: %s", err)) + } + // Check we have AWS access variables defined somewhere errs = packer.MultiErrorAppend(errs, p.config.AccessConfig.Prepare(&p.config.ctx)...) @@ -100,6 +100,13 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac return nil, false, err } + // Render this key since we didn't in the configure phase + p.config.S3Key, err = interpolate.Render(p.config.S3Key, &p.config.ctx) + if err != nil { + return nil, false, fmt.Errorf("Error rendering s3_key_name template: %s", err) + } + log.Printf("Rendered s3_key_name as %s", p.config.S3Key) + log.Println("Looking for OVA in artifact") // Locate the files output from the builder source := "" @@ -144,14 +151,13 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Message(fmt.Sprintf("Completed upload of %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key)) - log.Printf("Calling EC2 to import from s3://%s/%s", p.config.S3Bucket, p.config.S3Key) // Call EC2 image import process + log.Printf("Calling EC2 to import from s3://%s/%s", p.config.S3Bucket, p.config.S3Key) + ec2conn := ec2.New(session) import_start, err := ec2conn.ImportImage(&ec2.ImportImageInput{ - Description: 
&p.config.ImportTaskDesc, DiskContainers: []*ec2.ImageDiskContainer{ { - Description: &p.config.ImportDiskDesc, UserBucket: &ec2.UserBucket{ S3Bucket: &p.config.S3Bucket, S3Key: &p.config.S3Key, @@ -174,14 +180,12 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac Refresh: awscommon.ImportImageRefreshFunc(ec2conn, *import_start.ImportTaskId), Target: "completed", } + // Actually do the wait for state change - _, err = awscommon.WaitForState(&stateChange) + // We ignore errors out of this and check job state in AWS API + awscommon.WaitForState(&stateChange) - if err != nil { - return nil, false, fmt.Errorf("Import task %s failed: %s", *import_start.ImportTaskId, err) - } - - // Extract the AMI ID from the completed import task + // Retrieve what the outcome was for the import task import_result, err := ec2conn.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{ ImportTaskIds: []*string{ import_start.ImportTaskId, @@ -192,8 +196,15 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac return nil, false, fmt.Errorf("Failed to find import task %s: %s", *import_start.ImportTaskId, err) } + // Check it was actually completed + if *import_result.ImportImageTasks[0].Status != "completed" { + // The most useful error message is from the job itself + return nil, false, fmt.Errorf("Import task %s failed: %s", *import_start.ImportTaskId, *import_result.ImportImageTasks[0].StatusMessage) + } + ui.Message(fmt.Sprintf("Import task %s complete", *import_start.ImportTaskId)) + // Pull AMI ID out of the completed job createdami := *import_result.ImportImageTasks[0].ImageId // If we have tags, then apply them now to both the AMI and snaps