// packer-cn/post-processor/amazon-import/post-processor.go

package amazonimport

import (
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	awscommon "github.com/hashicorp/packer/builder/amazon/common"
	"github.com/hashicorp/packer/common"
	"github.com/hashicorp/packer/helper/config"
	"github.com/hashicorp/packer/packer"
	"github.com/hashicorp/packer/template/interpolate"
)
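
// BuilderId is the unique identifier of this post-processor; it is attached to
// every artifact it produces (see BuilderIdValue in PostProcess below).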
const BuilderId = "packer.post-processor.amazon-import"

// Config is the configuration for this post-processor.
type Config struct {
	common.PackerConfig    `mapstructure:",squash"`
	awscommon.AccessConfig `mapstructure:",squash"`

	// Variables specific to this post-processor
	S3Bucket        string            `mapstructure:"s3_bucket_name"`
	S3Key           string            `mapstructure:"s3_key_name"`
	S3Encryption    string            `mapstructure:"s3_encryption"`
	S3EncryptionKey string            `mapstructure:"s3_encryption_key"`
	SkipClean       bool              `mapstructure:"skip_clean"`
	Tags            map[string]string `mapstructure:"tags"`
	Name            string            `mapstructure:"ami_name"`
	Description     string            `mapstructure:"ami_description"`
	Users           []string          `mapstructure:"ami_users"`
	Groups          []string          `mapstructure:"ami_groups"`
	Encrypt         bool              `mapstructure:"ami_encrypt"`
	KMSKey          string            `mapstructure:"ami_kms_key"`
	LicenseType     string            `mapstructure:"license_type"`
	RoleName        string            `mapstructure:"role_name"`
	Format          string            `mapstructure:"format"`

	ctx interpolate.Context
}
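
// A typical template stanza for this post-processor needs little more than the
// S3 bucket; everything else falls back to the defaults applied in Configure.
// The type name "amazon-import" and all values below are illustrative only:
//
//	{
//	  "type": "amazon-import",
//	  "s3_bucket_name": "my-import-bucket",
//	  "ami_name": "my-imported-ami",
//	  "tags": { "built-by": "packer" }
//	}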

// PostProcessor implements the amazon-import post-processor.
type PostProcessor struct {
	config Config
}

// Configure is the entry point for configuration parsing: it decodes the raw
// template settings into p.config, applies defaults and validates them.
func (p *PostProcessor) Configure(raws ...interface{}) error {
	p.config.ctx.Funcs = awscommon.TemplateFuncs
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			// s3_key_name is deliberately not interpolated here; it is
			// rendered later, in PostProcess.
			Exclude: []string{
				"s3_key_name",
			},
		},
	}, raws...)
	if err != nil {
		return err
	}

	// Set defaults
	if p.config.Format == "" {
		p.config.Format = "ova"
	}

	if p.config.S3Key == "" {
		p.config.S3Key = "packer-import-{{timestamp}}." + p.config.Format
	}

	errs := new(packer.MultiError)

	// Validate (but do not render) the s3_key_name template
	if err = interpolate.Validate(p.config.S3Key, &p.config.ctx); err != nil {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("Error parsing s3_key_name template: %s", err))
	}

	// Check we have AWS access variables defined somewhere
	errs = packer.MultiErrorAppend(errs, p.config.AccessConfig.Prepare(&p.config.ctx)...)

	// Define all our required parameters
	templates := map[string]*string{
		"s3_bucket_name": &p.config.S3Bucket,
	}
	// Check our required params are defined
	for key, ptr := range templates {
		if *ptr == "" {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("%s must be set", key))
		}
	}

	switch p.config.Format {
	case "ova", "raw", "vmdk", "vhd", "vhdx":
	default:
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("invalid format '%s'. Only 'ova', 'raw', 'vhd', 'vhdx', or 'vmdk' are allowed", p.config.Format))
	}

	if p.config.S3Encryption != "" && p.config.S3Encryption != "AES256" && p.config.S3Encryption != "aws:kms" {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("invalid s3 encryption format '%s'. Only 'AES256' and 'aws:kms' are allowed", p.config.S3Encryption))
	}

	// If anything was flagged, return the errors back up the stack
	if len(errs.Errors) > 0 {
		return errs
	}

	awscommon.LogEnvOverrideWarnings()

	packer.LogSecretFilter.Set(p.config.AccessKey, p.config.SecretKey, p.config.Token)
	log.Println(p.config)

	return nil
}
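
// PostProcess uploads the builder's image file to S3, asks EC2 to import it as
// an AMI, waits for the import task to finish, then optionally renames the AMI,
// applies tags and launch permissions, and removes the uploaded object from S3.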
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	var err error

	session, err := p.config.Session()
	if err != nil {
		return nil, false, err
	}
	config := session.Config

	// Render this key since we didn't in the Configure phase
	p.config.S3Key, err = interpolate.Render(p.config.S3Key, &p.config.ctx)
	if err != nil {
		return nil, false, fmt.Errorf("Error rendering s3_key_name template: %s", err)
	}
	log.Printf("Rendered s3_key_name as %s", p.config.S3Key)

	log.Println("Looking for image in artifact")
	// Locate the files output from the builder
	source := ""
	for _, path := range artifact.Files() {
		if strings.HasSuffix(path, "."+p.config.Format) {
			source = path
			break
		}
	}
	// Hope we found something useful
	if source == "" {
		return nil, false, fmt.Errorf("No %s image file found in artifact from builder", p.config.Format)
	}

	if p.config.S3Encryption == "AES256" && p.config.S3EncryptionKey != "" {
		ui.Message(fmt.Sprintf("Ignoring s3_encryption_key because s3_encryption is set to '%s'", p.config.S3Encryption))
	}

	// Open the source file
	log.Printf("Opening file %s to upload", source)
	file, err := os.Open(source)
	if err != nil {
		return nil, false, fmt.Errorf("Failed to open %s: %s", source, err)
	}

	ui.Message(fmt.Sprintf("Uploading %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key))

	// Prepare S3 request
	updata := &s3manager.UploadInput{
		Body:   file,
		Bucket: &p.config.S3Bucket,
		Key:    &p.config.S3Key,
	}

	// Add encryption if specified in the config
	if p.config.S3Encryption != "" {
		updata.ServerSideEncryption = &p.config.S3Encryption
		if p.config.S3Encryption == "aws:kms" && p.config.S3EncryptionKey != "" {
			updata.SSEKMSKeyId = &p.config.S3EncryptionKey
		}
	}
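
	// Note: when s3_encryption is "aws:kms" and no s3_encryption_key is given,
	// S3 is expected to fall back to the account's AWS-managed key (aws/s3).
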
	// Copy the image file into the S3 bucket specified
	uploader := s3manager.NewUploader(session)
	if _, err = uploader.Upload(updata); err != nil {
		file.Close()
		return nil, false, fmt.Errorf("Failed to upload %s: %s", source, err)
	}
	// May as well stop holding this open now
	file.Close()

	ui.Message(fmt.Sprintf("Completed upload of %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key))

	// Call EC2 image import process
	log.Printf("Calling EC2 to import from s3://%s/%s", p.config.S3Bucket, p.config.S3Key)
	ec2conn := ec2.New(session)
	params := &ec2.ImportImageInput{
		Encrypted: &p.config.Encrypt,
		DiskContainers: []*ec2.ImageDiskContainer{
			{
				Format: &p.config.Format,
				UserBucket: &ec2.UserBucket{
					S3Bucket: &p.config.S3Bucket,
					S3Key:    &p.config.S3Key,
				},
			},
		},
	}

	if p.config.Encrypt && p.config.KMSKey != "" {
		params.KmsKeyId = &p.config.KMSKey
	}
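
	// If no role_name is configured, the ImportImage API is expected to use the
	// default "vmimport" service role; role_name overrides that name.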
	if p.config.RoleName != "" {
		params.SetRoleName(p.config.RoleName)
	}

	if p.config.LicenseType != "" {
		ui.Message(fmt.Sprintf("Setting license type to '%s'", p.config.LicenseType))
		params.LicenseType = &p.config.LicenseType
	}

	import_start, err := ec2conn.ImportImage(params)
	if err != nil {
		return nil, false, fmt.Errorf("Failed to start import from s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err)
	}

	ui.Message(fmt.Sprintf("Started import of s3://%s/%s, task id %s", p.config.S3Bucket, p.config.S3Key, *import_start.ImportTaskId))

	// Wait for the import process to complete; this takes a while
	ui.Message(fmt.Sprintf("Waiting for task %s to complete (may take a while)", *import_start.ImportTaskId))
	err = awscommon.WaitUntilImageImported(aws.BackgroundContext(), ec2conn, *import_start.ImportTaskId)
	if err != nil {
		// Retrieve the status message
		import_result, err2 := ec2conn.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{
			ImportTaskIds: []*string{
				import_start.ImportTaskId,
			},
		})
		statusMessage := "Error retrieving status message"
		if err2 == nil {
			statusMessage = *import_result.ImportImageTasks[0].StatusMessage
		}
		return nil, false, fmt.Errorf("Import task %s failed with status message: %s, error: %s", *import_start.ImportTaskId, statusMessage, err)
	}

	// Retrieve what the outcome was for the import task
	import_result, err := ec2conn.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{
		ImportTaskIds: []*string{
			import_start.ImportTaskId,
		},
	})
	if err != nil {
		return nil, false, fmt.Errorf("Failed to find import task %s: %s", *import_start.ImportTaskId, err)
	}
	// Check it was actually completed
	if *import_result.ImportImageTasks[0].Status != "completed" {
		// The most useful error message is from the job itself
		return nil, false, fmt.Errorf("Import task %s failed: %s", *import_start.ImportTaskId, *import_result.ImportImageTasks[0].StatusMessage)
	}

	ui.Message(fmt.Sprintf("Import task %s complete", *import_start.ImportTaskId))

	// Pull AMI ID out of the completed job
	createdami := *import_result.ImportImageTasks[0].ImageId
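
	// EC2 has no way to rename an AMI in place, so ami_name is honored by
	// copying the imported image under the requested name and then
	// deregistering the original import.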
	if p.config.Name != "" {
		ui.Message(fmt.Sprintf("Starting rename of AMI (%s)", createdami))
		resp, err := ec2conn.CopyImage(&ec2.CopyImageInput{
			Name:          &p.config.Name,
			SourceImageId: &createdami,
			SourceRegion:  config.Region,
		})
		if err != nil {
			return nil, false, fmt.Errorf("Error Copying AMI (%s): %s", createdami, err)
		}

		ui.Message("Waiting for AMI rename to complete (may take a while)")
		if err := awscommon.WaitUntilAMIAvailable(aws.BackgroundContext(), ec2conn, *resp.ImageId); err != nil {
			return nil, false, fmt.Errorf("Error waiting for AMI (%s): %s", *resp.ImageId, err)
		}

		_, err = ec2conn.DeregisterImage(&ec2.DeregisterImageInput{
			ImageId: &createdami,
		})
		if err != nil {
			return nil, false, fmt.Errorf("Error deregistering existing AMI: %s", err)
		}

		ui.Message("AMI rename completed")
		createdami = *resp.ImageId
	}

	// If we have tags, then apply them now to both the AMI and the snapshots
	// created by the import
	if len(p.config.Tags) > 0 {
		var ec2Tags []*ec2.Tag

		log.Printf("Repacking tags into AWS format")
		for key, value := range p.config.Tags {
			ui.Message(fmt.Sprintf("Adding tag \"%s\": \"%s\"", key, value))
			ec2Tags = append(ec2Tags, &ec2.Tag{
				Key:   aws.String(key),
				Value: aws.String(value),
			})
		}

		resourceIds := []*string{&createdami}
		log.Printf("Getting details of %s", createdami)
		imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
			ImageIds: resourceIds,
		})
		if err != nil {
			return nil, false, fmt.Errorf("Failed to retrieve details for AMI %s: %s", createdami, err)
		}
		if len(imageResp.Images) == 0 {
			return nil, false, fmt.Errorf("AMI %s has no images", createdami)
		}

		image := imageResp.Images[0]

		log.Printf("Walking block device mappings for %s to find snapshots", createdami)
		for _, device := range image.BlockDeviceMappings {
			if device.Ebs != nil && device.Ebs.SnapshotId != nil {
				ui.Message(fmt.Sprintf("Tagging snapshot %s", *device.Ebs.SnapshotId))
				resourceIds = append(resourceIds, device.Ebs.SnapshotId)
			}
		}

		ui.Message(fmt.Sprintf("Tagging AMI %s", createdami))
		_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
			Resources: resourceIds,
			Tags:      ec2Tags,
		})
		if err != nil {
			return nil, false, fmt.Errorf("Failed to add tags to resources %#v: %s", resourceIds, err)
		}
	}

	// Apply attributes for AMI specified in config
	// (duped from builder/amazon/common/step_modify_ami_attributes.go)
	options := make(map[string]*ec2.ModifyImageAttributeInput)
	if p.config.Description != "" {
		options["description"] = &ec2.ModifyImageAttributeInput{
			Description: &ec2.AttributeValue{Value: &p.config.Description},
		}
	}

	if len(p.config.Groups) > 0 {
		groups := make([]*string, len(p.config.Groups))
		adds := make([]*ec2.LaunchPermission, len(p.config.Groups))
		addGroups := &ec2.ModifyImageAttributeInput{
			LaunchPermission: &ec2.LaunchPermissionModifications{},
		}

		for i, g := range p.config.Groups {
			groups[i] = aws.String(g)
			adds[i] = &ec2.LaunchPermission{
				Group: aws.String(g),
			}
		}
		addGroups.UserGroups = groups
		addGroups.LaunchPermission.Add = adds

		options["groups"] = addGroups
	}

	if len(p.config.Users) > 0 {
		users := make([]*string, len(p.config.Users))
		adds := make([]*ec2.LaunchPermission, len(p.config.Users))
		for i, u := range p.config.Users {
			users[i] = aws.String(u)
			adds[i] = &ec2.LaunchPermission{UserId: aws.String(u)}
		}

		options["users"] = &ec2.ModifyImageAttributeInput{
			UserIds: users,
			LaunchPermission: &ec2.LaunchPermissionModifications{
				Add: adds,
			},
		}
	}

	if len(options) > 0 {
		for name, input := range options {
			ui.Message(fmt.Sprintf("Modifying: %s", name))
			input.ImageId = &createdami
			_, err := ec2conn.ModifyImageAttribute(input)
			if err != nil {
				return nil, false, fmt.Errorf("Error modifying AMI attributes: %s", err)
			}
		}
	}

	// Add the reported AMI ID to the artifact list
	log.Printf("Adding created AMI ID %s in region %s to output artifacts", createdami, *config.Region)
	artifact = &awscommon.Artifact{
		Amis: map[string]string{
			*config.Region: createdami,
		},
		BuilderIdValue: BuilderId,
		Session:        session,
	}

	if !p.config.SkipClean {
		ui.Message(fmt.Sprintf("Deleting import source s3://%s/%s", p.config.S3Bucket, p.config.S3Key))
		s3conn := s3.New(session)
		_, err = s3conn.DeleteObject(&s3.DeleteObjectInput{
			Bucket: &p.config.S3Bucket,
			Key:    &p.config.S3Key,
		})
		if err != nil {
			return nil, false, fmt.Errorf("Failed to delete s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err)
		}
	}

	return artifact, false, nil
}