packer-cn/post-processor/amazon-import/post-processor.go

468 lines
14 KiB
Go
Raw Normal View History

//go:generate mapstructure-to-hcl2 -type Config
2015-11-22 18:32:03 -05:00
package amazonimport
import (
2019-03-22 09:56:02 -04:00
"context"
2015-11-22 18:32:03 -05:00
"fmt"
"log"
2016-02-12 02:53:40 -05:00
"os"
"strings"
"time"
2015-11-22 18:32:03 -05:00
"github.com/aws/aws-sdk-go/aws"
2015-11-22 18:32:03 -05:00
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/s3"
2016-02-12 02:53:40 -05:00
"github.com/aws/aws-sdk-go/service/s3/s3manager"
build using HCL2 (#8423) This follows #8232 which added the code to generate the code required to parse HCL files for each packer component. All old config files of packer will keep on working the same. Packer takes one argument. When a directory is passed, all files in the folder with a name ending with “.pkr.hcl” or “.pkr.json” will be parsed using the HCL2 format. When a file ending with “.pkr.hcl” or “.pkr.json” is passed it will be parsed using the HCL2 format. For every other case; the old packer style will be used. ## 1. the hcl2template pkg can create a packer.Build from a set of HCL (v2) files I had to make the packer.coreBuild (which is our one and only packer.Build ) a public struct with public fields ## 2. Components interfaces get a new ConfigSpec Method to read a file from an HCL file. This is a breaking change for packer plugins. a packer component can be a: builder/provisioner/post-processor each component interface now gets a `ConfigSpec() hcldec.ObjectSpec` which allows packer to tell what is the layout of the hcl2 config meant to configure that specific component. 
This ObjectSpec is sent through the wire (RPC) and a cty.Value is now sent through the already existing configuration entrypoints: Provisioner.Prepare(raws ...interface{}) error Builder.Prepare(raws ...interface{}) ([]string, error) PostProcessor.Configure(raws ...interface{}) error close #1768 Example hcl files: ```hcl // file amazon-ebs-kms-key/run.pkr.hcl build { sources = [ "source.amazon-ebs.first", ] provisioner "shell" { inline = [ "sleep 5" ] } post-processor "shell-local" { inline = [ "sleep 5" ] } } // amazon-ebs-kms-key/source.pkr.hcl source "amazon-ebs" "first" { ami_name = "hcl2-test" region = "us-east-1" instance_type = "t2.micro" kms_key_id = "c729958f-c6ba-44cd-ab39-35ab68ce0a6c" encrypt_boot = true source_ami_filter { filters { virtualization-type = "hvm" name = "amzn-ami-hvm-????.??.?.????????-x86_64-gp2" root-device-type = "ebs" } most_recent = true owners = ["amazon"] } launch_block_device_mappings { device_name = "/dev/xvda" volume_size = 20 volume_type = "gp2" delete_on_termination = "true" } launch_block_device_mappings { device_name = "/dev/xvdf" volume_size = 500 volume_type = "gp2" delete_on_termination = true encrypted = true } ami_regions = ["eu-central-1"] run_tags { Name = "packer-solr-something" stack-name = "DevOps Tools" } communicator = "ssh" ssh_pty = true ssh_username = "ec2-user" associate_public_ip_address = true } ```
2019-12-17 05:25:56 -05:00
"github.com/hashicorp/hcl/v2/hcldec"
2017-04-04 16:39:01 -04:00
awscommon "github.com/hashicorp/packer/builder/amazon/common"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/retry"
2017-04-04 16:39:01 -04:00
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
2015-11-22 18:32:03 -05:00
)
// BuilderId is the unique identifier stamped onto artifacts produced by
// this post-processor.
const BuilderId = "packer.post-processor.amazon-import"
2015-11-22 18:32:03 -05:00
// Configuration of this post processor
type Config struct {
2016-02-12 02:53:40 -05:00
common.PackerConfig `mapstructure:",squash"`
2015-11-22 18:32:03 -05:00
awscommon.AccessConfig `mapstructure:",squash"`
2016-02-12 02:53:40 -05:00
// Variables specific to this post processor
2019-03-11 16:21:47 -04:00
S3Bucket string `mapstructure:"s3_bucket_name"`
S3Key string `mapstructure:"s3_key_name"`
S3Encryption string `mapstructure:"s3_encryption"`
S3EncryptionKey string `mapstructure:"s3_encryption_key"`
SkipClean bool `mapstructure:"skip_clean"`
Tags map[string]string `mapstructure:"tags"`
Name string `mapstructure:"ami_name"`
Description string `mapstructure:"ami_description"`
Users []string `mapstructure:"ami_users"`
Groups []string `mapstructure:"ami_groups"`
Encrypt bool `mapstructure:"ami_encrypt"`
KMSKey string `mapstructure:"ami_kms_key"`
LicenseType string `mapstructure:"license_type"`
RoleName string `mapstructure:"role_name"`
Format string `mapstructure:"format"`
2015-11-22 18:32:03 -05:00
2016-10-11 17:43:50 -04:00
ctx interpolate.Context
2015-11-22 18:32:03 -05:00
}
// PostProcessor uploads a builder's image file to S3 and imports it
// into EC2 as an AMI.
type PostProcessor struct {
	config Config
}
build using HCL2 (#8423) This follows #8232 which added the code to generate the code required to parse HCL files for each packer component. All old config files of packer will keep on working the same. Packer takes one argument. When a directory is passed, all files in the folder with a name ending with “.pkr.hcl” or “.pkr.json” will be parsed using the HCL2 format. When a file ending with “.pkr.hcl” or “.pkr.json” is passed it will be parsed using the HCL2 format. For every other case; the old packer style will be used. ## 1. the hcl2template pkg can create a packer.Build from a set of HCL (v2) files I had to make the packer.coreBuild (which is our one and only packer.Build ) a public struct with public fields ## 2. Components interfaces get a new ConfigSpec Method to read a file from an HCL file. This is a breaking change for packer plugins. a packer component can be a: builder/provisioner/post-processor each component interface now gets a `ConfigSpec() hcldec.ObjectSpec` which allows packer to tell what is the layout of the hcl2 config meant to configure that specific component. 
This ObjectSpec is sent through the wire (RPC) and a cty.Value is now sent through the already existing configuration entrypoints: Provisioner.Prepare(raws ...interface{}) error Builder.Prepare(raws ...interface{}) ([]string, error) PostProcessor.Configure(raws ...interface{}) error close #1768 Example hcl files: ```hcl // file amazon-ebs-kms-key/run.pkr.hcl build { sources = [ "source.amazon-ebs.first", ] provisioner "shell" { inline = [ "sleep 5" ] } post-processor "shell-local" { inline = [ "sleep 5" ] } } // amazon-ebs-kms-key/source.pkr.hcl source "amazon-ebs" "first" { ami_name = "hcl2-test" region = "us-east-1" instance_type = "t2.micro" kms_key_id = "c729958f-c6ba-44cd-ab39-35ab68ce0a6c" encrypt_boot = true source_ami_filter { filters { virtualization-type = "hvm" name = "amzn-ami-hvm-????.??.?.????????-x86_64-gp2" root-device-type = "ebs" } most_recent = true owners = ["amazon"] } launch_block_device_mappings { device_name = "/dev/xvda" volume_size = 20 volume_type = "gp2" delete_on_termination = "true" } launch_block_device_mappings { device_name = "/dev/xvdf" volume_size = 500 volume_type = "gp2" delete_on_termination = true encrypted = true } ami_regions = ["eu-central-1"] run_tags { Name = "packer-solr-something" stack-name = "DevOps Tools" } communicator = "ssh" ssh_pty = true ssh_username = "ec2-user" associate_public_ip_address = true } ```
2019-12-17 05:25:56 -05:00
// ConfigSpec returns the HCL2 object spec describing this post-processor's
// configuration layout, used by Packer's HCL2 template support.
func (p *PostProcessor) ConfigSpec() hcldec.ObjectSpec { return p.config.FlatMapstructure().HCL2Spec() }
2015-11-22 18:32:03 -05:00
func (p *PostProcessor) Configure(raws ...interface{}) error {
p.config.ctx.Funcs = awscommon.TemplateFuncs
err := config.Decode(&p.config, &config.DecodeOpts{
2016-02-12 02:53:40 -05:00
Interpolate: true,
InterpolateContext: &p.config.ctx,
InterpolateFilter: &interpolate.RenderFilter{
Exclude: []string{
"s3_key_name",
},
2015-11-22 18:32:03 -05:00
},
}, raws...)
if err != nil {
return err
}
// Set defaults
2018-10-29 17:52:12 -04:00
if p.config.Format == "" {
p.config.Format = "ova"
}
if p.config.S3Key == "" {
2018-10-29 17:52:12 -04:00
p.config.S3Key = "packer-import-{{timestamp}}." + p.config.Format
2015-11-22 18:32:03 -05:00
}
errs := new(packer.MultiError)
// Check and render s3_key_name
if err = interpolate.Validate(p.config.S3Key, &p.config.ctx); err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error parsing s3_key_name template: %s", err))
}
2015-11-22 18:32:03 -05:00
// Check we have AWS access variables defined somewhere
errs = packer.MultiErrorAppend(errs, p.config.AccessConfig.Prepare(&p.config.ctx)...)
2017-03-28 20:45:01 -04:00
// define all our required parameters
2015-11-22 18:32:03 -05:00
templates := map[string]*string{
2016-02-12 02:53:40 -05:00
"s3_bucket_name": &p.config.S3Bucket,
2015-11-22 18:32:03 -05:00
}
// Check out required params are defined
for key, ptr := range templates {
if *ptr == "" {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("%s must be set", key))
}
}
switch p.config.Format {
case "ova", "raw", "vmdk", "vhd", "vhdx":
default:
2018-10-29 17:52:12 -04:00
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("invalid format '%s'. Only 'ova', 'raw', 'vhd', 'vhdx', or 'vmdk' are allowed", p.config.Format))
}
2019-03-11 16:21:47 -04:00
if p.config.S3Encryption != "" && p.config.S3Encryption != "AES256" && p.config.S3Encryption != "aws:kms" {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("invalid s3 encryption format '%s'. Only 'AES256' and 'aws:kms' are allowed", p.config.S3Encryption))
}
2015-11-22 18:32:03 -05:00
// Anything which flagged return back up the stack
if len(errs.Errors) > 0 {
return errs
}
if p.config.PollingConfig == nil {
p.config.PollingConfig = new(awscommon.AWSPollingConfig)
}
p.config.PollingConfig.LogEnvOverrideWarnings()
2015-11-22 18:32:03 -05:00
packer.LogSecretFilter.Set(p.config.AccessKey, p.config.SecretKey, p.config.Token)
log.Println(p.config)
2015-11-22 18:32:03 -05:00
return nil
}
func (p *PostProcessor) PostProcess(ctx context.Context, ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, bool, error) {
2015-11-22 18:32:03 -05:00
var err error
generatedData := artifact.State("generated_data")
if generatedData == nil {
// Make sure it's not a nil map so we can assign to it later.
generatedData = make(map[string]interface{})
}
p.config.ctx.Data = generatedData
2017-03-01 19:43:09 -05:00
session, err := p.config.Session()
2015-11-22 18:32:03 -05:00
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, err
2015-11-22 18:32:03 -05:00
}
2017-03-01 19:43:09 -05:00
config := session.Config
2015-11-22 18:32:03 -05:00
// Render this key since we didn't in the configure phase
2016-02-12 02:53:40 -05:00
p.config.S3Key, err = interpolate.Render(p.config.S3Key, &p.config.ctx)
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Error rendering s3_key_name template: %s", err)
2016-02-12 02:53:40 -05:00
}
log.Printf("Rendered s3_key_name as %s", p.config.S3Key)
2018-10-29 17:52:12 -04:00
log.Println("Looking for image in artifact")
2015-11-22 18:32:03 -05:00
// Locate the files output from the builder
source := ""
for _, path := range artifact.Files() {
2018-10-29 17:52:12 -04:00
if strings.HasSuffix(path, "."+p.config.Format) {
2015-11-22 18:32:03 -05:00
source = path
break
}
}
// Hope we found something useful
if source == "" {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("No %s image file found in artifact from builder", p.config.Format)
2015-11-22 18:32:03 -05:00
}
2019-03-11 16:21:47 -04:00
if p.config.S3Encryption == "AES256" && p.config.S3EncryptionKey != "" {
ui.Message(fmt.Sprintf("Ignoring s3_encryption_key because s3_encryption is set to '%s'", p.config.S3Encryption))
}
2015-11-22 18:32:03 -05:00
// open the source file
log.Printf("Opening file %s to upload", source)
2015-11-22 18:32:03 -05:00
file, err := os.Open(source)
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Failed to open %s: %s", source, err)
2015-11-22 18:32:03 -05:00
}
ui.Message(fmt.Sprintf("Uploading %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key))
2019-03-11 16:21:47 -04:00
// Prepare S3 request
updata := &s3manager.UploadInput{
2016-02-12 02:53:40 -05:00
Body: file,
Bucket: &p.config.S3Bucket,
Key: &p.config.S3Key,
2019-03-11 16:21:47 -04:00
}
// Add encryption if specified in the config
if p.config.S3Encryption != "" {
updata.ServerSideEncryption = &p.config.S3Encryption
if p.config.S3Encryption == "aws:kms" && p.config.S3EncryptionKey != "" {
updata.SSEKMSKeyId = &p.config.S3EncryptionKey
}
}
// Copy the image file into the S3 bucket specified
uploader := s3manager.NewUploader(session)
if _, err = uploader.Upload(updata); err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Failed to upload %s: %s", source, err)
2015-11-22 18:32:03 -05:00
}
// May as well stop holding this open now
file.Close()
2015-11-22 18:32:03 -05:00
ui.Message(fmt.Sprintf("Completed upload of %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key))
// Call EC2 image import process
log.Printf("Calling EC2 to import from s3://%s/%s", p.config.S3Bucket, p.config.S3Key)
ec2conn := ec2.New(session)
params := &ec2.ImportImageInput{
2019-03-11 16:21:47 -04:00
Encrypted: &p.config.Encrypt,
2015-11-22 18:32:03 -05:00
DiskContainers: []*ec2.ImageDiskContainer{
{
2018-10-29 17:52:12 -04:00
Format: &p.config.Format,
2015-11-22 18:32:03 -05:00
UserBucket: &ec2.UserBucket{
2016-02-12 02:53:40 -05:00
S3Bucket: &p.config.S3Bucket,
S3Key: &p.config.S3Key,
2015-11-22 18:32:03 -05:00
},
},
},
2018-01-25 13:37:34 -05:00
}
2019-03-11 16:21:47 -04:00
if p.config.Encrypt && p.config.KMSKey != "" {
params.KmsKeyId = &p.config.KMSKey
}
2018-01-25 13:37:34 -05:00
if p.config.RoleName != "" {
params.SetRoleName(p.config.RoleName)
}
if p.config.LicenseType != "" {
ui.Message(fmt.Sprintf("Setting license type to '%s'", p.config.LicenseType))
params.LicenseType = &p.config.LicenseType
}
var import_start *ec2.ImportImageOutput
err = retry.Config{
Tries: 11,
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
}.Run(ctx, func(ctx context.Context) error {
import_start, err = ec2conn.ImportImage(params)
return err
})
2015-11-22 18:32:03 -05:00
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Failed to start import from s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err)
2015-11-22 18:32:03 -05:00
}
ui.Message(fmt.Sprintf("Started import of s3://%s/%s, task id %s", p.config.S3Bucket, p.config.S3Key, *import_start.ImportTaskId))
2017-03-28 20:45:01 -04:00
// Wait for import process to complete, this takes a while
ui.Message(fmt.Sprintf("Waiting for task %s to complete (may take a while)", *import_start.ImportTaskId))
err = p.config.PollingConfig.WaitUntilImageImported(aws.BackgroundContext(), ec2conn, *import_start.ImportTaskId)
if err != nil {
// Retrieve the status message
import_result, err2 := ec2conn.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{
ImportTaskIds: []*string{
import_start.ImportTaskId,
},
})
statusMessage := "Error retrieving status message"
if err2 == nil {
statusMessage = *import_result.ImportImageTasks[0].StatusMessage
}
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Import task %s failed with status message: %s, error: %s", *import_start.ImportTaskId, statusMessage, err)
}
// Retrieve what the outcome was for the import task
import_result, err := ec2conn.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{
2016-02-12 02:53:40 -05:00
ImportTaskIds: []*string{
import_start.ImportTaskId,
},
})
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Failed to find import task %s: %s", *import_start.ImportTaskId, err)
}
// Check it was actually completed
if *import_result.ImportImageTasks[0].Status != "completed" {
// The most useful error message is from the job itself
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Import task %s failed: %s", *import_start.ImportTaskId, *import_result.ImportImageTasks[0].StatusMessage)
}
ui.Message(fmt.Sprintf("Import task %s complete", *import_start.ImportTaskId))
// Pull AMI ID out of the completed job
createdami := *import_result.ImportImageTasks[0].ImageId
if p.config.Name != "" {
ui.Message(fmt.Sprintf("Starting rename of AMI (%s)", createdami))
2019-09-16 16:45:28 -04:00
copyInput := &ec2.CopyImageInput{
2016-10-11 17:43:50 -04:00
Name: &p.config.Name,
SourceImageId: &createdami,
2016-10-11 17:43:50 -04:00
SourceRegion: config.Region,
2019-09-16 16:45:28 -04:00
}
if p.config.Encrypt {
copyInput.Encrypted = aws.Bool(p.config.Encrypt)
if p.config.KMSKey != "" {
copyInput.KmsKeyId = &p.config.KMSKey
}
}
resp, err := ec2conn.CopyImage(copyInput)
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Error Copying AMI (%s): %s", createdami, err)
}
ui.Message(fmt.Sprintf("Waiting for AMI rename to complete (may take a while)"))
if err := p.config.PollingConfig.WaitUntilAMIAvailable(aws.BackgroundContext(), ec2conn, *resp.ImageId); err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Error waiting for AMI (%s): %s", *resp.ImageId, err)
}
// Clean up intermediary image now that it has successfully been renamed.
ui.Message("Destroying intermediary AMI...")
err = awscommon.DestroyAMIs([]*string{&createdami}, ec2conn)
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Error deregistering existing AMI: %s", err)
}
ui.Message(fmt.Sprintf("AMI rename completed"))
createdami = *resp.ImageId
}
// If we have tags, then apply them now to both the AMI and snaps
// created by the import
if len(p.config.Tags) > 0 {
2016-02-12 02:53:40 -05:00
var ec2Tags []*ec2.Tag
log.Printf("Repacking tags into AWS format")
for key, value := range p.config.Tags {
ui.Message(fmt.Sprintf("Adding tag \"%s\": \"%s\"", key, value))
ec2Tags = append(ec2Tags, &ec2.Tag{
2016-02-12 02:53:40 -05:00
Key: aws.String(key),
Value: aws.String(value),
})
}
resourceIds := []*string{&createdami}
log.Printf("Getting details of %s", createdami)
imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
2016-02-12 02:53:40 -05:00
ImageIds: resourceIds,
})
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Failed to retrieve details for AMI %s: %s", createdami, err)
}
if len(imageResp.Images) == 0 {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("AMI %s has no images", createdami)
}
image := imageResp.Images[0]
log.Printf("Walking block device mappings for %s to find snapshots", createdami)
for _, device := range image.BlockDeviceMappings {
if device.Ebs != nil && device.Ebs.SnapshotId != nil {
ui.Message(fmt.Sprintf("Tagging snapshot %s", *device.Ebs.SnapshotId))
resourceIds = append(resourceIds, device.Ebs.SnapshotId)
}
}
ui.Message(fmt.Sprintf("Tagging AMI %s", createdami))
_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
2016-02-12 02:53:40 -05:00
Resources: resourceIds,
Tags: ec2Tags,
})
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Failed to add tags to resources %#v: %s", resourceIds, err)
}
}
2017-03-28 20:45:01 -04:00
// Apply attributes for AMI specified in config
// (duped from builder/amazon/common/step_modify_ami_attributes.go)
options := make(map[string]*ec2.ModifyImageAttributeInput)
if p.config.Description != "" {
options["description"] = &ec2.ModifyImageAttributeInput{
Description: &ec2.AttributeValue{Value: &p.config.Description},
}
}
if len(p.config.Groups) > 0 {
groups := make([]*string, len(p.config.Groups))
adds := make([]*ec2.LaunchPermission, len(p.config.Groups))
addGroups := &ec2.ModifyImageAttributeInput{
LaunchPermission: &ec2.LaunchPermissionModifications{},
}
for i, g := range p.config.Groups {
groups[i] = aws.String(g)
adds[i] = &ec2.LaunchPermission{
Group: aws.String(g),
}
}
addGroups.UserGroups = groups
addGroups.LaunchPermission.Add = adds
options["groups"] = addGroups
}
if len(p.config.Users) > 0 {
users := make([]*string, len(p.config.Users))
adds := make([]*ec2.LaunchPermission, len(p.config.Users))
for i, u := range p.config.Users {
users[i] = aws.String(u)
adds[i] = &ec2.LaunchPermission{UserId: aws.String(u)}
}
options["users"] = &ec2.ModifyImageAttributeInput{
UserIds: users,
LaunchPermission: &ec2.LaunchPermissionModifications{
Add: adds,
},
}
}
if len(options) > 0 {
for name, input := range options {
ui.Message(fmt.Sprintf("Modifying: %s", name))
input.ImageId = &createdami
_, err := ec2conn.ModifyImageAttribute(input)
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Error modifying AMI attributes: %s", err)
}
}
}
// Add the reported AMI ID to the artifact list
log.Printf("Adding created AMI ID %s in region %s to output artifacts", createdami, *config.Region)
artifact = &awscommon.Artifact{
2016-02-12 02:53:40 -05:00
Amis: map[string]string{
*config.Region: createdami,
},
2016-02-12 02:53:40 -05:00
BuilderIdValue: BuilderId,
Session: session,
}
2015-11-22 18:32:03 -05:00
2016-02-12 02:53:40 -05:00
if !p.config.SkipClean {
ui.Message(fmt.Sprintf("Deleting import source s3://%s/%s", p.config.S3Bucket, p.config.S3Key))
s3conn := s3.New(session)
_, err = s3conn.DeleteObject(&s3.DeleteObjectInput{
Bucket: &p.config.S3Bucket,
Key: &p.config.S3Key,
})
if err != nil {
2019-04-02 19:51:58 -04:00
return nil, false, false, fmt.Errorf("Failed to delete s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err)
2016-02-12 02:53:40 -05:00
}
}
2019-04-02 19:51:58 -04:00
return artifact, false, false, nil
2015-11-22 18:32:03 -05:00
}