remove aws builders

parent 692433721d
commit 453e56e554
@@ -1,535 +0,0 @@

//go:generate struct-markdown
//go:generate mapstructure-to-hcl2 -type Config,BlockDevices,BlockDevice

// The chroot package is able to create an Amazon AMI without requiring the
// launch of a new instance for every build. It does this by attaching and
// mounting the root volume of another AMI and chrooting into that directory.
// It then creates an AMI from that attached drive.
package chroot

import (
    "context"
    "errors"
    "fmt"
    "runtime"

    "github.com/aws/aws-sdk-go/service/ec2"
    "github.com/hashicorp/hcl/v2/hcldec"
    "github.com/hashicorp/packer-plugin-sdk/chroot"
    "github.com/hashicorp/packer-plugin-sdk/common"
    "github.com/hashicorp/packer-plugin-sdk/multistep"
    "github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    "github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
    "github.com/hashicorp/packer-plugin-sdk/template/config"
    "github.com/hashicorp/packer-plugin-sdk/template/interpolate"
    awscommon "github.com/hashicorp/packer/builder/amazon/common"
)

// The unique ID for this builder
const BuilderId = "mitchellh.amazon.chroot"

// Config is the configuration that is chained through the steps and settable
// from the template.
type Config struct {
    common.PackerConfig    `mapstructure:",squash"`
    awscommon.AMIConfig    `mapstructure:",squash"`
    awscommon.AccessConfig `mapstructure:",squash"`
    // Add one or more [block device
    // mappings](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
    // to the AMI. If this field is populated, and you are building from an
    // existing source image, the block device mappings in the source image
    // will be overwritten. This means you must have a block device mapping
    // entry for your root volume, `root_volume_size` and `root_device_name`.
    // See the [BlockDevices](#block-devices-configuration) documentation for
    // fields.
    AMIMappings awscommon.BlockDevices `mapstructure:"ami_block_device_mappings" hcl2-schema-generator:"ami_block_device_mappings,direct" required:"false"`
    // This is a list of devices to mount into the chroot environment. This
    // configuration parameter requires some additional documentation which is
    // in the Chroot Mounts section. Please read that section for more
    // information on how to use this.
    ChrootMounts [][]string `mapstructure:"chroot_mounts" required:"false"`
    // How to run shell commands. This defaults to `{{.Command}}`. This may be
    // useful to set if you want to set environmental variables or perhaps run
    // it with sudo or so on. This is a configuration template where the
    // .Command variable is replaced with the command to be run. Defaults to
    // `{{.Command}}`.
    CommandWrapper string `mapstructure:"command_wrapper" required:"false"`
    // Paths to files on the running EC2 instance that will be copied into the
    // chroot environment prior to provisioning. Defaults to /etc/resolv.conf
    // so that DNS lookups work. Pass an empty list to skip copying
    // /etc/resolv.conf. You may need to do this if you're building an image
    // that uses systemd.
    CopyFiles []string `mapstructure:"copy_files" required:"false"`
    // The path to the device where the root volume of the source AMI will be
    // attached. This defaults to "" (empty string), which forces Packer to
    // find an open device automatically.
    DevicePath string `mapstructure:"device_path" required:"false"`
    // When we call the mount command (by default mount -o device dir), the
    // string provided in nvme_mount_path will replace device in that command.
    // When this option is not set, device in that command will be something
    // like /dev/sdf1, mirroring the attached device name. This assumption
    // works for most instances but will fail with c5 and m5 instances. In
    // order to use the chroot builder with c5 and m5 instances, you must
    // manually set nvme_device_path and device_path.
    NVMEDevicePath string `mapstructure:"nvme_device_path" required:"false"`
    // Build a new volume instead of starting from an existing AMI root volume
    // snapshot. Default false. If true, source_ami/source_ami_filter are no
    // longer used and the following options become required:
    // ami_virtualization_type, pre_mount_commands and root_volume_size.
    FromScratch bool `mapstructure:"from_scratch" required:"false"`
    // Options to supply the mount command when mounting devices. Each option
    // will be prefixed with -o and supplied to the mount command ran by
    // Packer. Because this command is ran in a shell, user discretion is
    // advised. See this manual page for the mount command for valid file
    // system specific options.
    MountOptions []string `mapstructure:"mount_options" required:"false"`
    // The partition number containing the / partition. By default this is the
    // first partition of the volume, (for example, xvda1) but you can
    // designate the entire block device by setting "mount_partition": "0" in
    // your config, which will mount xvda instead.
    MountPartition string `mapstructure:"mount_partition" required:"false"`
    // The path where the volume will be mounted. This is where the chroot
    // environment will be. This defaults to
    // `/mnt/packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration
    // template where the .Device variable is replaced with the name of the
    // device where the volume is attached.
    MountPath string `mapstructure:"mount_path" required:"false"`
    // As pre_mount_commands, but the commands are executed after mounting the
    // root device and before the extra mount and copy steps. The device and
    // mount path are provided by `{{.Device}}` and `{{.MountPath}}`.
    PostMountCommands []string `mapstructure:"post_mount_commands" required:"false"`
    // A series of commands to execute after attaching the root volume and
    // before mounting the chroot. This is not required unless using
    // from_scratch. If so, this should include any partitioning and filesystem
    // creation commands. The path to the device is provided by `{{.Device}}`.
    PreMountCommands []string `mapstructure:"pre_mount_commands" required:"false"`
    // The root device name. For example, xvda.
    RootDeviceName string `mapstructure:"root_device_name" required:"false"`
    // The size of the root volume in GB for the chroot environment and the
    // resulting AMI. Default size is the snapshot size of the source_ami
    // unless from_scratch is true, in which case this field must be defined.
    RootVolumeSize int64 `mapstructure:"root_volume_size" required:"false"`
    // The type of EBS volume for the chroot environment and resulting AMI. The
    // default value is the type of the source_ami, unless from_scratch is
    // true, in which case the default value is gp2. You can only specify io1
    // if building based on top of a source_ami which is also io1.
    RootVolumeType string `mapstructure:"root_volume_type" required:"false"`
    // The source AMI whose root volume will be copied and provisioned on the
    // currently running instance. This must be an EBS-backed AMI with a root
    // volume snapshot that you have access to. Note: this is not used when
    // from_scratch is set to true.
    SourceAmi string `mapstructure:"source_ami" required:"true"`
    // Filters used to populate the source_ami field. Example:
    //
    // ```json
    // {
    //   "source_ami_filter": {
    //     "filters": {
    //       "virtualization-type": "hvm",
    //       "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
    //       "root-device-type": "ebs"
    //     },
    //     "owners": ["099720109477"],
    //     "most_recent": true
    //   }
    // }
    // ```
    //
    // This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
    // This will fail unless *exactly* one AMI is returned. In the above example,
    // `most_recent` will cause this to succeed by selecting the newest image.
    //
    // - `filters` (map of strings) - filters used to select a `source_ami`.
    //   NOTE: This will fail unless *exactly* one AMI is returned. Any filter
    //   described in the docs for
    //   [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
    //   is valid.
    //
    // - `owners` (array of strings) - Filters the images by their owner. You
    //   may specify one or more AWS account IDs, "self" (which will use the
    //   account whose credentials you are using to run Packer), or an AWS owner
    //   alias: for example, "amazon", "aws-marketplace", or "microsoft". This
    //   option is required for security reasons.
    //
    // - `most_recent` (boolean) - Selects the newest created image when true.
    //   This is most useful for selecting a daily distro build.
    //
    // You may set this in place of `source_ami` or in conjunction with it. If you
    // set this in conjunction with `source_ami`, the `source_ami` will be added
    // to the filter. The provided `source_ami` must meet all of the filtering
    // criteria provided in `source_ami_filter`; this pins the AMI returned by the
    // filter, but will cause Packer to fail if the `source_ami` does not exist.
    SourceAmiFilter awscommon.AmiFilterOptions `mapstructure:"source_ami_filter" required:"false"`
    // Key/value pair tags to apply to the volumes that are *launched*. This is
    // a [template engine](/docs/templates/legacy_json_templates/engine), see [Build template
    // data](#build-template-data) for more information.
    RootVolumeTags map[string]string `mapstructure:"root_volume_tags" required:"false"`
    // Same as [`root_volume_tags`](#root_volume_tags) but defined as a
    // singular block containing a `key` and a `value` field. In HCL2 mode the
    // [`dynamic_block`](/docs/templates/hcl_templates/expressions#dynamic-blocks)
    // will allow you to create those programatically.
    RootVolumeTag config.KeyValues `mapstructure:"root_volume_tag" required:"false"`
    // Whether or not to encrypt the volumes that are *launched*. By default, Packer will keep
    // the encryption setting to what it was in the source image when set to `false`. Setting true will
    // always result in an encrypted one.
    RootVolumeEncryptBoot config.Trilean `mapstructure:"root_volume_encrypt_boot" required:"false"`
    // ID, alias or ARN of the KMS key to use for *launched* volumes encryption.
    //
    // Set this value if you select `root_volume_encrypt_boot`, but don't want to use the
    // region's default KMS key.
    //
    // If you have a custom kms key you'd like to apply to the launch volume,
    // and are only building in one region, it is more efficient to set this
    // and `root_volume_encrypt_boot` to `true` and not use `encrypt_boot` and `kms_key_id`. This saves
    // potentially many minutes at the end of the build by preventing Packer
    // from having to copy and re-encrypt the image at the end of the build.
    //
    // For valid formats see *KmsKeyId* in the [AWS API docs -
    // CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
    // This field is validated by Packer, when using an alias, you will have to
    // prefix `kms_key_id` with `alias/`.
    RootVolumeKmsKeyId string `mapstructure:"root_volume_kms_key_id" required:"false"`
    // what architecture to use when registering the final AMI; valid options
    // are "x86_64" or "arm64". Defaults to "x86_64".
    Architecture string `mapstructure:"ami_architecture" required:"false"`

    ctx interpolate.Context
}

func (c *Config) GetContext() interpolate.Context {
    return c.ctx
}

type wrappedCommandTemplate struct {
    Command string
}

type Builder struct {
    config Config
    runner multistep.Runner
}

func (b *Builder) ConfigSpec() hcldec.ObjectSpec { return b.config.FlatMapstructure().HCL2Spec() }

func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
    b.config.ctx.Funcs = awscommon.TemplateFuncs
    err := config.Decode(&b.config, &config.DecodeOpts{
        PluginType:         BuilderId,
        Interpolate:        true,
        InterpolateContext: &b.config.ctx,
        InterpolateFilter: &interpolate.RenderFilter{
            Exclude: []string{
                "ami_description",
                "snapshot_tags",
                "snapshot_tag",
                "tags",
                "tag",
                "root_volume_tags",
                "root_volume_tag",
                "command_wrapper",
                "post_mount_commands",
                "pre_mount_commands",
                "mount_path",
            },
        },
    }, raws...)
    if err != nil {
        return nil, nil, err
    }

    if b.config.Architecture == "" {
        b.config.Architecture = "x86_64"
    }

    if b.config.PackerConfig.PackerForce {
        b.config.AMIForceDeregister = true
    }

    // Defaults
    if b.config.ChrootMounts == nil {
        b.config.ChrootMounts = make([][]string, 0)
    }

    if len(b.config.ChrootMounts) == 0 {
        b.config.ChrootMounts = [][]string{
            {"proc", "proc", "/proc"},
            {"sysfs", "sysfs", "/sys"},
            {"bind", "/dev", "/dev"},
            {"devpts", "devpts", "/dev/pts"},
            {"binfmt_misc", "binfmt_misc", "/proc/sys/fs/binfmt_misc"},
        }
    }

    // set default copy file if we're not giving our own
    if b.config.CopyFiles == nil {
        if !b.config.FromScratch {
            b.config.CopyFiles = []string{"/etc/resolv.conf"}
        }
    }

    if b.config.CommandWrapper == "" {
        b.config.CommandWrapper = "{{.Command}}"
    }

    if b.config.MountPath == "" {
        b.config.MountPath = "/mnt/packer-amazon-chroot-volumes/{{.Device}}"
    }

    if b.config.MountPartition == "" {
        b.config.MountPartition = "1"
    }

    // Accumulate any errors or warnings
    var errs *packersdk.MultiError
    var warns []string

    errs = packersdk.MultiErrorAppend(errs, b.config.RootVolumeTag.CopyOn(&b.config.RootVolumeTags)...)
    errs = packersdk.MultiErrorAppend(errs, b.config.AccessConfig.Prepare()...)
    errs = packersdk.MultiErrorAppend(errs,
        b.config.AMIConfig.Prepare(&b.config.AccessConfig, &b.config.ctx)...)

    for _, mounts := range b.config.ChrootMounts {
        if len(mounts) != 3 {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("Each chroot_mounts entry should be three elements."))
            break
        }
    }

    if b.config.FromScratch {
        if b.config.SourceAmi != "" || !b.config.SourceAmiFilter.Empty() {
            warns = append(warns, "source_ami and source_ami_filter are unused when from_scratch is true")
        }
        if b.config.RootVolumeSize == 0 {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("root_volume_size is required with from_scratch."))
        }
        if len(b.config.PreMountCommands) == 0 {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("pre_mount_commands is required with from_scratch."))
        }
        if b.config.AMIVirtType == "" {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("ami_virtualization_type is required with from_scratch."))
        }
        if b.config.RootDeviceName == "" {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("root_device_name is required with from_scratch."))
        }
        if len(b.config.AMIMappings) == 0 {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("ami_block_device_mappings is required with from_scratch."))
        }
    } else {
        if b.config.SourceAmi == "" && b.config.SourceAmiFilter.Empty() {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("source_ami or source_ami_filter is required."))
        }
        if len(b.config.AMIMappings) > 0 && b.config.RootDeviceName != "" {
            if b.config.RootVolumeSize == 0 {
                // Although, they can specify the device size in the block
                // device mapping, it's easier to be specific here.
                errs = packersdk.MultiErrorAppend(
                    errs, errors.New("root_volume_size is required if ami_block_device_mappings is specified"))
            }
            warns = append(warns, "ami_block_device_mappings from source image will be completely overwritten")
        } else if len(b.config.AMIMappings) > 0 {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("If ami_block_device_mappings is specified, root_device_name must be specified"))
        } else if b.config.RootDeviceName != "" {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("If root_device_name is specified, ami_block_device_mappings must be specified"))
        }

        if b.config.RootVolumeKmsKeyId != "" {
            if b.config.RootVolumeEncryptBoot.False() {
                errs = packersdk.MultiErrorAppend(
                    errs, errors.New("If you have set root_volume_kms_key_id, root_volume_encrypt_boot must also be true."))
            } else if b.config.RootVolumeEncryptBoot.True() && !awscommon.ValidateKmsKey(b.config.RootVolumeKmsKeyId) {
                errs = packersdk.MultiErrorAppend(
                    errs, fmt.Errorf("%q is not a valid KMS Key Id.", b.config.RootVolumeKmsKeyId))
            }
        }

    }
    valid := false
    for _, validArch := range []string{"x86_64", "arm64"} {
        if validArch == b.config.Architecture {
            valid = true
            break
        }
    }
    if !valid {
        errs = packersdk.MultiErrorAppend(errs, errors.New(`The only valid ami_architecture values are "x86_64" and "arm64"`))
    }

    if errs != nil && len(errs.Errors) > 0 {
        return nil, warns, errs
    }

    packersdk.LogSecretFilter.Set(b.config.AccessKey, b.config.SecretKey, b.config.Token)
    generatedData := awscommon.GetGeneratedDataList()
    generatedData = append(generatedData, "Device", "MountPath")

    return generatedData, warns, nil
}

func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook) (packersdk.Artifact, error) {
    if runtime.GOOS != "linux" {
        return nil, errors.New("The amazon-chroot builder only works on Linux environments.")
    }

    session, err := b.config.Session()
    if err != nil {
        return nil, err
    }
    ec2conn := ec2.New(session)

    wrappedCommand := func(command string) (string, error) {
        ictx := b.config.ctx
        ictx.Data = &wrappedCommandTemplate{Command: command}
        return interpolate.Render(b.config.CommandWrapper, &ictx)
    }

    // Setup the state bag and initial state for the steps
    state := new(multistep.BasicStateBag)
    state.Put("config", &b.config)
    state.Put("access_config", &b.config.AccessConfig)
    state.Put("ami_config", &b.config.AMIConfig)
    state.Put("ec2", ec2conn)
    state.Put("awsSession", session)
    state.Put("hook", hook)
    state.Put("ui", ui)
    state.Put("wrappedCommand", common.CommandWrapper(wrappedCommand))
    generatedData := &packerbuilderdata.GeneratedData{State: state}

    // Build the steps
    steps := []multistep.Step{
        &awscommon.StepPreValidate{
            DestAmiName:     b.config.AMIName,
            ForceDeregister: b.config.AMIForceDeregister,
        },
        &StepInstanceInfo{},
    }

    if !b.config.FromScratch {
        steps = append(steps,
            &awscommon.StepSourceAMIInfo{
                SourceAmi:                b.config.SourceAmi,
                EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
                EnableAMIENASupport:      b.config.AMIENASupport,
                AmiFilters:               b.config.SourceAmiFilter,
                AMIVirtType:              b.config.AMIVirtType,
            },
            &StepCheckRootDevice{},
        )
    }

    steps = append(steps,
        &StepFlock{},
        &StepPrepareDevice{
            GeneratedData: generatedData,
        },
        &StepCreateVolume{
            PollingConfig:         b.config.PollingConfig,
            RootVolumeType:        b.config.RootVolumeType,
            RootVolumeSize:        b.config.RootVolumeSize,
            RootVolumeTags:        b.config.RootVolumeTags,
            RootVolumeEncryptBoot: b.config.RootVolumeEncryptBoot,
            RootVolumeKmsKeyId:    b.config.RootVolumeKmsKeyId,
            Ctx:                   b.config.ctx,
        },
        &StepAttachVolume{
            PollingConfig: b.config.PollingConfig,
        },
        &StepEarlyUnflock{},
        &chroot.StepPreMountCommands{
            Commands: b.config.PreMountCommands,
        },
        &StepMountDevice{
            MountOptions:   b.config.MountOptions,
            MountPartition: b.config.MountPartition,
            GeneratedData:  generatedData,
        },
        &chroot.StepPostMountCommands{
            Commands: b.config.PostMountCommands,
        },
        &chroot.StepMountExtra{
            ChrootMounts: b.config.ChrootMounts,
        },
        &chroot.StepCopyFiles{
            Files: b.config.CopyFiles,
        },
        &awscommon.StepSetGeneratedData{
            GeneratedData: generatedData,
        },
        &chroot.StepChrootProvision{},
        &chroot.StepEarlyCleanup{},
        &StepSnapshot{
            PollingConfig: b.config.PollingConfig,
        },
        &awscommon.StepDeregisterAMI{
            AccessConfig:        &b.config.AccessConfig,
            ForceDeregister:     b.config.AMIForceDeregister,
            ForceDeleteSnapshot: b.config.AMIForceDeleteSnapshot,
            AMIName:             b.config.AMIName,
            Regions:             b.config.AMIRegions,
        },
        &StepRegisterAMI{
            RootVolumeSize:           b.config.RootVolumeSize,
            EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
            EnableAMIENASupport:      b.config.AMIENASupport,
            AMISkipBuildRegion:       b.config.AMISkipBuildRegion,
            PollingConfig:            b.config.PollingConfig,
        },
        &awscommon.StepAMIRegionCopy{
            AccessConfig:      &b.config.AccessConfig,
            Regions:           b.config.AMIRegions,
            AMIKmsKeyId:       b.config.AMIKmsKeyId,
            RegionKeyIds:      b.config.AMIRegionKMSKeyIDs,
            EncryptBootVolume: b.config.AMIEncryptBootVolume,
            Name:              b.config.AMIName,
            OriginalRegion:    *ec2conn.Config.Region,
        },
        &awscommon.StepModifyAMIAttributes{
            Description:    b.config.AMIDescription,
            Users:          b.config.AMIUsers,
            Groups:         b.config.AMIGroups,
            ProductCodes:   b.config.AMIProductCodes,
            SnapshotUsers:  b.config.SnapshotUsers,
            SnapshotGroups: b.config.SnapshotGroups,
            Ctx:            b.config.ctx,
            GeneratedData:  generatedData,
        },
        &awscommon.StepCreateTags{
            Tags:         b.config.AMITags,
            SnapshotTags: b.config.SnapshotTags,
            Ctx:          b.config.ctx,
        },
    )

    // Run!
    b.runner = commonsteps.NewRunner(steps, b.config.PackerConfig, ui)
    b.runner.Run(ctx, state)

    // If there was an error, return that
    if rawErr, ok := state.GetOk("error"); ok {
        return nil, rawErr.(error)
    }

    // If there are no AMIs, then just return
    if _, ok := state.GetOk("amis"); !ok {
        return nil, nil
    }

    // Build the artifact and return it
    artifact := &awscommon.Artifact{
        Amis:           state.Get("amis").(map[string]string),
        BuilderIdValue: BuilderId,
        Session:        session,
        StateData:      map[string]interface{}{"generated_data": state.Get("generated_data")},
    }

    return artifact, nil
}
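The `wrappedCommand` closure in `Run` above is where `command_wrapper` gets expanded. A minimal sketch of that expansion using the SDK's `interpolate.Render`, run outside the builder; the wrapper string and mount command below are made-up illustration values:

```go
package main

import (
    "fmt"

    "github.com/hashicorp/packer-plugin-sdk/template/interpolate"
)

// Mirrors the wrappedCommandTemplate type used by the builder above.
type wrappedCommandTemplate struct {
    Command string
}

func main() {
    ictx := interpolate.Context{Data: &wrappedCommandTemplate{Command: "mount /dev/xvdf1 /mnt/chroot"}}
    // command_wrapper defaults to "{{.Command}}"; "sudo {{.Command}}" is a typical override.
    wrapped, err := interpolate.Render("sudo {{.Command}}", &ictx)
    if err != nil {
        panic(err)
    }
    fmt.Println(wrapped) // sudo mount /dev/xvdf1 /mnt/chroot
}
```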
@@ -1,167 +0,0 @@

// Code generated by "mapstructure-to-hcl2 -type Config,BlockDevices,BlockDevice"; DO NOT EDIT.

package chroot

import (
    "github.com/hashicorp/hcl/v2/hcldec"
    "github.com/hashicorp/packer-plugin-sdk/template/config"
    "github.com/hashicorp/packer/builder/amazon/common"
    "github.com/zclconf/go-cty/cty"
)

// FlatConfig is an auto-generated flat version of Config.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
type FlatConfig struct {
    PackerBuildName         *string                           `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"`
    PackerBuilderType       *string                           `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"`
    PackerCoreVersion       *string                           `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"`
    PackerDebug             *bool                             `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"`
    PackerForce             *bool                             `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"`
    PackerOnError           *string                           `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"`
    PackerUserVars          map[string]string                 `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"`
    PackerSensitiveVars     []string                          `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"`
    AMIName                 *string                           `mapstructure:"ami_name" required:"true" cty:"ami_name" hcl:"ami_name"`
    AMIDescription          *string                           `mapstructure:"ami_description" required:"false" cty:"ami_description" hcl:"ami_description"`
    AMIVirtType             *string                           `mapstructure:"ami_virtualization_type" required:"false" cty:"ami_virtualization_type" hcl:"ami_virtualization_type"`
    AMIUsers                []string                          `mapstructure:"ami_users" required:"false" cty:"ami_users" hcl:"ami_users"`
    AMIGroups               []string                          `mapstructure:"ami_groups" required:"false" cty:"ami_groups" hcl:"ami_groups"`
    AMIProductCodes         []string                          `mapstructure:"ami_product_codes" required:"false" cty:"ami_product_codes" hcl:"ami_product_codes"`
    AMIRegions              []string                          `mapstructure:"ami_regions" required:"false" cty:"ami_regions" hcl:"ami_regions"`
    AMISkipRegionValidation *bool                             `mapstructure:"skip_region_validation" required:"false" cty:"skip_region_validation" hcl:"skip_region_validation"`
    AMITags                 map[string]string                 `mapstructure:"tags" required:"false" cty:"tags" hcl:"tags"`
    AMITag                  []config.FlatKeyValue             `mapstructure:"tag" required:"false" cty:"tag" hcl:"tag"`
    AMIENASupport           *bool                             `mapstructure:"ena_support" required:"false" cty:"ena_support" hcl:"ena_support"`
    AMISriovNetSupport      *bool                             `mapstructure:"sriov_support" required:"false" cty:"sriov_support" hcl:"sriov_support"`
    AMIForceDeregister      *bool                             `mapstructure:"force_deregister" required:"false" cty:"force_deregister" hcl:"force_deregister"`
    AMIForceDeleteSnapshot  *bool                             `mapstructure:"force_delete_snapshot" required:"false" cty:"force_delete_snapshot" hcl:"force_delete_snapshot"`
    AMIEncryptBootVolume    *bool                             `mapstructure:"encrypt_boot" required:"false" cty:"encrypt_boot" hcl:"encrypt_boot"`
    AMIKmsKeyId             *string                           `mapstructure:"kms_key_id" required:"false" cty:"kms_key_id" hcl:"kms_key_id"`
    AMIRegionKMSKeyIDs      map[string]string                 `mapstructure:"region_kms_key_ids" required:"false" cty:"region_kms_key_ids" hcl:"region_kms_key_ids"`
    AMISkipBuildRegion      *bool                             `mapstructure:"skip_save_build_region" cty:"skip_save_build_region" hcl:"skip_save_build_region"`
    SnapshotTags            map[string]string                 `mapstructure:"snapshot_tags" required:"false" cty:"snapshot_tags" hcl:"snapshot_tags"`
    SnapshotTag             []config.FlatKeyValue             `mapstructure:"snapshot_tag" required:"false" cty:"snapshot_tag" hcl:"snapshot_tag"`
    SnapshotUsers           []string                          `mapstructure:"snapshot_users" required:"false" cty:"snapshot_users" hcl:"snapshot_users"`
    SnapshotGroups          []string                          `mapstructure:"snapshot_groups" required:"false" cty:"snapshot_groups" hcl:"snapshot_groups"`
    AccessKey               *string                           `mapstructure:"access_key" required:"true" cty:"access_key" hcl:"access_key"`
    AssumeRole              *common.FlatAssumeRoleConfig      `mapstructure:"assume_role" required:"false" cty:"assume_role" hcl:"assume_role"`
    CustomEndpointEc2       *string                           `mapstructure:"custom_endpoint_ec2" required:"false" cty:"custom_endpoint_ec2" hcl:"custom_endpoint_ec2"`
    CredsFilename           *string                           `mapstructure:"shared_credentials_file" required:"false" cty:"shared_credentials_file" hcl:"shared_credentials_file"`
    DecodeAuthZMessages     *bool                             `mapstructure:"decode_authorization_messages" required:"false" cty:"decode_authorization_messages" hcl:"decode_authorization_messages"`
    InsecureSkipTLSVerify   *bool                             `mapstructure:"insecure_skip_tls_verify" required:"false" cty:"insecure_skip_tls_verify" hcl:"insecure_skip_tls_verify"`
    MaxRetries              *int                              `mapstructure:"max_retries" required:"false" cty:"max_retries" hcl:"max_retries"`
    MFACode                 *string                           `mapstructure:"mfa_code" required:"false" cty:"mfa_code" hcl:"mfa_code"`
    ProfileName             *string                           `mapstructure:"profile" required:"false" cty:"profile" hcl:"profile"`
    RawRegion               *string                           `mapstructure:"region" required:"true" cty:"region" hcl:"region"`
    SecretKey               *string                           `mapstructure:"secret_key" required:"true" cty:"secret_key" hcl:"secret_key"`
    SkipMetadataApiCheck    *bool                             `mapstructure:"skip_metadata_api_check" cty:"skip_metadata_api_check" hcl:"skip_metadata_api_check"`
    SkipCredsValidation     *bool                             `mapstructure:"skip_credential_validation" cty:"skip_credential_validation" hcl:"skip_credential_validation"`
    Token                   *string                           `mapstructure:"token" required:"false" cty:"token" hcl:"token"`
    VaultAWSEngine          *common.FlatVaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false" cty:"vault_aws_engine" hcl:"vault_aws_engine"`
    PollingConfig           *common.FlatAWSPollingConfig      `mapstructure:"aws_polling" required:"false" cty:"aws_polling" hcl:"aws_polling"`
    AMIMappings             []common.FlatBlockDevice          `mapstructure:"ami_block_device_mappings" hcl2-schema-generator:"ami_block_device_mappings,direct" required:"false" cty:"ami_block_device_mappings" hcl:"ami_block_device_mappings"`
    ChrootMounts            [][]string                        `mapstructure:"chroot_mounts" required:"false" cty:"chroot_mounts" hcl:"chroot_mounts"`
    CommandWrapper          *string                           `mapstructure:"command_wrapper" required:"false" cty:"command_wrapper" hcl:"command_wrapper"`
    CopyFiles               []string                          `mapstructure:"copy_files" required:"false" cty:"copy_files" hcl:"copy_files"`
    DevicePath              *string                           `mapstructure:"device_path" required:"false" cty:"device_path" hcl:"device_path"`
    NVMEDevicePath          *string                           `mapstructure:"nvme_device_path" required:"false" cty:"nvme_device_path" hcl:"nvme_device_path"`
    FromScratch             *bool                             `mapstructure:"from_scratch" required:"false" cty:"from_scratch" hcl:"from_scratch"`
    MountOptions            []string                          `mapstructure:"mount_options" required:"false" cty:"mount_options" hcl:"mount_options"`
    MountPartition          *string                           `mapstructure:"mount_partition" required:"false" cty:"mount_partition" hcl:"mount_partition"`
    MountPath               *string                           `mapstructure:"mount_path" required:"false" cty:"mount_path" hcl:"mount_path"`
    PostMountCommands       []string                          `mapstructure:"post_mount_commands" required:"false" cty:"post_mount_commands" hcl:"post_mount_commands"`
    PreMountCommands        []string                          `mapstructure:"pre_mount_commands" required:"false" cty:"pre_mount_commands" hcl:"pre_mount_commands"`
    RootDeviceName          *string                           `mapstructure:"root_device_name" required:"false" cty:"root_device_name" hcl:"root_device_name"`
    RootVolumeSize          *int64                            `mapstructure:"root_volume_size" required:"false" cty:"root_volume_size" hcl:"root_volume_size"`
    RootVolumeType          *string                           `mapstructure:"root_volume_type" required:"false" cty:"root_volume_type" hcl:"root_volume_type"`
    SourceAmi               *string                           `mapstructure:"source_ami" required:"true" cty:"source_ami" hcl:"source_ami"`
    SourceAmiFilter         *common.FlatAmiFilterOptions      `mapstructure:"source_ami_filter" required:"false" cty:"source_ami_filter" hcl:"source_ami_filter"`
    RootVolumeTags          map[string]string                 `mapstructure:"root_volume_tags" required:"false" cty:"root_volume_tags" hcl:"root_volume_tags"`
    RootVolumeTag           []config.FlatKeyValue             `mapstructure:"root_volume_tag" required:"false" cty:"root_volume_tag" hcl:"root_volume_tag"`
    RootVolumeEncryptBoot   *bool                             `mapstructure:"root_volume_encrypt_boot" required:"false" cty:"root_volume_encrypt_boot" hcl:"root_volume_encrypt_boot"`
    RootVolumeKmsKeyId      *string                           `mapstructure:"root_volume_kms_key_id" required:"false" cty:"root_volume_kms_key_id" hcl:"root_volume_kms_key_id"`
    Architecture            *string                           `mapstructure:"ami_architecture" required:"false" cty:"ami_architecture" hcl:"ami_architecture"`
}

// FlatMapstructure returns a new FlatConfig.
// FlatConfig is an auto-generated flat version of Config.
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
    return new(FlatConfig)
}

// HCL2Spec returns the hcl spec of a Config.
// This spec is used by HCL to read the fields of Config.
// The decoded values from this spec will then be applied to a FlatConfig.
func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
    s := map[string]hcldec.Spec{
        "packer_build_name":             &hcldec.AttrSpec{Name: "packer_build_name", Type: cty.String, Required: false},
        "packer_builder_type":           &hcldec.AttrSpec{Name: "packer_builder_type", Type: cty.String, Required: false},
        "packer_core_version":           &hcldec.AttrSpec{Name: "packer_core_version", Type: cty.String, Required: false},
        "packer_debug":                  &hcldec.AttrSpec{Name: "packer_debug", Type: cty.Bool, Required: false},
        "packer_force":                  &hcldec.AttrSpec{Name: "packer_force", Type: cty.Bool, Required: false},
        "packer_on_error":               &hcldec.AttrSpec{Name: "packer_on_error", Type: cty.String, Required: false},
        "packer_user_variables":         &hcldec.AttrSpec{Name: "packer_user_variables", Type: cty.Map(cty.String), Required: false},
        "packer_sensitive_variables":    &hcldec.AttrSpec{Name: "packer_sensitive_variables", Type: cty.List(cty.String), Required: false},
        "ami_name":                      &hcldec.AttrSpec{Name: "ami_name", Type: cty.String, Required: false},
        "ami_description":               &hcldec.AttrSpec{Name: "ami_description", Type: cty.String, Required: false},
        "ami_virtualization_type":       &hcldec.AttrSpec{Name: "ami_virtualization_type", Type: cty.String, Required: false},
        "ami_users":                     &hcldec.AttrSpec{Name: "ami_users", Type: cty.List(cty.String), Required: false},
        "ami_groups":                    &hcldec.AttrSpec{Name: "ami_groups", Type: cty.List(cty.String), Required: false},
        "ami_product_codes":             &hcldec.AttrSpec{Name: "ami_product_codes", Type: cty.List(cty.String), Required: false},
        "ami_regions":                   &hcldec.AttrSpec{Name: "ami_regions", Type: cty.List(cty.String), Required: false},
        "skip_region_validation":        &hcldec.AttrSpec{Name: "skip_region_validation", Type: cty.Bool, Required: false},
        "tags":                          &hcldec.AttrSpec{Name: "tags", Type: cty.Map(cty.String), Required: false},
        "tag":                           &hcldec.BlockListSpec{TypeName: "tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
        "ena_support":                   &hcldec.AttrSpec{Name: "ena_support", Type: cty.Bool, Required: false},
        "sriov_support":                 &hcldec.AttrSpec{Name: "sriov_support", Type: cty.Bool, Required: false},
        "force_deregister":              &hcldec.AttrSpec{Name: "force_deregister", Type: cty.Bool, Required: false},
        "force_delete_snapshot":         &hcldec.AttrSpec{Name: "force_delete_snapshot", Type: cty.Bool, Required: false},
        "encrypt_boot":                  &hcldec.AttrSpec{Name: "encrypt_boot", Type: cty.Bool, Required: false},
        "kms_key_id":                    &hcldec.AttrSpec{Name: "kms_key_id", Type: cty.String, Required: false},
        "region_kms_key_ids":            &hcldec.AttrSpec{Name: "region_kms_key_ids", Type: cty.Map(cty.String), Required: false},
        "skip_save_build_region":        &hcldec.AttrSpec{Name: "skip_save_build_region", Type: cty.Bool, Required: false},
        "snapshot_tags":                 &hcldec.AttrSpec{Name: "snapshot_tags", Type: cty.Map(cty.String), Required: false},
        "snapshot_tag":                  &hcldec.BlockListSpec{TypeName: "snapshot_tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
        "snapshot_users":                &hcldec.AttrSpec{Name: "snapshot_users", Type: cty.List(cty.String), Required: false},
        "snapshot_groups":               &hcldec.AttrSpec{Name: "snapshot_groups", Type: cty.List(cty.String), Required: false},
        "access_key":                    &hcldec.AttrSpec{Name: "access_key", Type: cty.String, Required: false},
        "assume_role":                   &hcldec.BlockSpec{TypeName: "assume_role", Nested: hcldec.ObjectSpec((*common.FlatAssumeRoleConfig)(nil).HCL2Spec())},
        "custom_endpoint_ec2":           &hcldec.AttrSpec{Name: "custom_endpoint_ec2", Type: cty.String, Required: false},
        "shared_credentials_file":       &hcldec.AttrSpec{Name: "shared_credentials_file", Type: cty.String, Required: false},
        "decode_authorization_messages": &hcldec.AttrSpec{Name: "decode_authorization_messages", Type: cty.Bool, Required: false},
        "insecure_skip_tls_verify":      &hcldec.AttrSpec{Name: "insecure_skip_tls_verify", Type: cty.Bool, Required: false},
        "max_retries":                   &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false},
        "mfa_code":                      &hcldec.AttrSpec{Name: "mfa_code", Type: cty.String, Required: false},
        "profile":                       &hcldec.AttrSpec{Name: "profile", Type: cty.String, Required: false},
        "region":                        &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: false},
        "secret_key":                    &hcldec.AttrSpec{Name: "secret_key", Type: cty.String, Required: false},
        "skip_metadata_api_check":       &hcldec.AttrSpec{Name: "skip_metadata_api_check", Type: cty.Bool, Required: false},
        "skip_credential_validation":    &hcldec.AttrSpec{Name: "skip_credential_validation", Type: cty.Bool, Required: false},
        "token":                         &hcldec.AttrSpec{Name: "token", Type: cty.String, Required: false},
        "vault_aws_engine":              &hcldec.BlockSpec{TypeName: "vault_aws_engine", Nested: hcldec.ObjectSpec((*common.FlatVaultAWSEngineOptions)(nil).HCL2Spec())},
        "aws_polling":                   &hcldec.BlockSpec{TypeName: "aws_polling", Nested: hcldec.ObjectSpec((*common.FlatAWSPollingConfig)(nil).HCL2Spec())},
        "ami_block_device_mappings":     &hcldec.BlockListSpec{TypeName: "ami_block_device_mappings", Nested: hcldec.ObjectSpec((*common.FlatBlockDevice)(nil).HCL2Spec())},
        "chroot_mounts":                 &hcldec.AttrSpec{Name: "chroot_mounts", Type: cty.List(cty.List(cty.String)), Required: false},
        "command_wrapper":               &hcldec.AttrSpec{Name: "command_wrapper", Type: cty.String, Required: false},
        "copy_files":                    &hcldec.AttrSpec{Name: "copy_files", Type: cty.List(cty.String), Required: false},
        "device_path":                   &hcldec.AttrSpec{Name: "device_path", Type: cty.String, Required: false},
        "nvme_device_path":              &hcldec.AttrSpec{Name: "nvme_device_path", Type: cty.String, Required: false},
        "from_scratch":                  &hcldec.AttrSpec{Name: "from_scratch", Type: cty.Bool, Required: false},
        "mount_options":                 &hcldec.AttrSpec{Name: "mount_options", Type: cty.List(cty.String), Required: false},
        "mount_partition":               &hcldec.AttrSpec{Name: "mount_partition", Type: cty.String, Required: false},
        "mount_path":                    &hcldec.AttrSpec{Name: "mount_path", Type: cty.String, Required: false},
        "post_mount_commands":           &hcldec.AttrSpec{Name: "post_mount_commands", Type: cty.List(cty.String), Required: false},
        "pre_mount_commands":            &hcldec.AttrSpec{Name: "pre_mount_commands", Type: cty.List(cty.String), Required: false},
        "root_device_name":              &hcldec.AttrSpec{Name: "root_device_name", Type: cty.String, Required: false},
        "root_volume_size":              &hcldec.AttrSpec{Name: "root_volume_size", Type: cty.Number, Required: false},
        "root_volume_type":              &hcldec.AttrSpec{Name: "root_volume_type", Type: cty.String, Required: false},
        "source_ami":                    &hcldec.AttrSpec{Name: "source_ami", Type: cty.String, Required: false},
        "source_ami_filter":             &hcldec.BlockSpec{TypeName: "source_ami_filter", Nested: hcldec.ObjectSpec((*common.FlatAmiFilterOptions)(nil).HCL2Spec())},
        "root_volume_tags":              &hcldec.AttrSpec{Name: "root_volume_tags", Type: cty.Map(cty.String), Required: false},
        "root_volume_tag":               &hcldec.BlockListSpec{TypeName: "root_volume_tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
        "root_volume_encrypt_boot":      &hcldec.AttrSpec{Name: "root_volume_encrypt_boot", Type: cty.Bool, Required: false},
        "root_volume_kms_key_id":        &hcldec.AttrSpec{Name: "root_volume_kms_key_id", Type: cty.String, Required: false},
        "ami_architecture":              &hcldec.AttrSpec{Name: "ami_architecture", Type: cty.String, Required: false},
    }
    return s
}
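The generated spec above is what `ConfigSpec()` hands to Packer's HCL2 machinery. A standalone sketch of how such a spec decodes HCL attributes into cty values; the attribute values are hypothetical and the spec is trimmed to two entries:

```go
package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2/hcldec"
    "github.com/hashicorp/hcl/v2/hclparse"
    "github.com/zclconf/go-cty/cty"
)

func main() {
    src := []byte("ami_name = \"example-ami\"\nfrom_scratch = false\n")
    f, diags := hclparse.NewParser().ParseHCL(src, "example.pkr.hcl")
    if diags.HasErrors() {
        panic(diags)
    }

    // Two entries lifted from the full spec above.
    spec := hcldec.ObjectSpec{
        "ami_name":     &hcldec.AttrSpec{Name: "ami_name", Type: cty.String, Required: false},
        "from_scratch": &hcldec.AttrSpec{Name: "from_scratch", Type: cty.Bool, Required: false},
    }

    val, diags := hcldec.Decode(f.Body, spec, nil)
    if diags.HasErrors() {
        panic(diags)
    }
    fmt.Println(val.GetAttr("ami_name").AsString()) // example-ami
}
```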
@@ -1,251 +0,0 @@

package chroot

import (
    "testing"

    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)

func testConfig() map[string]interface{} {
    return map[string]interface{}{
        "ami_name":   "foo",
        "source_ami": "foo",
        "region":     "us-east-1",
        // region validation logic is checked in ami_config_test
        "skip_region_validation": true,
    }
}

func TestBuilder_ImplementsBuilder(t *testing.T) {
    var raw interface{}
    raw = &Builder{}
    if _, ok := raw.(packersdk.Builder); !ok {
        t.Fatalf("Builder should be a builder")
    }
}

func TestBuilderPrepare_AMIName(t *testing.T) {
    var b Builder
    config := testConfig()

    // Test good
    config["ami_name"] = "foo"
    config["skip_region_validation"] = true
    _, warnings, err := b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err != nil {
        t.Fatalf("should not have error: %s", err)
    }

    // Test bad
    config["ami_name"] = "foo {{"
    b = Builder{}
    _, warnings, err = b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err == nil {
        t.Fatal("should have error")
    }

    // Test bad
    delete(config, "ami_name")
    b = Builder{}
    _, warnings, err = b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err == nil {
        t.Fatal("should have error")
    }
}

func TestBuilderPrepare_ChrootMounts(t *testing.T) {
    b := &Builder{}
    config := testConfig()

    config["chroot_mounts"] = nil
    _, warnings, err := b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err != nil {
        t.Errorf("err: %s", err)
    }
}

func TestBuilderPrepare_ChrootMountsBadDefaults(t *testing.T) {
    b := &Builder{}
    config := testConfig()

    config["chroot_mounts"] = [][]string{
        {"bad"},
    }
    _, warnings, err := b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err == nil {
        t.Fatal("should have error")
    }
}
func TestBuilderPrepare_SourceAmi(t *testing.T) {
    b := &Builder{}
    config := testConfig()

    config["source_ami"] = ""
    _, warnings, err := b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err == nil {
        t.Fatal("should have error")
    }

    config["source_ami"] = "foo"
    _, warnings, err = b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err != nil {
        t.Errorf("err: %s", err)
    }
}

func TestBuilderPrepare_CommandWrapper(t *testing.T) {
    b := &Builder{}
    config := testConfig()

    config["command_wrapper"] = "echo hi; {{.Command}}"
    _, warnings, err := b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err != nil {
        t.Errorf("err: %s", err)
    }
}

func TestBuilderPrepare_CopyFiles(t *testing.T) {
    b := &Builder{}
    config := testConfig()

    _, warnings, err := b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err != nil {
        t.Errorf("err: %s", err)
    }

    if len(b.config.CopyFiles) != 1 && b.config.CopyFiles[0] != "/etc/resolv.conf" {
        t.Errorf("Was expecting default value for copy_files.")
    }
}

func TestBuilderPrepare_CopyFilesNoDefault(t *testing.T) {
    b := &Builder{}
    config := testConfig()

    config["copy_files"] = []string{}
    _, warnings, err := b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err != nil {
        t.Errorf("err: %s", err)
    }

    if len(b.config.CopyFiles) > 0 {
        t.Errorf("Was expecting no default value for copy_files. Found %v",
            b.config.CopyFiles)
    }
}

func TestBuilderPrepare_RootDeviceNameAndAMIMappings(t *testing.T) {
    var b Builder
    config := testConfig()

    config["root_device_name"] = "/dev/sda"
    config["ami_block_device_mappings"] = []interface{}{map[string]string{}}
    config["root_volume_size"] = 15
    _, warnings, err := b.Prepare(config)
    if len(warnings) == 0 {
        t.Fatal("Missing warning, stating block device mappings will be overwritten")
    } else if len(warnings) > 1 {
        t.Fatalf("excessive warnings: %#v", warnings)
    }
    if err != nil {
        t.Fatalf("should not have error: %s", err)
    }
}

func TestBuilderPrepare_AMIMappingsNoRootDeviceName(t *testing.T) {
    var b Builder
    config := testConfig()

    config["ami_block_device_mappings"] = []interface{}{map[string]string{}}
    _, warnings, err := b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err == nil {
        t.Fatalf("should have error")
    }
}

func TestBuilderPrepare_RootDeviceNameNoAMIMappings(t *testing.T) {
    var b Builder
    config := testConfig()

    config["root_device_name"] = "/dev/sda"
    _, warnings, err := b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err == nil {
        t.Fatalf("should have error")
    }
}

func TestBuilderPrepare_ReturnGeneratedData(t *testing.T) {
    var b Builder
    config := testConfig()

    generatedData, warnings, err := b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
    }
    if err != nil {
        t.Fatalf("should not have error: %s", err)
    }
    if len(generatedData) == 0 {
        t.Fatalf("Generated data should not be empty")
    }
    if generatedData[0] != "SourceAMIName" {
        t.Fatalf("Generated data should contain SourceAMIName")
    }
    if generatedData[1] != "BuildRegion" {
        t.Fatalf("Generated data should contain BuildRegion")
    }
    if generatedData[2] != "SourceAMI" {
        t.Fatalf("Generated data should contain SourceAMI")
    }
    if generatedData[3] != "SourceAMICreationDate" {
        t.Fatalf("Generated data should contain SourceAMICreationDate")
    }
    if generatedData[4] != "SourceAMIOwner" {
        t.Fatalf("Generated data should contain SourceAMIOwner")
    }
    if generatedData[5] != "SourceAMIOwnerName" {
        t.Fatalf("Generated data should contain SourceAMIOwnerName")
    }
    if generatedData[6] != "Device" {
        t.Fatalf("Generated data should contain Device")
    }
    if generatedData[7] != "MountPath" {
        t.Fatalf("Generated data should contain MountPath")
    }
}
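The tests above do not exercise the from_scratch branch of Prepare in builder.go. A hypothetical extra test, not part of the deleted file, that would fit in the same file with the same imports:

```go
func TestBuilderPrepare_FromScratchRequiresFields(t *testing.T) {
    var b Builder
    config := testConfig()

    config["from_scratch"] = true
    // root_volume_size, pre_mount_commands, ami_virtualization_type,
    // root_device_name and ami_block_device_mappings are all unset,
    // so Prepare should return a validation error.
    _, _, err := b.Prepare(config)
    if err == nil {
        t.Fatal("should have error when from_scratch is set without its required options")
    }
}
```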
@@ -1 +0,0 @@

package chroot
@@ -1,51 +0,0 @@

package chroot

import (
    "fmt"
    "io/ioutil"
    "os"
    "runtime"
    "testing"

    "github.com/hashicorp/packer-plugin-sdk/common"
)

func TestCopyFile(t *testing.T) {
    if runtime.GOOS == "windows" {
        return
    }

    first, err := ioutil.TempFile("", "copy_files_test")
    if err != nil {
        t.Fatalf("couldn't create temp file.")
    }
    defer os.Remove(first.Name())
    newName := first.Name() + "-new"

    payload := "copy_files_test.go payload"
    if _, err = first.WriteString(payload); err != nil {
        t.Fatalf("Couldn't write payload to first file.")
    }
    first.Sync()

    cmd := common.ShellCommand(fmt.Sprintf("cp %s %s", first.Name(), newName))
    if err := cmd.Run(); err != nil {
        t.Fatalf("Couldn't copy file")
    }
    defer os.Remove(newName)

    second, err := os.Open(newName)
    if err != nil {
        t.Fatalf("Couldn't open copied file.")
    }
    defer second.Close()

    var copiedPayload = make([]byte, len(payload))
    if _, err := second.Read(copiedPayload); err != nil {
        t.Fatalf("Couldn't open copied file for reading.")
    }

    if string(copiedPayload) != payload {
        t.Fatalf("payload not copied.")
    }
}
@@ -1,70 +0,0 @@

package chroot

import (
    "errors"
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// AvailableDevice finds an available device and returns it. Note that
// you should externally hold a flock or something in order to guarantee
// that this device is available across processes.
func AvailableDevice() (string, error) {
    prefix, err := devicePrefix()
    if err != nil {
        return "", err
    }

    letters := "fghijklmnop"
    for _, letter := range letters {
        device := fmt.Sprintf("/dev/%s%c", prefix, letter)

        // If the block device itself, i.e. /dev/sf, exists, then we
        // can't use any of the numbers either.
        if _, err := os.Stat(device); err == nil {
            continue
        }

        // To be able to build both Paravirtual and HVM images, the unnumbered
        // device and the first numbered one must be available.
        // E.g. /dev/xvdf and /dev/xvdf1
        numbered_device := fmt.Sprintf("%s%d", device, 1)
        if _, err := os.Stat(numbered_device); err != nil {
            return device, nil
        }
    }

    return "", errors.New("available device could not be found")
}

// devicePrefix returns the prefix ("sd" or "xvd" or so on) of the devices
// on the system.
func devicePrefix() (string, error) {
    available := []string{"sd", "xvd"}

    f, err := os.Open("/sys/block")
    if err != nil {
        return "", err
    }
    defer f.Close()

    dirs, err := f.Readdirnames(-1)
    if dirs != nil && len(dirs) > 0 {
        for _, dir := range dirs {
            dirBase := filepath.Base(dir)
            for _, prefix := range available {
                if strings.HasPrefix(dirBase, prefix) {
                    return prefix, nil
                }
            }
        }
    }

    if err != nil {
        return "", err
    }

    return "", errors.New("device prefix could not be detected")
}
@@ -1,10 +0,0 @@

package chroot

import "testing"

func TestDevicePrefixMatch(t *testing.T) {
    /*
        if devicePrefixMatch("nvme0n1") != "" {
        }
    */
}
@@ -1,16 +0,0 @@

// +build windows

package chroot

import (
    "errors"
    "os"
)

func lockFile(*os.File) error {
    return errors.New("not supported on Windows")
}

func unlockFile(f *os.File) error {
    return nil
}
@@ -1,27 +0,0 @@

// +build !windows

package chroot

import (
    "os"

    "golang.org/x/sys/unix"
)

// See: http://linux.die.net/include/sys/file.h
const LOCK_EX = 2
const LOCK_NB = 4
const LOCK_UN = 8

func lockFile(f *os.File) error {
    err := unix.Flock(int(f.Fd()), LOCK_EX)
    if err != nil {
        return err
    }

    return nil
}

func unlockFile(f *os.File) error {
    return unix.Flock(int(f.Fd()), LOCK_UN)
}
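The comment on AvailableDevice in device.go says callers should hold a flock while picking a device, which is what these lockfile helpers provide. A sketch of that pattern as a hypothetical helper in the same package; the helper name and lock path parameter are illustrative, not part of the original code:

```go
package chroot

import "os"

// exampleFindDeviceLocked holds an exclusive flock for the whole
// device-selection window so concurrent builds on the same host
// cannot pick the same /dev entry.
func exampleFindDeviceLocked(lockPath string) (string, error) {
    f, err := os.OpenFile(lockPath, os.O_RDWR|os.O_CREATE, 0644)
    if err != nil {
        return "", err
    }
    defer f.Close()

    if err := lockFile(f); err != nil { // blocks until the exclusive lock is held
        return "", err
    }
    defer unlockFile(f)

    // Safe to probe /dev for a free xvd*/sd* device while the lock is held.
    return AvailableDevice()
}
```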
@@ -1,97 +0,0 @@

package chroot

import (
    "context"
    "fmt"
    "strings"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/ec2"
    "github.com/hashicorp/packer-plugin-sdk/multistep"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    awscommon "github.com/hashicorp/packer/builder/amazon/common"
)

// StepAttachVolume attaches the previously created volume to an
// available device location.
//
// Produces:
//   device string - The location where the volume was attached.
//   attach_cleanup CleanupFunc
type StepAttachVolume struct {
    PollingConfig *awscommon.AWSPollingConfig
    attached      bool
    volumeId      string
}

func (s *StepAttachVolume) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    ec2conn := state.Get("ec2").(*ec2.EC2)
    device := state.Get("device").(string)
    instance := state.Get("instance").(*ec2.Instance)
    ui := state.Get("ui").(packersdk.Ui)
    volumeId := state.Get("volume_id").(string)

    // For the API call, it expects "sd" prefixed devices.
    attachVolume := strings.Replace(device, "/xvd", "/sd", 1)

    ui.Say(fmt.Sprintf("Attaching the root volume to %s", attachVolume))
    _, err := ec2conn.AttachVolume(&ec2.AttachVolumeInput{
        InstanceId: instance.InstanceId,
        VolumeId:   &volumeId,
        Device:     &attachVolume,
    })
    if err != nil {
        err := fmt.Errorf("Error attaching volume: %s", err)
        state.Put("error", err)
        ui.Error(err.Error())
        return multistep.ActionHalt
    }

    // Mark that we attached it so we can detach it later
    s.attached = true
    s.volumeId = volumeId

    // Wait for the volume to become attached
    err = s.PollingConfig.WaitUntilVolumeAttached(ctx, ec2conn, s.volumeId)
    if err != nil {
        err := fmt.Errorf("Error waiting for volume: %s", err)
        state.Put("error", err)
        ui.Error(err.Error())
        return multistep.ActionHalt
    }

    state.Put("attach_cleanup", s)
    return multistep.ActionContinue
}

func (s *StepAttachVolume) Cleanup(state multistep.StateBag) {
    ui := state.Get("ui").(packersdk.Ui)
    if err := s.CleanupFunc(state); err != nil {
        ui.Error(err.Error())
    }
}

func (s *StepAttachVolume) CleanupFunc(state multistep.StateBag) error {
    if !s.attached {
        return nil
    }

    ec2conn := state.Get("ec2").(*ec2.EC2)
    ui := state.Get("ui").(packersdk.Ui)

    ui.Say("Detaching EBS volume...")
    _, err := ec2conn.DetachVolume(&ec2.DetachVolumeInput{VolumeId: &s.volumeId})
    if err != nil {
        return fmt.Errorf("Error detaching EBS volume: %s", err)
    }

    s.attached = false

    // Wait for the volume to detach
    err = s.PollingConfig.WaitUntilVolumeDetached(aws.BackgroundContext(), ec2conn, s.volumeId)
    if err != nil {
        return fmt.Errorf("Error waiting for volume: %s", err)
    }

    return nil
}
@@ -1,15 +0,0 @@

package chroot

import (
    "testing"

    "github.com/hashicorp/packer-plugin-sdk/chroot"
)

func TestAttachVolumeCleanupFunc_ImplementsCleanupFunc(t *testing.T) {
    var raw interface{}
    raw = new(StepAttachVolume)
    if _, ok := raw.(chroot.Cleanup); !ok {
        t.Fatalf("cleanup func should be a CleanupFunc")
    }
}
@ -1,32 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepCheckRootDevice makes sure the root device on the AMI is EBS-backed.
|
||||
type StepCheckRootDevice struct{}
|
||||
|
||||
func (s *StepCheckRootDevice) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
image := state.Get("source_image").(*ec2.Image)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
ui.Say("Checking the root device on source AMI...")
|
||||
|
||||
// It must be EBS-backed otherwise the build won't work
|
||||
if *image.RootDeviceType != "ebs" {
|
||||
err := fmt.Errorf("The root device of the source AMI must be EBS-backed.")
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepCheckRootDevice) Cleanup(multistep.StateBag) {}
|
|
@ -1,182 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||
)
|
||||
|
||||
// StepCreateVolume creates a new volume from the snapshot of the root
|
||||
// device of the AMI.
|
||||
//
|
||||
// Produces:
|
||||
// volume_id string - The ID of the created volume
|
||||
type StepCreateVolume struct {
|
||||
PollingConfig *awscommon.AWSPollingConfig
|
||||
volumeId string
|
||||
RootVolumeSize int64
|
||||
RootVolumeType string
|
||||
RootVolumeTags map[string]string
|
||||
RootVolumeEncryptBoot config.Trilean
|
||||
RootVolumeKmsKeyId string
|
||||
Ctx interpolate.Context
|
||||
}
|
||||
|
||||
func (s *StepCreateVolume) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
config := state.Get("config").(*Config)
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
instance := state.Get("instance").(*ec2.Instance)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
volTags, err := awscommon.TagMap(s.RootVolumeTags).EC2Tags(s.Ctx, *ec2conn.Config.Region, state)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error tagging volumes: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Collect tags for tagging on resource creation
|
||||
var tagSpecs []*ec2.TagSpecification
|
||||
|
||||
if len(volTags) > 0 {
|
||||
runVolTags := &ec2.TagSpecification{
|
||||
ResourceType: aws.String("volume"),
|
||||
Tags: volTags,
|
||||
}
|
||||
|
||||
tagSpecs = append(tagSpecs, runVolTags)
|
||||
}
|
||||
|
||||
var createVolume *ec2.CreateVolumeInput
|
||||
if config.FromScratch {
|
||||
rootVolumeType := ec2.VolumeTypeGp2
|
||||
if s.RootVolumeType == "io1" {
|
||||
err := errors.New("Cannot use io1 volume when building from scratch")
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
} else if s.RootVolumeType != "" {
|
||||
rootVolumeType = s.RootVolumeType
|
||||
}
|
||||
createVolume = &ec2.CreateVolumeInput{
|
||||
AvailabilityZone: instance.Placement.AvailabilityZone,
|
||||
Size: aws.Int64(s.RootVolumeSize),
|
||||
VolumeType: aws.String(rootVolumeType),
|
||||
}
|
||||
|
||||
} else {
|
||||
// Determine the root device snapshot
|
||||
image := state.Get("source_image").(*ec2.Image)
|
||||
log.Printf("Searching for root device of the image (%s)", *image.RootDeviceName)
|
||||
var rootDevice *ec2.BlockDeviceMapping
|
||||
for _, device := range image.BlockDeviceMappings {
|
||||
if *device.DeviceName == *image.RootDeviceName {
|
||||
rootDevice = device
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
ui.Say("Creating the root volume...")
|
||||
createVolume, err = s.buildCreateVolumeInput(*instance.Placement.AvailabilityZone, rootDevice)
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
|
||||
if len(tagSpecs) > 0 {
|
||||
createVolume.SetTagSpecifications(tagSpecs)
|
||||
volTags.Report(ui)
|
||||
}
|
||||
log.Printf("Create args: %+v", createVolume)
|
||||
|
||||
createVolumeResp, err := ec2conn.CreateVolume(createVolume)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating root volume: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Set the volume ID so we remember to delete it later
|
||||
s.volumeId = *createVolumeResp.VolumeId
|
||||
log.Printf("Volume ID: %s", s.volumeId)
|
||||
|
||||
// Wait for the volume to become ready
|
||||
err = s.PollingConfig.WaitUntilVolumeAvailable(ctx, ec2conn, s.volumeId)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error waiting for volume: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
state.Put("volume_id", s.volumeId)
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepCreateVolume) Cleanup(state multistep.StateBag) {
|
||||
if s.volumeId == "" {
|
||||
return
|
||||
}
|
||||
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
ui.Say("Deleting the created EBS volume...")
|
||||
_, err := ec2conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeId: &s.volumeId})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("Error deleting EBS volume: %s", err))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StepCreateVolume) buildCreateVolumeInput(az string, rootDevice *ec2.BlockDeviceMapping) (*ec2.CreateVolumeInput, error) {
|
||||
if rootDevice == nil {
|
||||
return nil, fmt.Errorf("Couldn't find root device!")
|
||||
}
|
||||
createVolumeInput := &ec2.CreateVolumeInput{
|
||||
AvailabilityZone: aws.String(az),
|
||||
Size: rootDevice.Ebs.VolumeSize,
|
||||
SnapshotId: rootDevice.Ebs.SnapshotId,
|
||||
VolumeType: rootDevice.Ebs.VolumeType,
|
||||
Iops: rootDevice.Ebs.Iops,
|
||||
Encrypted: rootDevice.Ebs.Encrypted,
|
||||
KmsKeyId: rootDevice.Ebs.KmsKeyId,
|
||||
}
|
||||
if s.RootVolumeSize > *rootDevice.Ebs.VolumeSize {
|
||||
createVolumeInput.Size = aws.Int64(s.RootVolumeSize)
|
||||
}
|
||||
|
||||
if s.RootVolumeEncryptBoot.True() {
|
||||
createVolumeInput.Encrypted = aws.Bool(true)
|
||||
}
|
||||
|
||||
if s.RootVolumeKmsKeyId != "" {
|
||||
createVolumeInput.KmsKeyId = aws.String(s.RootVolumeKmsKeyId)
|
||||
}
|
||||
|
||||
if s.RootVolumeType == "" || s.RootVolumeType == *rootDevice.Ebs.VolumeType {
|
||||
return createVolumeInput, nil
|
||||
}
|
||||
|
||||
if s.RootVolumeType == "io1" {
|
||||
return nil, fmt.Errorf("Root volume type cannot be io1, because existing root volume type was %s", *rootDevice.Ebs.VolumeType)
|
||||
}
|
||||
|
||||
createVolumeInput.VolumeType = aws.String(s.RootVolumeType)
|
||||
// non io1 cannot set iops
|
||||
createVolumeInput.Iops = nil
|
||||
|
||||
return createVolumeInput, nil
|
||||
}
|
|
@ -1,97 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
confighelper "github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func buildTestRootDevice() *ec2.BlockDeviceMapping {
|
||||
return &ec2.BlockDeviceMapping{
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeSize: aws.Int64(10),
|
||||
SnapshotId: aws.String("snap-1234"),
|
||||
VolumeType: aws.String("gp2"),
|
||||
Encrypted: aws.Bool(false),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateVolume_Default(t *testing.T) {
|
||||
stepCreateVolume := new(StepCreateVolume)
|
||||
_, err := stepCreateVolume.buildCreateVolumeInput("test-az", buildTestRootDevice())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestCreateVolume_Shrink(t *testing.T) {
|
||||
stepCreateVolume := StepCreateVolume{RootVolumeSize: 1}
|
||||
testRootDevice := buildTestRootDevice()
|
||||
ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
|
||||
assert.NoError(t, err)
|
||||
// Ensure that the new value is equal to the size of the old root device
|
||||
assert.Equal(t, *ret.Size, *testRootDevice.Ebs.VolumeSize)
|
||||
}
|
||||
|
||||
func TestCreateVolume_Expand(t *testing.T) {
|
||||
stepCreateVolume := StepCreateVolume{RootVolumeSize: 25}
|
||||
testRootDevice := buildTestRootDevice()
|
||||
ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
|
||||
assert.NoError(t, err)
|
||||
// Ensure that the new value is equal to the size of the value passed in
|
||||
assert.Equal(t, *ret.Size, stepCreateVolume.RootVolumeSize)
|
||||
}
|
||||
|
||||
func TestCreateVolume_io1_to_io1(t *testing.T) {
|
||||
stepCreateVolume := StepCreateVolume{RootVolumeType: "io1"}
|
||||
testRootDevice := buildTestRootDevice()
|
||||
testRootDevice.Ebs.VolumeType = aws.String("io1")
|
||||
testRootDevice.Ebs.Iops = aws.Int64(1000)
|
||||
ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, *ret.VolumeType, stepCreateVolume.RootVolumeType)
|
||||
assert.Equal(t, *ret.Iops, *testRootDevice.Ebs.Iops)
|
||||
}
|
||||
|
||||
func TestCreateVolume_io1_to_gp2(t *testing.T) {
|
||||
stepCreateVolume := StepCreateVolume{RootVolumeType: "gp2"}
|
||||
testRootDevice := buildTestRootDevice()
|
||||
testRootDevice.Ebs.VolumeType = aws.String("io1")
|
||||
testRootDevice.Ebs.Iops = aws.Int64(1000)
|
||||
|
||||
ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, *ret.VolumeType, stepCreateVolume.RootVolumeType)
|
||||
assert.Nil(t, ret.Iops)
|
||||
}
|
||||
|
||||
func TestCreateVolume_gp2_to_io1(t *testing.T) {
|
||||
stepCreateVolume := StepCreateVolume{RootVolumeType: "io1"}
|
||||
testRootDevice := buildTestRootDevice()
|
||||
|
||||
_, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCreateVolume_Encrypted(t *testing.T) {
|
||||
stepCreateVolume := StepCreateVolume{RootVolumeEncryptBoot: confighelper.TrileanFromBool(true)}
|
||||
testRootDevice := buildTestRootDevice()
|
||||
ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
|
||||
assert.NoError(t, err)
|
||||
// Ensure that the new value is equal to the the value passed in
|
||||
assert.Equal(t, confighelper.TrileanFromBool(*ret.Encrypted), stepCreateVolume.RootVolumeEncryptBoot)
|
||||
}
|
||||
|
||||
func TestCreateVolume_Custom_KMS_Key_Encrypted(t *testing.T) {
|
||||
stepCreateVolume := StepCreateVolume{
|
||||
RootVolumeEncryptBoot: confighelper.TrileanFromBool(true),
|
||||
RootVolumeKmsKeyId: "alias/1234",
|
||||
}
|
||||
testRootDevice := buildTestRootDevice()
|
||||
ret, err := stepCreateVolume.buildCreateVolumeInput("test-az", testRootDevice)
|
||||
assert.NoError(t, err)
|
||||
// Ensure that the new value is equal to the value passed in
|
||||
assert.Equal(t, *ret.KmsKeyId, stepCreateVolume.RootVolumeKmsKeyId)
|
||||
}
|
|
@ -1,31 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/chroot"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepEarlyUnflock unlocks the flock.
|
||||
type StepEarlyUnflock struct{}
|
||||
|
||||
func (s *StepEarlyUnflock) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
cleanup := state.Get("flock_cleanup").(chroot.Cleanup)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
log.Println("Unlocking file lock...")
|
||||
if err := cleanup.CleanupFunc(state); err != nil {
|
||||
err := fmt.Errorf("Error unlocking file lock: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepEarlyUnflock) Cleanup(state multistep.StateBag) {}
|
|
@ -1,74 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepFlock provisions the instance within a chroot.
|
||||
//
|
||||
// Produces:
|
||||
// flock_cleanup Cleanup - To perform early cleanup
|
||||
type StepFlock struct {
|
||||
fh *os.File
|
||||
}
|
||||
|
||||
func (s *StepFlock) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
lockfile := "/var/lock/packer-chroot/lock"
|
||||
if err := os.MkdirAll(filepath.Dir(lockfile), 0755); err != nil {
|
||||
err := fmt.Errorf("Error creating lock: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
log.Printf("Obtaining lock: %s", lockfile)
|
||||
f, err := os.Create(lockfile)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating lock: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// LOCK!
|
||||
if err := lockFile(f); err != nil {
|
||||
err := fmt.Errorf("Error obtaining lock: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Set the file handle, we can't close it because we need to hold
|
||||
// the lock.
|
||||
s.fh = f
|
||||
|
||||
state.Put("flock_cleanup", s)
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepFlock) Cleanup(state multistep.StateBag) {
|
||||
s.CleanupFunc(state)
|
||||
}
|
||||
|
||||
func (s *StepFlock) CleanupFunc(state multistep.StateBag) error {
|
||||
if s.fh == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Printf("Unlocking: %s", s.fh.Name())
|
||||
if err := unlockFile(s.fh); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.fh = nil
|
||||
return nil
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/chroot"
|
||||
)
|
||||
|
||||
func TestFlockCleanupFunc_ImplementsCleanupFunc(t *testing.T) {
|
||||
var raw interface{}
|
||||
raw = new(StepFlock)
|
||||
if _, ok := raw.(chroot.Cleanup); !ok {
|
||||
t.Fatalf("cleanup func should be a CleanupFunc")
|
||||
}
|
||||
}
|
|
@ -1,60 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepInstanceInfo verifies that this builder is running on an EC2 instance.
|
||||
type StepInstanceInfo struct{}
|
||||
|
||||
func (s *StepInstanceInfo) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
session := state.Get("awsSession").(*session.Session)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
// Get our own instance ID
|
||||
ui.Say("Gathering information about this EC2 instance...")
|
||||
|
||||
ec2meta := ec2metadata.New(session)
|
||||
identity, err := ec2meta.GetInstanceIdentityDocument()
|
||||
if err != nil {
|
||||
err := fmt.Errorf(
|
||||
"Error retrieving the ID of the instance Packer is running on.\n" +
|
||||
"Please verify Packer is running on a proper AWS EC2 instance.")
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
log.Printf("Instance ID: %s", identity.InstanceID)
|
||||
|
||||
// Query the entire instance metadata
|
||||
instancesResp, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{InstanceIds: []*string{&identity.InstanceID}})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error getting instance data: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
if len(instancesResp.Reservations) == 0 {
|
||||
err := fmt.Errorf("Error getting instance data: no instance found.")
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
instance := instancesResp.Reservations[0].Instances[0]
|
||||
state.Put("instance", instance)
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepInstanceInfo) Cleanup(multistep.StateBag) {}
|
|
@ -1,157 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
type mountPathData struct {
|
||||
Device string
|
||||
}
|
||||
|
||||
// StepMountDevice mounts the attached device.
|
||||
//
|
||||
// Produces:
|
||||
// mount_path string - The location where the volume was mounted.
|
||||
// mount_device_cleanup CleanupFunc - To perform early cleanup
|
||||
type StepMountDevice struct {
|
||||
MountOptions []string
|
||||
MountPartition string
|
||||
|
||||
mountPath string
|
||||
GeneratedData *packerbuilderdata.GeneratedData
|
||||
}
|
||||
|
||||
func (s *StepMountDevice) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
config := state.Get("config").(*Config)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
device := state.Get("device").(string)
|
||||
if config.NVMEDevicePath != "" {
|
||||
// customizable device path for mounting NVME block devices on c5 and m5 HVM
|
||||
device = config.NVMEDevicePath
|
||||
}
|
||||
wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
|
||||
|
||||
var virtualizationType string
|
||||
if config.FromScratch || config.AMIVirtType != "" {
|
||||
virtualizationType = config.AMIVirtType
|
||||
} else {
|
||||
image := state.Get("source_image").(*ec2.Image)
|
||||
virtualizationType = *image.VirtualizationType
|
||||
log.Printf("Source image virtualization type is: %s", virtualizationType)
|
||||
}
|
||||
|
||||
ictx := config.ctx
|
||||
|
||||
ictx.Data = &mountPathData{Device: filepath.Base(device)}
|
||||
mountPath, err := interpolate.Render(config.MountPath, &ictx)
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error preparing mount directory: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
mountPath, err = filepath.Abs(mountPath)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error preparing mount directory: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
log.Printf("Mount path: %s", mountPath)
|
||||
|
||||
if err := os.MkdirAll(mountPath, 0755); err != nil {
|
||||
err := fmt.Errorf("Error creating mount directory: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
deviceMount := device
|
||||
|
||||
if virtualizationType == "hvm" && s.MountPartition != "0" {
|
||||
deviceMount = fmt.Sprintf("%s%s", device, s.MountPartition)
|
||||
}
|
||||
state.Put("deviceMount", deviceMount)
|
||||
|
||||
ui.Say("Mounting the root device...")
|
||||
stderr := new(bytes.Buffer)
|
||||
|
||||
// build mount options from mount_options config, useful for nouuid options
|
||||
// or other specific device type settings for mount
|
||||
opts := ""
|
||||
if len(s.MountOptions) > 0 {
|
||||
opts = "-o " + strings.Join(s.MountOptions, " -o ")
|
||||
}
|
||||
mountCommand, err := wrappedCommand(
|
||||
fmt.Sprintf("mount %s %s %s", opts, deviceMount, mountPath))
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating mount command: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
log.Printf("[DEBUG] (step mount) mount command is %s", mountCommand)
|
||||
cmd := common.ShellCommand(mountCommand)
|
||||
cmd.Stderr = stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
err := fmt.Errorf(
|
||||
"Error mounting root volume: %s\nStderr: %s", err, stderr.String())
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Set the mount path so we remember to unmount it later
|
||||
s.mountPath = mountPath
|
||||
state.Put("mount_path", s.mountPath)
|
||||
s.GeneratedData.Put("MountPath", s.mountPath)
|
||||
state.Put("mount_device_cleanup", s)
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepMountDevice) Cleanup(state multistep.StateBag) {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
if err := s.CleanupFunc(state); err != nil {
|
||||
ui.Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StepMountDevice) CleanupFunc(state multistep.StateBag) error {
|
||||
if s.mountPath == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
|
||||
|
||||
ui.Say("Unmounting the root device...")
|
||||
unmountCommand, err := wrappedCommand(fmt.Sprintf("umount %s", s.mountPath))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating unmount command: %s", err)
|
||||
}
|
||||
|
||||
cmd := common.ShellCommand(unmountCommand)
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("Error unmounting root device: %s", err)
|
||||
}
|
||||
|
||||
s.mountPath = ""
|
||||
return nil
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/chroot"
|
||||
)
|
||||
|
||||
func TestMountDeviceCleanupFunc_ImplementsCleanupFunc(t *testing.T) {
|
||||
var raw interface{}
|
||||
raw = new(StepMountDevice)
|
||||
if _, ok := raw.(chroot.Cleanup); !ok {
|
||||
t.Fatalf("cleanup func should be a CleanupFunc")
|
||||
}
|
||||
}
|
|
@ -1,49 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
)
|
||||
|
||||
// StepPrepareDevice finds an available device and sets it.
|
||||
type StepPrepareDevice struct {
|
||||
GeneratedData *packerbuilderdata.GeneratedData
|
||||
}
|
||||
|
||||
func (s *StepPrepareDevice) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
config := state.Get("config").(*Config)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
device := config.DevicePath
|
||||
if device == "" {
|
||||
var err error
|
||||
log.Println("Device path not specified, searching for available device...")
|
||||
device, err = AvailableDevice()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error finding available device: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := os.Stat(device); err == nil {
|
||||
err := fmt.Errorf("Device is in use: %s", device)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
log.Printf("Device: %s", device)
|
||||
state.Put("device", device)
|
||||
s.GeneratedData.Put("Device", device)
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepPrepareDevice) Cleanup(state multistep.StateBag) {}
|
|
@ -1,169 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/random"
|
||||
confighelper "github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||
)
|
||||
|
||||
// StepRegisterAMI creates the AMI.
|
||||
type StepRegisterAMI struct {
|
||||
PollingConfig *awscommon.AWSPollingConfig
|
||||
RootVolumeSize int64
|
||||
EnableAMIENASupport confighelper.Trilean
|
||||
EnableAMISriovNetSupport bool
|
||||
AMISkipBuildRegion bool
|
||||
}
|
||||
|
||||
func (s *StepRegisterAMI) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
config := state.Get("config").(*Config)
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
snapshotID := state.Get("snapshot_id").(string)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
ui.Say("Registering the AMI...")
|
||||
|
||||
var registerOpts *ec2.RegisterImageInput
|
||||
|
||||
// Create the image
|
||||
amiName := config.AMIName
|
||||
state.Put("intermediary_image", false)
|
||||
if config.AMIEncryptBootVolume.True() || s.AMISkipBuildRegion {
|
||||
state.Put("intermediary_image", true)
|
||||
|
||||
// From AWS SDK docs: You can encrypt a copy of an unencrypted snapshot,
|
||||
// but you cannot use it to create an unencrypted copy of an encrypted
|
||||
// snapshot. Your default CMK for EBS is used unless you specify a
|
||||
// non-default key using KmsKeyId.
|
||||
|
||||
// If encrypt_boot is nil or true, we need to create a temporary image
|
||||
// so that in step_region_copy, we can copy it with the correct
|
||||
// encryption
|
||||
amiName = random.AlphaNum(7)
|
||||
}
|
||||
|
||||
// Source Image is only required to be passed if the image is not from scratch
|
||||
if config.FromScratch {
|
||||
registerOpts = buildBaseRegisterOpts(config, nil, s.RootVolumeSize, snapshotID, amiName)
|
||||
} else {
|
||||
image := state.Get("source_image").(*ec2.Image)
|
||||
registerOpts = buildBaseRegisterOpts(config, image, s.RootVolumeSize, snapshotID, amiName)
|
||||
}
|
||||
|
||||
if s.EnableAMISriovNetSupport {
|
||||
// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
|
||||
// As of February 2017, this applies to C3, C4, D2, I2, R3, and M4 (excluding m4.16xlarge)
|
||||
registerOpts.SriovNetSupport = aws.String("simple")
|
||||
}
|
||||
if s.EnableAMIENASupport.True() {
|
||||
// Set EnaSupport to true
|
||||
// As of February 2017, this applies to C5, I3, P2, R4, X1, and m4.16xlarge
|
||||
registerOpts.EnaSupport = aws.Bool(true)
|
||||
}
|
||||
|
||||
registerResp, err := ec2conn.RegisterImage(registerOpts)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error registering AMI: %s", err))
|
||||
ui.Error(state.Get("error").(error).Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Set the AMI ID in the state
|
||||
ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageId))
|
||||
amis := make(map[string]string)
|
||||
amis[*ec2conn.Config.Region] = *registerResp.ImageId
|
||||
state.Put("amis", amis)
|
||||
|
||||
ui.Say("Waiting for AMI to become ready...")
|
||||
if err := s.PollingConfig.WaitUntilAMIAvailable(ctx, ec2conn, *registerResp.ImageId); err != nil {
|
||||
err := fmt.Errorf("Error waiting for AMI: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepRegisterAMI) Cleanup(state multistep.StateBag) {}
|
||||
|
||||
// Builds the base register opts with architecture, name, root block device, mappings, virtualizationtype
|
||||
func buildBaseRegisterOpts(config *Config, sourceImage *ec2.Image, rootVolumeSize int64, snapshotID string, amiName string) *ec2.RegisterImageInput {
|
||||
var (
|
||||
mappings []*ec2.BlockDeviceMapping
|
||||
rootDeviceName string
|
||||
)
|
||||
|
||||
generatingNewBlockDeviceMappings := config.FromScratch || len(config.AMIMappings) > 0
|
||||
if generatingNewBlockDeviceMappings {
|
||||
mappings = config.AMIMappings.BuildEC2BlockDeviceMappings()
|
||||
rootDeviceName = config.RootDeviceName
|
||||
} else {
|
||||
// If config.FromScratch is false, source image must be set
|
||||
mappings = sourceImage.BlockDeviceMappings
|
||||
rootDeviceName = *sourceImage.RootDeviceName
|
||||
}
|
||||
|
||||
newMappings := make([]*ec2.BlockDeviceMapping, len(mappings))
|
||||
for i, device := range mappings {
|
||||
newDevice := device
|
||||
if *newDevice.DeviceName == rootDeviceName {
|
||||
if newDevice.Ebs != nil {
|
||||
newDevice.Ebs.SnapshotId = aws.String(snapshotID)
|
||||
} else {
|
||||
newDevice.Ebs = &ec2.EbsBlockDevice{SnapshotId: aws.String(snapshotID)}
|
||||
}
|
||||
|
||||
if generatingNewBlockDeviceMappings || rootVolumeSize > *newDevice.Ebs.VolumeSize {
|
||||
newDevice.Ebs.VolumeSize = aws.Int64(rootVolumeSize)
|
||||
}
|
||||
}
|
||||
|
||||
// assume working from a snapshot, so we unset the Encrypted field if set,
|
||||
// otherwise AWS API will return InvalidParameter
|
||||
if newDevice.Ebs != nil && newDevice.Ebs.Encrypted != nil {
|
||||
newDevice.Ebs.Encrypted = nil
|
||||
}
|
||||
|
||||
newMappings[i] = newDevice
|
||||
}
|
||||
|
||||
if config.FromScratch {
|
||||
return &ec2.RegisterImageInput{
|
||||
Name: &amiName,
|
||||
Architecture: aws.String(config.Architecture),
|
||||
RootDeviceName: aws.String(rootDeviceName),
|
||||
VirtualizationType: aws.String(config.AMIVirtType),
|
||||
BlockDeviceMappings: newMappings,
|
||||
}
|
||||
}
|
||||
|
||||
return buildRegisterOptsFromExistingImage(config, sourceImage, newMappings, rootDeviceName, amiName)
|
||||
}
|
||||
|
||||
func buildRegisterOptsFromExistingImage(config *Config, image *ec2.Image, mappings []*ec2.BlockDeviceMapping, rootDeviceName string, amiName string) *ec2.RegisterImageInput {
|
||||
registerOpts := &ec2.RegisterImageInput{
|
||||
Name: &amiName,
|
||||
Architecture: image.Architecture,
|
||||
RootDeviceName: &rootDeviceName,
|
||||
BlockDeviceMappings: mappings,
|
||||
VirtualizationType: image.VirtualizationType,
|
||||
}
|
||||
|
||||
if config.AMIVirtType != "" {
|
||||
registerOpts.VirtualizationType = aws.String(config.AMIVirtType)
|
||||
}
|
||||
|
||||
if config.AMIVirtType != "hvm" {
|
||||
registerOpts.KernelId = image.KernelId
|
||||
registerOpts.RamdiskId = image.RamdiskId
|
||||
}
|
||||
return registerOpts
|
||||
}
|
|
@ -1,216 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
amazon "github.com/hashicorp/packer/builder/amazon/common"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
)
|
||||
|
||||
func testImage() ec2.Image {
|
||||
return ec2.Image{
|
||||
ImageId: aws.String("ami-abcd1234"),
|
||||
Name: aws.String("ami_test_name"),
|
||||
Architecture: aws.String("x86_64"),
|
||||
KernelId: aws.String("aki-abcd1234"),
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepRegisterAmi_buildRegisterOpts_pv(t *testing.T) {
|
||||
config := Config{}
|
||||
config.AMIName = "test_ami_name"
|
||||
config.AMIDescription = "test_ami_description"
|
||||
config.AMIVirtType = "paravirtual"
|
||||
rootDeviceName := "foo"
|
||||
|
||||
image := testImage()
|
||||
|
||||
blockDevices := []*ec2.BlockDeviceMapping{}
|
||||
|
||||
opts := buildRegisterOptsFromExistingImage(&config, &image, blockDevices, rootDeviceName, config.AMIName)
|
||||
|
||||
expected := config.AMIVirtType
|
||||
if *opts.VirtualizationType != expected {
|
||||
t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, *opts.VirtualizationType)
|
||||
}
|
||||
|
||||
expected = config.AMIName
|
||||
if *opts.Name != expected {
|
||||
t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, *opts.Name)
|
||||
}
|
||||
|
||||
expected = *image.KernelId
|
||||
if *opts.KernelId != expected {
|
||||
t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, *opts.KernelId)
|
||||
}
|
||||
|
||||
expected = rootDeviceName
|
||||
if *opts.RootDeviceName != expected {
|
||||
t.Fatalf("Unexpected RootDeviceName value: expected %s got %s\n", expected, *opts.RootDeviceName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepRegisterAmi_buildRegisterOpts_hvm(t *testing.T) {
|
||||
config := Config{}
|
||||
config.AMIName = "test_ami_name"
|
||||
config.AMIDescription = "test_ami_description"
|
||||
config.AMIVirtType = "hvm"
|
||||
rootDeviceName := "foo"
|
||||
|
||||
image := testImage()
|
||||
|
||||
blockDevices := []*ec2.BlockDeviceMapping{}
|
||||
|
||||
opts := buildRegisterOptsFromExistingImage(&config, &image, blockDevices, rootDeviceName, config.AMIName)
|
||||
|
||||
expected := config.AMIVirtType
|
||||
if *opts.VirtualizationType != expected {
|
||||
t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, *opts.VirtualizationType)
|
||||
}
|
||||
|
||||
expected = config.AMIName
|
||||
if *opts.Name != expected {
|
||||
t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, *opts.Name)
|
||||
}
|
||||
|
||||
if opts.KernelId != nil {
|
||||
t.Fatalf("Unexpected KernelId value: expected nil got %s\n", *opts.KernelId)
|
||||
}
|
||||
|
||||
expected = rootDeviceName
|
||||
if *opts.RootDeviceName != expected {
|
||||
t.Fatalf("Unexpected RootDeviceName value: expected %s got %s\n", expected, *opts.RootDeviceName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepRegisterAmi_buildRegisterOptsFromScratch(t *testing.T) {
|
||||
rootDeviceName := "/dev/sda"
|
||||
snapshotID := "foo"
|
||||
config := Config{
|
||||
FromScratch: true,
|
||||
PackerConfig: common.PackerConfig{},
|
||||
AMIMappings: []amazon.BlockDevice{
|
||||
{
|
||||
DeviceName: rootDeviceName,
|
||||
},
|
||||
},
|
||||
RootDeviceName: rootDeviceName,
|
||||
}
|
||||
registerOpts := buildBaseRegisterOpts(&config, nil, 10, snapshotID, config.AMIName)
|
||||
|
||||
if len(registerOpts.BlockDeviceMappings) != 1 {
|
||||
t.Fatal("Expected block device mapping of length 1")
|
||||
}
|
||||
|
||||
if *registerOpts.BlockDeviceMappings[0].Ebs.SnapshotId != snapshotID {
|
||||
t.Fatalf("Snapshot ID of root disk not set to snapshot id %s", rootDeviceName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepRegisterAmi_buildRegisterOptFromExistingImage(t *testing.T) {
|
||||
rootDeviceName := "/dev/sda"
|
||||
snapshotID := "foo"
|
||||
|
||||
config := Config{
|
||||
FromScratch: false,
|
||||
PackerConfig: common.PackerConfig{},
|
||||
}
|
||||
sourceImage := ec2.Image{
|
||||
RootDeviceName: &rootDeviceName,
|
||||
BlockDeviceMappings: []*ec2.BlockDeviceMapping{
|
||||
{
|
||||
DeviceName: &rootDeviceName,
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeSize: aws.Int64(10),
|
||||
},
|
||||
},
|
||||
// Throw in an ephemeral device, it seems like all devices in the return struct in a source AMI have
|
||||
// a size, even if it's for ephemeral
|
||||
{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
VirtualName: aws.String("ephemeral0"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeSize: aws.Int64(0),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
registerOpts := buildBaseRegisterOpts(&config, &sourceImage, 15, snapshotID, config.AMIName)
|
||||
|
||||
if len(registerOpts.BlockDeviceMappings) != 2 {
|
||||
t.Fatal("Expected block device mapping of length 2")
|
||||
}
|
||||
|
||||
for _, dev := range registerOpts.BlockDeviceMappings {
|
||||
if dev.Ebs.SnapshotId != nil && *dev.Ebs.SnapshotId == snapshotID {
|
||||
// Even though root volume size is in config, it isn't used, instead we use the root volume size
|
||||
// that's derived when we build the step
|
||||
if *dev.Ebs.VolumeSize != 15 {
|
||||
t.Fatalf("Root volume size not 15 GB instead %d", *dev.Ebs.VolumeSize)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
t.Fatalf("Could not find device with snapshot ID %s", snapshotID)
|
||||
}
|
||||
|
||||
func TestStepRegisterAmi_buildRegisterOptFromExistingImageWithBlockDeviceMappings(t *testing.T) {
|
||||
const (
|
||||
rootDeviceName = "/dev/xvda"
|
||||
oldRootDevice = "/dev/sda1"
|
||||
)
|
||||
snapshotId := "foo"
|
||||
|
||||
config := Config{
|
||||
FromScratch: false,
|
||||
PackerConfig: common.PackerConfig{},
|
||||
AMIMappings: []amazon.BlockDevice{
|
||||
{
|
||||
DeviceName: rootDeviceName,
|
||||
},
|
||||
},
|
||||
RootDeviceName: rootDeviceName,
|
||||
}
|
||||
|
||||
// Intentionally try to use a different root devicename
|
||||
sourceImage := ec2.Image{
|
||||
RootDeviceName: aws.String(oldRootDevice),
|
||||
BlockDeviceMappings: []*ec2.BlockDeviceMapping{
|
||||
{
|
||||
DeviceName: aws.String(oldRootDevice),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeSize: aws.Int64(10),
|
||||
},
|
||||
},
|
||||
// Throw in an ephemeral device, it seems like all devices in the return struct in a source AMI have
|
||||
// a size, even if it's for ephemeral
|
||||
{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
VirtualName: aws.String("ephemeral0"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeSize: aws.Int64(0),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
registerOpts := buildBaseRegisterOpts(&config, &sourceImage, 15, snapshotId, config.AMIName)
|
||||
|
||||
if len(registerOpts.BlockDeviceMappings) != 1 {
|
||||
t.Fatal("Expected block device mapping of length 1")
|
||||
}
|
||||
|
||||
if *registerOpts.BlockDeviceMappings[0].Ebs.SnapshotId != snapshotId {
|
||||
t.Fatalf("Snapshot ID of root disk set to '%s' expected '%s'", *registerOpts.BlockDeviceMappings[0].Ebs.SnapshotId, rootDeviceName)
|
||||
}
|
||||
|
||||
if *registerOpts.RootDeviceName != rootDeviceName {
|
||||
t.Fatalf("Root device set to '%s' expected %s", *registerOpts.RootDeviceName, rootDeviceName)
|
||||
}
|
||||
|
||||
if *registerOpts.BlockDeviceMappings[0].Ebs.VolumeSize != 15 {
|
||||
t.Fatalf("Size of root disk not set to 15 GB, instead %d", *registerOpts.BlockDeviceMappings[0].Ebs.VolumeSize)
|
||||
}
|
||||
}
|
|
@ -1,82 +0,0 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||
)
|
||||
|
||||
// StepSnapshot creates a snapshot of the created volume.
|
||||
//
|
||||
// Produces:
|
||||
// snapshot_id string - ID of the created snapshot
|
||||
type StepSnapshot struct {
|
||||
PollingConfig *awscommon.AWSPollingConfig
|
||||
snapshotId string
|
||||
}
|
||||
|
||||
func (s *StepSnapshot) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
volumeId := state.Get("volume_id").(string)
|
||||
|
||||
ui.Say("Creating snapshot...")
|
||||
description := fmt.Sprintf("Packer: %s", time.Now().String())
|
||||
|
||||
createSnapResp, err := ec2conn.CreateSnapshot(&ec2.CreateSnapshotInput{
|
||||
VolumeId: &volumeId,
|
||||
Description: &description,
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating snapshot: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Set the snapshot ID so we can delete it later
|
||||
s.snapshotId = *createSnapResp.SnapshotId
|
||||
ui.Message(fmt.Sprintf("Snapshot ID: %s", s.snapshotId))
|
||||
|
||||
// Wait for the snapshot to be ready
|
||||
err = s.PollingConfig.WaitUntilSnapshotDone(ctx, ec2conn, s.snapshotId)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error waiting for snapshot: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
state.Put("snapshot_id", s.snapshotId)
|
||||
|
||||
snapshots := map[string][]string{
|
||||
*ec2conn.Config.Region: {s.snapshotId},
|
||||
}
|
||||
state.Put("snapshots", snapshots)
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepSnapshot) Cleanup(state multistep.StateBag) {
|
||||
if s.snapshotId == "" {
|
||||
return
|
||||
}
|
||||
|
||||
_, cancelled := state.GetOk(multistep.StateCancelled)
|
||||
_, halted := state.GetOk(multistep.StateHalted)
|
||||
|
||||
if cancelled || halted {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
ui.Say("Removing snapshot since we cancelled or halted...")
|
||||
_, err := ec2conn.DeleteSnapshot(&ec2.DeleteSnapshotInput{SnapshotId: &s.snapshotId})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("Error: %s", err))
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,433 +0,0 @@
|
|||
//go:generate struct-markdown
|
||||
//go:generate mapstructure-to-hcl2 -type VaultAWSEngineOptions,AssumeRoleConfig
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
awsCredentials "github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
|
||||
awsbase "github.com/hashicorp/aws-sdk-go-base"
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/packer/builder/amazon/common/awserrors"
|
||||
vaultapi "github.com/hashicorp/vault/api"
|
||||
)
|
||||
|
||||
// AssumeRoleConfig lets users set configuration options for assuming a special
|
||||
// role when executing Packer.
|
||||
//
|
||||
// Usage example:
|
||||
//
|
||||
// HCL config example:
|
||||
//
|
||||
// ```HCL
|
||||
// source "example" "amazon-ebs"{
|
||||
// assume_role {
|
||||
// role_arn = "arn:aws:iam::ACCOUNT_ID:role/ROLE_NAME"
|
||||
// session_name = "SESSION_NAME"
|
||||
// external_id = "EXTERNAL_ID"
|
||||
// }
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// JSON config example:
|
||||
//
|
||||
// ```json
|
||||
// builder{
|
||||
// "type": "amazon-ebs",
|
||||
// "assume_role": {
|
||||
// "role_arn" : "arn:aws:iam::ACCOUNT_ID:role/ROLE_NAME",
|
||||
// "session_name": "SESSION_NAME",
|
||||
// "external_id" : "EXTERNAL_ID"
|
||||
// }
|
||||
// }
|
||||
// ```
|
||||
type AssumeRoleConfig struct {
|
||||
// Amazon Resource Name (ARN) of the IAM Role to assume.
|
||||
AssumeRoleARN string `mapstructure:"role_arn" required:"false"`
|
||||
// Number of seconds to restrict the assume role session duration.
|
||||
AssumeRoleDurationSeconds int `mapstructure:"duration_seconds" required:"false"`
|
||||
// The external ID to use when assuming the role. If omitted, no external
|
||||
// ID is passed to the AssumeRole call.
|
||||
AssumeRoleExternalID string `mapstructure:"external_id" required:"false"`
|
||||
// IAM Policy JSON describing further restricting permissions for the IAM
|
||||
// Role being assumed.
|
||||
AssumeRolePolicy string `mapstructure:"policy" required:"false"`
|
||||
// Set of Amazon Resource Names (ARNs) of IAM Policies describing further
|
||||
// restricting permissions for the IAM Role being
|
||||
AssumeRolePolicyARNs []string `mapstructure:"policy_arns" required:"false"`
|
||||
// Session name to use when assuming the role.
|
||||
AssumeRoleSessionName string `mapstructure:"session_name" required:"false"`
|
||||
// Map of assume role session tags.
|
||||
AssumeRoleTags map[string]string `mapstructure:"tags" required:"false"`
|
||||
// Set of assume role session tag keys to pass to any subsequent sessions.
|
||||
AssumeRoleTransitiveTagKeys []string `mapstructure:"transitive_tag_keys" required:"false"`
|
||||
}
|
||||
|
||||
type VaultAWSEngineOptions struct {
|
||||
Name string `mapstructure:"name"`
|
||||
RoleARN string `mapstructure:"role_arn"`
|
||||
// Specifies the TTL for the use of the STS token. This
|
||||
// is specified as a string with a duration suffix. Valid only when
|
||||
// credential_type is assumed_role or federation_token. When not
|
||||
// specified, the default_sts_ttl set for the role will be used. If that
|
||||
// is also not set, then the default value of 3600s will be used. AWS
|
||||
// places limits on the maximum TTL allowed. See the AWS documentation on
|
||||
// the DurationSeconds parameter for AssumeRole (for assumed_role
|
||||
// credential types) and GetFederationToken (for federation_token
|
||||
// credential types) for more details.
|
||||
TTL string `mapstructure:"ttl" required:"false"`
|
||||
EngineName string `mapstructure:"engine_name"`
|
||||
}
|
||||
|
||||
func (v *VaultAWSEngineOptions) Empty() bool {
|
||||
return len(v.Name) == 0 && len(v.RoleARN) == 0 &&
|
||||
len(v.EngineName) == 0 && len(v.TTL) == 0
|
||||
}
|
||||
|
||||
// AccessConfig is for common configuration related to AWS access
|
||||
type AccessConfig struct {
|
||||
// The access key used to communicate with AWS. [Learn how to set this]
|
||||
// (/docs/builders/amazon#specifying-amazon-credentials). On EBS, this
|
||||
// is not required if you are using `use_vault_aws_engine` for
|
||||
// authentication instead.
|
||||
AccessKey string `mapstructure:"access_key" required:"true"`
|
||||
// If provided with a role ARN, Packer will attempt to assume this role
|
||||
// using the supplied credentials. See
|
||||
// [AssumeRoleConfig](#assume-role-configuration) below for more
|
||||
// details on all of the options available, and for a usage example.
|
||||
AssumeRole AssumeRoleConfig `mapstructure:"assume_role" required:"false"`
|
||||
// This option is useful if you use a cloud
|
||||
// provider whose API is compatible with aws EC2. Specify another endpoint
|
||||
// like this https://ec2.custom.endpoint.com.
|
||||
CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2" required:"false"`
|
||||
// Path to a credentials file to load credentials from
|
||||
CredsFilename string `mapstructure:"shared_credentials_file" required:"false"`
|
||||
// Enable automatic decoding of any encoded authorization (error) messages
|
||||
// using the `sts:DecodeAuthorizationMessage` API. Note: requires that the
|
||||
// effective user/role have permissions to `sts:DecodeAuthorizationMessage`
|
||||
// on resource `*`. Default `false`.
|
||||
DecodeAuthZMessages bool `mapstructure:"decode_authorization_messages" required:"false"`
|
||||
// This allows skipping TLS
|
||||
// verification of the AWS EC2 endpoint. The default is false.
|
||||
InsecureSkipTLSVerify bool `mapstructure:"insecure_skip_tls_verify" required:"false"`
|
||||
// This is the maximum number of times an API call is retried, in the case
|
||||
// where requests are being throttled or experiencing transient failures.
|
||||
// The delay between the subsequent API calls increases exponentially.
|
||||
MaxRetries int `mapstructure:"max_retries" required:"false"`
|
||||
// The MFA
|
||||
// [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
|
||||
// code. This should probably be a user variable since it changes all the
|
||||
// time.
|
||||
MFACode string `mapstructure:"mfa_code" required:"false"`
|
||||
// The profile to use in the shared credentials file for
|
||||
// AWS. See Amazon's documentation on [specifying
|
||||
// profiles](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-profiles)
|
||||
// for more details.
|
||||
ProfileName string `mapstructure:"profile" required:"false"`
|
||||
// The name of the region, such as `us-east-1`, in which
|
||||
// to launch the EC2 instance to create the AMI.
|
||||
// When chroot building, this value is guessed from environment.
|
||||
RawRegion string `mapstructure:"region" required:"true"`
|
||||
// The secret key used to communicate with AWS. [Learn how to set
|
||||
// this](/docs/builders/amazon#specifying-amazon-credentials). This is not required
|
||||
// if you are using `use_vault_aws_engine` for authentication instead.
|
||||
SecretKey string `mapstructure:"secret_key" required:"true"`
|
||||
SkipMetadataApiCheck bool `mapstructure:"skip_metadata_api_check"`
|
||||
// Set to true if you want to skip validating AWS credentials before runtime.
|
||||
SkipCredsValidation bool `mapstructure:"skip_credential_validation"`
|
||||
// The access token to use. This is different from the
|
||||
// access key and secret key. If you're not sure what this is, then you
|
||||
// probably don't need it. This will also be read from the AWS_SESSION_TOKEN
|
||||
// environmental variable.
|
||||
Token string `mapstructure:"token" required:"false"`
|
||||
session *session.Session
|
||||
// Get credentials from Hashicorp Vault's aws secrets engine. You must
|
||||
// already have created a role to use. For more information about
|
||||
// generating credentials via the Vault engine, see the [Vault
|
||||
// docs.](https://www.vaultproject.io/api/secret/aws#generate-credentials)
|
||||
// If you set this flag, you must also set the below options:
|
||||
// - `name` (string) - Required. Specifies the name of the role to generate
|
||||
// credentials against. This is part of the request URL.
|
||||
// - `engine_name` (string) - The name of the aws secrets engine. In the
|
||||
// Vault docs, this is normally referred to as "aws", and Packer will
|
||||
// default to "aws" if `engine_name` is not set.
|
||||
// - `role_arn` (string)- The ARN of the role to assume if credential\_type
|
||||
// on the Vault role is assumed\_role. Must match one of the allowed role
|
||||
// ARNs in the Vault role. Optional if the Vault role only allows a single
|
||||
// AWS role ARN; required otherwise.
|
||||
// - `ttl` (string) - Specifies the TTL for the use of the STS token. This
|
||||
// is specified as a string with a duration suffix. Valid only when
|
||||
// credential\_type is assumed\_role or federation\_token. When not
|
||||
// specified, the default\_sts\_ttl set for the role will be used. If that
|
||||
// is also not set, then the default value of 3600s will be used. AWS
|
||||
// places limits on the maximum TTL allowed. See the AWS documentation on
|
||||
// the DurationSeconds parameter for AssumeRole (for assumed\_role
|
||||
// credential types) and GetFederationToken (for federation\_token
|
||||
// credential types) for more details.
|
||||
//
|
||||
// JSON example:
|
||||
//
|
||||
// ```json
|
||||
// {
|
||||
// "vault_aws_engine": {
|
||||
// "name": "myrole",
|
||||
// "role_arn": "myarn",
|
||||
// "ttl": "3600s"
|
||||
// }
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// HCL2 example:
|
||||
//
|
||||
// ```hcl
|
||||
// vault_aws_engine {
|
||||
// name = "myrole"
|
||||
// role_arn = "myarn"
|
||||
// ttl = "3600s"
|
||||
// }
|
||||
// ```
|
||||
VaultAWSEngine VaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false"`
|
||||
// [Polling configuration](#polling-configuration) for the AWS waiter. Configures the waiter that checks
|
||||
// resource state.
|
||||
PollingConfig *AWSPollingConfig `mapstructure:"aws_polling" required:"false"`
|
||||
|
||||
getEC2Connection func() ec2iface.EC2API
|
||||
}
|
||||
|
||||
// Config returns a valid aws.Config object for access to AWS services, or
|
||||
// an error if the authentication and region couldn't be resolved
|
||||
func (c *AccessConfig) Session() (*session.Session, error) {
|
||||
if c.session != nil {
|
||||
return c.session, nil
|
||||
}
|
||||
|
||||
// Create new AWS config
|
||||
config := aws.NewConfig().WithCredentialsChainVerboseErrors(true)
|
||||
if c.MaxRetries > 0 {
|
||||
config = config.WithMaxRetries(c.MaxRetries)
|
||||
}
|
||||
|
||||
// Set AWS config defaults.
|
||||
if c.RawRegion != "" {
|
||||
config = config.WithRegion(c.RawRegion)
|
||||
}
|
||||
|
||||
if c.CustomEndpointEc2 != "" {
|
||||
config = config.WithEndpoint(c.CustomEndpointEc2)
|
||||
}
|
||||
|
||||
config = config.WithHTTPClient(cleanhttp.DefaultClient())
|
||||
transport := config.HTTPClient.Transport.(*http.Transport)
|
||||
if c.InsecureSkipTLSVerify {
|
||||
transport.TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
}
|
||||
transport.Proxy = http.ProxyFromEnvironment
|
||||
|
||||
// Figure out which possible credential providers are valid; test that we
|
||||
// can get credentials via the selected providers, and set the providers in
|
||||
// the config.
|
||||
creds, err := c.GetCredentials(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.WithCredentials(creds)
|
||||
|
||||
// Create session options based on our AWS config
|
||||
opts := session.Options{
|
||||
SharedConfigState: session.SharedConfigEnable,
|
||||
Config: *config,
|
||||
}
|
||||
|
||||
if c.ProfileName != "" {
|
||||
opts.Profile = c.ProfileName
|
||||
}
|
||||
|
||||
if c.MFACode != "" {
|
||||
opts.AssumeRoleTokenProvider = func() (string, error) {
|
||||
return c.MFACode, nil
|
||||
}
|
||||
}
|
||||
|
||||
sess, err := session.NewSessionWithOptions(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Printf("Found region %s", *sess.Config.Region)
|
||||
c.session = sess
|
||||
|
||||
cp, err := c.session.Config.Credentials.Get()
|
||||
|
||||
if awserrors.Matches(err, "NoCredentialProviders", "") {
|
||||
return nil, c.NewNoValidCredentialSourcesError(err)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
|
||||
}
|
||||
|
||||
log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
|
||||
|
||||
if c.DecodeAuthZMessages {
|
||||
DecodeAuthZMessages(c.session)
|
||||
}
|
||||
|
||||
return c.session, nil
|
||||
}
|
||||
|
||||
func (c *AccessConfig) SessionRegion() string {
|
||||
if c.session == nil {
|
||||
panic("access config session should be set.")
|
||||
}
|
||||
return aws.StringValue(c.session.Config.Region)
|
||||
}
|
||||
|
||||
func (c *AccessConfig) IsGovCloud() bool {
|
||||
return strings.HasPrefix(c.SessionRegion(), "us-gov-")
|
||||
}
|
||||
|
||||
func (c *AccessConfig) IsChinaCloud() bool {
|
||||
return strings.HasPrefix(c.SessionRegion(), "cn-")
|
||||
}

// GetCredentials gets credentials from the environment, shared credentials,
// the session (which may include a credential process), or ECS/EC2 metadata
// endpoints. GetCredentials also validates the credentials and the ability to
// assume a role or will return an error if unsuccessful.
func (c *AccessConfig) GetCredentials(config *aws.Config) (*awsCredentials.Credentials, error) {
	// Reload values into the config used by the Packer-Terraform shared SDK
	awsbaseConfig := &awsbase.Config{
		AccessKey:                   c.AccessKey,
		AssumeRoleARN:               c.AssumeRole.AssumeRoleARN,
		AssumeRoleDurationSeconds:   c.AssumeRole.AssumeRoleDurationSeconds,
		AssumeRoleExternalID:        c.AssumeRole.AssumeRoleExternalID,
		AssumeRolePolicy:            c.AssumeRole.AssumeRolePolicy,
		AssumeRolePolicyARNs:        c.AssumeRole.AssumeRolePolicyARNs,
		AssumeRoleSessionName:       c.AssumeRole.AssumeRoleSessionName,
		AssumeRoleTags:              c.AssumeRole.AssumeRoleTags,
		AssumeRoleTransitiveTagKeys: c.AssumeRole.AssumeRoleTransitiveTagKeys,
		CredsFilename:               c.CredsFilename,
		DebugLogging:                false,
		// TODO: implement for Packer
		// IamEndpoint: c.Endpoints["iam"],
		Insecure:             c.InsecureSkipTLSVerify,
		MaxRetries:           c.MaxRetries,
		Profile:              c.ProfileName,
		Region:               c.RawRegion,
		SecretKey:            c.SecretKey,
		SkipCredsValidation:  c.SkipCredsValidation,
		SkipMetadataApiCheck: c.SkipMetadataApiCheck,
		// TODO: implement for Packer
		// SkipRequestingAccountId: c.SkipRequestingAccountId,
		// StsEndpoint:             c.Endpoints["sts"],
		Token: c.Token,
	}

	return awsbase.GetCredentials(awsbaseConfig)
}
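The real lookup above is delegated to the shared awsbase library. As a cut-down sketch of the same environment-then-shared-file ordering using only the plain AWS SDK credential chain (not the actual awsbase behavior):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Try environment variables first, then the shared credentials file,
	// mirroring the front of the lookup order described above.
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvProvider{},
		&credentials.SharedCredentialsProvider{}, // default file and profile
	})

	val, err := creds.Get()
	if err != nil {
		log.Fatalf("no credentials found: %s", err)
	}
	fmt.Println("provider used:", val.ProviderName)
}
```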

func (c *AccessConfig) GetCredsFromVault() error {
	// const EnvVaultAddress = "VAULT_ADDR"
	// const EnvVaultToken = "VAULT_TOKEN"
	vaultConfig := vaultapi.DefaultConfig()
	cli, err := vaultapi.NewClient(vaultConfig)
	if err != nil {
		return fmt.Errorf("Error getting Vault client: %s", err)
	}
	if c.VaultAWSEngine.EngineName == "" {
		c.VaultAWSEngine.EngineName = "aws"
	}
	path := fmt.Sprintf("/%s/creds/%s", c.VaultAWSEngine.EngineName,
		c.VaultAWSEngine.Name)
	secret, err := cli.Logical().Read(path)
	if err != nil {
		return fmt.Errorf("Error reading vault secret: %s", err)
	}
	if secret == nil {
		return fmt.Errorf("Vault Secret does not exist at the given path.")
	}

	c.AccessKey = secret.Data["access_key"].(string)
	c.SecretKey = secret.Data["secret_key"].(string)
	token := secret.Data["security_token"]
	if token != nil {
		c.Token = token.(string)
	} else {
		c.Token = ""
	}

	return nil
}
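A minimal standalone sketch of the same Vault read, turned directly into static AWS credentials. The engine mount "aws" and role "my-role" are placeholder names, and VAULT_ADDR/VAULT_TOKEN are assumed to be set in the environment, just as the helper above assumes:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/credentials"
	vaultapi "github.com/hashicorp/vault/api"
)

func main() {
	// The client picks up VAULT_ADDR and VAULT_TOKEN from the environment.
	cli, err := vaultapi.NewClient(vaultapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// "aws" and "my-role" are placeholder engine/role names.
	secret, err := cli.Logical().Read("/aws/creds/my-role")
	if err != nil || secret == nil {
		log.Fatalf("could not read Vault secret: %v", err)
	}

	creds := credentials.NewStaticCredentials(
		secret.Data["access_key"].(string),
		secret.Data["secret_key"].(string),
		"", // security_token may be absent for some engine types
	)
	fmt.Println("static credentials ready, expired:", creds.IsExpired())
}
```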

func (c *AccessConfig) Prepare() []error {
	var errs []error

	if c.SkipMetadataApiCheck {
		log.Println("(WARN) skip_metadata_api_check ignored.")
	}

	// Make sure it's obvious from the config how we're getting credentials:
	// Vault, Packer config, or environment.
	if !c.VaultAWSEngine.Empty() {
		if len(c.AccessKey) > 0 {
			errs = append(errs,
				fmt.Errorf("If you have set vault_aws_engine, you must not set"+
					" the access_key or secret_key."))
		}
		// Go ahead and grab those credentials from Vault now, so we can set
		// the keys and token now.
		err := c.GetCredsFromVault()
		if err != nil {
			errs = append(errs, err)
		}
	}

	if (len(c.AccessKey) > 0) != (len(c.SecretKey) > 0) {
		errs = append(errs,
			fmt.Errorf("`access_key` and `secret_key` must both be either set or not set."))
	}

	if c.PollingConfig == nil {
		c.PollingConfig = new(AWSPollingConfig)
	}
	c.PollingConfig.LogEnvOverrideWarnings()

	// Default MaxRetries to 10, to make throttling issues less likely. The
	// AWS SDK defaults this to 3, which regularly gets tripped by users.
	if c.MaxRetries == 0 {
		c.MaxRetries = 10
	}

	return errs
}
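A quick in-package sketch (not one of the original tests) of the error-slice convention Prepare uses: setting access_key without secret_key should surface exactly the mismatch error added above. The test name and key value are hypothetical.

```go
package common

import "testing"

// A minimal sketch: access_key without secret_key should produce the
// "must both be either set or not set" error from Prepare.
func TestAccessConfigPrepare_KeyMismatchSketch(t *testing.T) {
	c := &AccessConfig{
		AccessKey:     "AKIA-EXAMPLE", // placeholder value
		PollingConfig: new(AWSPollingConfig),
	}
	if errs := c.Prepare(); len(errs) == 0 {
		t.Fatal("expected an error when only access_key is set")
	}
}
```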

func (c *AccessConfig) NewNoValidCredentialSourcesError(err error) error {
	return fmt.Errorf("No valid credential sources found for AWS Builder. "+
		"Please see https://www.packer.io/docs/builders/amazon#authentication "+
		"for more information on providing credentials for the AWS Builder. "+
		"Error: %w", err)
}

func (c *AccessConfig) NewEC2Connection() (ec2iface.EC2API, error) {
	if c.getEC2Connection != nil {
		return c.getEC2Connection(), nil
	}
	sess, err := c.Session()
	if err != nil {
		return nil, err
	}

	ec2conn := ec2.New(sess)

	return ec2conn, nil
}
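The ec2iface.EC2API value returned above is a drop-in for the real client. A standalone sketch of the same construction, assuming valid ambient AWS credentials and using the SDK directly rather than this config type:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
	if err != nil {
		log.Fatal(err)
	}
	ec2conn := ec2.New(sess)

	// List the regions visible to these credentials.
	out, err := ec2conn.DescribeRegions(&ec2.DescribeRegionsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range out.Regions {
		fmt.Println(aws.StringValue(r.RegionName))
	}
}
```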
|
|
@ -1,74 +0,0 @@
|
|||
// Code generated by "mapstructure-to-hcl2 -type VaultAWSEngineOptions,AssumeRoleConfig"; DO NOT EDIT.
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// FlatAssumeRoleConfig is an auto-generated flat version of AssumeRoleConfig.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatAssumeRoleConfig struct {
|
||||
AssumeRoleARN *string `mapstructure:"role_arn" required:"false" cty:"role_arn" hcl:"role_arn"`
|
||||
AssumeRoleDurationSeconds *int `mapstructure:"duration_seconds" required:"false" cty:"duration_seconds" hcl:"duration_seconds"`
|
||||
AssumeRoleExternalID *string `mapstructure:"external_id" required:"false" cty:"external_id" hcl:"external_id"`
|
||||
AssumeRolePolicy *string `mapstructure:"policy" required:"false" cty:"policy" hcl:"policy"`
|
||||
AssumeRolePolicyARNs []string `mapstructure:"policy_arns" required:"false" cty:"policy_arns" hcl:"policy_arns"`
|
||||
AssumeRoleSessionName *string `mapstructure:"session_name" required:"false" cty:"session_name" hcl:"session_name"`
|
||||
AssumeRoleTags map[string]string `mapstructure:"tags" required:"false" cty:"tags" hcl:"tags"`
|
||||
AssumeRoleTransitiveTagKeys []string `mapstructure:"transitive_tag_keys" required:"false" cty:"transitive_tag_keys" hcl:"transitive_tag_keys"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatAssumeRoleConfig.
|
||||
// FlatAssumeRoleConfig is an auto-generated flat version of AssumeRoleConfig.
|
||||
// Where the contents of fields with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*AssumeRoleConfig) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatAssumeRoleConfig)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a AssumeRoleConfig.
|
||||
// This spec is used by HCL to read the fields of AssumeRoleConfig.
|
||||
// The decoded values from this spec will then be applied to a FlatAssumeRoleConfig.
|
||||
func (*FlatAssumeRoleConfig) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"role_arn": &hcldec.AttrSpec{Name: "role_arn", Type: cty.String, Required: false},
|
||||
"duration_seconds": &hcldec.AttrSpec{Name: "duration_seconds", Type: cty.Number, Required: false},
|
||||
"external_id": &hcldec.AttrSpec{Name: "external_id", Type: cty.String, Required: false},
|
||||
"policy": &hcldec.AttrSpec{Name: "policy", Type: cty.String, Required: false},
|
||||
"policy_arns": &hcldec.AttrSpec{Name: "policy_arns", Type: cty.List(cty.String), Required: false},
|
||||
"session_name": &hcldec.AttrSpec{Name: "session_name", Type: cty.String, Required: false},
|
||||
"tags": &hcldec.AttrSpec{Name: "tags", Type: cty.Map(cty.String), Required: false},
|
||||
"transitive_tag_keys": &hcldec.AttrSpec{Name: "transitive_tag_keys", Type: cty.List(cty.String), Required: false},
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// FlatVaultAWSEngineOptions is an auto-generated flat version of VaultAWSEngineOptions.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatVaultAWSEngineOptions struct {
|
||||
Name *string `mapstructure:"name" cty:"name" hcl:"name"`
|
||||
RoleARN *string `mapstructure:"role_arn" cty:"role_arn" hcl:"role_arn"`
|
||||
TTL *string `mapstructure:"ttl" required:"false" cty:"ttl" hcl:"ttl"`
|
||||
EngineName *string `mapstructure:"engine_name" cty:"engine_name" hcl:"engine_name"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatVaultAWSEngineOptions.
|
||||
// FlatVaultAWSEngineOptions is an auto-generated flat version of VaultAWSEngineOptions.
|
||||
// Where the contents of fields with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*VaultAWSEngineOptions) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatVaultAWSEngineOptions)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a VaultAWSEngineOptions.
|
||||
// This spec is used by HCL to read the fields of VaultAWSEngineOptions.
|
||||
// The decoded values from this spec will then be applied to a FlatVaultAWSEngineOptions.
|
||||
func (*FlatVaultAWSEngineOptions) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: false},
|
||||
"role_arn": &hcldec.AttrSpec{Name: "role_arn", Type: cty.String, Required: false},
|
||||
"ttl": &hcldec.AttrSpec{Name: "ttl", Type: cty.String, Required: false},
|
||||
"engine_name": &hcldec.AttrSpec{Name: "engine_name", Type: cty.String, Required: false},
|
||||
}
|
||||
return s
|
||||
}
|
|
@ -1,57 +0,0 @@
|
package common

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)

func testAccessConfig() *AccessConfig {
	return &AccessConfig{
		getEC2Connection: func() ec2iface.EC2API {
			return &mockEC2Client{}
		},
		PollingConfig: new(AWSPollingConfig),
	}
}

func TestAccessConfigPrepare_Region(t *testing.T) {
	c := testAccessConfig()

	c.RawRegion = "us-east-12"
	err := c.ValidateRegion(c.RawRegion)
	if err == nil {
		t.Fatalf("should have region validation err: %s", c.RawRegion)
	}

	c.RawRegion = "us-east-1"
	err = c.ValidateRegion(c.RawRegion)
	if err != nil {
		t.Fatalf("shouldn't have region validation err: %s", c.RawRegion)
	}

	c.RawRegion = "custom"
	err = c.ValidateRegion(c.RawRegion)
	if err == nil {
		t.Fatalf("should have region validation err: %s", c.RawRegion)
	}
}

func TestAccessConfigPrepare_RegionRestricted(t *testing.T) {
	c := testAccessConfig()

	// Create a Session with a custom region
	c.session = session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-gov-west-1"),
	}))

	if err := c.Prepare(); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}

	if !c.IsGovCloud() {
		t.Fatal("We should be in gov region.")
	}
}
|
|
@ -1,314 +0,0 @@
|
|||
//go:generate struct-markdown
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
// AMIConfig is for common configuration related to creating AMIs.
|
||||
type AMIConfig struct {
|
||||
// The name of the resulting AMI that will appear when managing AMIs in the
|
||||
// AWS console or via APIs. This must be unique. To help make this unique,
|
||||
// use a function like timestamp (see [template
|
||||
// engine](/docs/templates/legacy_json_templates/engine) for more info).
|
||||
AMIName string `mapstructure:"ami_name" required:"true"`
|
||||
// The description to set for the resulting
|
||||
// AMI(s). By default this description is empty. This is a
|
||||
// [template engine](/docs/templates/legacy_json_templates/engine), see [Build template
|
||||
// data](#build-template-data) for more information.
|
||||
AMIDescription string `mapstructure:"ami_description" required:"false"`
|
||||
// The type of virtualization for the AMI
|
||||
// you are building. This option is required to register HVM images. Can be
|
||||
// paravirtual (default) or hvm.
|
||||
AMIVirtType string `mapstructure:"ami_virtualization_type" required:"false"`
|
||||
// A list of account IDs that have access to
|
||||
// launch the resulting AMI(s). By default no additional users other than the
|
||||
// user creating the AMI has permissions to launch it.
|
||||
AMIUsers []string `mapstructure:"ami_users" required:"false"`
|
||||
// A list of groups that have access to
|
||||
// launch the resulting AMI(s). By default no groups have permission to launch
|
||||
// the AMI. all will make the AMI publicly accessible.
|
||||
AMIGroups []string `mapstructure:"ami_groups" required:"false"`
|
||||
// A list of product codes to
|
||||
// associate with the AMI. By default no product codes are associated with the
|
||||
// AMI.
|
||||
AMIProductCodes []string `mapstructure:"ami_product_codes" required:"false"`
|
||||
// A list of regions to copy the AMI to.
|
||||
// Tags and attributes are copied along with the AMI. AMI copying takes time
|
||||
// depending on the size of the AMI, but will generally take many minutes.
|
||||
AMIRegions []string `mapstructure:"ami_regions" required:"false"`
|
||||
// Set to true if you want to skip
|
||||
// validation of the ami_regions configuration option. Default false.
|
||||
AMISkipRegionValidation bool `mapstructure:"skip_region_validation" required:"false"`
|
||||
// Key/value pair tags applied to the AMI. This is a [template
|
||||
// engine](/docs/templates/legacy_json_templates/engine), see [Build template
|
||||
// data](#build-template-data) for more information.
|
||||
AMITags map[string]string `mapstructure:"tags" required:"false"`
|
||||
// Same as [`tags`](#tags) but defined as a singular repeatable block
|
||||
// containing a `key` and a `value` field. In HCL2 mode the
|
||||
// [`dynamic_block`](/docs/templates/hcl_templates/expressions#dynamic-blocks)
|
||||
// will allow you to create those programmatically.
|
||||
AMITag config.KeyValues `mapstructure:"tag" required:"false"`
|
||||
// Enable enhanced networking (ENA but not SriovNetSupport) on
|
||||
// HVM-compatible AMIs. If set, add `ec2:ModifyInstanceAttribute` to your
|
||||
// AWS IAM policy.
|
||||
//
|
||||
// Note: you must make sure enhanced networking is enabled on your
|
||||
// instance. See [Amazon's documentation on enabling enhanced
|
||||
// networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
|
||||
AMIENASupport config.Trilean `mapstructure:"ena_support" required:"false"`
|
||||
// Enable enhanced networking (SriovNetSupport but not ENA) on
|
||||
// HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your
|
||||
// AWS IAM policy. Note: you must make sure enhanced networking is enabled
|
||||
// on your instance. See [Amazon's documentation on enabling enhanced
|
||||
// networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking).
|
||||
// Default `false`.
|
||||
AMISriovNetSupport bool `mapstructure:"sriov_support" required:"false"`
|
||||
// Force Packer to first deregister an existing
|
||||
// AMI if one with the same name already exists. Default false.
|
||||
AMIForceDeregister bool `mapstructure:"force_deregister" required:"false"`
|
||||
// Force Packer to delete snapshots
|
||||
// associated with AMIs, which have been deregistered by force_deregister.
|
||||
// Default false.
|
||||
AMIForceDeleteSnapshot bool `mapstructure:"force_delete_snapshot" required:"false"`
|
||||
// Whether or not to encrypt the resulting AMI when
|
||||
// copying a provisioned instance to an AMI. By default, Packer will keep
|
||||
// the encryption setting to what it was in the source image. Setting false
|
||||
// will result in an unencrypted image, and true will result in an encrypted
|
||||
// one.
|
||||
//
|
||||
// If you have used the `launch_block_device_mappings` to set an encryption
|
||||
// key and that key is the same as the one you want the image encrypted with
|
||||
// at the end, then you don't need to set this field; leaving it empty will
|
||||
// prevent an unnecessary extra copy step and save you some time.
|
||||
//
|
||||
// Please note that if you are using an account with the global "Always
|
||||
// encrypt new EBS volumes" option set to `true`, Packer will be unable to
|
||||
// override this setting, and the final image will be encrypted whether
|
||||
// you set this value or not.
|
||||
AMIEncryptBootVolume config.Trilean `mapstructure:"encrypt_boot" required:"false"`
|
||||
// ID, alias or ARN of the KMS key to use for AMI encryption. This
|
||||
// only applies to the main `region` -- any regions the AMI gets copied to
// will be encrypted by the default EBS KMS key for that region,
|
||||
// unless you set region-specific keys in AMIRegionKMSKeyIDs.
|
||||
//
|
||||
// Set this value if you select `encrypt_boot`, but don't want to use the
|
||||
// region's default KMS key.
|
||||
//
|
||||
// If you have a custom kms key you'd like to apply to the launch volume,
|
||||
// and are only building in one region, it is more efficient to leave this
|
||||
// and `encrypt_boot` empty and to instead set the key id in the
|
||||
// launch_block_device_mappings (you can find an example below). This saves
|
||||
// potentially many minutes at the end of the build by preventing Packer
|
||||
// from having to copy and re-encrypt the image at the end of the build.
|
||||
//
|
||||
// For valid formats see *KmsKeyId* in the [AWS API docs -
|
||||
// CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
|
||||
// This field is validated by Packer; when using an alias, you will have to
// prefix `kms_key_id` with `alias/`.
|
||||
AMIKmsKeyId string `mapstructure:"kms_key_id" required:"false"`
|
||||
// regions to copy the ami to, along with the custom kms key id (alias or
|
||||
// arn) to use for encryption for that region. Keys must match the regions
|
||||
// provided in `ami_regions`. If you just want to encrypt using a default
|
||||
// ID, you can stick with `kms_key_id` and `ami_regions`. If you want a
|
||||
// region to be encrypted with that region's default key ID, you can use an
|
||||
// empty string `""` instead of a key id in this map. (e.g. `"us-east-1":
|
||||
// ""`) However, you cannot use default key IDs if you are using this in
|
||||
// conjunction with `snapshot_users` -- in that situation you must use
|
||||
// custom keys. For valid formats see *KmsKeyId* in the [AWS API docs -
|
||||
// CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html).
|
||||
//
|
||||
// This option supersedes the `kms_key_id` option -- if you set both, and
|
||||
// they are different, Packer will respect the value in
|
||||
// `region_kms_key_ids` for your build region and silently disregard the
|
||||
// value provided in `kms_key_id`.
|
||||
AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids" required:"false"`
|
||||
// If true, Packer will not check whether an AMI with the `ami_name` exists
|
||||
// in the region it is building in. It will use an intermediary AMI name,
|
||||
// which it will not convert to an AMI in the build region. It will copy
|
||||
// the intermediary AMI into any regions provided in `ami_regions`, then
|
||||
// delete the intermediary AMI. Default `false`.
|
||||
AMISkipBuildRegion bool `mapstructure:"skip_save_build_region"`
|
||||
// Key/value pair tags to apply to snapshot. They will override AMI tags if
|
||||
// already applied to snapshot. This is a [template
|
||||
// engine](/docs/templates/legacy_json_templates/engine), see [Build template
|
||||
// data](#build-template-data) for more information.
|
||||
SnapshotTags map[string]string `mapstructure:"snapshot_tags" required:"false"`
|
||||
// Same as [`snapshot_tags`](#snapshot_tags) but defined as a singular
|
||||
// repeatable block containing a `key` and a `value` field. In HCL2 mode the
|
||||
// [`dynamic_block`](/docs/templates/hcl_templates/expressions#dynamic-blocks)
|
||||
// will allow you to create those programmatically.
|
||||
SnapshotTag config.KeyValues `mapstructure:"snapshot_tag" required:"false"`
|
||||
// A list of account IDs that have
|
||||
// access to create volumes from the snapshot(s). By default no additional
|
||||
// users other than the user creating the AMI has permissions to create
|
||||
// volumes from the backing snapshot(s).
|
||||
SnapshotUsers []string `mapstructure:"snapshot_users" required:"false"`
|
||||
// A list of groups that have access to
|
||||
// create volumes from the snapshot(s). By default no groups have permission
|
||||
// to create volumes from the snapshot(s). all will make the snapshot
|
||||
// publicly accessible.
|
||||
SnapshotGroups []string `mapstructure:"snapshot_groups" required:"false"`
|
||||
}
|
||||
|
||||
func stringInSlice(s []string, searchstr string) bool {
|
||||
for _, item := range s {
|
||||
if item == searchstr {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *AMIConfig) Prepare(accessConfig *AccessConfig, ctx *interpolate.Context) []error {
|
||||
var errs []error
|
||||
|
||||
errs = append(errs, c.SnapshotTag.CopyOn(&c.SnapshotTags)...)
|
||||
errs = append(errs, c.AMITag.CopyOn(&c.AMITags)...)
|
||||
|
||||
if c.AMIName == "" {
|
||||
errs = append(errs, fmt.Errorf("ami_name must be specified"))
|
||||
}
|
||||
|
||||
// Make sure that if we have region_kms_key_ids defined,
|
||||
// the regions in region_kms_key_ids are also in ami_regions
|
||||
if len(c.AMIRegionKMSKeyIDs) > 0 {
|
||||
for kmsKeyRegion := range c.AMIRegionKMSKeyIDs {
|
||||
if !stringInSlice(c.AMIRegions, kmsKeyRegion) {
|
||||
errs = append(errs, fmt.Errorf("Region %s is in region_kms_key_ids but not in ami_regions", kmsKeyRegion))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
errs = append(errs, c.prepareRegions(accessConfig)...)
|
||||
|
||||
// Prevent sharing of default KMS key encrypted volumes with other aws users
|
||||
if len(c.AMIUsers) > 0 {
|
||||
if len(c.AMIKmsKeyId) == 0 && c.AMIEncryptBootVolume.True() {
|
||||
errs = append(errs, fmt.Errorf("Cannot share AMI encrypted with default KMS key"))
|
||||
}
|
||||
if len(c.AMIRegionKMSKeyIDs) > 0 {
|
||||
for _, kmsKey := range c.AMIRegionKMSKeyIDs {
|
||||
if len(kmsKey) == 0 {
|
||||
errs = append(errs, fmt.Errorf("Cannot share AMI encrypted with default KMS key for other regions"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
kmsKeys := make([]string, 0)
|
||||
if len(c.AMIKmsKeyId) > 0 {
|
||||
kmsKeys = append(kmsKeys, c.AMIKmsKeyId)
|
||||
}
|
||||
if len(c.AMIRegionKMSKeyIDs) > 0 {
|
||||
for _, kmsKey := range c.AMIRegionKMSKeyIDs {
|
||||
if len(kmsKey) > 0 {
|
||||
kmsKeys = append(kmsKeys, kmsKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(kmsKeys) > 0 && !c.AMIEncryptBootVolume.True() {
|
||||
errs = append(errs, fmt.Errorf("If you have set either "+
|
||||
"region_kms_key_ids or kms_key_id, encrypt_boot must also be true."))
|
||||
|
||||
}
|
||||
for _, kmsKey := range kmsKeys {
|
||||
if !ValidateKmsKey(kmsKey) {
|
||||
errs = append(errs, fmt.Errorf("%q is not a valid KMS Key Id.", kmsKey))
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.SnapshotUsers) > 0 {
|
||||
if len(c.AMIKmsKeyId) == 0 && len(c.AMIRegionKMSKeyIDs) == 0 && c.AMIEncryptBootVolume.True() {
|
||||
errs = append(errs, fmt.Errorf("Cannot share snapshot encrypted "+
|
||||
"with default KMS key, see https://www.packer.io/docs/builders/amazon-ebs#region_kms_key_ids for more information"))
|
||||
}
|
||||
if len(c.AMIRegionKMSKeyIDs) > 0 {
|
||||
for _, kmsKey := range c.AMIRegionKMSKeyIDs {
|
||||
if len(kmsKey) == 0 {
|
||||
errs = append(errs, fmt.Errorf("Cannot share snapshot encrypted with default KMS key"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.AMIName) < 3 || len(c.AMIName) > 128 {
|
||||
errs = append(errs, fmt.Errorf("ami_name must be between 3 and 128 characters long"))
|
||||
}
|
||||
|
||||
if c.AMIName != templateCleanAMIName(c.AMIName) {
|
||||
errs = append(errs, fmt.Errorf("AMIName should only contain "+
|
||||
"alphanumeric characters, parentheses (()), square brackets ([]), spaces "+
|
||||
"( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs "+
|
||||
"(@), or underscores(_). You can use the `clean_resource_name` template "+
|
||||
"filter to automatically clean your ami name."))
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return errs
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *AMIConfig) prepareRegions(accessConfig *AccessConfig) (errs []error) {
|
||||
if len(c.AMIRegions) > 0 {
|
||||
regionSet := make(map[string]struct{})
|
||||
regions := make([]string, 0, len(c.AMIRegions))
|
||||
|
||||
for _, region := range c.AMIRegions {
|
||||
// If we already saw the region, then don't look again
|
||||
if _, ok := regionSet[region]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Mark that we saw the region
|
||||
regionSet[region] = struct{}{}
|
||||
|
||||
// Make sure that if we have region_kms_key_ids defined,
|
||||
// the regions in ami_regions are also in region_kms_key_ids
|
||||
if len(c.AMIRegionKMSKeyIDs) > 0 {
|
||||
if _, ok := c.AMIRegionKMSKeyIDs[region]; !ok {
|
||||
errs = append(errs, fmt.Errorf("Region %s is in ami_regions but not in region_kms_key_ids", region))
|
||||
}
|
||||
}
|
||||
if (accessConfig != nil) && (region == accessConfig.RawRegion) {
|
||||
// make sure we don't try to copy to the region we originally
|
||||
// created the AMI in.
|
||||
log.Printf("Cannot copy AMI to AWS session region '%s', deleting it from `ami_regions`.", region)
|
||||
continue
|
||||
}
|
||||
regions = append(regions, region)
|
||||
}
|
||||
|
||||
c.AMIRegions = regions
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
// See https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html
|
||||
func ValidateKmsKey(kmsKey string) (valid bool) {
|
||||
kmsKeyIdPattern := `[a-f0-9-]+$`
|
||||
aliasPattern := `alias/[a-zA-Z0-9:/_-]+$`
|
||||
kmsArnStartPattern := `^arn:aws(-us-gov)?:kms:([a-z]{2}-(gov-)?[a-z]+-\d{1})?:(\d{12}):`
|
||||
if regexp.MustCompile(fmt.Sprintf("^%s", kmsKeyIdPattern)).MatchString(kmsKey) {
|
||||
return true
|
||||
}
|
||||
if regexp.MustCompile(fmt.Sprintf("^%s", aliasPattern)).MatchString(kmsKey) {
|
||||
return true
|
||||
}
|
||||
if regexp.MustCompile(fmt.Sprintf("%skey/%s", kmsArnStartPattern, kmsKeyIdPattern)).MatchString(kmsKey) {
|
||||
return true
|
||||
}
|
||||
if regexp.MustCompile(fmt.Sprintf("%s%s", kmsArnStartPattern, aliasPattern)).MatchString(kmsKey) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
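A quick in-package sketch (not one of the original tests) exercising the identifier formats ValidateKmsKey accepts above: a bare key ID, an alias, and a full key ARN. The key values are made up for illustration.

```go
package common

import "testing"

// Sketch: the three accepted KMS identifier shapes should validate, and an
// ARN with an unexpected resource type should not.
func TestValidateKmsKeyFormatsSketch(t *testing.T) {
	valid := []string{
		"1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d",
		"alias/my-packer-key",
		"arn:aws:kms:us-east-1:123456789012:key/1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d",
	}
	for _, k := range valid {
		if !ValidateKmsKey(k) {
			t.Errorf("%q should be a valid KMS key identifier", k)
		}
	}
	if ValidateKmsKey("arn:aws:kms:us-east-1:123456789012:foo/bar") {
		t.Error("unexpected resource type should not validate")
	}
}
```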
|
|
@ -1,249 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
)
|
||||
|
||||
func testAMIConfig() *AMIConfig {
|
||||
return &AMIConfig{
|
||||
AMIName: "foo",
|
||||
}
|
||||
}
|
||||
|
||||
func getFakeAccessConfig(region string) *AccessConfig {
|
||||
c := testAccessConfig()
|
||||
c.RawRegion = region
|
||||
return c
|
||||
}
|
||||
|
||||
func TestAMIConfigPrepare_name(t *testing.T) {
|
||||
c := testAMIConfig()
|
||||
accessConf := testAccessConfig()
|
||||
if err := c.Prepare(accessConf, nil); err != nil {
|
||||
t.Fatalf("shouldn't have err: %s", err)
|
||||
}
|
||||
|
||||
c.AMIName = ""
|
||||
if err := c.Prepare(accessConf, nil); err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
}
|
||||
|
||||
type mockEC2Client struct {
|
||||
ec2iface.EC2API
|
||||
}
|
||||
|
||||
func (m *mockEC2Client) DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error) {
|
||||
return &ec2.DescribeRegionsOutput{
|
||||
Regions: []*ec2.Region{
|
||||
{RegionName: aws.String("us-east-1")},
|
||||
{RegionName: aws.String("us-east-2")},
|
||||
{RegionName: aws.String("us-west-1")},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func TestAMIConfigPrepare_regions(t *testing.T) {
|
||||
c := testAMIConfig()
|
||||
c.AMIRegions = nil
|
||||
|
||||
var errs []error
|
||||
var err error
|
||||
accessConf := testAccessConfig()
|
||||
mockConn := &mockEC2Client{}
|
||||
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
|
||||
t.Fatalf("shouldn't have err: %#v", errs)
|
||||
}
|
||||
|
||||
c.AMIRegions, err = listEC2Regions(mockConn)
|
||||
if err != nil {
|
||||
t.Fatalf("shouldn't have err: %s", err.Error())
|
||||
}
|
||||
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
|
||||
t.Fatalf("shouldn't have err: %#v", errs)
|
||||
}
|
||||
|
||||
c.AMIRegions = []string{"us-east-1", "us-west-1", "us-east-1"}
|
||||
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
|
||||
t.Fatalf("bad: %s", errs[0])
|
||||
}
|
||||
|
||||
expected := []string{"us-east-1", "us-west-1"}
|
||||
if !reflect.DeepEqual(c.AMIRegions, expected) {
|
||||
t.Fatalf("bad: %#v", c.AMIRegions)
|
||||
}
|
||||
|
||||
c.AMIRegions = []string{"custom"}
|
||||
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
|
||||
t.Fatal("shouldn't have error")
|
||||
}
|
||||
|
||||
c.AMIRegions = []string{"us-east-1", "us-east-2", "us-west-1"}
|
||||
c.AMIRegionKMSKeyIDs = map[string]string{
|
||||
"us-east-1": "123-456-7890",
|
||||
"us-west-1": "789-012-3456",
|
||||
"us-east-2": "456-789-0123",
|
||||
}
|
||||
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
|
||||
t.Fatal(fmt.Sprintf("shouldn't have error: %s", errs[0]))
|
||||
}
|
||||
|
||||
c.AMIRegions = []string{"us-east-1", "us-east-2", "us-west-1"}
|
||||
c.AMIRegionKMSKeyIDs = map[string]string{
|
||||
"us-east-1": "123-456-7890",
|
||||
"us-west-1": "789-012-3456",
|
||||
"us-east-2": "",
|
||||
}
|
||||
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
|
||||
t.Fatal("should have passed; we are able to use default KMS key if not sharing")
|
||||
}
|
||||
|
||||
c.SnapshotUsers = []string{"user-foo", "user-bar"}
|
||||
c.AMIRegions = []string{"us-east-1", "us-east-2", "us-west-1"}
|
||||
c.AMIRegionKMSKeyIDs = map[string]string{
|
||||
"us-east-1": "123-456-7890",
|
||||
"us-west-1": "789-012-3456",
|
||||
"us-east-2": "",
|
||||
}
|
||||
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
|
||||
t.Fatal("should have an error b/c can't use default KMS key if sharing")
|
||||
}
|
||||
|
||||
c.AMIRegions = []string{"us-east-1", "us-west-1"}
|
||||
c.AMIRegionKMSKeyIDs = map[string]string{
|
||||
"us-east-1": "123-456-7890",
|
||||
"us-west-1": "789-012-3456",
|
||||
"us-east-2": "456-789-0123",
|
||||
}
|
||||
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
|
||||
t.Fatal("should have error b/c theres a region in the key map that isn't in ami_regions")
|
||||
}
|
||||
|
||||
c.AMIRegions = []string{"us-east-1", "us-west-1", "us-east-2"}
|
||||
c.AMIRegionKMSKeyIDs = map[string]string{
|
||||
"us-east-1": "123-456-7890",
|
||||
"us-west-1": "789-012-3456",
|
||||
}
|
||||
|
||||
if err := c.Prepare(accessConf, nil); err == nil {
|
||||
t.Fatal("should have error b/c theres a region in in ami_regions that isn't in the key map")
|
||||
}
|
||||
|
||||
c.SnapshotUsers = []string{"foo", "bar"}
|
||||
c.AMIKmsKeyId = "123-abc-456"
|
||||
c.AMIEncryptBootVolume = config.TriTrue
|
||||
c.AMIRegions = []string{"us-east-1", "us-west-1"}
|
||||
c.AMIRegionKMSKeyIDs = map[string]string{
|
||||
"us-east-1": "123-456-7890",
|
||||
"us-west-1": "",
|
||||
}
|
||||
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
|
||||
t.Fatal("should have error b/c theres a region in in ami_regions that isn't in the key map")
|
||||
}
|
||||
|
||||
// allow rawregion to exist in ami_regions list.
|
||||
accessConf = getFakeAccessConfig("us-east-1")
|
||||
c.AMIRegions = []string{"us-east-1", "us-west-1", "us-east-2"}
|
||||
c.AMIRegionKMSKeyIDs = nil
|
||||
if errs = c.prepareRegions(accessConf); len(errs) > 0 {
|
||||
t.Fatal("should allow user to have the raw region in ami_regions")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestAMIConfigPrepare_Share_EncryptedBoot(t *testing.T) {
|
||||
c := testAMIConfig()
|
||||
c.AMIUsers = []string{"testAccountID"}
|
||||
c.AMIEncryptBootVolume = config.TriTrue
|
||||
|
||||
accessConf := testAccessConfig()
|
||||
|
||||
c.AMIKmsKeyId = ""
|
||||
if err := c.Prepare(accessConf, nil); err == nil {
|
||||
t.Fatal("shouldn't be able to share ami with encrypted boot volume")
|
||||
}
|
||||
c.AMIKmsKeyId = "89c3fb9a-de87-4f2a-aedc-fddc5138193c"
|
||||
if err := c.Prepare(accessConf, nil); err != nil {
|
||||
t.Fatal("should be able to share ami with encrypted boot volume")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAMIConfigPrepare_ValidateKmsKey(t *testing.T) {
|
||||
c := testAMIConfig()
|
||||
c.AMIEncryptBootVolume = config.TriTrue
|
||||
|
||||
accessConf := testAccessConfig()
|
||||
|
||||
validCases := []string{
|
||||
"abcd1234-e567-890f-a12b-a123b4cd56ef",
|
||||
"alias/foo/bar",
|
||||
"arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef",
|
||||
"arn:aws:kms:us-east-1:012345678910:alias/foo/bar",
|
||||
"arn:aws-us-gov:kms:us-gov-east-1:123456789012:key/12345678-1234-abcd-0000-123456789012",
|
||||
}
|
||||
for _, validCase := range validCases {
|
||||
c.AMIKmsKeyId = validCase
|
||||
if err := c.Prepare(accessConf, nil); err != nil {
|
||||
t.Fatalf("%s should not have failed KMS key validation", validCase)
|
||||
}
|
||||
}
|
||||
|
||||
invalidCases := []string{
|
||||
"ABCD1234-e567-890f-a12b-a123b4cd56ef",
|
||||
"ghij1234-e567-890f-a12b-a123b4cd56ef",
|
||||
"ghij1234+e567_890f-a12b-a123b4cd56ef",
|
||||
"foo/bar",
|
||||
"arn:aws:kms:us-east-1:012345678910:foo/bar",
|
||||
"arn:foo:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef",
|
||||
}
|
||||
for _, invalidCase := range invalidCases {
|
||||
c.AMIKmsKeyId = invalidCase
|
||||
if err := c.Prepare(accessConf, nil); err == nil {
|
||||
t.Fatalf("%s should have failed KMS key validation", invalidCase)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestAMINameValidation(t *testing.T) {
|
||||
c := testAMIConfig()
|
||||
|
||||
accessConf := testAccessConfig()
|
||||
|
||||
c.AMIName = "aa"
|
||||
if err := c.Prepare(accessConf, nil); err == nil {
|
||||
t.Fatal("shouldn't be able to have an ami name with less than 3 characters")
|
||||
}
|
||||
|
||||
var longAmiName string
|
||||
for i := 0; i < 129; i++ {
|
||||
longAmiName += "a"
|
||||
}
|
||||
c.AMIName = longAmiName
|
||||
if err := c.Prepare(accessConf, nil); err == nil {
|
||||
t.Fatal("shouldn't be able to have an ami name with great than 128 characters")
|
||||
}
|
||||
|
||||
c.AMIName = "+aaa"
|
||||
if err := c.Prepare(accessConf, nil); err == nil {
|
||||
t.Fatal("shouldn't be able to have an ami name with invalid characters")
|
||||
}
|
||||
|
||||
c.AMIName = "fooBAR1()[] ./-'@_"
|
||||
if err := c.Prepare(accessConf, nil); err != nil {
|
||||
t.Fatal("should be able to use all of the allowed AMI characters")
|
||||
}
|
||||
|
||||
c.AMIName = `xyz-base-2017-04-05-1934`
|
||||
if err := c.Prepare(accessConf, nil); err != nil {
|
||||
t.Fatalf("expected `xyz-base-2017-04-05-1934` to pass validation.")
|
||||
}
|
||||
|
||||
}
|
|
@ -1,77 +0,0 @@
//go:generate struct-markdown
package common

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/service/ec2"
)

type AmiFilterOptions struct {
	// Filters used to select an AMI. Any filter described in the docs for
	// [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
	// is valid.
	Filters map[string]string `mapstructure:"filters"`
	// Filters the images by their owner. You
	// may specify one or more AWS account IDs, "self" (which will use the
	// account whose credentials you are using to run Packer), or an AWS owner
	// alias: for example, `amazon`, `aws-marketplace`, or `microsoft`. This
	// option is required for security reasons.
	Owners []string `mapstructure:"owners"`
	// Selects the newest created image when true.
	// This is most useful for selecting a daily distro build.
	MostRecent bool `mapstructure:"most_recent"`
}

func (d *AmiFilterOptions) GetOwners() []*string {
	res := make([]*string, 0, len(d.Owners))
	for _, owner := range d.Owners {
		i := owner
		res = append(res, &i)
	}
	return res
}

func (d *AmiFilterOptions) Empty() bool {
	return len(d.Owners) == 0 && len(d.Filters) == 0
}

func (d *AmiFilterOptions) NoOwner() bool {
	return len(d.Owners) == 0
}

func (d *AmiFilterOptions) GetFilteredImage(params *ec2.DescribeImagesInput, ec2conn *ec2.EC2) (*ec2.Image, error) {
	// We have filters to apply
	if len(d.Filters) > 0 {
		params.Filters = buildEc2Filters(d.Filters)
	}
	if len(d.Owners) > 0 {
		params.Owners = d.GetOwners()
	}

	log.Printf("Using AMI Filters %v", params)
	imageResp, err := ec2conn.DescribeImages(params)
	if err != nil {
		err := fmt.Errorf("Error querying AMI: %s", err)
		return nil, err
	}

	if len(imageResp.Images) == 0 {
		err := fmt.Errorf("No AMI was found matching filters: %v", params)
		return nil, err
	}

	if len(imageResp.Images) > 1 && !d.MostRecent {
		err := fmt.Errorf("Your query returned more than one result. Please try a more specific search, or set most_recent to true.")
		return nil, err
	}

	var image *ec2.Image
	if d.MostRecent {
		image = mostRecentAmi(imageResp.Images)
	} else {
		image = imageResp.Images[0]
	}
	return image, nil
}
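mostRecentAmi is defined elsewhere in this package; as an illustrative stand-in, the sketch below shows the idea behind the most_recent option: pick the image with the newest CreationDate (RFC3339 timestamps sort lexicographically). The image IDs and dates are invented.

```go
package main

import (
	"fmt"
	"sort"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// mostRecent returns the image with the newest CreationDate.
func mostRecent(images []*ec2.Image) *ec2.Image {
	sorted := append([]*ec2.Image(nil), images...)
	sort.Slice(sorted, func(i, j int) bool {
		return aws.StringValue(sorted[i].CreationDate) < aws.StringValue(sorted[j].CreationDate)
	})
	return sorted[len(sorted)-1]
}

func main() {
	images := []*ec2.Image{
		{ImageId: aws.String("ami-old"), CreationDate: aws.String("2020-01-01T00:00:00.000Z")},
		{ImageId: aws.String("ami-new"), CreationDate: aws.String("2021-06-01T00:00:00.000Z")},
	}
	fmt.Println(aws.StringValue(mostRecent(images).ImageId)) // ami-new
}
```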
|
|
@ -1,121 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// Artifact is an artifact implementation that contains built AMIs.
|
||||
type Artifact struct {
|
||||
// A map of regions to AMI IDs.
|
||||
Amis map[string]string
|
||||
|
||||
// BuilderId is the unique ID for the builder that created this AMI
|
||||
BuilderIdValue string
|
||||
|
||||
// StateData should store data such as GeneratedData
|
||||
// to be shared with post-processors
|
||||
StateData map[string]interface{}
|
||||
|
||||
// EC2 connection for performing API stuff.
|
||||
Session *session.Session
|
||||
}
|
||||
|
||||
func (a *Artifact) BuilderId() string {
|
||||
return a.BuilderIdValue
|
||||
}
|
||||
|
||||
func (*Artifact) Files() []string {
|
||||
// We have no files
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Artifact) Id() string {
|
||||
parts := make([]string, 0, len(a.Amis))
|
||||
for region, amiId := range a.Amis {
|
||||
parts = append(parts, fmt.Sprintf("%s:%s", region, amiId))
|
||||
}
|
||||
|
||||
sort.Strings(parts)
|
||||
return strings.Join(parts, ",")
|
||||
}
|
||||
|
||||
func (a *Artifact) String() string {
|
||||
amiStrings := make([]string, 0, len(a.Amis))
|
||||
for region, id := range a.Amis {
|
||||
single := fmt.Sprintf("%s: %s", region, id)
|
||||
amiStrings = append(amiStrings, single)
|
||||
}
|
||||
|
||||
sort.Strings(amiStrings)
|
||||
return fmt.Sprintf("AMIs were created:\n%s\n", strings.Join(amiStrings, "\n"))
|
||||
}
|
||||
|
||||
func (a *Artifact) State(name string) interface{} {
|
||||
if _, ok := a.StateData[name]; ok {
|
||||
return a.StateData[name]
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "atlas.artifact.metadata":
|
||||
return a.stateAtlasMetadata()
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Artifact) Destroy() error {
|
||||
errors := make([]error, 0)
|
||||
|
||||
for region, imageId := range a.Amis {
|
||||
log.Printf("Deregistering image ID (%s) from region (%s)", imageId, region)
|
||||
|
||||
regionConn := ec2.New(a.Session, &aws.Config{
|
||||
Region: aws.String(region),
|
||||
})
|
||||
|
||||
// Get image metadata
|
||||
imageResp, err := regionConn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
ImageIds: []*string{&imageId},
|
||||
})
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
if len(imageResp.Images) == 0 {
|
||||
err := fmt.Errorf("Error retrieving details for AMI (%s), no images found", imageId)
|
||||
errors = append(errors, err)
|
||||
}
|
||||
|
||||
err = DestroyAMIs([]*string{&imageId}, regionConn)
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
if len(errors) == 1 {
|
||||
return errors[0]
|
||||
} else {
|
||||
return &packersdk.MultiError{Errors: errors}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Artifact) stateAtlasMetadata() interface{} {
|
||||
metadata := make(map[string]string)
|
||||
for region, imageId := range a.Amis {
|
||||
k := fmt.Sprintf("region.%s", region)
|
||||
metadata[k] = imageId
|
||||
}
|
||||
|
||||
return metadata
|
||||
}
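A small in-package sketch (not part of the original file) showing the Id format produced by the Artifact type above: sorted "region:ami" pairs joined with commas. The AMI IDs and builder ID are representative placeholders.

```go
package common

import "fmt"

// Sketch of the Artifact Id output for a two-region build.
func ExampleArtifact() {
	a := &Artifact{
		Amis: map[string]string{
			"us-east-1": "ami-0123456789abcdef0",
			"us-west-2": "ami-0fedcba9876543210",
		},
		BuilderIdValue: "mitchellh.amazon.ebs", // illustrative builder ID
	}
	fmt.Println(a.Id())
	// Output: us-east-1:ami-0123456789abcdef0,us-west-2:ami-0fedcba9876543210
}
```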
|
|
@ -1,90 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
func TestArtifact_Impl(t *testing.T) {
|
||||
var _ packersdk.Artifact = new(Artifact)
|
||||
}
|
||||
|
||||
func TestArtifactId(t *testing.T) {
|
||||
expected := `east:foo,west:bar`
|
||||
|
||||
amis := make(map[string]string)
|
||||
amis["east"] = "foo"
|
||||
amis["west"] = "bar"
|
||||
|
||||
a := &Artifact{
|
||||
Amis: amis,
|
||||
}
|
||||
|
||||
result := a.Id()
|
||||
if result != expected {
|
||||
t.Fatalf("bad: %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestArtifactState_atlasMetadata(t *testing.T) {
|
||||
a := &Artifact{
|
||||
Amis: map[string]string{
|
||||
"east": "foo",
|
||||
"west": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
actual := a.State("atlas.artifact.metadata")
|
||||
expected := map[string]string{
|
||||
"region.east": "foo",
|
||||
"region.west": "bar",
|
||||
}
|
||||
if !reflect.DeepEqual(actual, expected) {
|
||||
t.Fatalf("bad: %#v", actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestArtifactString(t *testing.T) {
|
||||
expected := `AMIs were created:
|
||||
east: foo
|
||||
west: bar
|
||||
`
|
||||
|
||||
amis := make(map[string]string)
|
||||
amis["east"] = "foo"
|
||||
amis["west"] = "bar"
|
||||
|
||||
a := &Artifact{Amis: amis}
|
||||
result := a.String()
|
||||
if result != expected {
|
||||
t.Fatalf("bad: %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestArtifactState(t *testing.T) {
|
||||
expectedData := "this is the data"
|
||||
artifact := &Artifact{
|
||||
StateData: map[string]interface{}{"state_data": expectedData},
|
||||
}
|
||||
|
||||
// Valid state
|
||||
result := artifact.State("state_data")
|
||||
if result != expectedData {
|
||||
t.Fatalf("Bad: State data was %s instead of %s", result, expectedData)
|
||||
}
|
||||
|
||||
// Invalid state
|
||||
result = artifact.State("invalid_key")
|
||||
if result != nil {
|
||||
t.Fatalf("Bad: State should be nil for invalid state data name")
|
||||
}
|
||||
|
||||
// Nil StateData should not fail and should return nil
|
||||
artifact = &Artifact{}
|
||||
result = artifact.State("key")
|
||||
if result != nil {
|
||||
t.Fatalf("Bad: State should be nil for nil StateData")
|
||||
}
|
||||
}
|
|
@ -1,18 +0,0 @@
package awserrors

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// Matches returns true if the err matches all these conditions:
//  * err is of type awserr.Error
//  * Error.Code() matches code
//  * Error.Message() contains message
func Matches(err error, code string, message string) bool {
	if err, ok := err.(awserr.Error); ok {
		return err.Code() == code && strings.Contains(err.Message(), message)
	}
	return false
}
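A self-contained sketch of the same check against an error built with awserr.New, which produces the error shape the SDK returns; the check is inlined here rather than importing the package above.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// awserr.New builds the same error shape the SDK returns.
	err := awserr.New("NoCredentialProviders", "no valid providers in chain", nil)

	// The same check Matches performs, inlined to stay self-contained.
	matched := false
	if aerr, ok := err.(awserr.Error); ok {
		matched = aerr.Code() == "NoCredentialProviders" &&
			strings.Contains(aerr.Message(), "")
	}
	fmt.Println(matched) // true
}
```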
|
|
@ -1,231 +0,0 @@
|
|||
//go:generate struct-markdown
|
||||
//go:generate mapstructure-to-hcl2 -type BlockDevice
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
const (
|
||||
minIops = 100
|
||||
maxIops = 64000
|
||||
minIopsGp3 = 3000
|
||||
maxIopsGp3 = 16000
|
||||
minThroughput = 125
|
||||
maxThroughput = 1000
|
||||
)
|
||||
|
||||
// These will be attached when launching your instance. Your
|
||||
// options here may vary depending on the type of VM you use.
|
||||
//
|
||||
// Example use case:
|
||||
//
|
||||
// The following mapping will tell Packer to encrypt the root volume of the
|
||||
// build instance at launch using a specific non-default kms key:
|
||||
//
|
||||
// JSON example:
|
||||
//
|
||||
// ```json
|
||||
// launch_block_device_mappings: [
|
||||
// {
|
||||
// "device_name": "/dev/sda1",
|
||||
// "encrypted": true,
|
||||
// "kms_key_id": "1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d"
|
||||
// }
|
||||
// ]
|
||||
// ```
|
||||
//
|
||||
// HCL2 example:
|
||||
//
|
||||
// ```hcl
|
||||
// launch_block_device_mappings {
|
||||
// device_name = "/dev/sda1"
|
||||
// encrypted = true
|
||||
// kms_key_id = "1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d"
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// Please note that the kms_key_id option in this example exists for
|
||||
// launch_block_device_mappings but not ami_block_device_mappings.
|
||||
//
|
||||
// Documentation for Block Devices Mappings can be found here:
|
||||
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
|
||||
//
|
||||
type BlockDevice struct {
|
||||
// Indicates whether the EBS volume is deleted on instance termination.
|
||||
// Default false. NOTE: If this value is not explicitly set to true and
|
||||
// volumes are not cleaned up by an alternative method, additional volumes
|
||||
// will accumulate after every build.
|
||||
DeleteOnTermination bool `mapstructure:"delete_on_termination" required:"false"`
|
||||
// The device name exposed to the instance (for example, /dev/sdh or xvdh).
|
||||
// Required for every device in the block device mapping.
|
||||
DeviceName string `mapstructure:"device_name" required:"false"`
|
||||
// Indicates whether or not to encrypt the volume. By default, Packer will
|
||||
// keep the encryption setting to what it was in the source image. Setting
|
||||
// false will result in an unencrypted device, and true will result in an
|
||||
// encrypted one.
|
||||
Encrypted config.Trilean `mapstructure:"encrypted" required:"false"`
|
||||
// The number of I/O operations per second (IOPS) that the volume supports.
|
||||
// See the documentation on
|
||||
// [IOPs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html)
|
||||
// for more information
|
||||
IOPS *int64 `mapstructure:"iops" required:"false"`
|
||||
// Suppresses the specified device included in the block device mapping of
|
||||
// the AMI.
|
||||
NoDevice bool `mapstructure:"no_device" required:"false"`
|
||||
// The ID of the snapshot.
|
||||
SnapshotId string `mapstructure:"snapshot_id" required:"false"`
|
||||
// The throughput for gp3 volumes, only valid for gp3 types
|
||||
// See the documentation on
|
||||
// [Throughput](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html)
|
||||
// for more information
|
||||
Throughput *int64 `mapstructure:"throughput" required:"false"`
|
||||
// The virtual device name. See the documentation on Block Device Mapping
|
||||
// for more information.
|
||||
VirtualName string `mapstructure:"virtual_name" required:"false"`
|
||||
// The volume type. gp2 & gp3 for General Purpose (SSD) volumes, io1 & io2
|
||||
// for Provisioned IOPS (SSD) volumes, st1 for Throughput Optimized HDD,
|
||||
// sc1 for Cold HDD, and standard for Magnetic volumes.
|
||||
VolumeType string `mapstructure:"volume_type" required:"false"`
|
||||
// The size of the volume, in GiB. Required if not specifying a
|
||||
// snapshot_id.
|
||||
VolumeSize int64 `mapstructure:"volume_size" required:"false"`
|
||||
// ID, alias or ARN of the KMS key to use for boot volume encryption.
|
||||
// This option exists for launch_block_device_mappings but not
|
||||
// ami_block_device_mappings. The kms key id defined here only applies to
|
||||
// the original build region; if the AMI gets copied to other regions, the
|
||||
// volume in those regions will be encrypted by the default EBS KMS key.
|
||||
// For valid formats see KmsKeyId in the [AWS API docs -
|
||||
// CopyImage](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CopyImage.html)
|
||||
// This field is validated by Packer. When using an alias, you will have to
|
||||
// prefix kms_key_id with alias/.
|
||||
KmsKeyId string `mapstructure:"kms_key_id" required:"false"`
|
||||
}
|
||||
|
||||
type BlockDevices []BlockDevice
|
||||
|
||||
func (bds BlockDevices) BuildEC2BlockDeviceMappings() []*ec2.BlockDeviceMapping {
|
||||
var blockDevices []*ec2.BlockDeviceMapping
|
||||
|
||||
for _, blockDevice := range bds {
|
||||
blockDevices = append(blockDevices, blockDevice.BuildEC2BlockDeviceMapping())
|
||||
}
|
||||
return blockDevices
|
||||
}
|
||||
|
||||
func (blockDevice BlockDevice) BuildEC2BlockDeviceMapping() *ec2.BlockDeviceMapping {
|
||||
|
||||
mapping := &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String(blockDevice.DeviceName),
|
||||
}
|
||||
|
||||
if blockDevice.NoDevice {
|
||||
mapping.NoDevice = aws.String("")
|
||||
return mapping
|
||||
} else if blockDevice.VirtualName != "" {
|
||||
if strings.HasPrefix(blockDevice.VirtualName, "ephemeral") {
|
||||
mapping.VirtualName = aws.String(blockDevice.VirtualName)
|
||||
}
|
||||
return mapping
|
||||
}
|
||||
|
||||
ebsBlockDevice := &ec2.EbsBlockDevice{
|
||||
DeleteOnTermination: aws.Bool(blockDevice.DeleteOnTermination),
|
||||
}
|
||||
|
||||
if blockDevice.VolumeType != "" {
|
||||
ebsBlockDevice.VolumeType = aws.String(blockDevice.VolumeType)
|
||||
}
|
||||
|
||||
if blockDevice.VolumeSize > 0 {
|
||||
ebsBlockDevice.VolumeSize = aws.Int64(blockDevice.VolumeSize)
|
||||
}
|
||||
|
||||
switch blockDevice.VolumeType {
|
||||
case "io1", "io2", "gp3":
|
||||
ebsBlockDevice.Iops = blockDevice.IOPS
|
||||
}
|
||||
|
||||
// Throughput is only valid for gp3 types
|
||||
if blockDevice.VolumeType == "gp3" {
|
||||
ebsBlockDevice.Throughput = blockDevice.Throughput
|
||||
}
|
||||
|
||||
// You cannot specify Encrypted if you specify a Snapshot ID
|
||||
if blockDevice.SnapshotId != "" {
|
||||
ebsBlockDevice.SnapshotId = aws.String(blockDevice.SnapshotId)
|
||||
}
|
||||
ebsBlockDevice.Encrypted = blockDevice.Encrypted.ToBoolPointer()
|
||||
|
||||
if blockDevice.KmsKeyId != "" {
|
||||
ebsBlockDevice.KmsKeyId = aws.String(blockDevice.KmsKeyId)
|
||||
}
|
||||
|
||||
mapping.Ebs = ebsBlockDevice
|
||||
|
||||
return mapping
|
||||
}
|
||||
|
||||
var iopsRatios = map[string]int64{
|
||||
"io1": 50,
|
||||
"io2": 500,
|
||||
}
|
||||
|
||||
func (b *BlockDevice) Prepare(ctx *interpolate.Context) error {
|
||||
if b.DeviceName == "" {
|
||||
return fmt.Errorf("The `device_name` must be specified " +
|
||||
"for every device in the block device mapping.")
|
||||
}
|
||||
|
||||
// Warn that encrypted must be true or nil when setting kms_key_id
|
||||
if b.KmsKeyId != "" && b.Encrypted.False() {
|
||||
return fmt.Errorf("The device %v, must also have `encrypted: "+
|
||||
"true` when setting a kms_key_id.", b.DeviceName)
|
||||
}
|
||||
|
||||
if ratio, ok := iopsRatios[b.VolumeType]; b.VolumeSize != 0 && ok {
|
||||
if b.IOPS != nil && (*b.IOPS/b.VolumeSize > ratio) {
|
||||
return fmt.Errorf("%s: the maximum ratio of provisioned IOPS to requested volume size "+
|
||||
"(in GiB) is %v:1 for %s volumes", b.DeviceName, ratio, b.VolumeType)
|
||||
}
|
||||
|
||||
if b.IOPS != nil && (*b.IOPS < minIops || *b.IOPS > maxIops) {
|
||||
return fmt.Errorf("IOPS must be between %d and %d for device %s",
|
||||
minIops, maxIops, b.DeviceName)
|
||||
}
|
||||
}
|
||||
|
||||
if b.VolumeType == "gp3" {
|
||||
if b.Throughput != nil && (*b.Throughput < minThroughput || *b.Throughput > maxThroughput) {
|
||||
return fmt.Errorf("Throughput must be between %d and %d for device %s",
|
||||
minThroughput, maxThroughput, b.DeviceName)
|
||||
}
|
||||
|
||||
if b.IOPS != nil && (*b.IOPS < minIopsGp3 || *b.IOPS > maxIopsGp3) {
|
||||
return fmt.Errorf("IOPS must be between %d and %d for device %s",
|
||||
minIopsGp3, maxIopsGp3, b.DeviceName)
|
||||
}
|
||||
} else if b.Throughput != nil {
|
||||
return fmt.Errorf("Throughput is not available for device %s",
|
||||
b.DeviceName)
|
||||
}
|
||||
|
||||
_, err := interpolate.RenderInterface(&b, ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
func (bds BlockDevices) Prepare(ctx *interpolate.Context) (errs []error) {
|
||||
for _, block := range bds {
|
||||
if err := block.Prepare(ctx); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
return errs
|
||||
}
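An in-package sketch (not part of the original file) of how a single gp3 mapping is turned into the EC2 request structure by BuildEC2BlockDeviceMapping above; the device name, size, IOPS, and throughput values are made up but stay within the validated ranges.

```go
package common

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/hashicorp/packer-plugin-sdk/template/config"
)

// Sketch: a gp3 device carries its IOPS and Throughput through to the
// resulting ec2.BlockDeviceMapping.
func ExampleBlockDevice_BuildEC2BlockDeviceMapping() {
	bd := BlockDevice{
		DeviceName:          "/dev/sdb",
		VolumeType:          "gp3",
		VolumeSize:          20,
		IOPS:                aws.Int64(3000),
		Throughput:          aws.Int64(125),
		DeleteOnTermination: true,
		Encrypted:           config.TriTrue,
	}
	m := bd.BuildEC2BlockDeviceMapping()
	fmt.Println(aws.StringValue(m.DeviceName), aws.StringValue(m.Ebs.VolumeType),
		aws.Int64Value(m.Ebs.Iops), aws.Int64Value(m.Ebs.Throughput))
	// Output: /dev/sdb gp3 3000 125
}
```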
|
|
@ -1,51 +0,0 @@
|
|||
// Code generated by "mapstructure-to-hcl2 -type BlockDevice"; DO NOT EDIT.
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// FlatBlockDevice is an auto-generated flat version of BlockDevice.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatBlockDevice struct {
|
||||
DeleteOnTermination *bool `mapstructure:"delete_on_termination" required:"false" cty:"delete_on_termination" hcl:"delete_on_termination"`
|
||||
DeviceName *string `mapstructure:"device_name" required:"false" cty:"device_name" hcl:"device_name"`
|
||||
Encrypted *bool `mapstructure:"encrypted" required:"false" cty:"encrypted" hcl:"encrypted"`
|
||||
IOPS *int64 `mapstructure:"iops" required:"false" cty:"iops" hcl:"iops"`
|
||||
NoDevice *bool `mapstructure:"no_device" required:"false" cty:"no_device" hcl:"no_device"`
|
||||
SnapshotId *string `mapstructure:"snapshot_id" required:"false" cty:"snapshot_id" hcl:"snapshot_id"`
|
||||
Throughput *int64 `mapstructure:"throughput" required:"false" cty:"throughput" hcl:"throughput"`
|
||||
VirtualName *string `mapstructure:"virtual_name" required:"false" cty:"virtual_name" hcl:"virtual_name"`
|
||||
VolumeType *string `mapstructure:"volume_type" required:"false" cty:"volume_type" hcl:"volume_type"`
|
||||
VolumeSize *int64 `mapstructure:"volume_size" required:"false" cty:"volume_size" hcl:"volume_size"`
|
||||
KmsKeyId *string `mapstructure:"kms_key_id" required:"false" cty:"kms_key_id" hcl:"kms_key_id"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatBlockDevice.
|
||||
// FlatBlockDevice is an auto-generated flat version of BlockDevice.
|
||||
// Where the contents of fields with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*BlockDevice) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatBlockDevice)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a BlockDevice.
|
||||
// This spec is used by HCL to read the fields of BlockDevice.
|
||||
// The decoded values from this spec will then be applied to a FlatBlockDevice.
|
||||
func (*FlatBlockDevice) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"delete_on_termination": &hcldec.AttrSpec{Name: "delete_on_termination", Type: cty.Bool, Required: false},
|
||||
"device_name": &hcldec.AttrSpec{Name: "device_name", Type: cty.String, Required: false},
|
||||
"encrypted": &hcldec.AttrSpec{Name: "encrypted", Type: cty.Bool, Required: false},
|
||||
"iops": &hcldec.AttrSpec{Name: "iops", Type: cty.Number, Required: false},
|
||||
"no_device": &hcldec.AttrSpec{Name: "no_device", Type: cty.Bool, Required: false},
|
||||
"snapshot_id": &hcldec.AttrSpec{Name: "snapshot_id", Type: cty.String, Required: false},
|
||||
"throughput": &hcldec.AttrSpec{Name: "throughput", Type: cty.Number, Required: false},
|
||||
"virtual_name": &hcldec.AttrSpec{Name: "virtual_name", Type: cty.String, Required: false},
|
||||
"volume_type": &hcldec.AttrSpec{Name: "volume_type", Type: cty.String, Required: false},
|
||||
"volume_size": &hcldec.AttrSpec{Name: "volume_size", Type: cty.Number, Required: false},
|
||||
"kms_key_id": &hcldec.AttrSpec{Name: "kms_key_id", Type: cty.String, Required: false},
|
||||
}
|
||||
return s
|
||||
}
|
|
@ -1,401 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
func TestBlockDevice(t *testing.T) {
|
||||
cases := []struct {
|
||||
Config *BlockDevice
|
||||
Result *ec2.BlockDeviceMapping
|
||||
}{
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
SnapshotId: "snap-1234",
|
||||
VolumeType: "standard",
|
||||
VolumeSize: 8,
|
||||
DeleteOnTermination: true,
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
SnapshotId: aws.String("snap-1234"),
|
||||
VolumeType: aws.String("standard"),
|
||||
VolumeSize: aws.Int64(8),
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeSize: 8,
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeSize: aws.Int64(8),
|
||||
DeleteOnTermination: aws.Bool(false),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "io1",
|
||||
VolumeSize: 8,
|
||||
DeleteOnTermination: true,
|
||||
IOPS: aws.Int64(1000),
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeType: aws.String("io1"),
|
||||
VolumeSize: aws.Int64(8),
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
Iops: aws.Int64(1000),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "io2",
|
||||
VolumeSize: 8,
|
||||
DeleteOnTermination: true,
|
||||
IOPS: aws.Int64(1000),
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeType: aws.String("io2"),
|
||||
VolumeSize: aws.Int64(8),
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
Iops: aws.Int64(1000),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "gp2",
|
||||
VolumeSize: 8,
|
||||
DeleteOnTermination: true,
|
||||
Encrypted: config.TriTrue,
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeType: aws.String("gp2"),
|
||||
VolumeSize: aws.Int64(8),
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
Encrypted: aws.Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "gp2",
|
||||
VolumeSize: 8,
|
||||
DeleteOnTermination: true,
|
||||
Encrypted: config.TriTrue,
|
||||
KmsKeyId: "2Fa48a521f-3aff-4b34-a159-376ac5d37812",
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeType: aws.String("gp2"),
|
||||
VolumeSize: aws.Int64(8),
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
Encrypted: aws.Bool(true),
|
||||
KmsKeyId: aws.String("2Fa48a521f-3aff-4b34-a159-376ac5d37812"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "standard",
|
||||
DeleteOnTermination: true,
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeType: aws.String("standard"),
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VirtualName: "ephemeral0",
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
VirtualName: aws.String("ephemeral0"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
NoDevice: true,
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
NoDevice: aws.String(""),
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "gp3",
|
||||
VolumeSize: 8,
|
||||
Throughput: aws.Int64(125),
|
||||
IOPS: aws.Int64(3000),
|
||||
DeleteOnTermination: true,
|
||||
Encrypted: config.TriTrue,
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeType: aws.String("gp3"),
|
||||
VolumeSize: aws.Int64(8),
|
||||
Throughput: aws.Int64(125),
|
||||
Iops: aws.Int64(3000),
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
Encrypted: aws.Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
var amiBlockDevices BlockDevices = []BlockDevice{*tc.Config}
|
||||
|
||||
var launchBlockDevices BlockDevices = []BlockDevice{*tc.Config}
|
||||
|
||||
expected := []*ec2.BlockDeviceMapping{tc.Result}
|
||||
|
||||
amiResults := amiBlockDevices.BuildEC2BlockDeviceMappings()
|
||||
if diff := cmp.Diff(expected, amiResults); diff != "" {
|
||||
t.Fatalf("Bad block device: %s", diff)
|
||||
}
|
||||
|
||||
launchResults := launchBlockDevices.BuildEC2BlockDeviceMappings()
|
||||
if diff := cmp.Diff(expected, launchResults); diff != "" {
|
||||
t.Fatalf("Bad block device: %s", diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIOPSValidation(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
device BlockDevice
|
||||
ok bool
|
||||
msg string
|
||||
}{
|
||||
// volume size unknown
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "io1",
|
||||
IOPS: aws.Int64(1000),
|
||||
},
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "io2",
|
||||
IOPS: aws.Int64(1000),
|
||||
},
|
||||
ok: true,
|
||||
},
|
||||
// ratio requirement satisfied
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "io1",
|
||||
VolumeSize: 50,
|
||||
IOPS: aws.Int64(1000),
|
||||
},
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "io2",
|
||||
VolumeSize: 100,
|
||||
IOPS: aws.Int64(1000),
|
||||
},
|
||||
ok: true,
|
||||
},
|
||||
// ratio requirement not satisfied
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "io1",
|
||||
VolumeSize: 10,
|
||||
IOPS: aws.Int64(2000),
|
||||
},
|
||||
ok: false,
|
||||
msg: "/dev/sdb: the maximum ratio of provisioned IOPS to requested volume size (in GiB) is 50:1 for io1 volumes",
|
||||
},
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "io2",
|
||||
VolumeSize: 50,
|
||||
IOPS: aws.Int64(30000),
|
||||
},
|
||||
ok: false,
|
||||
msg: "/dev/sdb: the maximum ratio of provisioned IOPS to requested volume size (in GiB) is 500:1 for io2 volumes",
|
||||
},
|
||||
// exceed max iops
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "io2",
|
||||
VolumeSize: 500,
|
||||
IOPS: aws.Int64(99999),
|
||||
},
|
||||
ok: false,
|
||||
msg: "IOPS must be between 100 and 64000 for device /dev/sdb",
|
||||
},
|
||||
// lower than min iops
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "io2",
|
||||
VolumeSize: 50,
|
||||
IOPS: aws.Int64(10),
|
||||
},
|
||||
ok: false,
|
||||
msg: "IOPS must be between 100 and 64000 for device /dev/sdb",
|
||||
},
|
||||
// exceed max iops
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "gp3",
|
||||
VolumeSize: 50,
|
||||
Throughput: aws.Int64(125),
|
||||
IOPS: aws.Int64(99999),
|
||||
},
|
||||
ok: false,
|
||||
msg: "IOPS must be between 3000 and 16000 for device /dev/sdb",
|
||||
},
|
||||
// lower than min iops
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "gp3",
|
||||
VolumeSize: 50,
|
||||
Throughput: aws.Int64(125),
|
||||
IOPS: aws.Int64(10),
|
||||
},
|
||||
ok: false,
|
||||
msg: "IOPS must be between 3000 and 16000 for device /dev/sdb",
|
||||
},
|
||||
}
|
||||
|
||||
ctx := interpolate.Context{}
|
||||
for _, testCase := range cases {
|
||||
err := testCase.device.Prepare(&ctx)
|
||||
if testCase.ok && err != nil {
|
||||
t.Fatalf("should not error, but: %v", err)
|
||||
}
|
||||
if !testCase.ok {
|
||||
if err == nil {
|
||||
t.Fatalf("should error")
|
||||
} else if err.Error() != testCase.msg {
|
||||
t.Fatalf("wrong error: expected %s, found: %v", testCase.msg, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
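// The ratio checks above mirror the EBS limits named in the error strings:
// io1 allows at most 50 provisioned IOPS per GiB and io2 at most 500 per GiB,
// so a 10 GiB io1 volume tops out at 10 * 50 = 500 IOPS. A minimal sketch of
// tripping that check through BlockDevice.Prepare, assumed to live in a
// hypothetical standalone file inside this same common package:

package common

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
)

func ExampleBlockDevice_iopsRatio() {
	dev := BlockDevice{
		DeviceName: "/dev/sdb",
		VolumeType: "io1",
		VolumeSize: 10,              // GiB
		IOPS:       aws.Int64(2000), // exceeds the 10 * 50 = 500 IOPS ceiling
	}
	if err := dev.Prepare(&interpolate.Context{}); err != nil {
		fmt.Println(err) // reports the 50:1 ratio violation for /dev/sdb
	}
}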
|
||||
|
||||
func TestThroughputValidation(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
device BlockDevice
|
||||
ok bool
|
||||
msg string
|
||||
}{
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "gp3",
|
||||
Throughput: aws.Int64(125),
|
||||
IOPS: aws.Int64(3000),
|
||||
},
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "gp3",
|
||||
Throughput: aws.Int64(1000),
|
||||
IOPS: aws.Int64(3000),
|
||||
},
|
||||
ok: true,
|
||||
},
|
||||
// exceed max Throughput
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "gp3",
|
||||
Throughput: aws.Int64(1001),
|
||||
IOPS: aws.Int64(3000),
|
||||
},
|
||||
ok: false,
|
||||
msg: "Throughput must be between 125 and 1000 for device /dev/sdb",
|
||||
},
|
||||
// lower than min Throughput
|
||||
{
|
||||
device: BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "gp3",
|
||||
Throughput: aws.Int64(124),
|
||||
IOPS: aws.Int64(3000),
|
||||
},
|
||||
ok: false,
|
||||
msg: "Throughput must be between 125 and 1000 for device /dev/sdb",
|
||||
},
|
||||
}
|
||||
|
||||
ctx := interpolate.Context{}
|
||||
for _, testCase := range cases {
|
||||
err := testCase.device.Prepare(&ctx)
|
||||
if testCase.ok && err != nil {
|
||||
t.Fatalf("should not error, but: %v", err)
|
||||
}
|
||||
if !testCase.ok {
|
||||
if err == nil {
|
||||
t.Fatalf("should error")
|
||||
} else if err.Error() != testCase.msg {
|
||||
t.Fatalf("wrong error: expected %s, found: %v", testCase.msg, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,19 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
)
|
||||
|
||||
// Build a slice of EC2 (AMI/Subnet/VPC) filter options from the filters provided.
|
||||
func buildEc2Filters(input map[string]string) []*ec2.Filter {
|
||||
var filters []*ec2.Filter
|
||||
for k, v := range input {
|
||||
a := k
|
||||
b := v
|
||||
filters = append(filters, &ec2.Filter{
|
||||
Name: &a,
|
||||
Values: []*string{&b},
|
||||
})
|
||||
}
|
||||
return filters
|
||||
}
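// buildEc2Filters copies each key and value into fresh locals before taking
// their addresses, so every *ec2.Filter points at its own strings rather than
// the shared loop variables. A hedged sketch of feeding the result into a
// DescribeImages call; the filter values and the "self" owner are illustrative:

package common

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// describeOwnEbsImages is a hypothetical helper built on buildEc2Filters.
func describeOwnEbsImages(ec2conn *ec2.EC2) (*ec2.DescribeImagesOutput, error) {
	filters := buildEc2Filters(map[string]string{
		"virtualization-type": "hvm",
		"root-device-type":    "ebs",
	})
	return ec2conn.DescribeImages(&ec2.DescribeImagesInput{
		Filters: filters,
		Owners:  []*string{aws.String("self")},
	})
}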
|
|
@ -1,31 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestStepSourceAmiInfo_BuildFilter(t *testing.T) {
|
||||
filter_key := "name"
|
||||
filter_value := "foo"
|
||||
filter_key2 := "name2"
|
||||
filter_value2 := "foo2"
|
||||
|
||||
inputFilter := map[string]string{filter_key: filter_value, filter_key2: filter_value2}
|
||||
outputFilter := buildEc2Filters(inputFilter)
|
||||
|
||||
// deconstruct filter back into things we can test
|
||||
foundMap := map[string]bool{filter_key: false, filter_key2: false}
|
||||
for _, filter := range outputFilter {
|
||||
for key, value := range inputFilter {
|
||||
if *filter.Name == key && *filter.Values[0] == value {
|
||||
foundMap[key] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range foundMap {
|
||||
if !v {
|
||||
t.Fatalf("Fail: should have found value for key: %s", k)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,58 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
)
|
||||
|
||||
var encodedFailureMessagePattern = regexp.MustCompile(`(?i)(.*) Encoded authorization failure message: ([\w-]+) ?( .*)?`)
|
||||
|
||||
type stsDecoder interface {
|
||||
DecodeAuthorizationMessage(input *sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
|
||||
}
|
||||
|
||||
// decodeAWSError replaces encoded authorization messages with the
// decoded results.
|
||||
func decodeAWSError(decoder stsDecoder, err error) error {
|
||||
|
||||
groups := encodedFailureMessagePattern.FindStringSubmatch(err.Error())
|
||||
if len(groups) > 1 {
|
||||
result, decodeErr := decoder.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
|
||||
EncodedMessage: aws.String(groups[2]),
|
||||
})
|
||||
if decodeErr == nil {
|
||||
msg := aws.StringValue(result.DecodedMessage)
|
||||
return fmt.Errorf("%s Authorization failure message: '%s'%s", groups[1], msg, groups[3])
|
||||
}
|
||||
log.Printf("[WARN] Attempted to decode authorization message, but received: %v", decodeErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// DecodeAuthZMessages enables automatic decoding of any
|
||||
// encoded authorization messages
|
||||
func DecodeAuthZMessages(sess *session.Session) {
|
||||
azd := &authZMessageDecoder{
|
||||
Decoder: sts.New(sess),
|
||||
}
|
||||
sess.Handlers.UnmarshalError.AfterEachFn = azd.afterEachFn
|
||||
}
|
||||
|
||||
type authZMessageDecoder struct {
|
||||
Decoder stsDecoder
|
||||
}
|
||||
|
||||
func (a *authZMessageDecoder) afterEachFn(item request.HandlerListRunItem) bool {
|
||||
if err, ok := item.Request.Error.(awserr.Error); ok && err.Code() == "UnauthorizedOperation" {
|
||||
item.Request.Error = decodeAWSError(a.Decoder, err)
|
||||
}
|
||||
return true
|
||||
}
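// DecodeAuthZMessages attaches the decoder to the session's UnmarshalError
// handler list, so any UnauthorizedOperation error flowing through that
// session is rewritten with its decoded policy message. A minimal sketch of
// wiring it up; the region is a placeholder and the caller is assumed to hold
// sts:DecodeAuthorizationMessage permission:

package common

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// newDecodedEC2 is a hypothetical constructor returning an EC2 client whose
// authorization failures arrive already decoded.
func newDecodedEC2(region string) *ec2.EC2 {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String(region)}))
	DecodeAuthZMessages(sess)
	return ec2.New(sess)
}

func exampleDecodedErrors() {
	svc := newDecodedEC2("us-east-1")
	if _, err := svc.DescribeInstances(nil); err != nil {
		log.Println(err) // authorization failures now include the decoded message
	}
}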
|
|
@ -1,70 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
type mockSTS struct {
|
||||
}
|
||||
|
||||
func (m *mockSTS) DecodeAuthorizationMessage(input *sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) {
|
||||
return &sts.DecodeAuthorizationMessageOutput{
|
||||
DecodedMessage: aws.String(`{
|
||||
"allowed": false,
|
||||
"explicitDeny": true,
|
||||
"matchedStatements": {}
|
||||
}`),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func TestErrorsParsing_RequestFailure(t *testing.T) {
|
||||
|
||||
ae := awserr.New("UnauthorizedOperation",
|
||||
`You are not authorized to perform this operation. Encoded authorization failure message: D9Q7oicjOMr9l2CC-NPP1FiZXK9Ijia1k-3l0siBFCcrK3oSuMFMkBIO5TNj0HdXE-WfwnAcdycFOohfKroNO6toPJEns8RFVfy_M_IjNGmrEFJ6E62pnmBW0OLrMsXxR9FQE4gB4gJzSM0AD6cV6S3FOfqYzWBRX-sQdOT4HryGkFNRoFBr9Xbp-tRwiadwkbdHdfnV9fbRkXmnwCdULml16NBSofC4ZPepLMKmIB5rKjwk-m179UUh2XA-J5no0si6XcRo5GbHQB5QfCIwSHL4vsro2wLZUd16-8OWKyr3tVlTbQe0ERZskqRqRQ5E28QuiBCVV6XstUyo-T4lBSr75Fgnyr3wCO-dS3b_5Ns3WzA2JD4E2AJOAStXIU8IH5YuKkAg7C-dJMuBMPpmKCBEXhNoHDwCyOo5PsV3xMlc0jSb0qYGpfst_TDDtejcZfn7NssUjxVq9qkdH-OXz2gPoQB-hX8ycmZCL5UZwKc3TCLUr7TGnudHjmnMrE9cUo-yTCWfyHPLprhiYhTCKW18EikJ0O1EKI3FJ_b4F19_jFBPARjSwQc7Ut6MNCVzrPdZGYSF6acj5gPaxdy9uSkVQwWXK7Pd5MFP7EBDE1_DgYbzodgwDO2PXeVFUbSLBHKWo_ebZS9ZX2nYPcGss_sYaly0ZVSIJXp7G58B5BoFVhvVH6jYnF9XiAOjMltuP_ycu1pQP1lki500RY3baLvfeYeAsB38XZHKEgWZzq7Fei-uh89q0cjJTmlVyrfRU3q6`,
|
||||
fmt.Errorf("You can't do it!!"))
|
||||
rf := awserr.NewRequestFailure(ae, 400, "abc-def-123-456")
|
||||
|
||||
result := decodeAWSError(&mockSTS{}, rf)
|
||||
if result == nil {
|
||||
t.Error("Expected resulting error")
|
||||
}
|
||||
if !strings.Contains(result.Error(), "Authorization failure message:") {
|
||||
t.Error("Expected authorization failure message")
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorsParsing_NonAuthorizationFailure(t *testing.T) {
|
||||
|
||||
ae := awserr.New("BadRequest",
|
||||
`You did something wrong. Try again`,
|
||||
fmt.Errorf("Request was no good."))
|
||||
rf := awserr.NewRequestFailure(ae, 400, "abc-def-123-456")
|
||||
|
||||
result := decodeAWSError(&mockSTS{}, rf)
|
||||
if result == nil {
|
||||
t.Error("Expected resulting error")
|
||||
}
|
||||
if result != rf {
|
||||
t.Error("Expected original error to be returned unchanged")
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorsParsing_NonAWSError(t *testing.T) {
|
||||
|
||||
err := fmt.Errorf("Random error occurred")
|
||||
|
||||
result := decodeAWSError(&mockSTS{}, err)
|
||||
if result == nil {
|
||||
t.Error("Expected resulting error")
|
||||
}
|
||||
if result != err {
|
||||
t.Error("Expected original error to be returned unchanged")
|
||||
}
|
||||
}
|
|
@ -1,75 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/retry"
|
||||
"github.com/hashicorp/packer/builder/amazon/common/awserrors"
|
||||
)
|
||||
|
||||
// DestroyAMIs deregisters the AWS machine images in imageids from an active AWS account
|
||||
func DestroyAMIs(imageids []*string, ec2conn *ec2.EC2) error {
|
||||
resp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
ImageIds: imageids,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error describing AMI: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Deregister image by name.
|
||||
for _, i := range resp.Images {
|
||||
|
||||
ctx := context.TODO()
|
||||
err = retry.Config{
|
||||
Tries: 11,
|
||||
ShouldRetry: func(err error) bool {
|
||||
return awserrors.Matches(err, "UnauthorizedOperation", "")
|
||||
},
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
_, err := ec2conn.DeregisterImage(&ec2.DeregisterImageInput{
|
||||
ImageId: i.ImageId,
|
||||
})
|
||||
return err
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error deregistering existing AMI: %s", err)
|
||||
return err
|
||||
}
|
||||
log.Printf("Deregistered AMI id: %s", *i.ImageId)
|
||||
|
||||
// Delete snapshot(s) by image
|
||||
for _, b := range i.BlockDeviceMappings {
|
||||
if b.Ebs != nil && aws.StringValue(b.Ebs.SnapshotId) != "" {
|
||||
|
||||
err = retry.Config{
|
||||
Tries: 11,
|
||||
ShouldRetry: func(err error) bool {
|
||||
return awserrors.Matches(err, "UnauthorizedOperation", "")
|
||||
},
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
_, err := ec2conn.DeleteSnapshot(&ec2.DeleteSnapshotInput{
|
||||
SnapshotId: b.Ebs.SnapshotId,
|
||||
})
|
||||
return err
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error deleting existing snapshot: %s", err)
|
||||
return err
|
||||
}
|
||||
log.Printf("Deleted snapshot: %s", *b.Ebs.SnapshotId)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
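// DestroyAMIs deregisters each image and then deletes its EBS snapshots,
// retrying UnauthorizedOperation failures with the same backoff in both
// phases. A hedged sketch of a cleanup call; the AMI ID is a placeholder:

package common

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// cleanupBuildImages is a hypothetical helper that removes previously built
// AMIs together with their backing snapshots.
func cleanupBuildImages() {
	sess := session.Must(session.NewSession())
	ec2conn := ec2.New(sess)
	ids := []*string{aws.String("ami-0123456789abcdef0")} // placeholder AMI ID
	if err := DestroyAMIs(ids, ec2conn); err != nil {
		log.Fatalf("cleanup failed: %s", err)
	}
}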
|
|
@ -1,63 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
)
|
||||
|
||||
type BuildInfoTemplate struct {
|
||||
BuildRegion string
|
||||
SourceAMI string
|
||||
SourceAMICreationDate string
|
||||
SourceAMIName string
|
||||
SourceAMIOwner string
|
||||
SourceAMIOwnerName string
|
||||
SourceAMITags map[string]string
|
||||
}
|
||||
|
||||
func extractBuildInfo(region string, state multistep.StateBag, generatedData *packerbuilderdata.GeneratedData) *BuildInfoTemplate {
|
||||
rawSourceAMI, hasSourceAMI := state.GetOk("source_image")
|
||||
if !hasSourceAMI {
|
||||
return &BuildInfoTemplate{
|
||||
BuildRegion: region,
|
||||
}
|
||||
}
|
||||
|
||||
sourceAMI := rawSourceAMI.(*ec2.Image)
|
||||
sourceAMITags := make(map[string]string, len(sourceAMI.Tags))
|
||||
for _, tag := range sourceAMI.Tags {
|
||||
sourceAMITags[aws.StringValue(tag.Key)] = aws.StringValue(tag.Value)
|
||||
}
|
||||
|
||||
buildInfoTemplate := &BuildInfoTemplate{
|
||||
BuildRegion: region,
|
||||
SourceAMI: aws.StringValue(sourceAMI.ImageId),
|
||||
SourceAMICreationDate: aws.StringValue(sourceAMI.CreationDate),
|
||||
SourceAMIName: aws.StringValue(sourceAMI.Name),
|
||||
SourceAMIOwner: aws.StringValue(sourceAMI.OwnerId),
|
||||
SourceAMIOwnerName: aws.StringValue(sourceAMI.ImageOwnerAlias),
|
||||
SourceAMITags: sourceAMITags,
|
||||
}
|
||||
|
||||
generatedData.Put("BuildRegion", buildInfoTemplate.BuildRegion)
|
||||
generatedData.Put("SourceAMI", buildInfoTemplate.SourceAMI)
|
||||
generatedData.Put("SourceAMICreationDate", buildInfoTemplate.SourceAMICreationDate)
|
||||
generatedData.Put("SourceAMIName", buildInfoTemplate.SourceAMIName)
|
||||
generatedData.Put("SourceAMIOwner", buildInfoTemplate.SourceAMIOwner)
|
||||
generatedData.Put("SourceAMIOwnerName", buildInfoTemplate.SourceAMIOwnerName)
|
||||
|
||||
return buildInfoTemplate
|
||||
}
|
||||
|
||||
func GetGeneratedDataList() []string {
|
||||
return []string{
|
||||
"SourceAMIName",
|
||||
"BuildRegion",
|
||||
"SourceAMI",
|
||||
"SourceAMICreationDate",
|
||||
"SourceAMIOwner",
|
||||
"SourceAMIOwnerName",
|
||||
}
|
||||
}
|
|
@ -1,91 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
)
|
||||
|
||||
func testImage() *ec2.Image {
|
||||
return &ec2.Image{
|
||||
ImageId: aws.String("ami-abcd1234"),
|
||||
CreationDate: aws.String("ami_test_creation_date"),
|
||||
Name: aws.String("ami_test_name"),
|
||||
OwnerId: aws.String("ami_test_owner_id"),
|
||||
ImageOwnerAlias: aws.String("ami_test_owner_alias"),
|
||||
RootDeviceType: aws.String("ebs"),
|
||||
Tags: []*ec2.Tag{
|
||||
{
|
||||
Key: aws.String("key-1"),
|
||||
Value: aws.String("value-1"),
|
||||
},
|
||||
{
|
||||
Key: aws.String("key-2"),
|
||||
Value: aws.String("value-2"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testState() multistep.StateBag {
|
||||
state := new(multistep.BasicStateBag)
|
||||
return state
|
||||
}
|
||||
|
||||
func testGeneratedData(state multistep.StateBag) packerbuilderdata.GeneratedData {
|
||||
generatedData := packerbuilderdata.GeneratedData{State: state}
|
||||
return generatedData
|
||||
}
|
||||
|
||||
func TestInterpolateBuildInfo_extractBuildInfo_noSourceImage(t *testing.T) {
|
||||
state := testState()
|
||||
generatedData := testGeneratedData(state)
|
||||
buildInfo := extractBuildInfo("foo", state, &generatedData)
|
||||
|
||||
expected := BuildInfoTemplate{
|
||||
BuildRegion: "foo",
|
||||
}
|
||||
if !reflect.DeepEqual(*buildInfo, expected) {
|
||||
t.Fatalf("Unexpected BuildInfoTemplate: expected %#v got %#v\n", expected, *buildInfo)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInterpolateBuildInfo_extractBuildInfo_withSourceImage(t *testing.T) {
|
||||
state := testState()
|
||||
state.Put("source_image", testImage())
|
||||
generatedData := testGeneratedData(state)
|
||||
buildInfo := extractBuildInfo("foo", state, &generatedData)
|
||||
|
||||
expected := BuildInfoTemplate{
|
||||
BuildRegion: "foo",
|
||||
SourceAMI: "ami-abcd1234",
|
||||
SourceAMICreationDate: "ami_test_creation_date",
|
||||
SourceAMIName: "ami_test_name",
|
||||
SourceAMIOwner: "ami_test_owner_id",
|
||||
SourceAMIOwnerName: "ami_test_owner_alias",
|
||||
SourceAMITags: map[string]string{
|
||||
"key-1": "value-1",
|
||||
"key-2": "value-2",
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(*buildInfo, expected) {
|
||||
t.Fatalf("Unexpected BuildInfoTemplate: expected %#v got %#v\n", expected, *buildInfo)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInterpolateBuildInfo_extractBuildInfo_GeneratedDataWithSourceImageName(t *testing.T) {
|
||||
state := testState()
|
||||
state.Put("source_image", testImage())
|
||||
generatedData := testGeneratedData(state)
|
||||
extractBuildInfo("foo", state, &generatedData)
|
||||
|
||||
generatedDataState := state.Get("generated_data").(map[string]interface{})
|
||||
|
||||
if generatedDataState["SourceAMIName"] != "ami_test_name" {
|
||||
t.Fatalf("Unexpected state SourceAMIName: expected %#v got %#v\n", "ami_test_name", generatedDataState["SourceAMIName"])
|
||||
}
|
||||
}
|
|
@ -1,58 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
|
||||
)
|
||||
|
||||
func listEC2Regions(ec2conn ec2iface.EC2API) ([]string, error) {
|
||||
var regions []string
|
||||
resultRegions, err := ec2conn.DescribeRegions(nil)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
for _, region := range resultRegions.Regions {
|
||||
regions = append(regions, *region.RegionName)
|
||||
}
|
||||
|
||||
return regions, nil
|
||||
}
|
||||
|
||||
// ValidateRegion returns nil if the regions are valid
// and exist; otherwise an error.
|
||||
// ValidateRegion calls ec2conn.DescribeRegions to get the list of
|
||||
// regions available to this account.
|
||||
func (c *AccessConfig) ValidateRegion(regions ...string) error {
|
||||
ec2conn, err := c.NewEC2Connection()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
validRegions, err := listEC2Regions(ec2conn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var invalidRegions []string
|
||||
for _, region := range regions {
|
||||
if region == "" {
|
||||
continue
|
||||
}
|
||||
found := false
|
||||
for _, validRegion := range validRegions {
|
||||
if region == validRegion {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
invalidRegions = append(invalidRegions, region)
|
||||
}
|
||||
}
|
||||
|
||||
if len(invalidRegions) > 0 {
|
||||
return fmt.Errorf("Invalid region(s): %v, available regions: %v", invalidRegions, validRegions)
|
||||
}
|
||||
return nil
|
||||
}
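// ValidateRegion resolves the regions visible to the account via
// DescribeRegions and rejects any requested name missing from that list. A
// minimal sketch, assuming an AccessConfig that has already been prepared
// with working credentials:

package common

import "log"

// checkRegions is a hypothetical helper that fails fast on mistyped or
// unavailable regions before a build starts.
func checkRegions(access *AccessConfig, regions ...string) {
	if err := access.ValidateRegion(regions...); err != nil {
		log.Fatalf("region validation failed: %s", err)
	}
}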
|
|
@ -1,624 +0,0 @@
|
|||
//go:generate struct-markdown
|
||||
//go:generate mapstructure-to-hcl2 -type AmiFilterOptions,SecurityGroupFilterOptions,SubnetFilterOptions,VpcFilterOptions,PolicyDocument,Statement
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
"github.com/hashicorp/packer-plugin-sdk/uuid"
|
||||
)
|
||||
|
||||
var reShutdownBehavior = regexp.MustCompile("^(stop|terminate)$")
|
||||
|
||||
type SubnetFilterOptions struct {
|
||||
config.NameValueFilter `mapstructure:",squash"`
|
||||
MostFree bool `mapstructure:"most_free"`
|
||||
Random bool `mapstructure:"random"`
|
||||
}
|
||||
|
||||
type VpcFilterOptions struct {
|
||||
config.NameValueFilter `mapstructure:",squash"`
|
||||
}
|
||||
|
||||
type Statement struct {
|
||||
Effect string `mapstructure:"Effect" required:"false"`
|
||||
Action []string `mapstructure:"Action" required:"false"`
|
||||
Resource []string `mapstructure:"Resource" required:"false"`
|
||||
}
|
||||
|
||||
type PolicyDocument struct {
|
||||
Version string `mapstructure:"Version" required:"false"`
|
||||
Statement []Statement `mapstructure:"Statement" required:"false"`
|
||||
}
|
||||
|
||||
type SecurityGroupFilterOptions struct {
|
||||
config.NameValueFilter `mapstructure:",squash"`
|
||||
}
|
||||
|
||||
// RunConfig contains configuration for running an instance from a source
|
||||
// AMI and details on how to access that launched image.
|
||||
type RunConfig struct {
|
||||
// If using a non-default VPC,
|
||||
// public IP addresses are not provided by default. If this is true, your
|
||||
// new instance will get a Public IP. default: false
|
||||
AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address" required:"false"`
|
||||
// Destination availability zone to launch
|
||||
// instance in. Leave this empty to allow Amazon to auto-assign.
|
||||
AvailabilityZone string `mapstructure:"availability_zone" required:"false"`
|
||||
// Requires spot_price to be set. The
|
||||
// required duration for the Spot Instances (also known as Spot blocks). This
|
||||
// value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). You can't
|
||||
// specify an Availability Zone group or a launch group if you specify a
|
||||
// duration.
|
||||
BlockDurationMinutes int64 `mapstructure:"block_duration_minutes" required:"false"`
|
||||
// Packer normally stops the build instance after all provisioners have
|
||||
// run. For Windows instances, it is sometimes desirable to [run
|
||||
// Sysprep](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/Creating_EBSbacked_WinAMI.html)
|
||||
// which will stop the instance for you. If this is set to `true`, Packer
|
||||
// *will not* stop the instance but will assume that you will send the stop
|
||||
// signal yourself through your final provisioner. You can do this with a
|
||||
// [windows-shell provisioner](/docs/provisioners/windows-shell). Note that
|
||||
// Packer will still wait for the instance to be stopped, and failing to
|
||||
// send the stop signal yourself, when you have set this flag to `true`,
|
||||
// will cause a timeout.
|
||||
//
|
||||
// An example of a valid windows shutdown command in a `windows-shell`
|
||||
// provisioner is :
|
||||
// ```shell-session
|
||||
// ec2config.exe -sysprep
|
||||
// ```
|
||||
// or
|
||||
// ```shell-session
|
||||
// "%programfiles%\amazon\ec2configservice\"ec2config.exe -sysprep""
|
||||
// ```
|
||||
// -> Note: The double quotation marks in the command are not required if
|
||||
// your CMD shell is already in the
|
||||
// `C:\Program Files\Amazon\EC2ConfigService\` directory.
|
||||
DisableStopInstance bool `mapstructure:"disable_stop_instance" required:"false"`
|
||||
// Mark instance as [EBS
|
||||
// Optimized](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
|
||||
// Default `false`.
|
||||
EbsOptimized bool `mapstructure:"ebs_optimized" required:"false"`
|
||||
// Enabling T2 Unlimited allows the source instance to burst additional CPU
|
||||
// beyond its available [CPU
|
||||
// Credits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
|
||||
// for as long as the demand exists. This is in contrast to the standard
|
||||
// configuration that only allows an instance to consume up to its
|
||||
// available CPU Credits. See the AWS documentation for [T2
|
||||
// Unlimited](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
|
||||
// and the **T2 Unlimited Pricing** section of the [Amazon EC2 On-Demand
|
||||
// Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for
|
||||
// more information. By default this option is disabled and Packer will set
|
||||
// up a [T2
|
||||
// Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
|
||||
// instance instead.
|
||||
//
|
||||
// To use T2 Unlimited you must use a T2 instance type, e.g. `t2.micro`.
|
||||
// Additionally, T2 Unlimited cannot be used in conjunction with Spot
|
||||
// Instances, e.g. when the `spot_price` option has been configured.
|
||||
// Attempting to do so will cause an error.
|
||||
//
|
||||
// !> **Warning!** Additional costs may be incurred by enabling T2
|
||||
// Unlimited - even for instances that would usually qualify for the
|
||||
// [AWS Free Tier](https://aws.amazon.com/free/).
|
||||
EnableT2Unlimited bool `mapstructure:"enable_t2_unlimited" required:"false"`
|
||||
// The name of an [IAM instance
|
||||
// profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
|
||||
// to launch the EC2 instance with.
|
||||
IamInstanceProfile string `mapstructure:"iam_instance_profile" required:"false"`
|
||||
// Whether or not to check if the IAM instance profile exists. Defaults to false
|
||||
SkipProfileValidation bool `mapstructure:"skip_profile_validation" required:"false"`
|
||||
// Temporary IAM instance profile policy document
|
||||
// If IamInstanceProfile is specified it will be used instead. Example:
|
||||
//
|
||||
// ```json
|
||||
//{
|
||||
// "Version": "2012-10-17",
|
||||
// "Statement": [
|
||||
// {
|
||||
// "Action": [
|
||||
// "logs:*"
|
||||
// ],
|
||||
// "Effect": "Allow",
|
||||
// "Resource": "*"
|
||||
// }
|
||||
// ]
|
||||
//}
|
||||
// ```
|
||||
//
|
||||
TemporaryIamInstanceProfilePolicyDocument *PolicyDocument `mapstructure:"temporary_iam_instance_profile_policy_document" required:"false"`
|
||||
// Automatically terminate instances on
|
||||
// shutdown in case Packer exits ungracefully. Possible values are stop and
|
||||
// terminate. Defaults to stop.
|
||||
InstanceInitiatedShutdownBehavior string `mapstructure:"shutdown_behavior" required:"false"`
|
||||
// The EC2 instance type to use while building the
|
||||
// AMI, such as t2.small.
|
||||
InstanceType string `mapstructure:"instance_type" required:"true"`
|
||||
// Filters used to populate the `security_group_ids` field. JSON Example:
|
||||
//
|
||||
// ```json
|
||||
// {
|
||||
// "security_group_filter": {
|
||||
// "filters": {
|
||||
// "tag:Class": "packer"
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// HCL2 Example:
|
||||
//
|
||||
// ```hcl
|
||||
// security_group_filter {
|
||||
// filters = {
|
||||
// "tag:Class": "packer"
|
||||
// }
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// This selects the SGs with tag `Class` with the value `packer`.
|
||||
//
|
||||
// - `filters` (map of strings) - filters used to select a
|
||||
// `security_group_ids`. Any filter described in the docs for
|
||||
// [DescribeSecurityGroups](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
|
||||
// is valid.
|
||||
//
|
||||
// `security_group_ids` takes precedence over this.
|
||||
SecurityGroupFilter SecurityGroupFilterOptions `mapstructure:"security_group_filter" required:"false"`
|
||||
// Key/value pair tags to apply to the instance that is *launched*
|
||||
// to create the EBS volumes. This is a [template
|
||||
// engine](/docs/templates/legacy_json_templates/engine), see [Build template
|
||||
// data](#build-template-data) for more information.
|
||||
RunTags map[string]string `mapstructure:"run_tags" required:"false"`
|
||||
// Same as [`run_tags`](#run_tags) but defined as a singular repeatable
|
||||
// block containing a `key` and a `value` field. In HCL2 mode the
|
||||
// [`dynamic_block`](/docs/templates/hcl_templates/expressions#dynamic-blocks)
|
||||
// will allow you to create those programmatically.
|
||||
RunTag config.KeyValues `mapstructure:"run_tag" required:"false"`
|
||||
// The ID (not the name) of the security
|
||||
// group to assign to the instance. By default this is not set and Packer will
|
||||
// automatically create a new temporary security group to allow SSH access.
|
||||
// Note that if this is specified, you must be sure the security group allows
|
||||
// access to the ssh_port given below.
|
||||
SecurityGroupId string `mapstructure:"security_group_id" required:"false"`
|
||||
// A list of security groups as
|
||||
// described above. Note that if this is specified, you must omit the
|
||||
// security_group_id.
|
||||
SecurityGroupIds []string `mapstructure:"security_group_ids" required:"false"`
|
||||
// The source AMI whose root volume will be copied and
|
||||
// provisioned on the currently running instance. This must be an EBS-backed
|
||||
// AMI with a root volume snapshot that you have access to.
|
||||
SourceAmi string `mapstructure:"source_ami" required:"true"`
|
||||
// Filters used to populate the `source_ami`
|
||||
// field. JSON Example:
|
||||
//
|
||||
// ```json
|
||||
// "builders" [
|
||||
// {
|
||||
// "type": "amazon-ebs",
|
||||
// "source_ami_filter": {
|
||||
// "filters": {
|
||||
// "virtualization-type": "hvm",
|
||||
// "name": "ubuntu/images/\*ubuntu-xenial-16.04-amd64-server-\*",
|
||||
// "root-device-type": "ebs"
|
||||
// },
|
||||
// "owners": ["099720109477"],
|
||||
// "most_recent": true
|
||||
// }
|
||||
// }
|
||||
// ]
|
||||
// ```
|
||||
// HCL2 example:
|
||||
//
|
||||
// ```hcl
|
||||
// source "amazon-ebs" "basic-example" {
|
||||
// source_ami_filter {
|
||||
// filters = {
|
||||
// virtualization-type = "hvm"
|
||||
// name = "ubuntu/images/\*ubuntu-xenial-16.04-amd64-server-\*"
|
||||
// root-device-type = "ebs"
|
||||
// }
|
||||
// owners = ["099720109477"]
|
||||
// most_recent = true
|
||||
// }
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. NOTE:
|
||||
// This will fail unless *exactly* one AMI is returned. In the above example,
|
||||
// `most_recent` will cause this to succeed by selecting the newest image.
|
||||
//
|
||||
// - `filters` (map of strings) - filters used to select a `source_ami`.
|
||||
// NOTE: This will fail unless *exactly* one AMI is returned. Any filter
|
||||
// described in the docs for
|
||||
// [DescribeImages](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
|
||||
// is valid.
|
||||
//
|
||||
// - `owners` (array of strings) - Filters the images by their owner. You
|
||||
// may specify one or more AWS account IDs, "self" (which will use the
|
||||
// account whose credentials you are using to run Packer), or an AWS owner
|
||||
// alias: for example, `amazon`, `aws-marketplace`, or `microsoft`. This
|
||||
// option is required for security reasons.
|
||||
//
|
||||
// - `most_recent` (boolean) - Selects the newest created image when true.
|
||||
// This is most useful for selecting a daily distro build.
|
||||
//
|
||||
// You may set this in place of `source_ami` or in conjunction with it. If you
|
||||
// set this in conjunction with `source_ami`, the `source_ami` will be added
|
||||
// to the filter. The provided `source_ami` must meet all of the filtering
|
||||
// criteria provided in `source_ami_filter`; this pins the AMI returned by the
|
||||
// filter, but will cause Packer to fail if the `source_ami` does not exist.
|
||||
SourceAmiFilter AmiFilterOptions `mapstructure:"source_ami_filter" required:"false"`
|
||||
// a list of acceptable instance
|
||||
// types to run your build on. We will request a spot instance using the max
|
||||
// price of spot_price and the allocation strategy of "lowest price".
|
||||
// Your instance will be launched on an instance type of the lowest available
|
||||
// price that you have in your list. This is used in place of instance_type.
|
||||
// You may only set either spot_instance_types or instance_type, not both.
|
||||
// This feature exists to help prevent situations where a Packer build fails
|
||||
// because a particular availability zone does not have capacity for the
|
||||
// specific instance_type requested in instance_type.
|
||||
SpotInstanceTypes []string `mapstructure:"spot_instance_types" required:"false"`
|
||||
// With Spot Instances, you pay the Spot price that's in effect for the
|
||||
// time period your instances are running. Spot Instance prices are set by
|
||||
// Amazon EC2 and adjust gradually based on long-term trends in supply and
|
||||
// demand for Spot Instance capacity.
|
||||
//
|
||||
// When this field is set, it represents the maximum hourly price you are
|
||||
// willing to pay for a spot instance. If you do not set this value, it
|
||||
// defaults to a maximum price equal to the on demand price of the
|
||||
// instance. In the situation where the current Amazon-set spot price
|
||||
// exceeds the value set in this field, Packer will not launch an instance
|
||||
// and the build will error. In the situation where the Amazon-set spot
|
||||
// price is less than the value set in this field, Packer will launch and
|
||||
// you will pay the Amazon-set spot price, not this maximum value.
|
||||
// For more information, see the Amazon docs on
|
||||
// [spot pricing](https://aws.amazon.com/ec2/spot/pricing/).
|
||||
SpotPrice string `mapstructure:"spot_price" required:"false"`
|
||||
// Required if spot_price is set to
|
||||
// auto. This tells Packer what sort of AMI you're launching to find the
|
||||
// best spot price. This must be one of: Linux/UNIX, SUSE Linux,
|
||||
// Windows, Linux/UNIX (Amazon VPC), SUSE Linux (Amazon VPC),
|
||||
// Windows (Amazon VPC)
|
||||
SpotPriceAutoProduct string `mapstructure:"spot_price_auto_product" required:"false" undocumented:"true"`
|
||||
// Requires spot_price to be set. Key/value pair tags to apply tags to the
|
||||
// spot request that is issued.
|
||||
SpotTags map[string]string `mapstructure:"spot_tags" required:"false"`
|
||||
// Same as [`spot_tags`](#spot_tags) but defined as a singular repeatable block
|
||||
// containing a `key` and a `value` field. In HCL2 mode the
|
||||
// [`dynamic_block`](/docs/templates/hcl_templates/expressions#dynamic-blocks)
|
||||
// will allow you to create those programmatically.
|
||||
SpotTag config.KeyValues `mapstructure:"spot_tag" required:"false"`
|
||||
// Filters used to populate the `subnet_id` field.
|
||||
// JSON Example:
|
||||
//
|
||||
// ```json
|
||||
// "builders" [
|
||||
// {
|
||||
// "type": "amazon-ebs",
|
||||
// "subnet_filter": {
|
||||
// "filters": {
|
||||
// "tag:Class": "build"
|
||||
// },
|
||||
// "most_free": true,
|
||||
// "random": false
|
||||
// }
|
||||
// }
|
||||
// ]
|
||||
// ```
|
||||
// HCL2 example:
|
||||
//
|
||||
// ```hcl
|
||||
// source "amazon-ebs" "basic-example" {
|
||||
// subnet_filter {
|
||||
// filters = {
|
||||
// "tag:Class": "build"
|
||||
// }
|
||||
// most_free = true
|
||||
// random = false
|
||||
// }
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// This selects the Subnet with tag `Class` with the value `build`, which has
|
||||
// the most free IP addresses. NOTE: This will fail unless *exactly* one
|
||||
// Subnet is returned. By using `most_free` or `random` one will be selected
|
||||
// from those matching the filter.
|
||||
//
|
||||
// - `filters` (map of strings) - filters used to select a `subnet_id`.
|
||||
// NOTE: This will fail unless *exactly* one Subnet is returned. Any
|
||||
// filter described in the docs for
|
||||
// [DescribeSubnets](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html)
|
||||
// is valid.
|
||||
//
|
||||
// - `most_free` (boolean) - The Subnet with the most free IPv4 addresses
|
||||
// will be used if multiple Subnets match the filter.
|
||||
//
|
||||
// - `random` (boolean) - A random Subnet will be used if multiple Subnets
|
||||
// match the filter. `most_free` has precedence over this.
|
||||
//
|
||||
// `subnet_id` takes precedence over this.
|
||||
SubnetFilter SubnetFilterOptions `mapstructure:"subnet_filter" required:"false"`
|
||||
// If using VPC, the ID of the subnet, such as
|
||||
// subnet-12345def, where Packer will launch the EC2 instance. This field is
|
||||
// required if you are using a non-default VPC.
|
||||
SubnetId string `mapstructure:"subnet_id" required:"false"`
|
||||
// [Tenancy](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-instance.html) used
|
||||
// when Packer launches the EC2 instance, allowing it to be launched on dedicated hardware.
|
||||
//
|
||||
// The default is "default", meaning shared tenancy. Allowed values are "default",
|
||||
// "dedicated" and "host".
|
||||
Tenancy string `mapstructure:"tenancy" required:"false"`
|
||||
// A list of IPv4 CIDR blocks to be authorized access to the instance, when
|
||||
// packer is creating a temporary security group.
|
||||
//
|
||||
// The default is [`0.0.0.0/0`] (i.e., allow any IPv4 source). This is only
|
||||
// used when `security_group_id` or `security_group_ids` is not specified.
|
||||
TemporarySGSourceCidrs []string `mapstructure:"temporary_security_group_source_cidrs" required:"false"`
|
||||
// User data to apply when launching the instance. Note
|
||||
// that you need to be careful about escaping characters due to the templates
|
||||
// being JSON. It is often more convenient to use user_data_file, instead.
|
||||
// Packer will not automatically wait for a user script to finish before
|
||||
// shutting down the instance; this must be handled in a provisioner.
|
||||
UserData string `mapstructure:"user_data" required:"false"`
|
||||
// Path to a file that will be used for the user
|
||||
// data when launching the instance.
|
||||
UserDataFile string `mapstructure:"user_data_file" required:"false"`
|
||||
// Filters used to populate the `vpc_id` field.
|
||||
// JSON Example:
|
||||
//
|
||||
// ```json
|
||||
// "builders" [
|
||||
// {
|
||||
// "type": "amazon-ebs",
|
||||
// "vpc_filter": {
|
||||
// "filters": {
|
||||
// "tag:Class": "build",
|
||||
// "isDefault": "false",
|
||||
// "cidr": "/24"
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// ]
|
||||
// ```
|
||||
// HCL2 example:
|
||||
//
|
||||
// ```hcl
|
||||
// source "amazon-ebs" "basic-example" {
|
||||
// vpc_filter {
|
||||
// filters = {
|
||||
// "tag:Class": "build",
|
||||
// "isDefault": "false",
|
||||
// "cidr": "/24"
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// This selects the VPC with tag `Class` with the value `build`, which is not
|
||||
// the default VPC, and has an IPv4 CIDR block of `/24`. NOTE: This will fail
|
||||
// unless *exactly* one VPC is returned.
|
||||
//
|
||||
// - `filters` (map of strings) - filters used to select a `vpc_id`. NOTE:
|
||||
// This will fail unless *exactly* one VPC is returned. Any filter
|
||||
// described in the docs for
|
||||
// [DescribeVpcs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html)
|
||||
// is valid.
|
||||
//
|
||||
// `vpc_id` takes precedence over this.
|
||||
VpcFilter VpcFilterOptions `mapstructure:"vpc_filter" required:"false"`
|
||||
// If launching into a VPC subnet, Packer needs the VPC ID
|
||||
// in order to create a temporary security group within the VPC. Requires
|
||||
// subnet_id to be set. If this field is left blank, Packer will try to get
|
||||
// the VPC ID from the subnet_id.
|
||||
VpcId string `mapstructure:"vpc_id" required:"false"`
|
||||
// The timeout for waiting for a Windows
|
||||
// password for Windows instances. Defaults to 20 minutes. Example value:
|
||||
// 10m
|
||||
WindowsPasswordTimeout time.Duration `mapstructure:"windows_password_timeout" required:"false"`
|
||||
|
||||
// Communicator settings
|
||||
Comm communicator.Config `mapstructure:",squash"`
|
||||
|
||||
// One of `public_ip`, `private_ip`, `public_dns`, `private_dns` or `session_manager`.
|
||||
// If set, either the public IP address, private IP address, public DNS name
|
||||
// or private DNS name will be used as the host for SSH. The default behaviour
|
||||
// if inside a VPC is to use the public IP address if available, otherwise
|
||||
// the private IP address will be used. If not in a VPC the public DNS name
|
||||
// will be used. Also works for WinRM.
|
||||
//
|
||||
// Where Packer is configured for an outbound proxy but WinRM traffic
|
||||
// should be direct, `ssh_interface` must be set to `private_dns` and
|
||||
// `<region>.compute.internal` included in the `NO_PROXY` environment
|
||||
// variable.
|
||||
//
|
||||
// When using `session_manager` the machine running Packer must have
|
||||
// the AWS Session Manager Plugin installed and within the users' system path.
|
||||
// Connectivity via the `session_manager` interface establishes a secure tunnel
|
||||
// between the local host and the remote host on an available local port to the specified `ssh_port`.
|
||||
// See [Session Manager Connections](#session-manager-connections) for more information.
|
||||
// - Session manager connectivity is currently only implemented for the SSH communicator, not the WinRM communicator.
|
||||
// - Upon termination the secure tunnel will be terminated automatically, if however there is a failure in
|
||||
// terminating the tunnel it will automatically terminate itself after 20 minutes of inactivity.
|
||||
SSHInterface string `mapstructure:"ssh_interface"`
|
||||
|
||||
// The time to wait before establishing the Session Manager session.
|
||||
// The value of this should be a duration. Examples are
|
||||
// `5s` and `1m30s` which will cause Packer to wait five seconds and one
|
||||
// minute 30 seconds, respectively. If not set, defaults to 10 seconds.
|
||||
// This option is useful when the remote port takes longer to become available.
|
||||
PauseBeforeSSM time.Duration `mapstructure:"pause_before_ssm"`
|
||||
|
||||
// Which port to connect the local end of the session tunnel to. If
|
||||
// left blank, Packer will choose a port for you from available ports.
|
||||
// This option is only used when `ssh_interface` is set `session_manager`.
|
||||
SessionManagerPort int `mapstructure:"session_manager_port"`
|
||||
}
|
||||
|
||||
func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
|
||||
// If we are not given an explicit ssh_keypair_name or
|
||||
// ssh_private_key_file, then create a temporary one, but only if the
|
||||
// temporary_key_pair_name has not been provided and we are not using
|
||||
// ssh_password.
|
||||
if c.Comm.SSHKeyPairName == "" && c.Comm.SSHTemporaryKeyPairName == "" &&
|
||||
c.Comm.SSHPrivateKeyFile == "" && c.Comm.SSHPassword == "" {
|
||||
|
||||
c.Comm.SSHTemporaryKeyPairName = fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
|
||||
}
|
||||
|
||||
if c.WindowsPasswordTimeout == 0 {
|
||||
c.WindowsPasswordTimeout = 20 * time.Minute
|
||||
}
|
||||
|
||||
if c.RunTags == nil {
|
||||
c.RunTags = make(map[string]string)
|
||||
}
|
||||
|
||||
// Validation
|
||||
errs := c.Comm.Prepare(ctx)
|
||||
|
||||
// Copy singular tag maps
|
||||
errs = append(errs, c.RunTag.CopyOn(&c.RunTags)...)
|
||||
errs = append(errs, c.SpotTag.CopyOn(&c.SpotTags)...)
|
||||
|
||||
for _, preparer := range []interface{ Prepare() []error }{
|
||||
&c.SecurityGroupFilter,
|
||||
&c.SubnetFilter,
|
||||
&c.VpcFilter,
|
||||
} {
|
||||
errs = append(errs, preparer.Prepare()...)
|
||||
}
|
||||
|
||||
// Validating ssh_interface
|
||||
if c.SSHInterface != "public_ip" &&
|
||||
c.SSHInterface != "private_ip" &&
|
||||
c.SSHInterface != "public_dns" &&
|
||||
c.SSHInterface != "private_dns" &&
|
||||
c.SSHInterface != "session_manager" &&
|
||||
c.SSHInterface != "" {
|
||||
errs = append(errs, fmt.Errorf("Unknown interface type: %s", c.SSHInterface))
|
||||
}
|
||||
|
||||
// Connectivity via Session Manager has a few requirements
|
||||
if c.SSHInterface == "session_manager" {
|
||||
if c.Comm.Type == "winrm" {
|
||||
msg := fmt.Errorf(`session_manager connectivity is not supported with the "winrm" communicator; please use "ssh"`)
|
||||
errs = append(errs, msg)
|
||||
}
|
||||
|
||||
if c.IamInstanceProfile == "" && c.TemporaryIamInstanceProfilePolicyDocument == nil {
|
||||
msg := fmt.Errorf(`no iam_instance_profile defined; session_manager connectivity requires a valid instance profile with AmazonSSMManagedInstanceCore permissions. Alternatively a temporary_iam_instance_profile_policy_document can be used.`)
|
||||
errs = append(errs, msg)
|
||||
}
|
||||
}
|
||||
|
||||
if c.Comm.SSHKeyPairName != "" {
|
||||
if c.Comm.Type == "winrm" && c.Comm.WinRMPassword == "" && c.Comm.SSHPrivateKeyFile == "" {
|
||||
errs = append(errs, fmt.Errorf("ssh_private_key_file must be provided to retrieve the winrm password when using ssh_keypair_name."))
|
||||
} else if c.Comm.SSHPrivateKeyFile == "" && !c.Comm.SSHAgentAuth {
|
||||
errs = append(errs, fmt.Errorf("ssh_private_key_file must be provided or ssh_agent_auth enabled when ssh_keypair_name is specified."))
|
||||
}
|
||||
}
|
||||
|
||||
if c.SourceAmi == "" && c.SourceAmiFilter.Empty() {
|
||||
errs = append(errs, fmt.Errorf("A source_ami or source_ami_filter must be specified"))
|
||||
}
|
||||
|
||||
if c.SourceAmi == "" && c.SourceAmiFilter.NoOwner() {
|
||||
errs = append(errs, fmt.Errorf("For security reasons, your source AMI filter must declare an owner."))
|
||||
}
|
||||
|
||||
if c.InstanceType == "" && len(c.SpotInstanceTypes) == 0 {
|
||||
errs = append(errs, fmt.Errorf("either instance_type or "+
|
||||
"spot_instance_types must be specified"))
|
||||
}
|
||||
|
||||
if c.InstanceType != "" && len(c.SpotInstanceTypes) > 0 {
|
||||
errs = append(errs, fmt.Errorf("either instance_type or "+
|
||||
"spot_instance_types must be specified, not both"))
|
||||
}
|
||||
|
||||
if c.BlockDurationMinutes%60 != 0 {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"block_duration_minutes must be multiple of 60"))
|
||||
}
|
||||
|
||||
if c.SpotTags != nil {
|
||||
if c.SpotPrice == "" || c.SpotPrice == "0" {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"spot_tags should not be set when not requesting a spot instance"))
|
||||
}
|
||||
}
|
||||
|
||||
if c.UserData != "" && c.UserDataFile != "" {
|
||||
errs = append(errs, fmt.Errorf("Only one of user_data or user_data_file can be specified."))
|
||||
} else if c.UserDataFile != "" {
|
||||
if _, err := os.Stat(c.UserDataFile); err != nil {
|
||||
errs = append(errs, fmt.Errorf("user_data_file not found: %s", c.UserDataFile))
|
||||
}
|
||||
}
|
||||
|
||||
if c.SecurityGroupId != "" {
|
||||
if len(c.SecurityGroupIds) > 0 {
|
||||
errs = append(errs, fmt.Errorf("Only one of security_group_id or security_group_ids can be specified."))
|
||||
} else {
|
||||
c.SecurityGroupIds = []string{c.SecurityGroupId}
|
||||
c.SecurityGroupId = ""
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.TemporarySGSourceCidrs) == 0 {
|
||||
c.TemporarySGSourceCidrs = []string{"0.0.0.0/0"}
|
||||
} else {
|
||||
for _, cidr := range c.TemporarySGSourceCidrs {
|
||||
if _, _, err := net.ParseCIDR(cidr); err != nil {
|
||||
errs = append(errs, fmt.Errorf("Error parsing CIDR in temporary_security_group_source_cidrs: %s", err.Error()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if c.InstanceInitiatedShutdownBehavior == "" {
|
||||
c.InstanceInitiatedShutdownBehavior = "stop"
|
||||
} else if !reShutdownBehavior.MatchString(c.InstanceInitiatedShutdownBehavior) {
|
||||
errs = append(errs, fmt.Errorf("shutdown_behavior only accepts 'stop' or 'terminate' values."))
|
||||
}
|
||||
|
||||
if c.EnableT2Unlimited {
|
||||
if c.SpotPrice != "" {
|
||||
errs = append(errs, fmt.Errorf("Error: T2 Unlimited cannot be used in conjunction with Spot Instances"))
|
||||
}
|
||||
firstDotIndex := strings.Index(c.InstanceType, ".")
|
||||
if firstDotIndex == -1 {
|
||||
errs = append(errs, fmt.Errorf("Error determining main Instance Type from: %s", c.InstanceType))
|
||||
} else if c.InstanceType[0:firstDotIndex] != "t2" {
|
||||
errs = append(errs, fmt.Errorf("Error: T2 Unlimited enabled with a non-T2 Instance Type: %s", c.InstanceType))
|
||||
}
|
||||
}
|
||||
|
||||
if c.Tenancy != "" &&
|
||||
c.Tenancy != "default" &&
|
||||
c.Tenancy != "dedicated" &&
|
||||
c.Tenancy != "host" {
|
||||
errs = append(errs, fmt.Errorf("Error: Unknown tenancy type %s", c.Tenancy))
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func (c *RunConfig) IsSpotInstance() bool {
|
||||
return c.SpotPrice != "" && c.SpotPrice != "0"
|
||||
}
|
||||
|
||||
func (c *RunConfig) SSMAgentEnabled() bool {
|
||||
hasIamInstanceProfile := c.IamInstanceProfile != "" || c.TemporaryIamInstanceProfilePolicyDocument != nil
|
||||
return c.SSHInterface == "session_manager" && hasIamInstanceProfile
|
||||
}
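// The two predicates above gate launch behaviour: IsSpotInstance is true only
// for a non-empty, non-zero spot_price, and SSMAgentEnabled requires the
// session_manager interface plus some instance profile. A hedged sketch of
// reading them together; the profile name is a placeholder:

package common

import "fmt"

// describeRunMode is a hypothetical helper summarising how a RunConfig will launch.
func describeRunMode(c *RunConfig) string {
	switch {
	case c.IsSpotInstance() && c.SSMAgentEnabled():
		return "spot instance reached via Session Manager"
	case c.IsSpotInstance():
		return "spot instance"
	case c.SSMAgentEnabled():
		return "on-demand instance reached via Session Manager"
	default:
		return "on-demand instance"
	}
}

func exampleRunMode() {
	c := &RunConfig{
		SSHInterface:       "session_manager",
		IamInstanceProfile: "packer-ssm-profile", // placeholder profile name
	}
	fmt.Println(describeRunMode(c)) // on-demand instance reached via Session Manager
}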
|
|
@ -1,167 +0,0 @@
|
|||
// Code generated by "mapstructure-to-hcl2 -type AmiFilterOptions,SecurityGroupFilterOptions,SubnetFilterOptions,VpcFilterOptions,PolicyDocument,Statement"; DO NOT EDIT.
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// FlatAmiFilterOptions is an auto-generated flat version of AmiFilterOptions.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatAmiFilterOptions struct {
|
||||
Filters map[string]string `mapstructure:"filters" cty:"filters" hcl:"filters"`
|
||||
Owners []string `mapstructure:"owners" cty:"owners" hcl:"owners"`
|
||||
MostRecent *bool `mapstructure:"most_recent" cty:"most_recent" hcl:"most_recent"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatAmiFilterOptions.
|
||||
// FlatAmiFilterOptions is an auto-generated flat version of AmiFilterOptions.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*AmiFilterOptions) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatAmiFilterOptions)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a AmiFilterOptions.
|
||||
// This spec is used by HCL to read the fields of AmiFilterOptions.
|
||||
// The decoded values from this spec will then be applied to a FlatAmiFilterOptions.
|
||||
func (*FlatAmiFilterOptions) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"filters": &hcldec.AttrSpec{Name: "filters", Type: cty.Map(cty.String), Required: false},
|
||||
"owners": &hcldec.AttrSpec{Name: "owners", Type: cty.List(cty.String), Required: false},
|
||||
"most_recent": &hcldec.AttrSpec{Name: "most_recent", Type: cty.Bool, Required: false},
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// FlatPolicyDocument is an auto-generated flat version of PolicyDocument.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatPolicyDocument struct {
|
||||
Version *string `mapstructure:"Version" required:"false" cty:"Version" hcl:"Version"`
|
||||
Statement []FlatStatement `mapstructure:"Statement" required:"false" cty:"Statement" hcl:"Statement"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatPolicyDocument.
|
||||
// FlatPolicyDocument is an auto-generated flat version of PolicyDocument.
|
||||
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*PolicyDocument) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatPolicyDocument)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a PolicyDocument.
|
||||
// This spec is used by HCL to read the fields of PolicyDocument.
|
||||
// The decoded values from this spec will then be applied to a FlatPolicyDocument.
|
||||
func (*FlatPolicyDocument) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"Version": &hcldec.AttrSpec{Name: "Version", Type: cty.String, Required: false},
|
||||
"Statement": &hcldec.BlockListSpec{TypeName: "Statement", Nested: hcldec.ObjectSpec((*FlatStatement)(nil).HCL2Spec())},
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// FlatSecurityGroupFilterOptions is an auto-generated flat version of SecurityGroupFilterOptions.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatSecurityGroupFilterOptions struct {
|
||||
Filters map[string]string `cty:"filters" hcl:"filters"`
|
||||
Filter []config.FlatNameValue `cty:"filter" hcl:"filter"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatSecurityGroupFilterOptions.
|
||||
// FlatSecurityGroupFilterOptions is an auto-generated flat version of SecurityGroupFilterOptions.
|
||||
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*SecurityGroupFilterOptions) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatSecurityGroupFilterOptions)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a SecurityGroupFilterOptions.
|
||||
// This spec is used by HCL to read the fields of SecurityGroupFilterOptions.
|
||||
// The decoded values from this spec will then be applied to a FlatSecurityGroupFilterOptions.
|
||||
func (*FlatSecurityGroupFilterOptions) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"filters": &hcldec.AttrSpec{Name: "filters", Type: cty.Map(cty.String), Required: false},
|
||||
"filter": &hcldec.BlockListSpec{TypeName: "filter", Nested: hcldec.ObjectSpec((*config.FlatNameValue)(nil).HCL2Spec())},
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// FlatStatement is an auto-generated flat version of Statement.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatStatement struct {
|
||||
Effect *string `mapstructure:"Effect" required:"false" cty:"Effect" hcl:"Effect"`
|
||||
Action []string `mapstructure:"Action" required:"false" cty:"Action" hcl:"Action"`
|
||||
Resource []string `mapstructure:"Resource" required:"false" cty:"Resource" hcl:"Resource"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatStatement.
|
||||
// FlatStatement is an auto-generated flat version of Statement.
|
||||
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*Statement) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatStatement)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a Statement.
|
||||
// This spec is used by HCL to read the fields of Statement.
|
||||
// The decoded values from this spec will then be applied to a FlatStatement.
|
||||
func (*FlatStatement) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"Effect": &hcldec.AttrSpec{Name: "Effect", Type: cty.String, Required: false},
|
||||
"Action": &hcldec.AttrSpec{Name: "Action", Type: cty.List(cty.String), Required: false},
|
||||
"Resource": &hcldec.AttrSpec{Name: "Resource", Type: cty.List(cty.String), Required: false},
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// FlatSubnetFilterOptions is an auto-generated flat version of SubnetFilterOptions.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatSubnetFilterOptions struct {
|
||||
Filters map[string]string `cty:"filters" hcl:"filters"`
|
||||
Filter []config.FlatNameValue `cty:"filter" hcl:"filter"`
|
||||
MostFree *bool `mapstructure:"most_free" cty:"most_free" hcl:"most_free"`
|
||||
Random *bool `mapstructure:"random" cty:"random" hcl:"random"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatSubnetFilterOptions.
|
||||
// FlatSubnetFilterOptions is an auto-generated flat version of SubnetFilterOptions.
|
||||
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*SubnetFilterOptions) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatSubnetFilterOptions)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a SubnetFilterOptions.
|
||||
// This spec is used by HCL to read the fields of SubnetFilterOptions.
|
||||
// The decoded values from this spec will then be applied to a FlatSubnetFilterOptions.
|
||||
func (*FlatSubnetFilterOptions) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"filters": &hcldec.AttrSpec{Name: "filters", Type: cty.Map(cty.String), Required: false},
|
||||
"filter": &hcldec.BlockListSpec{TypeName: "filter", Nested: hcldec.ObjectSpec((*config.FlatNameValue)(nil).HCL2Spec())},
|
||||
"most_free": &hcldec.AttrSpec{Name: "most_free", Type: cty.Bool, Required: false},
|
||||
"random": &hcldec.AttrSpec{Name: "random", Type: cty.Bool, Required: false},
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// FlatVpcFilterOptions is an auto-generated flat version of VpcFilterOptions.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatVpcFilterOptions struct {
|
||||
Filters map[string]string `cty:"filters" hcl:"filters"`
|
||||
Filter []config.FlatNameValue `cty:"filter" hcl:"filter"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatVpcFilterOptions.
|
||||
// FlatVpcFilterOptions is an auto-generated flat version of VpcFilterOptions.
|
||||
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*VpcFilterOptions) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatVpcFilterOptions)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a VpcFilterOptions.
|
||||
// This spec is used by HCL to read the fields of VpcFilterOptions.
|
||||
// The decoded values from this spec will then be applied to a FlatVpcFilterOptions.
|
||||
func (*FlatVpcFilterOptions) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"filters": &hcldec.AttrSpec{Name: "filters", Type: cty.Map(cty.String), Required: false},
|
||||
"filter": &hcldec.BlockListSpec{TypeName: "filter", Nested: hcldec.ObjectSpec((*config.FlatNameValue)(nil).HCL2Spec())},
|
||||
}
|
||||
return s
|
||||
}
|
|
@@ -1,251 +0,0 @@
package common
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Clear out the AWS access key env vars so they don't
|
||||
// affect our tests.
|
||||
os.Setenv("AWS_ACCESS_KEY_ID", "")
|
||||
os.Setenv("AWS_ACCESS_KEY", "")
|
||||
os.Setenv("AWS_SECRET_ACCESS_KEY", "")
|
||||
os.Setenv("AWS_SECRET_KEY", "")
|
||||
}
|
||||
|
||||
func testConfig() *RunConfig {
|
||||
return &RunConfig{
|
||||
SourceAmi: "abcd",
|
||||
InstanceType: "m1.small",
|
||||
|
||||
Comm: communicator.Config{
|
||||
SSH: communicator.SSH{
|
||||
SSHUsername: "foo",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testConfigFilter() *RunConfig {
|
||||
config := testConfig()
|
||||
config.SourceAmi = ""
|
||||
config.SourceAmiFilter = AmiFilterOptions{}
|
||||
return config
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare(t *testing.T) {
|
||||
c := testConfig()
|
||||
err := c.Prepare(nil)
|
||||
if len(err) > 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_InstanceType(t *testing.T) {
|
||||
c := testConfig()
|
||||
c.InstanceType = ""
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("Should error if an instance_type is not specified")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_SourceAmi(t *testing.T) {
|
||||
c := testConfig()
|
||||
c.SourceAmi = ""
|
||||
if err := c.Prepare(nil); len(err) != 2 {
|
||||
t.Fatalf("Should error if a source_ami (or source_ami_filter) is not specified")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_SourceAmiFilterBlank(t *testing.T) {
|
||||
c := testConfigFilter()
|
||||
if err := c.Prepare(nil); len(err) != 2 {
|
||||
t.Fatalf("Should error if source_ami_filter is empty or not specified (and source_ami is not specified)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_SourceAmiFilterOwnersBlank(t *testing.T) {
|
||||
c := testConfigFilter()
|
||||
filter_key := "name"
|
||||
filter_value := "foo"
|
||||
c.SourceAmiFilter.Filters = map[string]string{filter_key: filter_value}
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("Should error if Owners is not specified)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_SourceAmiFilterGood(t *testing.T) {
|
||||
c := testConfigFilter()
|
||||
owner := "123"
|
||||
filter_key := "name"
|
||||
filter_value := "foo"
|
||||
goodFilter := AmiFilterOptions{
|
||||
Owners: []string{owner},
|
||||
Filters: map[string]string{filter_key: filter_value},
|
||||
}
|
||||
c.SourceAmiFilter = goodFilter
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_EnableT2UnlimitedGood(t *testing.T) {
|
||||
c := testConfig()
|
||||
// Must have a T2 instance type if T2 Unlimited is enabled
|
||||
c.InstanceType = "t2.micro"
|
||||
c.EnableT2Unlimited = true
|
||||
err := c.Prepare(nil)
|
||||
if len(err) > 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_EnableT2UnlimitedBadInstanceType(t *testing.T) {
|
||||
c := testConfig()
|
||||
// T2 Unlimited cannot be used with instance types other than T2
|
||||
c.InstanceType = "m5.large"
|
||||
c.EnableT2Unlimited = true
|
||||
err := c.Prepare(nil)
|
||||
if len(err) != 1 {
|
||||
t.Fatalf("Should error if T2 Unlimited is enabled with non-T2 instance_type")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_EnableT2UnlimitedBadWithSpotInstanceRequest(t *testing.T) {
|
||||
c := testConfig()
|
||||
// T2 Unlimited cannot be used with Spot Instances
|
||||
c.InstanceType = "t2.micro"
|
||||
c.EnableT2Unlimited = true
|
||||
c.SpotPrice = "auto"
|
||||
err := c.Prepare(nil)
|
||||
if len(err) != 1 {
|
||||
t.Fatalf("Should error if T2 Unlimited has been used in conjuntion with a Spot Price request")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_SpotAuto(t *testing.T) {
|
||||
c := testConfig()
|
||||
c.SpotPrice = "auto"
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
// Shouldn't error (YET) even though SpotPriceAutoProduct is deprecated
|
||||
c.SpotPriceAutoProduct = "Linux/Unix"
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_SSHPort(t *testing.T) {
|
||||
c := testConfig()
|
||||
c.Comm.SSHPort = 0
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if c.Comm.SSHPort != 22 {
|
||||
t.Fatalf("invalid value: %d", c.Comm.SSHPort)
|
||||
}
|
||||
|
||||
c.Comm.SSHPort = 44
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if c.Comm.SSHPort != 44 {
|
||||
t.Fatalf("invalid value: %d", c.Comm.SSHPort)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_UserData(t *testing.T) {
|
||||
c := testConfig()
|
||||
tf, err := ioutil.TempFile("", "packer")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
defer os.Remove(tf.Name())
|
||||
defer tf.Close()
|
||||
|
||||
c.UserData = "foo"
|
||||
c.UserDataFile = tf.Name()
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("Should error if user_data string and user_data_file have both been specified")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_UserDataFile(t *testing.T) {
|
||||
c := testConfig()
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
c.UserDataFile = "idontexistidontthink"
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("Should error if the file specified by user_data_file does not exist")
|
||||
}
|
||||
|
||||
tf, err := ioutil.TempFile("", "packer")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
defer os.Remove(tf.Name())
|
||||
defer tf.Close()
|
||||
|
||||
c.UserDataFile = tf.Name()
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_TemporaryKeyPairName(t *testing.T) {
|
||||
c := testConfig()
|
||||
c.Comm.SSHTemporaryKeyPairName = ""
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if c.Comm.SSHTemporaryKeyPairName == "" {
|
||||
t.Fatal("keypair name is empty")
|
||||
}
|
||||
|
||||
// Match prefix and UUID, e.g. "packer_5790d491-a0b8-c84c-c9d2-2aea55086550".
|
||||
r := regexp.MustCompile(`\Apacker_(?:(?i)[a-f\d]{8}(?:-[a-f\d]{4}){3}-[a-f\d]{12}?)\z`)
|
||||
if !r.MatchString(c.Comm.SSHTemporaryKeyPairName) {
|
||||
t.Fatal("keypair name is not valid")
|
||||
}
|
||||
|
||||
c.Comm.SSHTemporaryKeyPairName = "ssh-key-123"
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if c.Comm.SSHTemporaryKeyPairName != "ssh-key-123" {
|
||||
t.Fatal("keypair name does not match")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_TenancyBad(t *testing.T) {
|
||||
c := testConfig()
|
||||
c.Tenancy = "not_real"
|
||||
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatal("Should error if tenancy is set to an invalid type")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_TenancyGood(t *testing.T) {
|
||||
validTenancy := []string{"", "default", "dedicated", "host"}
|
||||
for _, vt := range validTenancy {
|
||||
c := testConfig()
|
||||
c.Tenancy = vt
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("Should not error if tenancy is set to %s", vt)
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,108 +0,0 @@
package common
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
)
|
||||
|
||||
type ec2Describer interface {
|
||||
DescribeInstances(*ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error)
|
||||
}
|
||||
|
||||
var (
|
||||
// modified in tests
|
||||
sshHostSleepDuration = time.Second
|
||||
)
|
||||
|
||||
// SSHHost returns a function that can be given to the SSH communicator
|
||||
// for determining the SSH address based on the instance DNS name.
|
||||
func SSHHost(e ec2Describer, sshInterface string, host string) func(multistep.StateBag) (string, error) {
|
||||
return func(state multistep.StateBag) (string, error) {
|
||||
if host != "" {
|
||||
log.Printf("Using host value: %s", host)
|
||||
return host, nil
|
||||
}
|
||||
|
||||
if sshInterface == "session_manager" {
|
||||
return "localhost", nil
|
||||
}
|
||||
|
||||
const tries = 2
|
||||
// <= with current structure to check result of describing `tries` times
|
||||
for j := 0; j <= tries; j++ {
|
||||
i := state.Get("instance").(*ec2.Instance)
|
||||
if sshInterface != "" {
|
||||
switch sshInterface {
|
||||
case "public_ip":
|
||||
if i.PublicIpAddress != nil {
|
||||
host = *i.PublicIpAddress
|
||||
}
|
||||
case "private_ip":
|
||||
if i.PrivateIpAddress != nil {
|
||||
host = *i.PrivateIpAddress
|
||||
}
|
||||
case "public_dns":
|
||||
if i.PublicDnsName != nil {
|
||||
host = *i.PublicDnsName
|
||||
}
|
||||
case "private_dns":
|
||||
if i.PrivateDnsName != nil {
|
||||
host = *i.PrivateDnsName
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown interface type: %s", sshInterface))
|
||||
}
|
||||
} else if i.VpcId != nil && *i.VpcId != "" {
|
||||
if i.PublicIpAddress != nil && *i.PublicIpAddress != "" {
|
||||
host = *i.PublicIpAddress
|
||||
} else if i.PrivateIpAddress != nil && *i.PrivateIpAddress != "" {
|
||||
host = *i.PrivateIpAddress
|
||||
}
|
||||
} else if i.PublicDnsName != nil && *i.PublicDnsName != "" {
|
||||
host = *i.PublicDnsName
|
||||
}
|
||||
|
||||
if host != "" {
|
||||
return host, nil
|
||||
}
|
||||
|
||||
r, err := e.DescribeInstances(&ec2.DescribeInstancesInput{
|
||||
InstanceIds: []*string{i.InstanceId},
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(r.Reservations) == 0 || len(r.Reservations[0].Instances) == 0 {
|
||||
return "", fmt.Errorf("instance not found: %s", *i.InstanceId)
|
||||
}
|
||||
|
||||
state.Put("instance", r.Reservations[0].Instances[0])
|
||||
time.Sleep(sshHostSleepDuration)
|
||||
}
|
||||
|
||||
return "", errors.New("couldn't determine address for instance")
|
||||
}
|
||||
}
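// Worked example (illustrative): with ssh_interface left empty, an instance
// inside a VPC resolves to its public IP (falling back to its private IP),
// while a non-VPC instance resolves to its public DNS name; "session_manager"
// always resolves to localhost because traffic is tunnelled through SSM.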
|
||||
|
||||
// Port returns a function that can be given to the communicator
|
||||
// for determining the port to use when connecting to an instance.
|
||||
func Port(sshInterface string, port int) func(multistep.StateBag) (int, error) {
|
||||
return func(state multistep.StateBag) (int, error) {
|
||||
if sshInterface != "session_manager" {
|
||||
return port, nil
|
||||
}
|
||||
|
||||
port, ok := state.GetOk("sessionPort")
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("no local port defined for session-manager")
|
||||
}
|
||||
return port.(int), nil
|
||||
|
||||
}
|
||||
}
|
|
@@ -1,141 +0,0 @@
package common
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
)
|
||||
|
||||
const (
|
||||
privateIP = "10.0.0.1"
|
||||
publicIP = "192.168.1.1"
|
||||
privateDNS = "private.dns.test"
|
||||
publicDNS = "public.dns.test"
|
||||
localhost = "localhost"
|
||||
sshHostTemplate = "custom.host.value"
|
||||
)
|
||||
|
||||
func TestSSHHost(t *testing.T) {
|
||||
origSshHostSleepDuration := sshHostSleepDuration
|
||||
defer func() { sshHostSleepDuration = origSshHostSleepDuration }()
|
||||
sshHostSleepDuration = 0
|
||||
|
||||
var cases = []struct {
|
||||
allowTries int
|
||||
vpcId string
|
||||
sshInterface string
|
||||
|
||||
ok bool
|
||||
wantHost string
|
||||
sshHostOverride string
|
||||
}{
|
||||
{1, "", "", true, publicDNS, ""},
|
||||
{1, "", "private_ip", true, privateIP, ""},
|
||||
{1, "", "session_manager", true, localhost, ""},
|
||||
{1, "vpc-id", "", true, publicIP, ""},
|
||||
{1, "vpc-id", "private_ip", true, privateIP, ""},
|
||||
{1, "vpc-id", "private_dns", true, privateDNS, ""},
|
||||
{1, "vpc-id", "public_dns", true, publicDNS, ""},
|
||||
{1, "vpc-id", "public_ip", true, publicIP, ""},
|
||||
{1, "vpc-id", "session_manager", true, localhost, ""},
|
||||
{2, "", "", true, publicDNS, ""},
|
||||
{2, "", "private_ip", true, privateIP, ""},
|
||||
{2, "vpc-id", "", true, publicIP, ""},
|
||||
{2, "vpc-id", "private_ip", true, privateIP, ""},
|
||||
{2, "vpc-id", "private_dns", true, privateDNS, ""},
|
||||
{2, "vpc-id", "public_dns", true, publicDNS, ""},
|
||||
{2, "vpc-id", "public_ip", true, publicIP, ""},
|
||||
{3, "", "", false, "", ""},
|
||||
{3, "", "private_ip", false, "", ""},
|
||||
{3, "vpc-id", "", false, "", ""},
|
||||
{3, "vpc-id", "private_ip", false, "", ""},
|
||||
{3, "vpc-id", "private_dns", false, "", ""},
|
||||
{3, "vpc-id", "public_dns", false, "", ""},
|
||||
{3, "vpc-id", "public_ip", false, "", ""},
|
||||
{1, "", "", true, sshHostTemplate, sshHostTemplate},
|
||||
{1, "vpc-id", "", true, sshHostTemplate, sshHostTemplate},
|
||||
{2, "vpc-id", "private_dns", true, sshHostTemplate, sshHostTemplate},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
testSSHHost(t, c.allowTries, c.vpcId, c.sshInterface, c.ok, c.wantHost,
|
||||
c.sshHostOverride)
|
||||
}
|
||||
}
|
||||
|
||||
func testSSHHost(t *testing.T, allowTries int, vpcId string, sshInterface string,
|
||||
ok bool, wantHost string, sshHostOverride string) {
|
||||
t.Logf("allowTries=%d vpcId=%s sshInterface=%s ok=%t wantHost=%q sshHostOverride=%s",
|
||||
allowTries, vpcId, sshInterface, ok, wantHost, sshHostOverride)
|
||||
|
||||
e := &fakeEC2Describer{
|
||||
allowTries: allowTries,
|
||||
vpcId: vpcId,
|
||||
privateIP: privateIP,
|
||||
publicIP: publicIP,
|
||||
privateDNS: privateDNS,
|
||||
publicDNS: publicDNS,
|
||||
}
|
||||
|
||||
f := SSHHost(e, sshInterface, sshHostOverride)
|
||||
st := &multistep.BasicStateBag{}
|
||||
st.Put("instance", &ec2.Instance{
|
||||
InstanceId: aws.String("instance-id"),
|
||||
})
|
||||
|
||||
host, err := f(st)
|
||||
|
||||
if e.tries > allowTries {
|
||||
t.Fatalf("got %d ec2 DescribeInstances tries, want %d", e.tries, allowTries)
|
||||
}
|
||||
|
||||
switch {
|
||||
case ok && err != nil:
|
||||
t.Fatalf("expected no error, got %+v", err)
|
||||
case !ok && err == nil:
|
||||
t.Fatalf("expected error, got none and host %s", host)
|
||||
}
|
||||
|
||||
if host != wantHost {
|
||||
t.Fatalf("got host %s, want %s", host, wantHost)
|
||||
}
|
||||
}
|
||||
|
||||
type fakeEC2Describer struct {
|
||||
allowTries int
|
||||
tries int
|
||||
|
||||
vpcId string
|
||||
privateIP, publicIP, privateDNS, publicDNS string
|
||||
}
|
||||
|
||||
func (d *fakeEC2Describer) DescribeInstances(in *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) {
|
||||
d.tries++
|
||||
|
||||
instance := &ec2.Instance{
|
||||
InstanceId: aws.String("instance-id"),
|
||||
}
|
||||
|
||||
if d.vpcId != "" {
|
||||
instance.VpcId = aws.String(d.vpcId)
|
||||
}
|
||||
|
||||
if d.tries >= d.allowTries {
|
||||
instance.PublicIpAddress = aws.String(d.publicIP)
|
||||
instance.PrivateIpAddress = aws.String(d.privateIP)
|
||||
instance.PublicDnsName = aws.String(d.publicDNS)
|
||||
instance.PrivateDnsName = aws.String(d.privateDNS)
|
||||
}
|
||||
|
||||
out := &ec2.DescribeInstancesOutput{
|
||||
Reservations: []*ec2.Reservation{
|
||||
{
|
||||
Instances: []*ec2.Instance{instance},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
|
@@ -1,117 +0,0 @@
package ssm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ssm"
|
||||
"github.com/aws/aws-sdk-go/service/ssm/ssmiface"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/retry"
|
||||
"github.com/hashicorp/packer-plugin-sdk/shell-local/localexec"
|
||||
"github.com/hashicorp/packer/builder/amazon/common/awserrors"
|
||||
)
|
||||
|
||||
type Session struct {
|
||||
SvcClient ssmiface.SSMAPI
|
||||
Region string
|
||||
InstanceID string
|
||||
LocalPort, RemotePort int
|
||||
}
|
||||
|
||||
func (s Session) buildTunnelInput() *ssm.StartSessionInput {
|
||||
portNumber, localPortNumber := strconv.Itoa(s.RemotePort), strconv.Itoa(s.LocalPort)
|
||||
params := map[string][]*string{
|
||||
"portNumber": []*string{aws.String(portNumber)},
|
||||
"localPortNumber": []*string{aws.String(localPortNumber)},
|
||||
}
|
||||
|
||||
return &ssm.StartSessionInput{
|
||||
DocumentName: aws.String("AWS-StartPortForwardingSession"),
|
||||
Parameters: params,
|
||||
Target: aws.String(s.InstanceID),
|
||||
}
|
||||
}
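// Worked example (illustrative): with RemotePort 22 and LocalPort 8443 the
// input above targets the AWS-StartPortForwardingSession document with
// parameters {"portNumber": ["22"], "localPortNumber": ["8443"]}.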
|
||||
|
||||
// getCommand returns a valid, ordered set of arguments to pass to the driver command.
|
||||
func (s Session) getCommand(ctx context.Context) ([]string, string, error) {
|
||||
input := s.buildTunnelInput()
|
||||
|
||||
var session *ssm.StartSessionOutput
|
||||
err := retry.Config{
|
||||
ShouldRetry: func(err error) bool { return awserrors.Matches(err, "TargetNotConnected", "") },
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 60 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) (err error) {
|
||||
session, err = s.SvcClient.StartSessionWithContext(ctx, input)
|
||||
return err
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
if session == nil {
|
||||
return nil, "", fmt.Errorf("an active Amazon SSM Session is required before trying to open a session tunnel")
|
||||
}
|
||||
|
||||
// AWS session-manager-plugin requires a valid session be passed in JSON.
|
||||
sessionDetails, err := json.Marshal(session)
|
||||
if err != nil {
|
||||
return nil, *session.SessionId, fmt.Errorf("error encountered in reading session details %s", err)
|
||||
}
|
||||
|
||||
// AWS session-manager-plugin requires the parameters used in the session to be passed in JSON as well.
|
||||
sessionParameters, err := json.Marshal(input)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("error encountered in reading session parameter details %s", err)
|
||||
}
|
||||
|
||||
// Args must be in this order
|
||||
args := []string{
|
||||
string(sessionDetails),
|
||||
s.Region,
|
||||
"StartSession",
|
||||
"", // ProfileName
|
||||
string(sessionParameters),
|
||||
*session.StreamUrl,
|
||||
}
|
||||
return args, *session.SessionId, nil
|
||||
}
|
||||
|
||||
// Start an interactive Systems Manager session with a remote instance via the
|
||||
// AWS session-manager-plugin. To terminate the session you must cancel the
|
||||
// context. If you do not wish to terminate the session manually, calling
|
||||
// StopSession on an instance of this driver will terminate the active session
|
||||
// created from calling StartSession.
|
||||
func (s Session) Start(ctx context.Context, ui packersdk.Ui) error {
|
||||
for ctx.Err() == nil {
|
||||
log.Printf("ssm: Starting PortForwarding session to instance %s", s.InstanceID)
|
||||
args, sessionID, err := s.getCommand(ctx)
|
||||
if sessionID != "" {
|
||||
defer func() {
|
||||
_, err := s.SvcClient.TerminateSession(&ssm.TerminateSessionInput{SessionId: aws.String(sessionID)})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("Error terminating SSM Session %q. Please terminate the session manually: %s", sessionID, err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, "session-manager-plugin", args...)
|
||||
|
||||
ui.Message(fmt.Sprintf("Starting portForwarding session %q.", sessionID))
|
||||
err = localexec.RunAndStream(cmd, ui, nil)
|
||||
if err != nil {
|
||||
ui.Error(err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
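// Minimal usage sketch (not from the original source) with hypothetical region,
// instance ID, and port values; cancelling the context is what ends the
// port-forwarding session.
func exampleStartTunnel(ctx context.Context, svc ssmiface.SSMAPI, ui packersdk.Ui) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	driver := Session{
		SvcClient:  svc,
		Region:     "us-east-1",           // hypothetical region
		InstanceID: "i-0123456789abcdef0", // hypothetical instance ID
		LocalPort:  8443,
		RemotePort: 22,
	}
	return driver.Start(ctx, ui)
}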
|
|
@@ -1,439 +0,0 @@
//go:generate struct-markdown
|
||||
//go:generate mapstructure-to-hcl2 -type AWSPollingConfig
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
)
|
||||
|
||||
// StateRefreshFunc is a function type used for StateChangeConf that is
|
||||
// responsible for refreshing the item being watched for a state change.
|
||||
//
|
||||
// It returns three results. `result` is any object that will be returned
|
||||
// as the final object after waiting for state change. This allows you to
|
||||
// return the final updated object, for example an EC2 instance after refreshing
|
||||
// it.
|
||||
//
|
||||
// `state` is the latest state of that object. And `err` is any error that
|
||||
// may have happened while refreshing the state.
|
||||
type StateRefreshFunc func() (result interface{}, state string, err error)
|
||||
|
||||
// StateChangeConf is the configuration struct used for `WaitForState`.
|
||||
type StateChangeConf struct {
|
||||
Pending []string
|
||||
Refresh StateRefreshFunc
|
||||
StepState multistep.StateBag
|
||||
Target string
|
||||
}
|
||||
|
||||
// Following are wrapper functions that use Packer's environment variables to
|
||||
// determine retry logic, then call the AWS SDK's built-in waiters.
|
||||
|
||||
// Polling configuration for the AWS waiter. Configures the waiter for resource creation or for actions like attaching
|
||||
// volumes or importing an image.
|
||||
// Usage example:
|
||||
//
|
||||
// In JSON:
|
||||
// ```json
|
||||
// "aws_polling" : {
|
||||
// "delay_seconds": 30,
|
||||
// "max_attempts": 50
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// In HCL2:
|
||||
// ```hcl
|
||||
// aws_polling {
|
||||
// delay_seconds = 30
|
||||
// max_attempts = 50
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
type AWSPollingConfig struct {
|
||||
// Specifies the maximum number of attempts the waiter will check for resource state.
|
||||
// This value can also be set via the AWS_MAX_ATTEMPTS environment variable.
|
||||
// If both the option and the environment variable are set, max_attempts takes precedence over AWS_MAX_ATTEMPTS.
|
||||
// If neither is set, this defaults to the AWS waiter default of 40 attempts.
|
||||
MaxAttempts int `mapstructure:"max_attempts" required:"false"`
|
||||
// Specifies the delay in seconds between attempts to check the resource state.
|
||||
// This value can also be set via the AWS_POLL_DELAY_SECONDS environment variable.
|
||||
// If both the option and the environment variable are set, delay_seconds takes precedence over AWS_POLL_DELAY_SECONDS.
|
||||
// If neither is set, this defaults to the AWS waiter default of 15 seconds.
|
||||
DelaySeconds int `mapstructure:"delay_seconds" required:"false"`
|
||||
}
|
||||
|
||||
func (w *AWSPollingConfig) WaitUntilAMIAvailable(ctx aws.Context, conn ec2iface.EC2API, imageId string) error {
|
||||
imageInput := ec2.DescribeImagesInput{
|
||||
ImageIds: []*string{&imageId},
|
||||
}
|
||||
|
||||
waitOpts := w.getWaiterOptions()
|
||||
if len(waitOpts) == 0 {
|
||||
// Bump this default to 30 minutes because the aws default
|
||||
// of ten minutes doesn't work for some of our long-running copies.
|
||||
waitOpts = append(waitOpts, request.WithWaiterMaxAttempts(120))
|
||||
}
|
||||
err := conn.WaitUntilImageAvailableWithContext(
|
||||
ctx,
|
||||
&imageInput,
|
||||
waitOpts...)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), request.WaiterResourceNotReadyErrorCode) {
|
||||
err = fmt.Errorf("Failed with ResourceNotReady error, which can "+
|
||||
"have a variety of causes. For help troubleshooting, check "+
|
||||
"our docs: "+
|
||||
"https://www.packer.io/docs/builders/amazon.html#resourcenotready-error\n"+
|
||||
"original error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
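// Worked example (illustrative): with no user overrides, the bump above gives
// 120 attempts at the SDK's default 15-second poll delay, i.e. roughly the 30
// minutes mentioned in the comment, instead of the SDK's usual 40 attempts.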
|
||||
|
||||
func (w *AWSPollingConfig) WaitUntilInstanceRunning(ctx aws.Context, conn *ec2.EC2, instanceId string) error {
|
||||
|
||||
instanceInput := ec2.DescribeInstancesInput{
|
||||
InstanceIds: []*string{&instanceId},
|
||||
}
|
||||
|
||||
err := conn.WaitUntilInstanceRunningWithContext(
|
||||
ctx,
|
||||
&instanceInput,
|
||||
w.getWaiterOptions()...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *AWSPollingConfig) WaitUntilInstanceTerminated(ctx aws.Context, conn *ec2.EC2, instanceId string) error {
|
||||
instanceInput := ec2.DescribeInstancesInput{
|
||||
InstanceIds: []*string{&instanceId},
|
||||
}
|
||||
|
||||
err := conn.WaitUntilInstanceTerminatedWithContext(
|
||||
ctx,
|
||||
&instanceInput,
|
||||
w.getWaiterOptions()...)
|
||||
return err
|
||||
}
|
||||
|
||||
// This function works for both requesting and cancelling spot instances.
|
||||
func (w *AWSPollingConfig) WaitUntilSpotRequestFulfilled(ctx aws.Context, conn *ec2.EC2, spotRequestId string) error {
|
||||
spotRequestInput := ec2.DescribeSpotInstanceRequestsInput{
|
||||
SpotInstanceRequestIds: []*string{&spotRequestId},
|
||||
}
|
||||
|
||||
err := conn.WaitUntilSpotInstanceRequestFulfilledWithContext(
|
||||
ctx,
|
||||
&spotRequestInput,
|
||||
w.getWaiterOptions()...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *AWSPollingConfig) WaitUntilVolumeAvailable(ctx aws.Context, conn *ec2.EC2, volumeId string) error {
|
||||
volumeInput := ec2.DescribeVolumesInput{
|
||||
VolumeIds: []*string{&volumeId},
|
||||
}
|
||||
|
||||
err := conn.WaitUntilVolumeAvailableWithContext(
|
||||
ctx,
|
||||
&volumeInput,
|
||||
w.getWaiterOptions()...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *AWSPollingConfig) WaitUntilSnapshotDone(ctx aws.Context, conn *ec2.EC2, snapshotID string) error {
|
||||
snapInput := ec2.DescribeSnapshotsInput{
|
||||
SnapshotIds: []*string{&snapshotID},
|
||||
}
|
||||
|
||||
err := conn.WaitUntilSnapshotCompletedWithContext(
|
||||
ctx,
|
||||
&snapInput,
|
||||
w.getWaiterOptions()...)
|
||||
return err
|
||||
}
|
||||
|
||||
// Wrappers for our custom AWS waiters
|
||||
|
||||
func (w *AWSPollingConfig) WaitUntilVolumeAttached(ctx aws.Context, conn *ec2.EC2, volumeId string) error {
|
||||
volumeInput := ec2.DescribeVolumesInput{
|
||||
VolumeIds: []*string{&volumeId},
|
||||
}
|
||||
|
||||
err := WaitForVolumeToBeAttached(conn,
|
||||
ctx,
|
||||
&volumeInput,
|
||||
w.getWaiterOptions()...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *AWSPollingConfig) WaitUntilVolumeDetached(ctx aws.Context, conn *ec2.EC2, volumeId string) error {
|
||||
volumeInput := ec2.DescribeVolumesInput{
|
||||
VolumeIds: []*string{&volumeId},
|
||||
}
|
||||
|
||||
err := WaitForVolumeToBeDetached(conn,
|
||||
ctx,
|
||||
&volumeInput,
|
||||
w.getWaiterOptions()...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *AWSPollingConfig) WaitUntilImageImported(ctx aws.Context, conn *ec2.EC2, taskID string) error {
|
||||
importInput := ec2.DescribeImportImageTasksInput{
|
||||
ImportTaskIds: []*string{&taskID},
|
||||
}
|
||||
|
||||
err := WaitForImageToBeImported(conn,
|
||||
ctx,
|
||||
&importInput,
|
||||
w.getWaiterOptions()...)
|
||||
return err
|
||||
}
|
||||
|
||||
// Custom waiters using AWS's request.Waiter
|
||||
|
||||
func WaitForVolumeToBeAttached(c *ec2.EC2, ctx aws.Context, input *ec2.DescribeVolumesInput, opts ...request.WaiterOption) error {
|
||||
w := request.Waiter{
|
||||
Name: "DescribeVolumes",
|
||||
MaxAttempts: 40,
|
||||
Delay: request.ConstantWaiterDelay(5 * time.Second),
|
||||
Acceptors: []request.WaiterAcceptor{
|
||||
{
|
||||
State: request.SuccessWaiterState,
|
||||
Matcher: request.PathAllWaiterMatch,
|
||||
Argument: "Volumes[].Attachments[].State",
|
||||
Expected: "attached",
|
||||
},
|
||||
},
|
||||
Logger: c.Config.Logger,
|
||||
NewRequest: func(opts []request.Option) (*request.Request, error) {
|
||||
var inCpy *ec2.DescribeVolumesInput
|
||||
if input != nil {
|
||||
tmp := *input
|
||||
inCpy = &tmp
|
||||
}
|
||||
req, _ := c.DescribeVolumesRequest(inCpy)
|
||||
req.SetContext(ctx)
|
||||
req.ApplyOptions(opts...)
|
||||
return req, nil
|
||||
},
|
||||
}
|
||||
w.ApplyOptions(opts...)
|
||||
|
||||
return w.WaitWithContext(ctx)
|
||||
}
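// Worked example (illustrative): the acceptor above succeeds once every
// Volumes[].Attachments[].State value in the DescribeVolumes response equals
// "attached", polling at a constant 5-second delay for up to 40 attempts
// unless the caller passes overriding waiter options.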
|
||||
|
||||
func WaitForVolumeToBeDetached(c *ec2.EC2, ctx aws.Context, input *ec2.DescribeVolumesInput, opts ...request.WaiterOption) error {
|
||||
w := request.Waiter{
|
||||
Name: "DescribeVolumes",
|
||||
MaxAttempts: 40,
|
||||
Delay: request.ConstantWaiterDelay(5 * time.Second),
|
||||
Acceptors: []request.WaiterAcceptor{
|
||||
{
|
||||
State: request.SuccessWaiterState,
|
||||
Matcher: request.PathAllWaiterMatch,
|
||||
Argument: "length(Volumes[].Attachments[]) == `0`",
|
||||
Expected: true,
|
||||
},
|
||||
},
|
||||
Logger: c.Config.Logger,
|
||||
NewRequest: func(opts []request.Option) (*request.Request, error) {
|
||||
var inCpy *ec2.DescribeVolumesInput
|
||||
if input != nil {
|
||||
tmp := *input
|
||||
inCpy = &tmp
|
||||
}
|
||||
req, _ := c.DescribeVolumesRequest(inCpy)
|
||||
req.SetContext(ctx)
|
||||
req.ApplyOptions(opts...)
|
||||
return req, nil
|
||||
},
|
||||
}
|
||||
w.ApplyOptions(opts...)
|
||||
|
||||
return w.WaitWithContext(ctx)
|
||||
}
|
||||
|
||||
func WaitForImageToBeImported(c *ec2.EC2, ctx aws.Context, input *ec2.DescribeImportImageTasksInput, opts ...request.WaiterOption) error {
|
||||
w := request.Waiter{
|
||||
Name: "DescribeImages",
|
||||
MaxAttempts: 720,
|
||||
Delay: request.ConstantWaiterDelay(5 * time.Second),
|
||||
Acceptors: []request.WaiterAcceptor{
|
||||
{
|
||||
State: request.SuccessWaiterState,
|
||||
Matcher: request.PathAllWaiterMatch,
|
||||
Argument: "ImportImageTasks[].Status",
|
||||
Expected: "completed",
|
||||
},
|
||||
{
|
||||
State: request.FailureWaiterState,
|
||||
Matcher: request.PathAnyWaiterMatch,
|
||||
Argument: "ImportImageTasks[].Status",
|
||||
Expected: "deleted",
|
||||
},
|
||||
},
|
||||
Logger: c.Config.Logger,
|
||||
NewRequest: func(opts []request.Option) (*request.Request, error) {
|
||||
var inCpy *ec2.DescribeImportImageTasksInput
|
||||
if input != nil {
|
||||
tmp := *input
|
||||
inCpy = &tmp
|
||||
}
|
||||
req, _ := c.DescribeImportImageTasksRequest(inCpy)
|
||||
req.SetContext(ctx)
|
||||
req.ApplyOptions(opts...)
|
||||
return req, nil
|
||||
},
|
||||
}
|
||||
w.ApplyOptions(opts...)
|
||||
|
||||
return w.WaitWithContext(ctx)
|
||||
}
|
||||
|
||||
// This helper function uses the environment variables AWS_TIMEOUT_SECONDS and
|
||||
// AWS_POLL_DELAY_SECONDS to generate waiter options that can be passed into any
|
||||
// request.Waiter function. These options will control how many times the waiter
|
||||
// will retry the request, as well as how long to wait between the retries.
|
||||
|
||||
// DEFAULTING BEHAVIOR:
|
||||
// if AWS_POLL_DELAY_SECONDS is set but the others are not, Packer will set this
|
||||
// poll delay and use the waiter-specific default
|
||||
|
||||
// if AWS_TIMEOUT_SECONDS is set but AWS_MAX_ATTEMPTS is not, Packer will use
|
||||
// AWS_TIMEOUT_SECONDS and _either_ AWS_POLL_DELAY_SECONDS _or_ 2 if the user has not set AWS_POLL_DELAY_SECONDS, to determine a max number of attempts to make.
|
||||
|
||||
// if AWS_TIMEOUT_SECONDS _and_ AWS_MAX_ATTEMPTS are both set,
|
||||
// AWS_TIMEOUT_SECONDS will be ignored.
|
||||
|
||||
// if AWS_MAX_ATTEMPTS is set but AWS_POLL_DELAY_SECONDS is not, then we will
|
||||
// use waiter-specific defaults.
|
||||
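// Worked example (illustrative): with AWS_TIMEOUT_SECONDS=300 and
// AWS_POLL_DELAY_SECONDS unset, the poll delay defaults to 2 seconds and the
// waiter is configured for 300 / 2 = 150 attempts; setting AWS_MAX_ATTEMPTS
// would bypass that calculation entirely.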
|
||||
type envInfo struct {
|
||||
envKey string
|
||||
Val int
|
||||
overridden bool
|
||||
}
|
||||
|
||||
type overridableWaitVars struct {
|
||||
awsPollDelaySeconds envInfo
|
||||
awsMaxAttempts envInfo
|
||||
awsTimeoutSeconds envInfo
|
||||
}
|
||||
|
||||
func (w *AWSPollingConfig) getWaiterOptions() []request.WaiterOption {
|
||||
envOverrides := getEnvOverrides()
|
||||
|
||||
if w.MaxAttempts != 0 {
|
||||
envOverrides.awsMaxAttempts.Val = w.MaxAttempts
|
||||
envOverrides.awsMaxAttempts.overridden = true
|
||||
}
|
||||
if w.DelaySeconds != 0 {
|
||||
envOverrides.awsPollDelaySeconds.Val = w.DelaySeconds
|
||||
envOverrides.awsPollDelaySeconds.overridden = true
|
||||
}
|
||||
|
||||
waitOpts := applyEnvOverrides(envOverrides)
|
||||
return waitOpts
|
||||
}
|
||||
|
||||
func getOverride(varInfo envInfo) envInfo {
|
||||
override := os.Getenv(varInfo.envKey)
|
||||
if override != "" {
|
||||
n, err := strconv.Atoi(override)
|
||||
if err != nil {
|
||||
log.Printf("Invalid %s '%s', using default", varInfo.envKey, override)
|
||||
} else {
|
||||
varInfo.overridden = true
|
||||
varInfo.Val = n
|
||||
}
|
||||
}
|
||||
|
||||
return varInfo
|
||||
}
|
||||
func getEnvOverrides() overridableWaitVars {
|
||||
// Load env vars from environment.
|
||||
envValues := overridableWaitVars{
|
||||
envInfo{"AWS_POLL_DELAY_SECONDS", 2, false},
|
||||
envInfo{"AWS_MAX_ATTEMPTS", 0, false},
|
||||
envInfo{"AWS_TIMEOUT_SECONDS", 0, false},
|
||||
}
|
||||
|
||||
envValues.awsMaxAttempts = getOverride(envValues.awsMaxAttempts)
|
||||
envValues.awsPollDelaySeconds = getOverride(envValues.awsPollDelaySeconds)
|
||||
envValues.awsTimeoutSeconds = getOverride(envValues.awsTimeoutSeconds)
|
||||
|
||||
return envValues
|
||||
}
|
||||
|
||||
func (w *AWSPollingConfig) LogEnvOverrideWarnings() {
|
||||
pollDelayEnv := os.Getenv("AWS_POLL_DELAY_SECONDS")
|
||||
timeoutSecondsEnv := os.Getenv("AWS_TIMEOUT_SECONDS")
|
||||
maxAttemptsEnv := os.Getenv("AWS_MAX_ATTEMPTS")
|
||||
|
||||
maxAttemptsIsSet := maxAttemptsEnv != "" || w.MaxAttempts != 0
|
||||
timeoutSecondsIsSet := timeoutSecondsEnv != ""
|
||||
pollDelayIsSet := pollDelayEnv != "" || w.DelaySeconds != 0
|
||||
|
||||
if maxAttemptsIsSet && timeoutSecondsIsSet {
|
||||
warning := fmt.Sprintf("[WARNING] (aws): AWS_MAX_ATTEMPTS and " +
|
||||
"AWS_TIMEOUT_SECONDS are both set. Packer will use " +
|
||||
"AWS_MAX_ATTEMPTS and discard AWS_TIMEOUT_SECONDS.")
|
||||
if !pollDelayIsSet {
|
||||
warning = fmt.Sprintf("%s Since you have not set the poll delay, "+
|
||||
"Packer will default to a 2-second delay.", warning)
|
||||
}
|
||||
log.Printf(warning)
|
||||
} else if timeoutSecondsIsSet {
|
||||
log.Printf("[WARNING] (aws): env var AWS_TIMEOUT_SECONDS is " +
|
||||
"deprecated in favor of AWS_MAX_ATTEMPTS env or aws_polling_max_attempts config option. " +
|
||||
"If you have not explicitly set AWS_POLL_DELAY_SECONDS env or aws_polling_delay_seconds config option, " +
|
||||
"we are defaulting to a poll delay of 2 seconds, regardless of the AWS waiter's default.")
|
||||
}
|
||||
if !maxAttemptsIsSet && !timeoutSecondsIsSet && !pollDelayIsSet {
|
||||
log.Printf("[INFO] (aws): No AWS timeout and polling overrides have been set. " +
|
||||
"Packer will default to waiter-specific delays and timeouts. If you would " +
|
||||
"like to customize the length of time between retries and max " +
|
||||
"number of retries you may do so by setting the environment " +
|
||||
"variables AWS_POLL_DELAY_SECONDS and AWS_MAX_ATTEMPTS or the " +
|
||||
"configuration options aws_polling_delay_seconds and aws_polling_max_attempts " +
|
||||
"to your desired values.")
|
||||
}
|
||||
}
|
||||
|
||||
func applyEnvOverrides(envOverrides overridableWaitVars) []request.WaiterOption {
|
||||
waitOpts := make([]request.WaiterOption, 0)
|
||||
// If user has set poll delay seconds, overwrite it. If user has NOT,
|
||||
// default to a poll delay of 2 seconds
|
||||
if envOverrides.awsPollDelaySeconds.overridden {
|
||||
delaySeconds := request.ConstantWaiterDelay(time.Duration(envOverrides.awsPollDelaySeconds.Val) * time.Second)
|
||||
waitOpts = append(waitOpts, request.WithWaiterDelay(delaySeconds))
|
||||
}
|
||||
|
||||
// If user has set max attempts, overwrite it. If user hasn't set max
|
||||
// attempts, default to whatever the waiter has set as a default.
|
||||
if envOverrides.awsMaxAttempts.overridden {
|
||||
waitOpts = append(waitOpts, request.WithWaiterMaxAttempts(envOverrides.awsMaxAttempts.Val))
|
||||
} else if envOverrides.awsTimeoutSeconds.overridden {
|
||||
maxAttempts := envOverrides.awsTimeoutSeconds.Val / envOverrides.awsPollDelaySeconds.Val
|
||||
// override the delay so we can get the timeout right
|
||||
if !envOverrides.awsPollDelaySeconds.overridden {
|
||||
delaySeconds := request.ConstantWaiterDelay(time.Duration(envOverrides.awsPollDelaySeconds.Val) * time.Second)
|
||||
waitOpts = append(waitOpts, request.WithWaiterDelay(delaySeconds))
|
||||
}
|
||||
waitOpts = append(waitOpts, request.WithWaiterMaxAttempts(maxAttempts))
|
||||
}
|
||||
|
||||
return waitOpts
|
||||
}
|
|
@@ -1,33 +0,0 @@
// Code generated by "mapstructure-to-hcl2 -type AWSPollingConfig"; DO NOT EDIT.
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// FlatAWSPollingConfig is an auto-generated flat version of AWSPollingConfig.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatAWSPollingConfig struct {
|
||||
MaxAttempts *int `mapstructure:"max_attempts" required:"false" cty:"max_attempts" hcl:"max_attempts"`
|
||||
DelaySeconds *int `mapstructure:"delay_seconds" required:"false" cty:"delay_seconds" hcl:"delay_seconds"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatAWSPollingConfig.
|
||||
// FlatAWSPollingConfig is an auto-generated flat version of AWSPollingConfig.
|
||||
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*AWSPollingConfig) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatAWSPollingConfig)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a AWSPollingConfig.
|
||||
// This spec is used by HCL to read the fields of AWSPollingConfig.
|
||||
// The decoded values from this spec will then be applied to a FlatAWSPollingConfig.
|
||||
func (*FlatAWSPollingConfig) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"max_attempts": &hcldec.AttrSpec{Name: "max_attempts", Type: cty.Number, Required: false},
|
||||
"delay_seconds": &hcldec.AttrSpec{Name: "delay_seconds", Type: cty.Number, Required: false},
|
||||
}
|
||||
return s
|
||||
}
|
|
@@ -1,66 +0,0 @@
package common
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
func testGetWaiterOptions(t *testing.T) {
|
||||
// no vars are set
|
||||
envValues := overridableWaitVars{
|
||||
envInfo{"AWS_POLL_DELAY_SECONDS", 2, false},
|
||||
envInfo{"AWS_MAX_ATTEMPTS", 0, false},
|
||||
envInfo{"AWS_TIMEOUT_SECONDS", 300, false},
|
||||
}
|
||||
options := applyEnvOverrides(envValues)
|
||||
if len(options) > 0 {
|
||||
t.Fatalf("Did not expect any waiter options to be generated; actual: %#v", options)
|
||||
}
|
||||
|
||||
// all vars are set
|
||||
envValues = overridableWaitVars{
|
||||
envInfo{"AWS_POLL_DELAY_SECONDS", 1, true},
|
||||
envInfo{"AWS_MAX_ATTEMPTS", 800, true},
|
||||
envInfo{"AWS_TIMEOUT_SECONDS", 20, true},
|
||||
}
|
||||
options = applyEnvOverrides(envValues)
|
||||
expected := []request.WaiterOption{
|
||||
request.WithWaiterDelay(request.ConstantWaiterDelay(time.Duration(1) * time.Second)),
|
||||
request.WithWaiterMaxAttempts(800),
|
||||
}
|
||||
if !reflect.DeepEqual(options, expected) {
|
||||
t.Fatalf("expected != actual!! Expected: %#v; Actual: %#v.", expected, options)
|
||||
}
|
||||
|
||||
// poll delay is not set
|
||||
envValues = overridableWaitVars{
|
||||
envInfo{"AWS_POLL_DELAY_SECONDS", 2, false},
|
||||
envInfo{"AWS_MAX_ATTEMPTS", 800, true},
|
||||
envInfo{"AWS_TIMEOUT_SECONDS", 300, false},
|
||||
}
|
||||
options = applyEnvOverrides(envValues)
|
||||
expected = []request.WaiterOption{
|
||||
request.WithWaiterMaxAttempts(800),
|
||||
}
|
||||
if !reflect.DeepEqual(options, expected) {
|
||||
t.Fatalf("expected != actual!! Expected: %#v; Actual: %#v.", expected, options)
|
||||
}
|
||||
|
||||
// poll delay is not set but timeout seconds is
|
||||
envValues = overridableWaitVars{
|
||||
envInfo{"AWS_POLL_DELAY_SECONDS", 2, false},
|
||||
envInfo{"AWS_MAX_ATTEMPTS", 0, false},
|
||||
envInfo{"AWS_TIMEOUT_SECONDS", 20, true},
|
||||
}
|
||||
options = applyEnvOverrides(envValues)
|
||||
expected = []request.WaiterOption{
|
||||
request.WithWaiterDelay(request.ConstantWaiterDelay(time.Duration(2) * time.Second)),
|
||||
request.WithWaiterMaxAttempts(10),
|
||||
}
|
||||
if !reflect.DeepEqual(options, expected) {
|
||||
t.Fatalf("expected != actual!! Expected: %#v; Actual: %#v.", expected, options)
|
||||
}
|
||||
}
|
|
@@ -1,238 +0,0 @@
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
)
|
||||
|
||||
type StepAMIRegionCopy struct {
|
||||
AccessConfig *AccessConfig
|
||||
Regions []string
|
||||
AMIKmsKeyId string
|
||||
RegionKeyIds map[string]string
|
||||
EncryptBootVolume config.Trilean // nil means preserve
|
||||
Name string
|
||||
OriginalRegion string
|
||||
|
||||
toDelete string
|
||||
getRegionConn func(*AccessConfig, string) (ec2iface.EC2API, error)
|
||||
AMISkipCreateImage bool
|
||||
AMISkipBuildRegion bool
|
||||
}
|
||||
|
||||
func (s *StepAMIRegionCopy) DeduplicateRegions(intermediary bool) {
|
||||
// Deduplicates regions by looping over the list of regions and storing
|
||||
// the regions as keys in a map. This saves users from accidentally copying
|
||||
// regions twice if they've added a region to the list twice.
|
||||
|
||||
RegionMap := map[string]bool{}
|
||||
RegionSlice := []string{}
|
||||
|
||||
// Original build region may or may not be present in the Regions list, so
|
||||
// let's make absolutely sure it's in our map.
|
||||
RegionMap[s.OriginalRegion] = true
|
||||
for _, r := range s.Regions {
|
||||
RegionMap[r] = true
|
||||
}
|
||||
|
||||
if !intermediary || s.AMISkipBuildRegion {
|
||||
// We don't want to copy back into the original region if we aren't
|
||||
// using an intermediary image, so remove the original region from our
|
||||
// map.
|
||||
|
||||
// We also don't want to copy back into the original region if the
|
||||
// intermediary image is because we're skipping the build region.
|
||||
delete(RegionMap, s.OriginalRegion)
|
||||
|
||||
}
|
||||
|
||||
// Now print all those keys into the region slice again
|
||||
for k := range RegionMap {
|
||||
RegionSlice = append(RegionSlice, k)
|
||||
}
|
||||
|
||||
s.Regions = RegionSlice
|
||||
}
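// Worked example (illustrative): with OriginalRegion "us-east-1", Regions
// ["us-west-2", "us-west-2", "us-east-1"] and no intermediary image, the
// deduplicated result is ["us-west-2"]; the build region is only copied back
// into when an intermediary image is in play and the build region is not skipped.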
|
||||
|
||||
func (s *StepAMIRegionCopy) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
if s.AMISkipCreateImage {
|
||||
ui.Say("Skipping AMI region copy...")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
amis := state.Get("amis").(map[string]string)
|
||||
snapshots := state.Get("snapshots").(map[string][]string)
|
||||
intermediary, _ := state.Get("intermediary_image").(bool)
|
||||
|
||||
s.DeduplicateRegions(intermediary)
|
||||
ami := amis[s.OriginalRegion]
|
||||
|
||||
// Make a note to delete the intermediary AMI if necessary.
|
||||
if intermediary {
|
||||
s.toDelete = ami
|
||||
}
|
||||
|
||||
if s.EncryptBootVolume.True() {
|
||||
// encrypt_boot is true, so we have to copy the temporary
|
||||
// AMI with required encryption setting.
|
||||
// temp image was created by stepCreateAMI.
|
||||
if s.RegionKeyIds == nil {
|
||||
s.RegionKeyIds = make(map[string]string)
|
||||
}
|
||||
|
||||
// Make sure the kms_key_id for the original region is in the map, as
|
||||
// long as the AMIKmsKeyId isn't being defaulted.
|
||||
if s.AMIKmsKeyId != "" {
|
||||
if _, ok := s.RegionKeyIds[s.OriginalRegion]; !ok {
|
||||
s.RegionKeyIds[s.OriginalRegion] = s.AMIKmsKeyId
|
||||
}
|
||||
} else {
|
||||
if regionKey, ok := s.RegionKeyIds[s.OriginalRegion]; ok {
|
||||
s.AMIKmsKeyId = regionKey
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(s.Regions) == 0 {
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Copying/Encrypting AMI (%s) to other regions...", ami))
|
||||
|
||||
var lock sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
errs := new(packersdk.MultiError)
|
||||
wg.Add(len(s.Regions))
|
||||
for _, region := range s.Regions {
|
||||
var regKeyID string
|
||||
ui.Message(fmt.Sprintf("Copying to: %s", region))
|
||||
|
||||
if s.EncryptBootVolume.True() {
|
||||
// Encrypt is true, explicitly
|
||||
regKeyID = s.RegionKeyIds[region]
|
||||
} else {
|
||||
// Encrypt is nil or false; Make sure region key is empty
|
||||
regKeyID = ""
|
||||
}
|
||||
|
||||
go func(region string) {
|
||||
defer wg.Done()
|
||||
id, snapshotIds, err := s.amiRegionCopy(ctx, state, s.AccessConfig,
|
||||
s.Name, ami, region, s.OriginalRegion, regKeyID,
|
||||
s.EncryptBootVolume.ToBoolPointer())
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
amis[region] = id
|
||||
snapshots[region] = snapshotIds
|
||||
if err != nil {
|
||||
errs = packersdk.MultiErrorAppend(errs, err)
|
||||
}
|
||||
}(region)
|
||||
}
|
||||
|
||||
// TODO(mitchellh): Wait but also allow for cancels to go through...
|
||||
ui.Message("Waiting for all copies to complete...")
|
||||
wg.Wait()
|
||||
|
||||
// If there were errors, show them
|
||||
if len(errs.Errors) > 0 {
|
||||
state.Put("error", errs)
|
||||
ui.Error(errs.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
state.Put("amis", amis)
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepAMIRegionCopy) Cleanup(state multistep.StateBag) {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
if len(s.toDelete) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Delete the unencrypted amis and snapshots
|
||||
ui.Say("Deregistering the AMI and deleting unencrypted temporary " +
|
||||
"AMIs and snapshots")
|
||||
err := DestroyAMIs([]*string{&s.toDelete}, ec2conn)
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func getRegionConn(config *AccessConfig, target string) (ec2iface.EC2API, error) {
|
||||
// Connect to the region where the AMI will be copied to
|
||||
session, err := config.Session()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error getting region connection for copy: %s", err)
|
||||
}
|
||||
|
||||
regionconn := ec2.New(session.Copy(&aws.Config{
|
||||
Region: aws.String(target),
|
||||
}))
|
||||
|
||||
return regionconn, nil
|
||||
}
|
||||
|
||||
// amiRegionCopy does a copy for the given AMI to the target region and
|
||||
// returns the resulting ID and snapshot IDs, or error.
|
||||
func (s *StepAMIRegionCopy) amiRegionCopy(ctx context.Context, state multistep.StateBag, config *AccessConfig, name, imageId,
|
||||
target, source, keyId string, encrypt *bool) (string, []string, error) {
|
||||
snapshotIds := []string{}
|
||||
|
||||
if s.getRegionConn == nil {
|
||||
s.getRegionConn = getRegionConn
|
||||
}
|
||||
|
||||
regionconn, err := s.getRegionConn(config, target)
|
||||
if err != nil {
|
||||
return "", snapshotIds, err
|
||||
}
|
||||
resp, err := regionconn.CopyImage(&ec2.CopyImageInput{
|
||||
SourceRegion: &source,
|
||||
SourceImageId: &imageId,
|
||||
Name: &name,
|
||||
Encrypted: encrypt,
|
||||
KmsKeyId: aws.String(keyId),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", snapshotIds, fmt.Errorf("Error Copying AMI (%s) to region (%s): %s",
|
||||
imageId, target, err)
|
||||
}
|
||||
|
||||
// Wait for the image to become ready
|
||||
if err := s.AccessConfig.PollingConfig.WaitUntilAMIAvailable(ctx, regionconn, *resp.ImageId); err != nil {
|
||||
return "", snapshotIds, fmt.Errorf("Error waiting for AMI (%s) in region (%s): %s",
|
||||
*resp.ImageId, target, err)
|
||||
}
|
||||
|
||||
// Getting snapshot IDs out of the copied AMI
|
||||
describeImageResp, err := regionconn.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{resp.ImageId}})
|
||||
if err != nil {
|
||||
return "", snapshotIds, fmt.Errorf("Error describing copied AMI (%s) in region (%s): %s",
|
||||
imageId, target, err)
|
||||
}
|
||||
|
||||
for _, blockDeviceMapping := range describeImageResp.Images[0].BlockDeviceMappings {
|
||||
if blockDeviceMapping.Ebs != nil && blockDeviceMapping.Ebs.SnapshotId != nil {
|
||||
snapshotIds = append(snapshotIds, *blockDeviceMapping.Ebs.SnapshotId)
|
||||
}
|
||||
}
|
||||
|
||||
return *resp.ImageId, snapshotIds, nil
|
||||
}
|
|
@ -1,404 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
)
|
||||
|
||||
// Define a mock struct to be used in unit tests for common aws steps.
|
||||
type mockEC2Conn struct {
|
||||
ec2iface.EC2API
|
||||
Config *aws.Config
|
||||
|
||||
// Counters to figure out what code path was taken
|
||||
copyImageCount int
|
||||
describeImagesCount int
|
||||
deregisterImageCount int
|
||||
deleteSnapshotCount int
|
||||
waitCount int
|
||||
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
func (m *mockEC2Conn) CopyImage(copyInput *ec2.CopyImageInput) (*ec2.CopyImageOutput, error) {
|
||||
m.lock.Lock()
|
||||
m.copyImageCount++
|
||||
m.lock.Unlock()
|
||||
copiedImage := fmt.Sprintf("%s-copied-%d", *copyInput.SourceImageId, m.copyImageCount)
|
||||
output := &ec2.CopyImageOutput{
|
||||
ImageId: &copiedImage,
|
||||
}
|
||||
return output, nil
|
||||
}
|
||||
|
||||
// functions we have to create mock responses for in order for test to run
|
||||
func (m *mockEC2Conn) DescribeImages(*ec2.DescribeImagesInput) (*ec2.DescribeImagesOutput, error) {
|
||||
m.lock.Lock()
|
||||
m.describeImagesCount++
|
||||
m.lock.Unlock()
|
||||
output := &ec2.DescribeImagesOutput{
|
||||
Images: []*ec2.Image{{}},
|
||||
}
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func (m *mockEC2Conn) DeregisterImage(*ec2.DeregisterImageInput) (*ec2.DeregisterImageOutput, error) {
|
||||
m.lock.Lock()
|
||||
m.deregisterImageCount++
|
||||
m.lock.Unlock()
|
||||
output := &ec2.DeregisterImageOutput{}
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func (m *mockEC2Conn) DeleteSnapshot(*ec2.DeleteSnapshotInput) (*ec2.DeleteSnapshotOutput, error) {
|
||||
m.lock.Lock()
|
||||
m.deleteSnapshotCount++
|
||||
m.lock.Unlock()
|
||||
output := &ec2.DeleteSnapshotOutput{}
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func (m *mockEC2Conn) WaitUntilImageAvailableWithContext(aws.Context, *ec2.DescribeImagesInput, ...request.WaiterOption) error {
|
||||
m.lock.Lock()
|
||||
m.waitCount++
|
||||
m.lock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func getMockConn(config *AccessConfig, target string) (ec2iface.EC2API, error) {
|
||||
mockConn := &mockEC2Conn{
|
||||
Config: aws.NewConfig(),
|
||||
}
|
||||
|
||||
return mockConn, nil
|
||||
}
|
||||
|
||||
// Create statebag for running test
|
||||
func tState() multistep.StateBag {
|
||||
state := new(multistep.BasicStateBag)
|
||||
state.Put("ui", &packersdk.BasicUi{
|
||||
Reader: new(bytes.Buffer),
|
||||
Writer: new(bytes.Buffer),
|
||||
})
|
||||
state.Put("amis", map[string]string{"us-east-1": "ami-12345"})
|
||||
state.Put("snapshots", map[string][]string{"us-east-1": {"snap-0012345"}})
|
||||
conn, _ := getMockConn(&AccessConfig{}, "us-east-2")
|
||||
state.Put("ec2", conn)
|
||||
return state
|
||||
}
|
||||
|
||||
func TestStepAMIRegionCopy_duplicates(t *testing.T) {
|
||||
// ------------------------------------------------------------------------
|
||||
// Test that if the original region is added to both Regions and Region,
|
||||
// the ami is only copied once (with encryption).
|
||||
// ------------------------------------------------------------------------
|
||||
|
||||
stepAMIRegionCopy := StepAMIRegionCopy{
|
||||
AccessConfig: testAccessConfig(),
|
||||
Regions: []string{"us-east-1"},
|
||||
AMIKmsKeyId: "12345",
|
||||
// Original region key in regionkeyids is different than in amikmskeyid
|
||||
RegionKeyIds: map[string]string{"us-east-1": "12345"},
|
||||
EncryptBootVolume: config.TriTrue,
|
||||
Name: "fake-ami-name",
|
||||
OriginalRegion: "us-east-1",
|
||||
}
|
||||
// mock out the region connection code
|
||||
stepAMIRegionCopy.getRegionConn = getMockConn
|
||||
|
||||
state := tState()
|
||||
state.Put("intermediary_image", true)
|
||||
stepAMIRegionCopy.Run(context.Background(), state)
|
||||
|
||||
if len(stepAMIRegionCopy.Regions) != 1 {
|
||||
t.Fatalf("Should have added original ami to Regions one time only")
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
// Both Region and Regions set, but no encryption - shouldn't copy anything
|
||||
// ------------------------------------------------------------------------
|
||||
|
||||
// the ami is only copied once.
|
||||
stepAMIRegionCopy = StepAMIRegionCopy{
|
||||
AccessConfig: testAccessConfig(),
|
||||
Regions: []string{"us-east-1"},
|
||||
Name: "fake-ami-name",
|
||||
OriginalRegion: "us-east-1",
|
||||
}
|
||||
// mock out the region connection code
|
||||
state.Put("intermediary_image", false)
|
||||
stepAMIRegionCopy.getRegionConn = getMockConn
|
||||
stepAMIRegionCopy.Run(context.Background(), state)
|
||||
|
||||
if len(stepAMIRegionCopy.Regions) != 0 {
|
||||
t.Fatalf("Should not have added original ami to Regions; not encrypting")
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
// Both Region and Regions set, but no encryption - shouldn't copy anything,
|
||||
// this tests false as opposed to nil value above.
|
||||
// ------------------------------------------------------------------------
|
||||
|
||||
// the ami is only copied once.
|
||||
stepAMIRegionCopy = StepAMIRegionCopy{
|
||||
AccessConfig: testAccessConfig(),
|
||||
Regions: []string{"us-east-1"},
|
||||
EncryptBootVolume: config.TriFalse,
|
||||
Name: "fake-ami-name",
|
||||
OriginalRegion: "us-east-1",
|
||||
}
|
||||
// mock out the region connection code
|
||||
state.Put("intermediary_image", false)
|
||||
stepAMIRegionCopy.getRegionConn = getMockConn
|
||||
stepAMIRegionCopy.Run(context.Background(), state)
|
||||
|
||||
if len(stepAMIRegionCopy.Regions) != 0 {
|
||||
t.Fatalf("Should not have added original ami to Regions once; not" +
|
||||
"encrypting")
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
// Multiple regions, many duplicates, and encryption (this shouldn't ever
|
||||
// happen because of our template validation, but good to test it.)
|
||||
// ------------------------------------------------------------------------
|
||||
|
||||
stepAMIRegionCopy = StepAMIRegionCopy{
|
||||
AccessConfig: testAccessConfig(),
|
||||
// Many duplicates for only 3 actual values
|
||||
Regions: []string{"us-east-1", "us-west-2", "us-west-2", "ap-east-1", "ap-east-1", "ap-east-1"},
|
||||
AMIKmsKeyId: "IlikePancakes",
|
||||
// Original region key in regionkeyids is different than in amikmskeyid
|
||||
RegionKeyIds: map[string]string{"us-east-1": "12345", "us-west-2": "abcde", "ap-east-1": "xyz"},
|
||||
EncryptBootVolume: config.TriTrue,
|
||||
Name: "fake-ami-name",
|
||||
OriginalRegion: "us-east-1",
|
||||
}
|
||||
// mock out the region connection code
|
||||
stepAMIRegionCopy.getRegionConn = getMockConn
|
||||
state.Put("intermediary_image", true)
|
||||
stepAMIRegionCopy.Run(context.Background(), state)
|
||||
|
||||
if len(stepAMIRegionCopy.Regions) != 3 {
|
||||
t.Fatalf("Each AMI should have been added to Regions one time only.")
|
||||
}
|
||||
|
||||
// Also verify that we respect RegionKeyIds over AMIKmsKeyIds:
|
||||
if stepAMIRegionCopy.RegionKeyIds["us-east-1"] != "12345" {
|
||||
t.Fatalf("RegionKeyIds should take precedence over AmiKmsKeyIds")
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
// Multiple regions, many duplicates, NO encryption
|
||||
// ------------------------------------------------------------------------
|
||||
|
||||
stepAMIRegionCopy = StepAMIRegionCopy{
|
||||
AccessConfig: testAccessConfig(),
|
||||
// Many duplicates for only 3 actual values
|
||||
Regions: []string{"us-east-1", "us-west-2", "us-west-2", "ap-east-1", "ap-east-1", "ap-east-1"},
|
||||
Name: "fake-ami-name",
|
||||
OriginalRegion: "us-east-1",
|
||||
}
|
||||
// mock out the region connection code
|
||||
stepAMIRegionCopy.getRegionConn = getMockConn
|
||||
state.Put("intermediary_image", false)
|
||||
stepAMIRegionCopy.Run(context.Background(), state)
|
||||
|
||||
if len(stepAMIRegionCopy.Regions) != 2 {
|
||||
t.Fatalf("Each AMI should have been added to Regions one time only, " +
|
||||
"and original region shouldn't be added at all")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepAmiRegionCopy_nil_encryption(t *testing.T) {
|
||||
// create step
|
||||
stepAMIRegionCopy := StepAMIRegionCopy{
|
||||
AccessConfig: testAccessConfig(),
|
||||
Regions: make([]string, 0),
|
||||
AMIKmsKeyId: "",
|
||||
RegionKeyIds: make(map[string]string),
|
||||
EncryptBootVolume: config.TriUnset,
|
||||
Name: "fake-ami-name",
|
||||
OriginalRegion: "us-east-1",
|
||||
}
|
||||
// mock out the region connection code
|
||||
stepAMIRegionCopy.getRegionConn = getMockConn
|
||||
|
||||
state := tState()
|
||||
state.Put("intermediary_image", false)
|
||||
stepAMIRegionCopy.Run(context.Background(), state)
|
||||
|
||||
if stepAMIRegionCopy.toDelete != "" {
|
||||
t.Fatalf("Shouldn't have an intermediary ami if encrypt is nil")
|
||||
}
|
||||
if len(stepAMIRegionCopy.Regions) != 0 {
|
||||
t.Fatalf("Should not have added original ami to original region")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepAmiRegionCopy_true_encryption(t *testing.T) {
|
||||
// create step
|
||||
stepAMIRegionCopy := StepAMIRegionCopy{
|
||||
AccessConfig: testAccessConfig(),
|
||||
Regions: make([]string, 0),
|
||||
AMIKmsKeyId: "",
|
||||
RegionKeyIds: make(map[string]string),
|
||||
EncryptBootVolume: config.TriTrue,
|
||||
Name: "fake-ami-name",
|
||||
OriginalRegion: "us-east-1",
|
||||
}
|
||||
// mock out the region connection code
|
||||
stepAMIRegionCopy.getRegionConn = getMockConn
|
||||
|
||||
state := tState()
|
||||
state.Put("intermediary_image", true)
|
||||
stepAMIRegionCopy.Run(context.Background(), state)
|
||||
|
||||
if stepAMIRegionCopy.toDelete == "" {
|
||||
t.Fatalf("Should delete original AMI if encrypted=true")
|
||||
}
|
||||
if len(stepAMIRegionCopy.Regions) == 0 {
|
||||
t.Fatalf("Should have added original ami to Regions")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepAmiRegionCopy_nil_intermediary(t *testing.T) {
|
||||
// create step
|
||||
stepAMIRegionCopy := StepAMIRegionCopy{
|
||||
AccessConfig: testAccessConfig(),
|
||||
Regions: make([]string, 0),
|
||||
AMIKmsKeyId: "",
|
||||
RegionKeyIds: make(map[string]string),
|
||||
EncryptBootVolume: config.TriFalse,
|
||||
Name: "fake-ami-name",
|
||||
OriginalRegion: "us-east-1",
|
||||
}
|
||||
// mock out the region connection code
|
||||
stepAMIRegionCopy.getRegionConn = getMockConn
|
||||
|
||||
state := tState()
|
||||
stepAMIRegionCopy.Run(context.Background(), state)
|
||||
|
||||
if stepAMIRegionCopy.toDelete != "" {
|
||||
t.Fatalf("Should not delete original AMI if no intermediary")
|
||||
}
|
||||
if len(stepAMIRegionCopy.Regions) != 0 {
|
||||
t.Fatalf("Should not have added original ami to Regions")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepAmiRegionCopy_AMISkipBuildRegion(t *testing.T) {
|
||||
// ------------------------------------------------------------------------
|
||||
// skip build region is true
|
||||
// ------------------------------------------------------------------------
|
||||
|
||||
stepAMIRegionCopy := StepAMIRegionCopy{
|
||||
AccessConfig: testAccessConfig(),
|
||||
Regions: []string{"us-west-1"},
|
||||
AMIKmsKeyId: "",
|
||||
RegionKeyIds: map[string]string{"us-west-1": "abcde"},
|
||||
Name: "fake-ami-name",
|
||||
OriginalRegion: "us-east-1",
|
||||
AMISkipBuildRegion: true,
|
||||
}
|
||||
// mock out the region connection code
|
||||
stepAMIRegionCopy.getRegionConn = getMockConn
|
||||
|
||||
state := tState()
|
||||
state.Put("intermediary_image", true)
|
||||
stepAMIRegionCopy.Run(context.Background(), state)
|
||||
|
||||
if stepAMIRegionCopy.toDelete == "" {
|
||||
t.Fatalf("Should delete original AMI if skip_save_build_region=true")
|
||||
}
|
||||
if len(stepAMIRegionCopy.Regions) != 1 {
|
||||
t.Fatalf("Should not have added original ami to Regions; Regions: %#v", stepAMIRegionCopy.Regions)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
// skip build region is false.
|
||||
// ------------------------------------------------------------------------
|
||||
stepAMIRegionCopy = StepAMIRegionCopy{
|
||||
AccessConfig: testAccessConfig(),
|
||||
Regions: []string{"us-west-1"},
|
||||
AMIKmsKeyId: "",
|
||||
RegionKeyIds: make(map[string]string),
|
||||
Name: "fake-ami-name",
|
||||
OriginalRegion: "us-east-1",
|
||||
AMISkipBuildRegion: false,
|
||||
}
|
||||
// mock out the region connection code
|
||||
stepAMIRegionCopy.getRegionConn = getMockConn
|
||||
|
||||
state.Put("intermediary_image", false) // not encrypted
|
||||
stepAMIRegionCopy.Run(context.Background(), state)
|
||||
|
||||
if stepAMIRegionCopy.toDelete != "" {
|
||||
t.Fatalf("Shouldn't have an intermediary AMI, so dont delete original ami")
|
||||
}
|
||||
if len(stepAMIRegionCopy.Regions) != 1 {
|
||||
t.Fatalf("Should not have added original ami to Regions; Regions: %#v", stepAMIRegionCopy.Regions)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
// skip build region is false, but encrypt is true
|
||||
// ------------------------------------------------------------------------
|
||||
stepAMIRegionCopy = StepAMIRegionCopy{
|
||||
AccessConfig: testAccessConfig(),
|
||||
Regions: []string{"us-west-1"},
|
||||
AMIKmsKeyId: "",
|
||||
RegionKeyIds: map[string]string{"us-west-1": "abcde"},
|
||||
Name: "fake-ami-name",
|
||||
OriginalRegion: "us-east-1",
|
||||
AMISkipBuildRegion: false,
|
||||
EncryptBootVolume: config.TriTrue,
|
||||
}
|
||||
// mock out the region connection code
|
||||
stepAMIRegionCopy.getRegionConn = getMockConn
|
||||
|
||||
state.Put("intermediary_image", true) //encrypted
|
||||
stepAMIRegionCopy.Run(context.Background(), state)
|
||||
|
||||
if stepAMIRegionCopy.toDelete == "" {
|
||||
t.Fatalf("Have to delete intermediary AMI")
|
||||
}
|
||||
if len(stepAMIRegionCopy.Regions) != 2 {
|
||||
t.Fatalf("Should have added original ami to Regions; Regions: %#v", stepAMIRegionCopy.Regions)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
// skip build region is true, and encrypt is true
|
||||
// ------------------------------------------------------------------------
|
||||
stepAMIRegionCopy = StepAMIRegionCopy{
|
||||
AccessConfig: testAccessConfig(),
|
||||
Regions: []string{"us-west-1"},
|
||||
AMIKmsKeyId: "",
|
||||
RegionKeyIds: map[string]string{"us-west-1": "abcde"},
|
||||
Name: "fake-ami-name",
|
||||
OriginalRegion: "us-east-1",
|
||||
AMISkipBuildRegion: true,
|
||||
EncryptBootVolume: config.TriTrue,
|
||||
}
|
||||
// mock out the region connection code
|
||||
stepAMIRegionCopy.getRegionConn = getMockConn
|
||||
|
||||
state.Put("intermediary_image", true) //encrypted
|
||||
stepAMIRegionCopy.Run(context.Background(), state)
|
||||
|
||||
if stepAMIRegionCopy.toDelete == "" {
|
||||
t.Fatalf("Have to delete intermediary AMI")
|
||||
}
|
||||
if len(stepAMIRegionCopy.Regions) != 1 {
|
||||
t.Fatalf("Should not have added original ami to Regions; Regions: %#v", stepAMIRegionCopy.Regions)
|
||||
}
|
||||
}
|
|
@ -1,99 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// stepCleanupVolumes cleans up any orphaned volumes that were not designated to
|
||||
// remain after termination of the instance. These volumes are typically ones
|
||||
// that are marked as "delete on terminate:false" in the source_ami of a build.
|
||||
type StepCleanupVolumes struct {
|
||||
LaunchMappings BlockDevices
|
||||
}
|
||||
|
||||
func (s *StepCleanupVolumes) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
// stepCleanupVolumes is for Cleanup only
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepCleanupVolumes) Cleanup(state multistep.StateBag) {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
instanceRaw := state.Get("instance")
|
||||
var instance *ec2.Instance
|
||||
if instanceRaw != nil {
|
||||
instance = instanceRaw.(*ec2.Instance)
|
||||
}
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
if instance == nil {
|
||||
ui.Say("No volumes to clean up, skipping")
|
||||
return
|
||||
}
|
||||
|
||||
ui.Say("Cleaning up any extra volumes...")
|
||||
|
||||
// Collect Volume information from the cached Instance as a map of volume-id
|
||||
// to device name, to compare with save list below
|
||||
var vl []*string
|
||||
volList := make(map[string]string)
|
||||
for _, bdm := range instance.BlockDeviceMappings {
|
||||
if bdm.Ebs != nil {
|
||||
vl = append(vl, bdm.Ebs.VolumeId)
|
||||
volList[*bdm.Ebs.VolumeId] = *bdm.DeviceName
|
||||
}
|
||||
}
|
||||
|
||||
// Using the volume list from the cached Instance, check with AWS for up to
|
||||
// date information on them
|
||||
resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{
|
||||
Filters: []*ec2.Filter{
|
||||
{
|
||||
Name: aws.String("volume-id"),
|
||||
Values: vl,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("Error describing volumes: %s", err))
|
||||
return
|
||||
}
|
||||
|
||||
// If any of the returned volumes are in a "deleting" stage or otherwise not
|
||||
// available, remove them from the list of volumes
|
||||
for _, v := range resp.Volumes {
|
||||
if v.State != nil && *v.State != "available" {
|
||||
delete(volList, *v.VolumeId)
|
||||
}
|
||||
}
|
||||
|
||||
if len(resp.Volumes) == 0 {
|
||||
ui.Say("No volumes to clean up, skipping")
|
||||
return
|
||||
}
|
||||
|
||||
// Filter out any devices created as part of the launch mappings, since
|
||||
// we'll let amazon follow the `delete_on_termination` setting.
|
||||
for _, b := range s.LaunchMappings {
|
||||
for volKey, volName := range volList {
|
||||
if volName == b.DeviceName {
|
||||
delete(volList, volKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy remaining volumes
|
||||
for k := range volList {
|
||||
ui.Say(fmt.Sprintf("Destroying volume (%s)...", k))
|
||||
_, err := ec2conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeId: aws.String(k)})
|
||||
if err != nil {
|
||||
ui.Say(fmt.Sprintf("Error deleting volume: %s", err))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
|
@ -1,126 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ssm"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/net"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
pssm "github.com/hashicorp/packer/builder/amazon/common/ssm"
|
||||
)
|
||||
|
||||
type StepCreateSSMTunnel struct {
|
||||
AWSSession *session.Session
|
||||
Region string
|
||||
LocalPortNumber int
|
||||
RemotePortNumber int
|
||||
SSMAgentEnabled bool
|
||||
PauseBeforeSSM time.Duration
|
||||
stopSSMCommand func()
|
||||
}
|
||||
|
||||
// Run executes the Packer build step that creates a session tunnel.
|
||||
func (s *StepCreateSSMTunnel) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
if !s.SSMAgentEnabled {
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Wait for the remote port to become available
|
||||
if s.PauseBeforeSSM > 0 {
|
||||
ui.Say(fmt.Sprintf("Waiting %s before establishing the SSM session...", s.PauseBeforeSSM))
|
||||
select {
|
||||
case <-time.After(s.PauseBeforeSSM):
|
||||
break
|
||||
case <-ctx.Done():
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
|
||||
// Configure local port number
|
||||
if err := s.ConfigureLocalHostPort(ctx); err != nil {
|
||||
err := fmt.Errorf("error finding an available port to initiate a session tunnel: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Get instance information
|
||||
instance, ok := state.Get("instance").(*ec2.Instance)
|
||||
if !ok {
|
||||
err := fmt.Errorf("error encountered in obtaining target instance id for session tunnel")
|
||||
ui.Error(err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
state.Put("sessionPort", s.LocalPortNumber)
|
||||
|
||||
ssmCtx, ssmCancel := context.WithCancel(ctx)
|
||||
s.stopSSMCommand = ssmCancel
|
||||
|
||||
go func() {
|
||||
ssmconn := ssm.New(s.AWSSession)
|
||||
err := pssm.Session{
|
||||
SvcClient: ssmconn,
|
||||
InstanceID: aws.StringValue(instance.InstanceId),
|
||||
RemotePort: s.RemotePortNumber,
|
||||
LocalPort: s.LocalPortNumber,
|
||||
Region: s.Region,
|
||||
}.Start(ssmCtx, ui)
|
||||
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("ssm error: %s", err))
|
||||
}
|
||||
}()
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Cleanup terminates an active session on AWS, which in turn terminates the associated tunnel process running on the local machine.
|
||||
func (s *StepCreateSSMTunnel) Cleanup(state multistep.StateBag) {
|
||||
if !s.SSMAgentEnabled {
|
||||
return
|
||||
}
|
||||
|
||||
if s.stopSSMCommand != nil {
|
||||
s.stopSSMCommand()
|
||||
}
|
||||
}
|
||||
|
||||
// ConfigureLocalHostPort finds an available port on the localhost that can be used for the remote tunnel.
|
||||
// Defaults to using s.LocalPortNumber if it is set.
|
||||
func (s *StepCreateSSMTunnel) ConfigureLocalHostPort(ctx context.Context) error {
|
||||
minPortNumber, maxPortNumber := 8000, 9000
|
||||
|
||||
if s.LocalPortNumber != 0 {
|
||||
minPortNumber = s.LocalPortNumber
|
||||
maxPortNumber = minPortNumber
|
||||
}
|
||||
|
||||
// Find an available TCP port for our HTTP server
|
||||
l, err := net.ListenRangeConfig{
|
||||
Min: minPortNumber,
|
||||
Max: maxPortNumber,
|
||||
Addr: "0.0.0.0",
|
||||
Network: "tcp",
|
||||
}.Listen(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.LocalPortNumber = l.Port
|
||||
// Stop listening on selected port so that the AWS session-manager-plugin can use it.
|
||||
// The port is closed right before we start the session to avoid two Packer builds from getting the same port - fingers-crossed
|
||||
l.Close()
|
||||
|
||||
return nil
|
||||
|
||||
}
|
|
@ -1,146 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/retry"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
"github.com/hashicorp/packer/builder/amazon/common/awserrors"
|
||||
)
|
||||
|
||||
type StepCreateTags struct {
|
||||
AMISkipCreateImage bool
|
||||
|
||||
Tags map[string]string
|
||||
SnapshotTags map[string]string
|
||||
Ctx interpolate.Context
|
||||
}
|
||||
|
||||
func (s *StepCreateTags) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
session := state.Get("awsSession").(*session.Session)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
if s.AMISkipCreateImage {
|
||||
ui.Say("Skipping AMI create tags...")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
amis := state.Get("amis").(map[string]string)
|
||||
|
||||
if len(s.Tags) == 0 && len(s.SnapshotTags) == 0 {
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Adds tags to AMIs and snapshots
|
||||
for region, ami := range amis {
|
||||
ui.Say(fmt.Sprintf("Adding tags to AMI (%s)...", ami))
|
||||
|
||||
regionConn := ec2.New(session, &aws.Config{
|
||||
Region: aws.String(region),
|
||||
})
|
||||
|
||||
// Retrieve image list for given AMI
|
||||
resourceIds := []*string{&ami}
|
||||
imageResp, err := regionConn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
ImageIds: resourceIds,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error retrieving details for AMI (%s): %s", ami, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
if len(imageResp.Images) == 0 {
|
||||
err := fmt.Errorf("Error retrieving details for AMI (%s), no images found", ami)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
image := imageResp.Images[0]
|
||||
snapshotIds := []*string{}
|
||||
|
||||
// Add only those with a Snapshot ID, i.e. not Ephemeral
|
||||
for _, device := range image.BlockDeviceMappings {
|
||||
if device.Ebs != nil && device.Ebs.SnapshotId != nil {
|
||||
ui.Say(fmt.Sprintf("Tagging snapshot: %s", *device.Ebs.SnapshotId))
|
||||
resourceIds = append(resourceIds, device.Ebs.SnapshotId)
|
||||
snapshotIds = append(snapshotIds, device.Ebs.SnapshotId)
|
||||
}
|
||||
}
|
||||
|
||||
// Convert tags to ec2.Tag format
|
||||
ui.Say("Creating AMI tags")
|
||||
amiTags, err := TagMap(s.Tags).EC2Tags(s.Ctx, *ec2conn.Config.Region, state)
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
amiTags.Report(ui)
|
||||
|
||||
ui.Say("Creating snapshot tags")
|
||||
snapshotTags, err := TagMap(s.SnapshotTags).EC2Tags(s.Ctx, *ec2conn.Config.Region, state)
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
snapshotTags.Report(ui)
|
||||
|
||||
// Retry creating tags for about 2.5 minutes
|
||||
err = retry.Config{Tries: 11, ShouldRetry: func(error) bool {
|
||||
if awserrors.Matches(err, "InvalidAMIID.NotFound", "") || awserrors.Matches(err, "InvalidSnapshot.NotFound", "") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
},
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
// Tag images and snapshots
|
||||
|
||||
var err error
|
||||
if len(amiTags) > 0 {
|
||||
_, err = regionConn.CreateTags(&ec2.CreateTagsInput{
|
||||
Resources: resourceIds,
|
||||
Tags: amiTags,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Override tags on snapshots
|
||||
if len(snapshotTags) > 0 {
|
||||
_, err = regionConn.CreateTags(&ec2.CreateTagsInput{
|
||||
Resources: snapshotIds,
|
||||
Tags: snapshotTags,
|
||||
})
|
||||
}
|
||||
return err
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error adding tags to Resources (%#v): %s", resourceIds, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepCreateTags) Cleanup(state multistep.StateBag) {
|
||||
// No cleanup...
|
||||
}
|
|
@ -1,96 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
type StepDeregisterAMI struct {
|
||||
AccessConfig *AccessConfig
|
||||
ForceDeregister bool
|
||||
ForceDeleteSnapshot bool
|
||||
AMIName string
|
||||
Regions []string
|
||||
}
|
||||
|
||||
func (s *StepDeregisterAMI) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
// Check for force deregister
|
||||
if !s.ForceDeregister {
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
// Add the session region to list of regions will deregister AMIs in
|
||||
regions := append(s.Regions, *ec2conn.Config.Region)
|
||||
|
||||
for _, region := range regions {
|
||||
// get new connection for each region in which we need to deregister vms
|
||||
session, err := s.AccessConfig.Session()
|
||||
if err != nil {
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
regionconn := ec2.New(session.Copy(&aws.Config{
|
||||
Region: aws.String(region),
|
||||
}))
|
||||
|
||||
resp, err := regionconn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
Owners: aws.StringSlice([]string{"self"}),
|
||||
Filters: []*ec2.Filter{{
|
||||
Name: aws.String("name"),
|
||||
Values: aws.StringSlice([]string{s.AMIName}),
|
||||
}}})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error describing AMI: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Deregister image(s) by name
|
||||
for _, i := range resp.Images {
|
||||
_, err := regionconn.DeregisterImage(&ec2.DeregisterImageInput{
|
||||
ImageId: i.ImageId,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error deregistering existing AMI: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
ui.Say(fmt.Sprintf("Deregistered AMI %s, id: %s", s.AMIName, *i.ImageId))
|
||||
|
||||
// Delete snapshot(s) by image
|
||||
if s.ForceDeleteSnapshot {
|
||||
for _, b := range i.BlockDeviceMappings {
|
||||
if b.Ebs != nil && aws.StringValue(b.Ebs.SnapshotId) != "" {
|
||||
_, err := regionconn.DeleteSnapshot(&ec2.DeleteSnapshotInput{
|
||||
SnapshotId: b.Ebs.SnapshotId,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error deleting existing snapshot: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
ui.Say(fmt.Sprintf("Deleted snapshot: %s", *b.Ebs.SnapshotId))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepDeregisterAMI) Cleanup(state multistep.StateBag) {
|
||||
}
|
|
@ -1,182 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/retry"
|
||||
)
|
||||
|
||||
// StepGetPassword reads the password from a Windows server and sets it
|
||||
// on the WinRM config.
|
||||
type StepGetPassword struct {
|
||||
Debug bool
|
||||
Comm *communicator.Config
|
||||
Timeout time.Duration
|
||||
BuildName string
|
||||
}
|
||||
|
||||
func (s *StepGetPassword) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
// Skip if we're not using winrm
|
||||
if s.Comm.Type != "winrm" {
|
||||
log.Printf("[INFO] Not using winrm communicator, skipping get password...")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// If we already have a password, skip it
|
||||
if s.Comm.WinRMPassword != "" {
|
||||
ui.Say("Skipping waiting for password since WinRM password set...")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Get the password
|
||||
var password string
|
||||
var err error
|
||||
waitDone := make(chan bool, 1)
|
||||
go func() {
|
||||
ui.Say("Waiting for auto-generated password for instance...")
|
||||
ui.Message(
|
||||
"It is normal for this process to take up to 15 minutes,\n" +
|
||||
"but it usually takes around 5. Please wait.")
|
||||
password, err = s.waitForPassword(ctx, state)
|
||||
waitDone <- true
|
||||
}()
|
||||
|
||||
timeout := time.After(s.Timeout)
|
||||
WaitLoop:
|
||||
for {
|
||||
// Wait for either SSH to become available, a timeout to occur,
|
||||
// or an interrupt to come through.
|
||||
select {
|
||||
case <-waitDone:
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("Error waiting for password: %s", err))
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
ui.Message(fmt.Sprintf(" \nPassword retrieved!"))
|
||||
s.Comm.WinRMPassword = password
|
||||
break WaitLoop
|
||||
case <-timeout:
|
||||
err := fmt.Errorf("Timeout waiting for password.")
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
case <-ctx.Done():
|
||||
// The step sequence was cancelled, so cancel waiting for password
|
||||
// and just start the halting process.
|
||||
log.Println("[WARN] Interrupt detected, quitting waiting for password.")
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
|
||||
// In debug-mode, we output the password
|
||||
if s.Debug {
|
||||
ui.Message(fmt.Sprintf(
|
||||
"Password (since debug is enabled): %s", s.Comm.WinRMPassword))
|
||||
}
|
||||
// store so that we can access this later during provisioning
|
||||
state.Put("winrm_password", s.Comm.WinRMPassword)
|
||||
packersdk.LogSecretFilter.Set(s.Comm.WinRMPassword)
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepGetPassword) Cleanup(multistep.StateBag) {}
|
||||
|
||||
func (s *StepGetPassword) waitForPassword(ctx context.Context, state multistep.StateBag) (string, error) {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
instance := state.Get("instance").(*ec2.Instance)
|
||||
privateKey := s.Comm.SSHPrivateKey
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Println("[INFO] Retrieve password wait cancelled. Exiting loop.")
|
||||
return "", errors.New("Retrieve password wait cancelled")
|
||||
case <-time.After(5 * time.Second):
|
||||
}
|
||||
|
||||
// Wrap in a retry so that we don't fail on rate-limiting.
|
||||
log.Printf("Retrieving auto-generated instance password...")
|
||||
var resp *ec2.GetPasswordDataOutput
|
||||
err := retry.Config{
|
||||
Tries: 11,
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
var err error
|
||||
resp, err = ec2conn.GetPasswordData(&ec2.GetPasswordDataInput{
|
||||
InstanceId: instance.InstanceId,
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error retrieving auto-generated instance password: %s", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if resp.PasswordData != nil && *resp.PasswordData != "" {
|
||||
decryptedPassword, err := decryptPasswordDataWithPrivateKey(
|
||||
*resp.PasswordData, []byte(privateKey))
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error decrypting auto-generated instance password: %s", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
return decryptedPassword, nil
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Password is blank, will retry...")
|
||||
}
|
||||
}
|
||||
|
||||
func decryptPasswordDataWithPrivateKey(passwordData string, pemBytes []byte) (string, error) {
|
||||
encryptedPasswd, err := base64.StdEncoding.DecodeString(passwordData)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
block, _ := pem.Decode(pemBytes)
|
||||
var asn1Bytes []byte
|
||||
if _, ok := block.Headers["DEK-Info"]; ok {
|
||||
return "", errors.New("encrypted private key isn't yet supported")
|
||||
/*
|
||||
asn1Bytes, err = x509.DecryptPEMBlock(block, password)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
*/
|
||||
} else {
|
||||
asn1Bytes = block.Bytes
|
||||
}
|
||||
|
||||
key, err := x509.ParsePKCS1PrivateKey(asn1Bytes)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
out, err := rsa.DecryptPKCS1v15(nil, key, encryptedPasswd)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(out), nil
|
||||
}
|
|
@ -1,194 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/uuid"
|
||||
)
|
||||
|
||||
type StepIamInstanceProfile struct {
|
||||
IamInstanceProfile string
|
||||
SkipProfileValidation bool
|
||||
TemporaryIamInstanceProfilePolicyDocument *PolicyDocument
|
||||
createdInstanceProfileName string
|
||||
createdRoleName string
|
||||
createdPolicyName string
|
||||
roleIsAttached bool
|
||||
}
|
||||
|
||||
func (s *StepIamInstanceProfile) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
iamsvc := state.Get("iam").(*iam.IAM)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
state.Put("iamInstanceProfile", "")
|
||||
|
||||
if len(s.IamInstanceProfile) > 0 {
|
||||
if !s.SkipProfileValidation {
|
||||
_, err := iamsvc.GetInstanceProfile(
|
||||
&iam.GetInstanceProfileInput{
|
||||
InstanceProfileName: aws.String(s.IamInstanceProfile),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Couldn't find specified instance profile: %s", err)
|
||||
log.Printf("[DEBUG] %s", err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
log.Printf("Using specified instance profile: %v", s.IamInstanceProfile)
|
||||
state.Put("iamInstanceProfile", s.IamInstanceProfile)
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
if s.TemporaryIamInstanceProfilePolicyDocument != nil {
|
||||
// Create the profile
|
||||
profileName := fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
|
||||
|
||||
policy, err := json.Marshal(s.TemporaryIamInstanceProfilePolicyDocument)
|
||||
if err != nil {
|
||||
ui.Error(err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Creating temporary instance profile for this instance: %s", profileName))
|
||||
|
||||
profileResp, err := iamsvc.CreateInstanceProfile(&iam.CreateInstanceProfileInput{
|
||||
InstanceProfileName: aws.String(profileName),
|
||||
})
|
||||
if err != nil {
|
||||
ui.Error(err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
s.createdInstanceProfileName = aws.StringValue(profileResp.InstanceProfile.InstanceProfileName)
|
||||
|
||||
log.Printf("[DEBUG] Waiting for temporary instance profile: %s", s.createdInstanceProfileName)
|
||||
err = iamsvc.WaitUntilInstanceProfileExists(&iam.GetInstanceProfileInput{
|
||||
InstanceProfileName: aws.String(s.createdInstanceProfileName),
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
log.Printf("[DEBUG] Found instance profile %s", s.createdInstanceProfileName)
|
||||
} else {
|
||||
err := fmt.Errorf("Timed out waiting for instance profile %s: %s", s.createdInstanceProfileName, err)
|
||||
log.Printf("[DEBUG] %s", err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Creating temporary role for this instance: %s", profileName))
|
||||
|
||||
roleResp, err := iamsvc.CreateRole(&iam.CreateRoleInput{
|
||||
RoleName: aws.String(profileName),
|
||||
Description: aws.String("Temporary role for Packer"),
|
||||
AssumeRolePolicyDocument: aws.String("{\"Version\": \"2012-10-17\",\"Statement\": [{\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"ec2.amazonaws.com\"},\"Action\": \"sts:AssumeRole\"}]}"),
|
||||
})
|
||||
if err != nil {
|
||||
ui.Error(err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
s.createdRoleName = aws.StringValue(roleResp.Role.RoleName)
|
||||
|
||||
log.Printf("[DEBUG] Waiting for temporary role: %s", s.createdInstanceProfileName)
|
||||
err = iamsvc.WaitUntilRoleExists(&iam.GetRoleInput{
|
||||
RoleName: aws.String(s.createdRoleName),
|
||||
})
|
||||
if err == nil {
|
||||
log.Printf("[DEBUG] Found temporary role %s", s.createdRoleName)
|
||||
} else {
|
||||
err := fmt.Errorf("Timed out waiting for temporary role %s: %s", s.createdRoleName, err)
|
||||
log.Printf("[DEBUG] %s", err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Attaching policy to the temporary role: %s", profileName))
|
||||
|
||||
_, err = iamsvc.PutRolePolicy(&iam.PutRolePolicyInput{
|
||||
RoleName: roleResp.Role.RoleName,
|
||||
PolicyName: aws.String(profileName),
|
||||
PolicyDocument: aws.String(string(policy)),
|
||||
})
|
||||
if err != nil {
|
||||
ui.Error(err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
s.createdPolicyName = aws.StringValue(roleResp.Role.RoleName)
|
||||
|
||||
_, err = iamsvc.AddRoleToInstanceProfile(&iam.AddRoleToInstanceProfileInput{
|
||||
RoleName: roleResp.Role.RoleName,
|
||||
InstanceProfileName: profileResp.InstanceProfile.InstanceProfileName,
|
||||
})
|
||||
if err != nil {
|
||||
ui.Error(err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
s.roleIsAttached = true
|
||||
state.Put("iamInstanceProfile", aws.StringValue(profileResp.InstanceProfile.InstanceProfileName))
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepIamInstanceProfile) Cleanup(state multistep.StateBag) {
|
||||
iamsvc := state.Get("iam").(*iam.IAM)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
var err error
|
||||
|
||||
if s.roleIsAttached == true {
|
||||
ui.Say("Detaching temporary role from instance profile...")
|
||||
|
||||
_, err := iamsvc.RemoveRoleFromInstanceProfile(&iam.RemoveRoleFromInstanceProfileInput{
|
||||
InstanceProfileName: aws.String(s.createdInstanceProfileName),
|
||||
RoleName: aws.String(s.createdRoleName),
|
||||
})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error %s. Please delete the role manually: %s", err.Error(), s.createdRoleName))
|
||||
}
|
||||
}
|
||||
|
||||
if s.createdPolicyName != "" {
|
||||
ui.Say("Removing policy from temporary role...")
|
||||
iamsvc.DeleteRolePolicy(&iam.DeleteRolePolicyInput{
|
||||
PolicyName: aws.String(s.createdPolicyName),
|
||||
RoleName: aws.String(s.createdRoleName),
|
||||
})
|
||||
}
|
||||
if s.createdRoleName != "" {
|
||||
ui.Say("Deleting temporary role...")
|
||||
|
||||
_, err = iamsvc.DeleteRole(&iam.DeleteRoleInput{RoleName: &s.createdRoleName})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error %s. Please delete the role manually: %s", err.Error(), s.createdRoleName))
|
||||
}
|
||||
}
|
||||
|
||||
if s.createdInstanceProfileName != "" {
|
||||
ui.Say("Deleting temporary instance profile...")
|
||||
|
||||
_, err = iamsvc.DeleteInstanceProfile(&iam.DeleteInstanceProfileInput{
|
||||
InstanceProfileName: &s.createdInstanceProfileName})
|
||||
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error %s. Please delete the instance profile manually: %s", err.Error(), s.createdInstanceProfileName))
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,134 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/retry"
|
||||
)
|
||||
|
||||
type StepKeyPair struct {
|
||||
Debug bool
|
||||
Comm *communicator.Config
|
||||
DebugKeyPath string
|
||||
|
||||
doCleanup bool
|
||||
}
|
||||
|
||||
func (s *StepKeyPair) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
if s.Comm.SSHPrivateKeyFile != "" {
|
||||
ui.Say("Using existing SSH private key")
|
||||
privateKeyBytes, err := s.Comm.ReadSSHPrivateKeyFile()
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
s.Comm.SSHPrivateKey = privateKeyBytes
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
if s.Comm.SSHAgentAuth && s.Comm.SSHKeyPairName == "" {
|
||||
ui.Say("Using SSH Agent with key pair in Source AMI")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
if s.Comm.SSHAgentAuth && s.Comm.SSHKeyPairName != "" {
|
||||
ui.Say(fmt.Sprintf("Using SSH Agent for existing key pair %s", s.Comm.SSHKeyPairName))
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
if s.Comm.SSHTemporaryKeyPairName == "" {
|
||||
ui.Say("Not using temporary keypair")
|
||||
s.Comm.SSHKeyPairName = ""
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
var keyResp *ec2.CreateKeyPairOutput
|
||||
|
||||
ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.Comm.SSHTemporaryKeyPairName))
|
||||
err := retry.Config{
|
||||
Tries: 11,
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
var err error
|
||||
keyResp, err = ec2conn.CreateKeyPair(&ec2.CreateKeyPairInput{
|
||||
KeyName: &s.Comm.SSHTemporaryKeyPairName})
|
||||
return err
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
s.doCleanup = true
|
||||
|
||||
// Set some data for use in future steps
|
||||
s.Comm.SSHKeyPairName = s.Comm.SSHTemporaryKeyPairName
|
||||
s.Comm.SSHPrivateKey = []byte(*keyResp.KeyMaterial)
|
||||
|
||||
// If we're in debug mode, output the private key to the working
|
||||
// directory.
|
||||
if s.Debug {
|
||||
ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.DebugKeyPath))
|
||||
f, err := os.Create(s.DebugKeyPath)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error saving debug key: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// Write the key out
|
||||
if _, err := f.Write([]byte(*keyResp.KeyMaterial)); err != nil {
|
||||
state.Put("error", fmt.Errorf("Error saving debug key: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Chmod it so that it is SSH ready
|
||||
if runtime.GOOS != "windows" {
|
||||
if err := f.Chmod(0600); err != nil {
|
||||
state.Put("error", fmt.Errorf("Error setting permissions of debug key: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepKeyPair) Cleanup(state multistep.StateBag) {
|
||||
if !s.doCleanup {
|
||||
return
|
||||
}
|
||||
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
// Remove the keypair
|
||||
ui.Say("Deleting temporary keypair...")
|
||||
_, err := ec2conn.DeleteKeyPair(&ec2.DeleteKeyPairInput{KeyName: &s.Comm.SSHTemporaryKeyPairName})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error cleaning up keypair. Please delete the key manually: %s", s.Comm.SSHTemporaryKeyPairName))
|
||||
}
|
||||
|
||||
// Also remove the physical key if we're debugging.
|
||||
if s.Debug {
|
||||
if err := os.Remove(s.DebugKeyPath); err != nil {
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error removing debug key '%s': %s", s.DebugKeyPath, err))
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,200 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
type StepModifyAMIAttributes struct {
|
||||
AMISkipCreateImage bool
|
||||
|
||||
Users []string
|
||||
Groups []string
|
||||
SnapshotUsers []string
|
||||
SnapshotGroups []string
|
||||
ProductCodes []string
|
||||
Description string
|
||||
Ctx interpolate.Context
|
||||
|
||||
GeneratedData *packerbuilderdata.GeneratedData
|
||||
}
|
||||
|
||||
func (s *StepModifyAMIAttributes) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
session := state.Get("awsSession").(*session.Session)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
if s.AMISkipCreateImage {
|
||||
ui.Say("Skipping AMI modify attributes...")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
amis := state.Get("amis").(map[string]string)
|
||||
snapshots := state.Get("snapshots").(map[string][]string)
|
||||
|
||||
// Determine if there is any work to do.
|
||||
valid := false
|
||||
valid = valid || s.Description != ""
|
||||
valid = valid || (s.Users != nil && len(s.Users) > 0)
|
||||
valid = valid || (s.Groups != nil && len(s.Groups) > 0)
|
||||
valid = valid || (s.ProductCodes != nil && len(s.ProductCodes) > 0)
|
||||
valid = valid || (s.SnapshotUsers != nil && len(s.SnapshotUsers) > 0)
|
||||
valid = valid || (s.SnapshotGroups != nil && len(s.SnapshotGroups) > 0)
|
||||
|
||||
if !valid {
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
var err error
|
||||
s.Ctx.Data = extractBuildInfo(*ec2conn.Config.Region, state, s.GeneratedData)
|
||||
s.Description, err = interpolate.Render(s.Description, &s.Ctx)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Error interpolating AMI description: %s", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Construct the modify image and snapshot attribute requests we're going
|
||||
// to make. We need to make each separately since the EC2 API only allows
|
||||
// changing one type at a kind currently.
|
||||
options := make(map[string]*ec2.ModifyImageAttributeInput)
|
||||
if s.Description != "" {
|
||||
options["description"] = &ec2.ModifyImageAttributeInput{
|
||||
Description: &ec2.AttributeValue{Value: &s.Description},
|
||||
}
|
||||
}
|
||||
snapshotOptions := make(map[string]*ec2.ModifySnapshotAttributeInput)
|
||||
|
||||
if len(s.Groups) > 0 {
|
||||
groups := make([]*string, len(s.Groups))
|
||||
addsImage := make([]*ec2.LaunchPermission, len(s.Groups))
|
||||
addGroups := &ec2.ModifyImageAttributeInput{
|
||||
LaunchPermission: &ec2.LaunchPermissionModifications{},
|
||||
}
|
||||
|
||||
for i, g := range s.Groups {
|
||||
groups[i] = aws.String(g)
|
||||
addsImage[i] = &ec2.LaunchPermission{
|
||||
Group: aws.String(g),
|
||||
}
|
||||
}
|
||||
|
||||
addGroups.UserGroups = groups
|
||||
addGroups.LaunchPermission.Add = addsImage
|
||||
options["groups"] = addGroups
|
||||
}
|
||||
|
||||
if len(s.SnapshotGroups) > 0 {
|
||||
groups := make([]*string, len(s.SnapshotGroups))
|
||||
addsSnapshot := make([]*ec2.CreateVolumePermission, len(s.SnapshotGroups))
|
||||
addSnapshotGroups := &ec2.ModifySnapshotAttributeInput{
|
||||
CreateVolumePermission: &ec2.CreateVolumePermissionModifications{},
|
||||
}
|
||||
|
||||
for i, g := range s.SnapshotGroups {
|
||||
groups[i] = aws.String(g)
|
||||
addsSnapshot[i] = &ec2.CreateVolumePermission{
|
||||
Group: aws.String(g),
|
||||
}
|
||||
}
|
||||
addSnapshotGroups.GroupNames = groups
|
||||
addSnapshotGroups.CreateVolumePermission.Add = addsSnapshot
|
||||
snapshotOptions["groups"] = addSnapshotGroups
|
||||
}
|
||||
|
||||
if len(s.Users) > 0 {
|
||||
users := make([]*string, len(s.Users))
|
||||
addsImage := make([]*ec2.LaunchPermission, len(s.Users))
|
||||
for i, u := range s.Users {
|
||||
users[i] = aws.String(u)
|
||||
addsImage[i] = &ec2.LaunchPermission{UserId: aws.String(u)}
|
||||
}
|
||||
|
||||
options["users"] = &ec2.ModifyImageAttributeInput{
|
||||
UserIds: users,
|
||||
LaunchPermission: &ec2.LaunchPermissionModifications{
|
||||
Add: addsImage,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if len(s.SnapshotUsers) > 0 {
|
||||
users := make([]*string, len(s.SnapshotUsers))
|
||||
addsSnapshot := make([]*ec2.CreateVolumePermission, len(s.SnapshotUsers))
|
||||
for i, u := range s.SnapshotUsers {
|
||||
users[i] = aws.String(u)
|
||||
addsSnapshot[i] = &ec2.CreateVolumePermission{UserId: aws.String(u)}
|
||||
}
|
||||
|
||||
snapshotOptions["users"] = &ec2.ModifySnapshotAttributeInput{
|
||||
UserIds: users,
|
||||
CreateVolumePermission: &ec2.CreateVolumePermissionModifications{
|
||||
Add: addsSnapshot,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if len(s.ProductCodes) > 0 {
|
||||
codes := make([]*string, len(s.ProductCodes))
|
||||
for i, c := range s.ProductCodes {
|
||||
codes[i] = &c
|
||||
}
|
||||
options["product codes"] = &ec2.ModifyImageAttributeInput{
|
||||
ProductCodes: codes,
|
||||
}
|
||||
}
|
||||
|
||||
// Modifying image attributes
|
||||
for region, ami := range amis {
|
||||
ui.Say(fmt.Sprintf("Modifying attributes on AMI (%s)...", ami))
|
||||
regionConn := ec2.New(session, &aws.Config{
|
||||
Region: aws.String(region),
|
||||
})
|
||||
for name, input := range options {
|
||||
ui.Message(fmt.Sprintf("Modifying: %s", name))
|
||||
input.ImageId = &ami
|
||||
_, err := regionConn.ModifyImageAttribute(input)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error modify AMI attributes: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Modifying snapshot attributes
|
||||
for region, region_snapshots := range snapshots {
|
||||
for _, snapshot := range region_snapshots {
|
||||
ui.Say(fmt.Sprintf("Modifying attributes on snapshot (%s)...", snapshot))
|
||||
regionConn := ec2.New(session, &aws.Config{
|
||||
Region: aws.String(region),
|
||||
})
|
||||
for name, input := range snapshotOptions {
|
||||
ui.Message(fmt.Sprintf("Modifying: %s", name))
|
||||
input.SnapshotId = &snapshot
|
||||
_, err := regionConn.ModifySnapshotAttribute(input)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error modify snapshot attributes: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepModifyAMIAttributes) Cleanup(state multistep.StateBag) {
|
||||
// No cleanup...
|
||||
}
|
|
@ -1,75 +0,0 @@
package common

import (
	"context"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
	"github.com/hashicorp/packer-plugin-sdk/multistep"
	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
	confighelper "github.com/hashicorp/packer-plugin-sdk/template/config"
)

type StepModifyEBSBackedInstance struct {
	Skip                     bool
	EnableAMIENASupport      confighelper.Trilean
	EnableAMISriovNetSupport bool
}

func (s *StepModifyEBSBackedInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	ec2conn := state.Get("ec2").(ec2iface.EC2API)
	instance := state.Get("instance").(*ec2.Instance)
	ui := state.Get("ui").(packersdk.Ui)

	// Skip when it is a spot instance
	if s.Skip {
		return multistep.ActionContinue
	}

	// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
	// As of February 2017, this applies to C3, C4, D2, I2, R3, and M4 (excluding m4.16xlarge)
	if s.EnableAMISriovNetSupport {
		ui.Say("Enabling Enhanced Networking (SR-IOV)...")
		simple := "simple"
		_, err := ec2conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
			InstanceId:      instance.InstanceId,
			SriovNetSupport: &ec2.AttributeValue{Value: &simple},
		})
		if err != nil {
			err := fmt.Errorf("Error enabling Enhanced Networking (SR-IOV) on %s: %s", *instance.InstanceId, err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
	}

	// Handle EnaSupport flag.
	// As of February 2017, this applies to C5, I3, P2, R4, X1, and m4.16xlarge
	if s.EnableAMIENASupport != confighelper.TriUnset {
		var prefix string
		if s.EnableAMIENASupport.True() {
			prefix = "En"
		} else {
			prefix = "Dis"
		}
		ui.Say(fmt.Sprintf("%sabling Enhanced Networking (ENA)...", prefix))
		_, err := ec2conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
			InstanceId: instance.InstanceId,
			EnaSupport: &ec2.AttributeBooleanValue{Value: s.EnableAMIENASupport.ToBoolPointer()},
		})
		if err != nil {
			err := fmt.Errorf("Error %sabling Enhanced Networking (ENA) on %s: %s", strings.ToLower(prefix), *instance.InstanceId, err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
	}

	return multistep.ActionContinue
}

func (s *StepModifyEBSBackedInstance) Cleanup(state multistep.StateBag) {
	// No cleanup...
}
@ -1,153 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"sort"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepNetworkInfo queries AWS for information about
|
||||
// VPCs and Subnets that are used throughout the AMI creation process.
|
||||
//
|
||||
// Produces (adding them to the state bag):
|
||||
// vpc_id string - the VPC ID
|
||||
// subnet_id string - the Subnet ID
|
||||
// availability_zone string - the AZ name
|
||||
type StepNetworkInfo struct {
|
||||
VpcId string
|
||||
VpcFilter VpcFilterOptions
|
||||
SubnetId string
|
||||
SubnetFilter SubnetFilterOptions
|
||||
AvailabilityZone string
|
||||
SecurityGroupIds []string
|
||||
SecurityGroupFilter SecurityGroupFilterOptions
|
||||
}
|
||||
|
||||
type subnetsSort []*ec2.Subnet
|
||||
|
||||
func (a subnetsSort) Len() int { return len(a) }
|
||||
func (a subnetsSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a subnetsSort) Less(i, j int) bool {
|
||||
return *a[i].AvailableIpAddressCount < *a[j].AvailableIpAddressCount
|
||||
}
|
||||
|
||||
// mostFreeSubnet returns the subnet with the most available IP addresses out of a slice of subnets.
|
||||
func mostFreeSubnet(subnets []*ec2.Subnet) *ec2.Subnet {
|
||||
sortedSubnets := subnets
|
||||
sort.Sort(subnetsSort(sortedSubnets))
|
||||
return sortedSubnets[len(sortedSubnets)-1]
|
||||
}
|
||||
|
||||
func (s *StepNetworkInfo) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
// VPC
|
||||
if s.VpcId == "" && !s.VpcFilter.Empty() {
|
||||
params := &ec2.DescribeVpcsInput{}
|
||||
s.VpcFilter.Filters["state"] = "available"
params.Filters = buildEc2Filters(s.VpcFilter.Filters)
|
||||
|
||||
log.Printf("Using VPC Filters %v", params)
|
||||
|
||||
vpcResp, err := ec2conn.DescribeVpcs(params)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error querying VPCs: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
if len(vpcResp.Vpcs) != 1 {
|
||||
err := fmt.Errorf("Exactly one VPC should match the filter, but %d VPCs were found matching filters: %v", len(vpcResp.Vpcs), params)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
s.VpcId = *vpcResp.Vpcs[0].VpcId
|
||||
ui.Message(fmt.Sprintf("Found VPC ID: %s", s.VpcId))
|
||||
}
|
||||
|
||||
// Subnet
|
||||
if s.SubnetId == "" && !s.SubnetFilter.Empty() {
|
||||
params := &ec2.DescribeSubnetsInput{}
|
||||
s.SubnetFilter.Filters["state"] = "available"
|
||||
|
||||
if s.VpcId != "" {
|
||||
s.SubnetFilter.Filters["vpc-id"] = s.VpcId
|
||||
}
|
||||
if s.AvailabilityZone != "" {
|
||||
s.SubnetFilter.Filters["availabilityZone"] = s.AvailabilityZone
|
||||
}
|
||||
params.Filters = buildEc2Filters(s.SubnetFilter.Filters)
|
||||
log.Printf("Using Subnet Filters %v", params)
|
||||
|
||||
subnetsResp, err := ec2conn.DescribeSubnets(params)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error querying Subnets: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
if len(subnetsResp.Subnets) == 0 {
|
||||
err := fmt.Errorf("No Subnets were found matching filters: %v", params)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
if len(subnetsResp.Subnets) > 1 && !s.SubnetFilter.Random && !s.SubnetFilter.MostFree {
|
||||
err := fmt.Errorf("Your filter matched %d Subnets. Please try a more specific search, or set random or most_free to true.", len(subnetsResp.Subnets))
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
var subnet *ec2.Subnet
|
||||
switch {
|
||||
case s.SubnetFilter.MostFree:
|
||||
subnet = mostFreeSubnet(subnetsResp.Subnets)
|
||||
case s.SubnetFilter.Random:
|
||||
subnet = subnetsResp.Subnets[rand.Intn(len(subnetsResp.Subnets))]
|
||||
default:
|
||||
subnet = subnetsResp.Subnets[0]
|
||||
}
|
||||
s.SubnetId = *subnet.SubnetId
|
||||
ui.Message(fmt.Sprintf("Found Subnet ID: %s", s.SubnetId))
|
||||
}
|
||||
|
||||
// Try to find AZ and VPC Id from Subnet if they are not yet found/given
|
||||
if s.SubnetId != "" && (s.AvailabilityZone == "" || s.VpcId == "") {
|
||||
log.Printf("[INFO] Finding AZ and VpcId for the given subnet '%s'", s.SubnetId)
|
||||
resp, err := ec2conn.DescribeSubnets(&ec2.DescribeSubnetsInput{SubnetIds: []*string{&s.SubnetId}})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error describing subnet %s: %s", s.SubnetId, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
if s.AvailabilityZone == "" {
|
||||
s.AvailabilityZone = *resp.Subnets[0].AvailabilityZone
|
||||
log.Printf("[INFO] AvailabilityZone found: '%s'", s.AvailabilityZone)
|
||||
}
|
||||
if s.VpcId == "" {
|
||||
s.VpcId = *resp.Subnets[0].VpcId
|
||||
log.Printf("[INFO] VpcId found: '%s'", s.VpcId)
|
||||
}
|
||||
}
|
||||
|
||||
state.Put("vpc_id", s.VpcId)
|
||||
state.Put("availability_zone", s.AvailabilityZone)
|
||||
state.Put("subnet_id", s.SubnetId)
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepNetworkInfo) Cleanup(multistep.StateBag) {}
|
|
@ -1,148 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/retry"
|
||||
"github.com/hashicorp/packer/builder/amazon/common/awserrors"
|
||||
)
|
||||
|
||||
// StepPreValidate provides an opportunity to pre-validate any configuration for
|
||||
// the build before actually doing any time-consuming work.
|
||||
//
|
||||
type StepPreValidate struct {
|
||||
DestAmiName string
|
||||
ForceDeregister bool
|
||||
AMISkipBuildRegion bool
|
||||
VpcId string
|
||||
SubnetId string
|
||||
HasSubnetFilter bool
|
||||
}
|
||||
|
||||
func (s *StepPreValidate) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
if accessConfig, ok := state.GetOk("access_config"); ok {
|
||||
accessconf := accessConfig.(*AccessConfig)
|
||||
if !accessconf.VaultAWSEngine.Empty() {
|
||||
// loop over the authentication a few times to give vault-created creds
|
||||
// time to become eventually-consistent
|
||||
ui.Say("You're using Vault-generated AWS credentials. It may take a " +
|
||||
"few moments for them to become available on AWS. Waiting...")
|
||||
err := retry.Config{
|
||||
Tries: 11,
|
||||
ShouldRetry: func(err error) bool {
|
||||
if awserrors.Matches(err, "AuthFailure", "") {
|
||||
log.Printf("Waiting for Vault-generated AWS credentials" +
|
||||
" to pass authentication... trying again.")
|
||||
return true
|
||||
}
|
||||
return false
|
||||
},
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
ec2conn, err := accessconf.NewEC2Connection()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = listEC2Regions(ec2conn)
|
||||
return err
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Unable to authenticate to AWS using Vault-"+
"generated credentials within the retry timeout."))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
|
||||
if amiConfig, ok := state.GetOk("ami_config"); ok {
|
||||
amiconf := amiConfig.(*AMIConfig)
|
||||
if !amiconf.AMISkipRegionValidation {
|
||||
regionsToValidate := append(amiconf.AMIRegions, accessconf.RawRegion)
|
||||
err := accessconf.ValidateRegion(regionsToValidate...)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("error validating regions: %v", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if s.ForceDeregister {
|
||||
ui.Say("Force Deregister flag found, skipping prevalidating AMI Name")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
if s.AMISkipBuildRegion {
|
||||
ui.Say("skip_build_region was set; not prevalidating AMI name")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
|
||||
// Validate VPC settings for non-default VPCs
|
||||
ui.Say("Prevalidating any provided VPC information")
|
||||
if err := s.checkVpc(ec2conn); err != nil {
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Prevalidating AMI Name: %s", s.DestAmiName))
|
||||
req, resp := ec2conn.DescribeImagesRequest(&ec2.DescribeImagesInput{
|
||||
Filters: []*ec2.Filter{{
|
||||
Name: aws.String("name"),
|
||||
Values: []*string{aws.String(s.DestAmiName)},
|
||||
}}})
|
||||
req.RetryCount = 11
|
||||
|
||||
if err := req.Send(); err != nil {
|
||||
err = fmt.Errorf("Error querying AMI: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
if len(resp.Images) > 0 {
|
||||
err := fmt.Errorf("Error: AMI Name: '%s' is used by an existing AMI: %s", *resp.Images[0].Name, *resp.Images[0].ImageId)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepPreValidate) checkVpc(conn ec2iface.EC2API) error {
|
||||
if s.VpcId == "" || (s.VpcId != "" && (s.SubnetId != "" || s.HasSubnetFilter)) {
|
||||
// Skip validation if:
|
||||
// * The user has not provided a VpcId.
|
||||
// * Both VpcId and SubnetId are provided; AWS API will error if something is wrong.
|
||||
// * Both VpcId and SubnetFilter are provided
|
||||
return nil
|
||||
}
|
||||
|
||||
res, err := conn.DescribeVpcs(&ec2.DescribeVpcsInput{VpcIds: []*string{aws.String(s.VpcId)}})
|
||||
if awserrors.Matches(err, "InvalidVpcID.NotFound", "") || err != nil {
|
||||
return fmt.Errorf("Error retrieving VPC information for vpc_id %s: %s", s.VpcId, err)
|
||||
}
|
||||
|
||||
if res != nil && len(res.Vpcs) == 1 && res.Vpcs[0] != nil {
|
||||
if isDefault := aws.BoolValue(res.Vpcs[0].IsDefault); !isDefault {
|
||||
return fmt.Errorf("Error: subnet_id or subnet_filter must be provided for non-default VPCs (%s)", s.VpcId)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup ...
|
||||
func (s *StepPreValidate) Cleanup(multistep.StateBag) {}
|
|
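The Vault credential wait above, and the launch steps later in this commit, all rely on the plugin SDK's retry helper for eventually consistent AWS operations. Below is a minimal, hedged sketch of that pattern on its own, using only the retry package already imported above; the simulated AuthFailure error and attempt counter exist purely for illustration.

package main

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/hashicorp/packer-plugin-sdk/retry"
)

func main() {
	ctx := context.Background()
	attempts := 0

	err := retry.Config{
		Tries: 11,
		ShouldRetry: func(err error) bool {
			// Only retry the error class we expect to be transient.
			return strings.Contains(err.Error(), "AuthFailure")
		},
		RetryDelay: (&retry.Backoff{
			InitialBackoff: 200 * time.Millisecond,
			MaxBackoff:     30 * time.Second,
			Multiplier:     2,
		}).Linear,
	}.Run(ctx, func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			// Pretend the credentials are not yet visible to AWS.
			return errors.New("AuthFailure: not yet propagated")
		}
		return nil
	})

	fmt.Printf("succeeded after %d attempts, err=%v\n", attempts, err)
}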
@ -1,70 +0,0 @@
package common

import (
	"fmt"
	"strings"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// DescribeVpcs mocks an ec2.DescribeVpcsOutput for a given input
func (m *mockEC2Conn) DescribeVpcs(input *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) {
	if input == nil || aws.StringValue(input.VpcIds[0]) == "" {
		return nil, fmt.Errorf("oops looks like we need more input")
	}

	var isDefault bool
	vpcID := aws.StringValue(input.VpcIds[0])

	// only one default VPC per region
	if strings.Contains("vpc-default-id", vpcID) {
		isDefault = true
	}

	output := &ec2.DescribeVpcsOutput{
		Vpcs: []*ec2.Vpc{
			{
				IsDefault: aws.Bool(isDefault),
				VpcId:     aws.String(vpcID),
			},
		},
	}
	return output, nil
}

func TestStepPreValidate_checkVpc(t *testing.T) {
	tt := []struct {
		name          string
		step          StepPreValidate
		errorExpected bool
	}{
		{"DefaultVpc", StepPreValidate{VpcId: "vpc-default-id"}, false},
		{"NonDefaultVpcNoSubnet", StepPreValidate{VpcId: "vpc-1234567890"}, true},
		{"NonDefaultVpcWithSubnet", StepPreValidate{VpcId: "vpc-1234567890", SubnetId: "subnet-1234567890"}, false},
		{"SubnetWithNoVpc", StepPreValidate{SubnetId: "subnet-1234567890"}, false},
		{"NoVpcInformation", StepPreValidate{}, false},
		{"NonDefaultVpcWithSubnetFilter", StepPreValidate{VpcId: "vpc-1234567890", HasSubnetFilter: true}, false},
	}

	mockConn, err := getMockConn(nil, "")
	if err != nil {
		t.Fatal("unable to get a mock connection")
	}

	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.step.checkVpc(mockConn)

			if tc.errorExpected && err == nil {
				t.Errorf("expected a validation error for %q but got %q", tc.name, err)
			}

			if !tc.errorExpected && err != nil {
				t.Errorf("expected a validation to pass for %q but got %q", tc.name, err)
			}
		})
	}
}
@ -1,390 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/retry"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
"github.com/hashicorp/packer/builder/amazon/common/awserrors"
|
||||
)
|
||||
|
||||
type StepRunSourceInstance struct {
|
||||
PollingConfig *AWSPollingConfig
|
||||
AssociatePublicIpAddress bool
|
||||
LaunchMappings EC2BlockDeviceMappingsBuilder
|
||||
Comm *communicator.Config
|
||||
Ctx interpolate.Context
|
||||
Debug bool
|
||||
EbsOptimized bool
|
||||
EnableT2Unlimited bool
|
||||
ExpectedRootDevice string
|
||||
InstanceInitiatedShutdownBehavior string
|
||||
InstanceType string
|
||||
IsRestricted bool
|
||||
SourceAMI string
|
||||
Tags map[string]string
|
||||
Tenancy string
|
||||
UserData string
|
||||
UserDataFile string
|
||||
VolumeTags map[string]string
|
||||
NoEphemeral bool
|
||||
|
||||
instanceId string
|
||||
}
|
||||
|
||||
func (s *StepRunSourceInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
|
||||
securityGroupIds := aws.StringSlice(state.Get("securityGroupIds").([]string))
|
||||
iamInstanceProfile := aws.String(state.Get("iamInstanceProfile").(string))
|
||||
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
userData := s.UserData
|
||||
if s.UserDataFile != "" {
|
||||
contents, err := ioutil.ReadFile(s.UserDataFile)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Problem reading user data file: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
userData = string(contents)
|
||||
}
|
||||
|
||||
// Test if it is encoded already, and if not, encode it
|
||||
if _, err := base64.StdEncoding.DecodeString(userData); err != nil {
|
||||
log.Printf("[DEBUG] base64 encoding user data...")
|
||||
userData = base64.StdEncoding.EncodeToString([]byte(userData))
|
||||
}
|
||||
|
||||
ui.Say("Launching a source AWS instance...")
|
||||
image, ok := state.Get("source_image").(*ec2.Image)
|
||||
if !ok {
|
||||
state.Put("error", fmt.Errorf("source_image type assertion failed"))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
s.SourceAMI = *image.ImageId
|
||||
|
||||
if s.ExpectedRootDevice != "" && *image.RootDeviceType != s.ExpectedRootDevice {
|
||||
state.Put("error", fmt.Errorf(
|
||||
"The provided source AMI has an invalid root device type.\n"+
|
||||
"Expected '%s', got '%s'.",
|
||||
s.ExpectedRootDevice, *image.RootDeviceType))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
var instanceId string
|
||||
|
||||
ui.Say("Adding tags to source instance")
|
||||
if _, exists := s.Tags["Name"]; !exists {
|
||||
s.Tags["Name"] = "Packer Builder"
|
||||
}
|
||||
|
||||
ec2Tags, err := TagMap(s.Tags).EC2Tags(s.Ctx, *ec2conn.Config.Region, state)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error tagging source instance: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
volTags, err := TagMap(s.VolumeTags).EC2Tags(s.Ctx, *ec2conn.Config.Region, state)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error tagging volumes: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
az := state.Get("availability_zone").(string)
|
||||
runOpts := &ec2.RunInstancesInput{
|
||||
ImageId: &s.SourceAMI,
|
||||
InstanceType: &s.InstanceType,
|
||||
UserData: &userData,
|
||||
MaxCount: aws.Int64(1),
|
||||
MinCount: aws.Int64(1),
|
||||
IamInstanceProfile: &ec2.IamInstanceProfileSpecification{Name: iamInstanceProfile},
|
||||
BlockDeviceMappings: s.LaunchMappings.BuildEC2BlockDeviceMappings(),
|
||||
Placement: &ec2.Placement{AvailabilityZone: &az},
|
||||
EbsOptimized: &s.EbsOptimized,
|
||||
}
|
||||
|
||||
if s.NoEphemeral {
|
||||
// This is only relevant for windows guests. Ephemeral drives by
|
||||
// default are assigned to drive names xvdca-xvdcz.
|
||||
// When vms are launched from the AWS console, they're automatically
|
||||
// removed from the block devices if the user hasn't said to use them,
|
||||
// but the SDK does not perform this cleanup. The following code just
|
||||
// manually removes the ephemeral drives from the mapping so that they
|
||||
// don't clutter up console views and cause confusion.
|
||||
log.Printf("no_ephemeral was set, so creating drives xvdca-xvdcz as empty mappings")
|
||||
DefaultEphemeralDeviceLetters := "abcdefghijklmnopqrstuvwxyz"
|
||||
for _, letter := range DefaultEphemeralDeviceLetters {
|
||||
bd := &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("xvdc" + string(letter)),
|
||||
NoDevice: aws.String(""),
|
||||
}
|
||||
runOpts.BlockDeviceMappings = append(runOpts.BlockDeviceMappings, bd)
|
||||
}
|
||||
}
|
||||
|
||||
if s.EnableT2Unlimited {
|
||||
creditOption := "unlimited"
|
||||
runOpts.CreditSpecification = &ec2.CreditSpecificationRequest{CpuCredits: &creditOption}
|
||||
}
|
||||
|
||||
// Collect tags for tagging on resource creation
|
||||
var tagSpecs []*ec2.TagSpecification
|
||||
|
||||
if len(ec2Tags) > 0 {
|
||||
runTags := &ec2.TagSpecification{
|
||||
ResourceType: aws.String("instance"),
|
||||
Tags: ec2Tags,
|
||||
}
|
||||
|
||||
tagSpecs = append(tagSpecs, runTags)
|
||||
}
|
||||
|
||||
if len(volTags) > 0 {
|
||||
runVolTags := &ec2.TagSpecification{
|
||||
ResourceType: aws.String("volume"),
|
||||
Tags: volTags,
|
||||
}
|
||||
|
||||
tagSpecs = append(tagSpecs, runVolTags)
|
||||
}
|
||||
|
||||
// If our region supports it, set tag specifications
|
||||
if len(tagSpecs) > 0 && !s.IsRestricted {
|
||||
runOpts.SetTagSpecifications(tagSpecs)
|
||||
ec2Tags.Report(ui)
|
||||
volTags.Report(ui)
|
||||
}
|
||||
|
||||
if s.Comm.SSHKeyPairName != "" {
|
||||
runOpts.KeyName = &s.Comm.SSHKeyPairName
|
||||
}
|
||||
|
||||
subnetId := state.Get("subnet_id").(string)
|
||||
|
||||
if subnetId != "" && s.AssociatePublicIpAddress {
|
||||
runOpts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{
|
||||
{
|
||||
DeviceIndex: aws.Int64(0),
|
||||
AssociatePublicIpAddress: &s.AssociatePublicIpAddress,
|
||||
SubnetId: aws.String(subnetId),
|
||||
Groups: securityGroupIds,
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
runOpts.SubnetId = aws.String(subnetId)
|
||||
runOpts.SecurityGroupIds = securityGroupIds
|
||||
}
|
||||
|
||||
if s.ExpectedRootDevice == "ebs" {
|
||||
runOpts.InstanceInitiatedShutdownBehavior = &s.InstanceInitiatedShutdownBehavior
|
||||
}
|
||||
|
||||
if s.Tenancy != "" {
|
||||
runOpts.Placement.Tenancy = aws.String(s.Tenancy)
|
||||
}
|
||||
|
||||
var runResp *ec2.Reservation
|
||||
err = retry.Config{
|
||||
Tries: 11,
|
||||
ShouldRetry: func(err error) bool {
|
||||
if awserrors.Matches(err, "InvalidParameterValue", "iamInstanceProfile") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
},
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
runResp, err = ec2conn.RunInstances(runOpts)
|
||||
return err
|
||||
})
|
||||
|
||||
if awserrors.Matches(err, "VPCIdNotSpecified", "No default VPC for this user") && subnetId == "" {
|
||||
err := fmt.Errorf("Error launching source instance: a valid Subnet Id was not specified")
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error launching source instance: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
instanceId = *runResp.Instances[0].InstanceId
|
||||
|
||||
// Set the instance ID so that the cleanup works properly
|
||||
s.instanceId = instanceId
|
||||
|
||||
ui.Message(fmt.Sprintf("Instance ID: %s", instanceId))
|
||||
ui.Say(fmt.Sprintf("Waiting for instance (%v) to become ready...", instanceId))
|
||||
|
||||
describeInstance := &ec2.DescribeInstancesInput{
|
||||
InstanceIds: []*string{aws.String(instanceId)},
|
||||
}
|
||||
|
||||
if err := s.PollingConfig.WaitUntilInstanceRunning(ctx, ec2conn, instanceId); err != nil {
|
||||
err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", instanceId, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
|
||||
// try to get some context from AWS on why was instance
|
||||
// transitioned to the unexpected state
|
||||
if resp, e := ec2conn.DescribeInstances(describeInstance); e == nil {
|
||||
if len(resp.Reservations) > 0 && len(resp.Reservations[0].Instances) > 0 {
|
||||
instance := resp.Reservations[0].Instances[0]
|
||||
if instance.StateTransitionReason != nil && instance.StateReason != nil && instance.StateReason.Message != nil {
|
||||
ui.Error(fmt.Sprintf("Instance state change details: %s: %s",
|
||||
*instance.StateTransitionReason, *instance.StateReason.Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// there's a race condition that can happen because of AWS's eventual
|
||||
// consistency where even though the wait is complete, the describe call
|
||||
// will fail. Retry a couple of times to try to mitigate that race.
|
||||
|
||||
var r *ec2.DescribeInstancesOutput
|
||||
err = retry.Config{Tries: 11, ShouldRetry: func(err error) bool {
|
||||
if awserrors.Matches(err, "InvalidInstanceID.NotFound", "") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
},
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
r, err = ec2conn.DescribeInstances(describeInstance)
|
||||
return err
|
||||
})
|
||||
if err != nil || len(r.Reservations) == 0 || len(r.Reservations[0].Instances) == 0 {
|
||||
err := fmt.Errorf("Error finding source instance.")
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
instance := r.Reservations[0].Instances[0]
|
||||
|
||||
if s.Debug {
|
||||
if instance.PublicDnsName != nil && *instance.PublicDnsName != "" {
|
||||
ui.Message(fmt.Sprintf("Public DNS: %s", *instance.PublicDnsName))
|
||||
}
|
||||
|
||||
if instance.PublicIpAddress != nil && *instance.PublicIpAddress != "" {
|
||||
ui.Message(fmt.Sprintf("Public IP: %s", *instance.PublicIpAddress))
|
||||
}
|
||||
|
||||
if instance.PrivateIpAddress != nil && *instance.PrivateIpAddress != "" {
|
||||
ui.Message(fmt.Sprintf("Private IP: %s", *instance.PrivateIpAddress))
|
||||
}
|
||||
}
|
||||
|
||||
state.Put("instance", instance)
|
||||
// instance_id is the generic term used so that users can have access to the
|
||||
// instance id inside of the provisioners, used in step_provision.
|
||||
state.Put("instance_id", instance.InstanceId)
|
||||
|
||||
// If we're in a region that doesn't support tagging on instance creation,
|
||||
// do that now.
|
||||
|
||||
if s.IsRestricted {
|
||||
ec2Tags.Report(ui)
|
||||
// Retry creating tags for about 2.5 minutes
|
||||
err = retry.Config{Tries: 11, ShouldRetry: func(err error) bool {
|
||||
if awserrors.Matches(err, "InvalidInstanceID.NotFound", "") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
},
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
_, err := ec2conn.CreateTags(&ec2.CreateTagsInput{
|
||||
Tags: ec2Tags,
|
||||
Resources: []*string{instance.InstanceId},
|
||||
})
|
||||
return err
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error tagging source instance: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Now tag volumes
|
||||
|
||||
volumeIds := make([]*string, 0)
|
||||
for _, v := range instance.BlockDeviceMappings {
|
||||
if ebs := v.Ebs; ebs != nil {
|
||||
volumeIds = append(volumeIds, ebs.VolumeId)
|
||||
}
|
||||
}
|
||||
|
||||
if len(volumeIds) > 0 && len(s.VolumeTags) > 0 {
|
||||
ui.Say("Adding tags to source EBS Volumes")
|
||||
|
||||
volumeTags, err := TagMap(s.VolumeTags).EC2Tags(s.Ctx, *ec2conn.Config.Region, state)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
volumeTags.Report(ui)
|
||||
|
||||
_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
|
||||
Resources: volumeIds,
|
||||
Tags: volumeTags,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) {
|
||||
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
// Terminate the source instance if it exists
|
||||
if s.instanceId != "" {
|
||||
ui.Say("Terminating the source AWS instance...")
|
||||
if _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIds: []*string{&s.instanceId}}); err != nil {
|
||||
ui.Error(fmt.Sprintf("Error terminating instance, may still be around: %s", err))
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.PollingConfig.WaitUntilInstanceTerminated(aws.BackgroundContext(), ec2conn, s.instanceId); err != nil {
|
||||
ui.Error(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
|
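This step and the spot-instance step later in the commit normalize user data with the same probe-then-encode idiom: try to base64-decode the input, and only encode it if that fails. Pulled out into a small helper (the sample script string is an illustrative placeholder), the behavior looks like this:

package main

import (
	"encoding/base64"
	"fmt"
)

// ensureBase64 mirrors the user-data handling above: input that does not
// decode as base64 is treated as plain text and encoded; input that already
// decodes is passed through untouched. Note the known trade-off: plain text
// that happens to be valid base64 (for example "abcd") is also passed through.
func ensureBase64(userData string) string {
	if _, err := base64.StdEncoding.DecodeString(userData); err != nil {
		return base64.StdEncoding.EncodeToString([]byte(userData))
	}
	return userData
}

func main() {
	plain := "#!/bin/bash\necho hello"
	once := ensureBase64(plain) // plain text, so it gets encoded
	twice := ensureBase64(once) // already encoded, so it is unchanged
	fmt.Println(once == twice)  // prints "true"
}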
@ -1,533 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/random"
|
||||
"github.com/hashicorp/packer-plugin-sdk/retry"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
"github.com/hashicorp/packer/builder/amazon/common/awserrors"
|
||||
)
|
||||
|
||||
type EC2BlockDeviceMappingsBuilder interface {
|
||||
BuildEC2BlockDeviceMappings() []*ec2.BlockDeviceMapping
|
||||
}
|
||||
|
||||
type StepRunSpotInstance struct {
|
||||
PollingConfig *AWSPollingConfig
|
||||
AssociatePublicIpAddress bool
|
||||
LaunchMappings EC2BlockDeviceMappingsBuilder
|
||||
BlockDurationMinutes int64
|
||||
Debug bool
|
||||
Comm *communicator.Config
|
||||
EbsOptimized bool
|
||||
ExpectedRootDevice string
|
||||
InstanceInitiatedShutdownBehavior string
|
||||
InstanceType string
|
||||
Region string
|
||||
SourceAMI string
|
||||
SpotPrice string
|
||||
SpotTags map[string]string
|
||||
SpotInstanceTypes []string
|
||||
Tags map[string]string
|
||||
VolumeTags map[string]string
|
||||
UserData string
|
||||
UserDataFile string
|
||||
Ctx interpolate.Context
|
||||
NoEphemeral bool
|
||||
|
||||
instanceId string
|
||||
}
|
||||
|
||||
func (s *StepRunSpotInstance) CreateTemplateData(userData *string, az string,
|
||||
state multistep.StateBag, marketOptions *ec2.LaunchTemplateInstanceMarketOptionsRequest) *ec2.RequestLaunchTemplateData {
|
||||
blockDeviceMappings := s.LaunchMappings.BuildEC2BlockDeviceMappings()
|
||||
// Convert the BlockDeviceMapping into a
|
||||
// LaunchTemplateBlockDeviceMappingRequest. These structs are identical,
|
||||
// except for the EBS field -- on one, that field contains a
|
||||
// LaunchTemplateEbsBlockDeviceRequest, and on the other, it contains an
|
||||
// EbsBlockDevice. The EbsBlockDevice and
|
||||
// LaunchTemplateEbsBlockDeviceRequest structs are themselves
|
||||
// identical except for the struct's name, so you can cast one directly
|
||||
// into the other.
|
||||
var launchMappingRequests []*ec2.LaunchTemplateBlockDeviceMappingRequest
|
||||
for _, mapping := range blockDeviceMappings {
|
||||
launchRequest := &ec2.LaunchTemplateBlockDeviceMappingRequest{
|
||||
DeviceName: mapping.DeviceName,
|
||||
Ebs: (*ec2.LaunchTemplateEbsBlockDeviceRequest)(mapping.Ebs),
|
||||
VirtualName: mapping.VirtualName,
|
||||
}
|
||||
launchMappingRequests = append(launchMappingRequests, launchRequest)
|
||||
}
|
||||
if s.NoEphemeral {
|
||||
// This is only relevant for windows guests. Ephemeral drives by
|
||||
// default are assigned to drive names xvdca-xvdcz.
|
||||
// When vms are launched from the AWS console, they're automatically
|
||||
// removed from the block devices if the user hasn't said to use them,
|
||||
// but the SDK does not perform this cleanup. The following code just
|
||||
// manually removes the ephemeral drives from the mapping so that they
|
||||
// don't clutter up console views and cause confusion.
|
||||
log.Printf("no_ephemeral was set, so creating drives xvdca-xvdcz as empty mappings")
|
||||
DefaultEphemeralDeviceLetters := "abcdefghijklmnopqrstuvwxyz"
|
||||
for _, letter := range DefaultEphemeralDeviceLetters {
|
||||
launchRequest := &ec2.LaunchTemplateBlockDeviceMappingRequest{
|
||||
DeviceName: aws.String("xvdc" + string(letter)),
|
||||
NoDevice: aws.String(""),
|
||||
}
|
||||
launchMappingRequests = append(launchMappingRequests, launchRequest)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
iamInstanceProfile := aws.String(state.Get("iamInstanceProfile").(string))
|
||||
|
||||
// Create a launch template.
|
||||
templateData := ec2.RequestLaunchTemplateData{
|
||||
BlockDeviceMappings: launchMappingRequests,
|
||||
DisableApiTermination: aws.Bool(false),
|
||||
EbsOptimized: &s.EbsOptimized,
|
||||
IamInstanceProfile: &ec2.LaunchTemplateIamInstanceProfileSpecificationRequest{Name: iamInstanceProfile},
|
||||
ImageId: &s.SourceAMI,
|
||||
InstanceMarketOptions: marketOptions,
|
||||
Placement: &ec2.LaunchTemplatePlacementRequest{
|
||||
AvailabilityZone: &az,
|
||||
},
|
||||
UserData: userData,
|
||||
}
|
||||
// Create a network interface
|
||||
securityGroupIds := aws.StringSlice(state.Get("securityGroupIds").([]string))
|
||||
subnetId := state.Get("subnet_id").(string)
|
||||
|
||||
if subnetId != "" {
|
||||
// Set up a full network interface
|
||||
networkInterface := ec2.LaunchTemplateInstanceNetworkInterfaceSpecificationRequest{
|
||||
Groups: securityGroupIds,
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
DeviceIndex: aws.Int64(0),
|
||||
SubnetId: aws.String(subnetId),
|
||||
}
|
||||
if s.AssociatePublicIpAddress {
|
||||
networkInterface.SetAssociatePublicIpAddress(s.AssociatePublicIpAddress)
|
||||
}
|
||||
templateData.SetNetworkInterfaces([]*ec2.LaunchTemplateInstanceNetworkInterfaceSpecificationRequest{&networkInterface})
|
||||
} else {
|
||||
templateData.SetSecurityGroupIds(securityGroupIds)
|
||||
|
||||
}
|
||||
|
||||
// If instance type is not set, we'll just pick the lowest priced instance
|
||||
// available.
|
||||
if s.InstanceType != "" {
|
||||
templateData.SetInstanceType(s.InstanceType)
|
||||
}
|
||||
|
||||
if s.Comm.SSHKeyPairName != "" {
|
||||
templateData.SetKeyName(s.Comm.SSHKeyPairName)
|
||||
}
|
||||
|
||||
return &templateData
|
||||
}
|
||||
|
||||
func (s *StepRunSpotInstance) LoadUserData() (string, error) {
|
||||
userData := s.UserData
|
||||
if s.UserDataFile != "" {
|
||||
contents, err := ioutil.ReadFile(s.UserDataFile)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Problem reading user data file: %s", err)
|
||||
}
|
||||
|
||||
userData = string(contents)
|
||||
}
|
||||
|
||||
// Test if it is encoded already, and if not, encode it
|
||||
if _, err := base64.StdEncoding.DecodeString(userData); err != nil {
|
||||
log.Printf("[DEBUG] base64 encoding user data...")
|
||||
userData = base64.StdEncoding.EncodeToString([]byte(userData))
|
||||
}
|
||||
return userData, nil
|
||||
}
|
||||
|
||||
func (s *StepRunSpotInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(ec2iface.EC2API)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
ui.Say("Launching a spot AWS instance...")
|
||||
|
||||
// Get and validate the source AMI
|
||||
image, ok := state.Get("source_image").(*ec2.Image)
|
||||
if !ok {
|
||||
state.Put("error", fmt.Errorf("source_image type assertion failed"))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
s.SourceAMI = *image.ImageId
|
||||
|
||||
if s.ExpectedRootDevice != "" && *image.RootDeviceType != s.ExpectedRootDevice {
|
||||
state.Put("error", fmt.Errorf(
|
||||
"The provided source AMI has an invalid root device type.\n"+
|
||||
"Expected '%s', got '%s'.",
|
||||
s.ExpectedRootDevice, *image.RootDeviceType))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
azConfig := ""
|
||||
if azRaw, ok := state.GetOk("availability_zone"); ok {
|
||||
azConfig = azRaw.(string)
|
||||
}
|
||||
az := azConfig
|
||||
|
||||
var instanceId string
|
||||
|
||||
ui.Say("Interpolating tags for spot instance...")
|
||||
// s.Tags will tag the eventually launched instance
|
||||
// s.SpotTags apply to the spot request itself, and do not automatically
|
||||
// get applied to the spot instance that is launched once the request is
|
||||
// fulfilled
|
||||
if _, exists := s.Tags["Name"]; !exists {
|
||||
s.Tags["Name"] = "Packer Builder"
|
||||
}
|
||||
|
||||
// Convert tags from the tag map provided by the user into a slice of *ec2.Tag values
|
||||
ec2Tags, err := TagMap(s.Tags).EC2Tags(s.Ctx, s.Region, state)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error generating tags for source instance: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
// This prints the tags to the ui; it doesn't actually add them to the
|
||||
// instance yet
|
||||
ec2Tags.Report(ui)
|
||||
|
||||
volumeTags, err := TagMap(s.VolumeTags).EC2Tags(s.Ctx, s.Region, state)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error generating volume tags: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
volumeTags.Report(ui)
|
||||
|
||||
spotOptions := ec2.LaunchTemplateSpotMarketOptionsRequest{}
|
||||
// The default is to set the maximum price to the OnDemand price.
|
||||
if s.SpotPrice != "auto" {
|
||||
spotOptions.SetMaxPrice(s.SpotPrice)
|
||||
}
|
||||
if s.BlockDurationMinutes != 0 {
|
||||
spotOptions.BlockDurationMinutes = &s.BlockDurationMinutes
|
||||
}
|
||||
marketOptions := &ec2.LaunchTemplateInstanceMarketOptionsRequest{
|
||||
SpotOptions: &spotOptions,
|
||||
}
|
||||
marketOptions.SetMarketType(ec2.MarketTypeSpot)
|
||||
|
||||
spotTags, err := TagMap(s.SpotTags).EC2Tags(s.Ctx, s.Region, state)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error generating tags for spot request: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Create a launch template for the instance
|
||||
ui.Message("Loading User Data File...")
|
||||
|
||||
// Generate a random name to avoid conflicting with other
|
||||
// instances of packer running in this AWS account
|
||||
launchTemplateName := fmt.Sprintf(
|
||||
"packer-fleet-launch-template-%s",
|
||||
random.AlphaNum(7))
|
||||
state.Put("launchTemplateName", launchTemplateName) // For the cleanup step
|
||||
|
||||
userData, err := s.LoadUserData()
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
ui.Message("Creating Spot Fleet launch template...")
|
||||
templateData := s.CreateTemplateData(&userData, az, state, marketOptions)
|
||||
launchTemplate := &ec2.CreateLaunchTemplateInput{
|
||||
LaunchTemplateData: templateData,
|
||||
LaunchTemplateName: aws.String(launchTemplateName),
|
||||
VersionDescription: aws.String("template generated by packer for launching spot instances"),
|
||||
}
|
||||
if len(spotTags) > 0 {
|
||||
launchTemplate.TagSpecifications = []*ec2.TagSpecification{
|
||||
{
|
||||
ResourceType: aws.String("launch-template"),
|
||||
Tags: spotTags,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if len(ec2Tags) > 0 {
|
||||
launchTemplate.LaunchTemplateData.TagSpecifications = append(
|
||||
launchTemplate.LaunchTemplateData.TagSpecifications,
|
||||
&ec2.LaunchTemplateTagSpecificationRequest{
|
||||
ResourceType: aws.String("instance"),
|
||||
Tags: ec2Tags,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
if len(volumeTags) > 0 {
|
||||
launchTemplate.LaunchTemplateData.TagSpecifications = append(
|
||||
launchTemplate.LaunchTemplateData.TagSpecifications,
|
||||
&ec2.LaunchTemplateTagSpecificationRequest{
|
||||
ResourceType: aws.String("volume"),
|
||||
Tags: volumeTags,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Tell EC2 to create the template
|
||||
createLaunchTemplateOutput, err := ec2conn.CreateLaunchTemplate(launchTemplate)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating launch template for spot instance: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
launchTemplateId := createLaunchTemplateOutput.LaunchTemplate.LaunchTemplateId
|
||||
ui.Message(fmt.Sprintf("Created Spot Fleet launch template: %s", *launchTemplateId))
|
||||
|
||||
// Add overrides for each user-provided instance type
|
||||
var overrides []*ec2.FleetLaunchTemplateOverridesRequest
|
||||
for _, instanceType := range s.SpotInstanceTypes {
|
||||
override := ec2.FleetLaunchTemplateOverridesRequest{
|
||||
InstanceType: aws.String(instanceType),
|
||||
}
|
||||
overrides = append(overrides, &override)
|
||||
}
|
||||
|
||||
createFleetInput := &ec2.CreateFleetInput{
|
||||
LaunchTemplateConfigs: []*ec2.FleetLaunchTemplateConfigRequest{
|
||||
{
|
||||
LaunchTemplateSpecification: &ec2.FleetLaunchTemplateSpecificationRequest{
|
||||
LaunchTemplateName: aws.String(launchTemplateName),
|
||||
Version: aws.String("1"),
|
||||
},
|
||||
Overrides: overrides,
|
||||
},
|
||||
},
|
||||
ReplaceUnhealthyInstances: aws.Bool(false),
|
||||
TargetCapacitySpecification: &ec2.TargetCapacitySpecificationRequest{
|
||||
TotalTargetCapacity: aws.Int64(1),
|
||||
DefaultTargetCapacityType: aws.String("spot"),
|
||||
},
|
||||
Type: aws.String("instant"),
|
||||
}
|
||||
|
||||
var createOutput *ec2.CreateFleetOutput
|
||||
err = retry.Config{
|
||||
Tries: 11,
|
||||
ShouldRetry: func(err error) bool {
|
||||
if strings.Contains(err.Error(), "Invalid IAM Instance Profile name") {
|
||||
// eventual consistency of the profile. PutRolePolicy &
|
||||
// AddRoleToInstanceProfile are eventually consistent and once
|
||||
// we can wait on those operations, this can be removed.
|
||||
return true
|
||||
}
|
||||
return false
|
||||
},
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 500 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
createOutput, err = ec2conn.CreateFleet(createFleetInput)
|
||||
if err == nil && createOutput.Errors != nil {
|
||||
err = fmt.Errorf("errors: %v", createOutput.Errors)
|
||||
}
|
||||
// We can end up with errors because one of the allowed availability
|
||||
// zones doesn't have one of the allowed instance types; as long as
|
||||
// an instance is launched, these errors aren't important.
|
||||
if len(createOutput.Instances) > 0 {
|
||||
if err != nil {
|
||||
log.Printf("create request failed for some instances %v", err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("create request failed %v", err)
|
||||
}
|
||||
return err
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if createOutput.FleetId != nil {
|
||||
err = fmt.Errorf("Error waiting for fleet request (%s): %s", *createOutput.FleetId, err)
|
||||
}
|
||||
if len(createOutput.Errors) > 0 {
|
||||
errString := fmt.Sprintf("Error waiting for fleet request (%s) to become ready:", *createOutput.FleetId)
|
||||
for _, outErr := range createOutput.Errors {
|
||||
errString = errString + aws.StringValue(outErr.ErrorMessage)
|
||||
}
|
||||
err = fmt.Errorf(errString)
|
||||
}
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
instanceId = *createOutput.Instances[0].InstanceIds[0]
|
||||
// Set the instance ID so that the cleanup works properly
|
||||
s.instanceId = instanceId
|
||||
|
||||
ui.Message(fmt.Sprintf("Instance ID: %s", instanceId))
|
||||
|
||||
// Get information about the created instance
|
||||
var describeOutput *ec2.DescribeInstancesOutput
|
||||
err = retry.Config{
|
||||
Tries: 11,
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
describeOutput, err = ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{
|
||||
InstanceIds: []*string{aws.String(instanceId)},
|
||||
})
|
||||
if len(describeOutput.Reservations) > 0 && len(describeOutput.Reservations[0].Instances) > 0 {
|
||||
if len(s.LaunchMappings.BuildEC2BlockDeviceMappings()) > 0 && len(describeOutput.Reservations[0].Instances[0].BlockDeviceMappings) == 0 {
|
||||
return fmt.Errorf("Instance has no block devices")
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
if err != nil || len(describeOutput.Reservations) == 0 || len(describeOutput.Reservations[0].Instances) == 0 {
|
||||
err := fmt.Errorf("Error finding source instance.")
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
instance := describeOutput.Reservations[0].Instances[0]
|
||||
|
||||
// Tag the spot instance request (not the eventual spot instance)
|
||||
if len(spotTags) > 0 && len(s.SpotTags) > 0 {
|
||||
spotTags.Report(ui)
|
||||
// Use the instance ID to find out the SIR, so that we can tag the spot
|
||||
// request associated with this instance.
|
||||
sir := describeOutput.Reservations[0].Instances[0].SpotInstanceRequestId
|
||||
|
||||
// Apply tags to the spot request.
|
||||
err = retry.Config{
|
||||
Tries: 11,
|
||||
ShouldRetry: func(error) bool { return false },
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
_, err := ec2conn.CreateTags(&ec2.CreateTagsInput{
|
||||
Tags: spotTags,
|
||||
Resources: []*string{sir},
|
||||
})
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error tagging spot request: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
|
||||
// Retry creating tags for about 2.5 minutes
|
||||
err = retry.Config{Tries: 11, ShouldRetry: func(err error) bool {
|
||||
if awserrors.Matches(err, "InvalidInstanceID.NotFound", "") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
},
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
_, err := ec2conn.CreateTags(&ec2.CreateTagsInput{
|
||||
Tags: ec2Tags,
|
||||
Resources: []*string{instance.InstanceId},
|
||||
})
|
||||
return err
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error tagging source instance: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
volumeIds := make([]*string, 0)
|
||||
for _, v := range instance.BlockDeviceMappings {
|
||||
if ebs := v.Ebs; ebs != nil {
|
||||
volumeIds = append(volumeIds, ebs.VolumeId)
|
||||
}
|
||||
}
|
||||
|
||||
if len(volumeIds) > 0 && len(s.VolumeTags) > 0 {
|
||||
ui.Say("Adding tags to source EBS Volumes")
|
||||
_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
|
||||
Resources: volumeIds,
|
||||
Tags: volumeTags,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if s.Debug {
|
||||
if instance.PublicDnsName != nil && *instance.PublicDnsName != "" {
|
||||
ui.Message(fmt.Sprintf("Public DNS: %s", *instance.PublicDnsName))
|
||||
}
|
||||
|
||||
if instance.PublicIpAddress != nil && *instance.PublicIpAddress != "" {
|
||||
ui.Message(fmt.Sprintf("Public IP: %s", *instance.PublicIpAddress))
|
||||
}
|
||||
|
||||
if instance.PrivateIpAddress != nil && *instance.PrivateIpAddress != "" {
|
||||
ui.Message(fmt.Sprintf("Private IP: %s", *instance.PrivateIpAddress))
|
||||
}
|
||||
}
|
||||
|
||||
state.Put("instance", instance)
|
||||
// instance_id is the generic term used so that users can have access to the
|
||||
// instance id inside of the provisioners, used in step_provision.
|
||||
state.Put("instance_id", instance.InstanceId)
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepRunSpotInstance) Cleanup(state multistep.StateBag) {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
launchTemplateName := state.Get("launchTemplateName").(string)
|
||||
|
||||
// Terminate the source instance if it exists
|
||||
if s.instanceId != "" {
|
||||
ui.Say("Terminating the source AWS instance...")
|
||||
if _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIds: []*string{&s.instanceId}}); err != nil {
|
||||
ui.Error(fmt.Sprintf("Error terminating instance, may still be around: %s", err))
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.PollingConfig.WaitUntilInstanceTerminated(aws.BackgroundContext(), ec2conn, s.instanceId); err != nil {
|
||||
ui.Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Delete the launch template used to create the spot fleet
|
||||
deleteInput := &ec2.DeleteLaunchTemplateInput{
|
||||
LaunchTemplateName: aws.String(launchTemplateName),
|
||||
}
|
||||
if _, err := ec2conn.DeleteLaunchTemplate(deleteInput); err != nil {
|
||||
ui.Error(err.Error())
|
||||
}
|
||||
}
|
|
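To make the fleet request above easier to read in isolation, the helper below reproduces the same single-instance, instant-type CreateFleetInput shape; the launch template name, version, and instance types are placeholders, and this is only a sketch of the request the step assembles, not a replacement for it.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// singleSpotFleetInput builds an "instant" EC2 Fleet request that launches
// exactly one spot instance from a launch template, with one override per
// allowed instance type.
func singleSpotFleetInput(templateName string, instanceTypes []string) *ec2.CreateFleetInput {
	var overrides []*ec2.FleetLaunchTemplateOverridesRequest
	for _, instanceType := range instanceTypes {
		overrides = append(overrides, &ec2.FleetLaunchTemplateOverridesRequest{
			InstanceType: aws.String(instanceType),
		})
	}
	return &ec2.CreateFleetInput{
		LaunchTemplateConfigs: []*ec2.FleetLaunchTemplateConfigRequest{
			{
				LaunchTemplateSpecification: &ec2.FleetLaunchTemplateSpecificationRequest{
					LaunchTemplateName: aws.String(templateName),
					Version:            aws.String("1"),
				},
				Overrides: overrides,
			},
		},
		ReplaceUnhealthyInstances: aws.Bool(false),
		TargetCapacitySpecification: &ec2.TargetCapacitySpecificationRequest{
			TotalTargetCapacity:       aws.Int64(1),
			DefaultTargetCapacityType: aws.String("spot"),
		},
		Type: aws.String("instant"),
	}
}

func main() {
	// Placeholder template name and instance types.
	input := singleSpotFleetInput("packer-fleet-launch-template-abc1234", []string{"t3.micro", "t3a.micro"})
	fmt.Println(input)
}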
@ -1,366 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// Create statebag for running test
|
||||
func tStateSpot() multistep.StateBag {
|
||||
state := new(multistep.BasicStateBag)
|
||||
state.Put("ui", &packersdk.BasicUi{
|
||||
Reader: new(bytes.Buffer),
|
||||
Writer: new(bytes.Buffer),
|
||||
})
|
||||
state.Put("availability_zone", "us-east-1c")
|
||||
state.Put("securityGroupIds", []string{"sg-0b8984db72f213dc3"})
|
||||
state.Put("iamInstanceProfile", "packer-123")
|
||||
state.Put("subnet_id", "subnet-077fde4e")
|
||||
state.Put("source_image", "")
|
||||
return state
|
||||
}
|
||||
|
||||
func getBasicStep() *StepRunSpotInstance {
|
||||
stepRunSpotInstance := StepRunSpotInstance{
|
||||
PollingConfig: new(AWSPollingConfig),
|
||||
AssociatePublicIpAddress: false,
|
||||
LaunchMappings: BlockDevices{},
|
||||
BlockDurationMinutes: 0,
|
||||
Debug: false,
|
||||
Comm: &communicator.Config{
|
||||
SSH: communicator.SSH{
|
||||
SSHKeyPairName: "foo",
|
||||
},
|
||||
},
|
||||
EbsOptimized: false,
|
||||
ExpectedRootDevice: "ebs",
|
||||
InstanceInitiatedShutdownBehavior: "stop",
|
||||
InstanceType: "t2.micro",
|
||||
Region: "us-east-1",
|
||||
SourceAMI: "",
|
||||
SpotPrice: "auto",
|
||||
SpotTags: nil,
|
||||
Tags: map[string]string{},
|
||||
VolumeTags: nil,
|
||||
UserData: "",
|
||||
UserDataFile: "",
|
||||
}
|
||||
|
||||
return &stepRunSpotInstance
|
||||
}
|
||||
|
||||
func TestCreateTemplateData(t *testing.T) {
|
||||
state := tStateSpot()
|
||||
stepRunSpotInstance := getBasicStep()
|
||||
template := stepRunSpotInstance.CreateTemplateData(aws.String("userdata"), "az", state,
|
||||
&ec2.LaunchTemplateInstanceMarketOptionsRequest{})
|
||||
|
||||
// expected := []*ec2.LaunchTemplateInstanceNetworkInterfaceSpecificationRequest{
|
||||
// &ec2.LaunchTemplateInstanceNetworkInterfaceSpecificationRequest{
|
||||
// DeleteOnTermination: aws.Bool(true),
|
||||
// DeviceIndex: aws.Int64(0),
|
||||
// Groups: aws.StringSlice([]string{"sg-0b8984db72f213dc3"}),
|
||||
// SubnetId: aws.String("subnet-077fde4e"),
|
||||
// },
|
||||
// }
|
||||
// if expected != template.NetworkInterfaces {
|
||||
if template.NetworkInterfaces == nil {
|
||||
t.Fatalf("Template should have contained a networkInterface object: received %#v", template.NetworkInterfaces)
|
||||
}
|
||||
|
||||
if *template.IamInstanceProfile.Name != state.Get("iamInstanceProfile") {
|
||||
t.Fatalf("Template should have contained an InstanceProfile name: received %#v", template.IamInstanceProfile.Name)
|
||||
}
|
||||
|
||||
// Rerun, this time testing that we set security group IDs
|
||||
state.Put("subnet_id", "")
|
||||
template = stepRunSpotInstance.CreateTemplateData(aws.String("userdata"), "az", state,
|
||||
&ec2.LaunchTemplateInstanceMarketOptionsRequest{})
|
||||
if template.NetworkInterfaces != nil {
|
||||
t.Fatalf("Template shouldn't contain network interfaces object if subnet_id is unset.")
|
||||
}
|
||||
|
||||
// Rerun, this time testing that the instance doesn't get an instance profile if iamInstanceProfile is unset
|
||||
state.Put("iamInstanceProfile", "")
|
||||
template = stepRunSpotInstance.CreateTemplateData(aws.String("userdata"), "az", state,
|
||||
&ec2.LaunchTemplateInstanceMarketOptionsRequest{})
|
||||
fmt.Println(template.IamInstanceProfile)
|
||||
if *template.IamInstanceProfile.Name != "" {
|
||||
t.Fatalf("Template shouldn't contain instance profile if iamInstanceProfile is unset.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateTemplateData_NoEphemeral(t *testing.T) {
|
||||
state := tStateSpot()
|
||||
stepRunSpotInstance := getBasicStep()
|
||||
stepRunSpotInstance.NoEphemeral = true
|
||||
template := stepRunSpotInstance.CreateTemplateData(aws.String("userdata"), "az", state,
|
||||
&ec2.LaunchTemplateInstanceMarketOptionsRequest{})
|
||||
if len(template.BlockDeviceMappings) != 26 {
|
||||
t.Fatalf("Should have created 26 mappings to keep ephemeral drives from appearing.")
|
||||
}
|
||||
|
||||
// Now check that noEphemeral doesn't mess with the mappings in real life.
|
||||
// state = tStateSpot()
|
||||
// stepRunSpotInstance = getBasicStep()
|
||||
// stepRunSpotInstance.NoEphemeral = true
|
||||
// mappings := []*ec2.InstanceBlockDeviceMapping{
|
||||
// &ec2.InstanceBlockDeviceMapping{
|
||||
// DeviceName: "xvda",
|
||||
// Ebs: {
|
||||
// DeleteOnTermination: true,
|
||||
// Status: "attaching",
|
||||
// VolumeId: "vol-044cd49c330f21c05",
|
||||
// },
|
||||
// },
|
||||
// &ec2.InstanceBlockDeviceMapping{
|
||||
// DeviceName: "/dev/xvdf",
|
||||
// Ebs: {
|
||||
// DeleteOnTermination: false,
|
||||
// Status: "attaching",
|
||||
// VolumeId: "vol-0eefaf2d6ae35827e",
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
// template = stepRunSpotInstance.CreateTemplateData(aws.String("userdata"), "az", state,
|
||||
// &ec2.LaunchTemplateInstanceMarketOptionsRequest{})
|
||||
// if len(*template.BlockDeviceMappings) != 26 {
|
||||
// t.Fatalf("Should have created 26 mappings to keep ephemeral drives from appearing.")
|
||||
// }
|
||||
}
|
||||
|
||||
type runSpotEC2ConnMock struct {
|
||||
ec2iface.EC2API
|
||||
|
||||
CreateLaunchTemplateParams []*ec2.CreateLaunchTemplateInput
|
||||
CreateLaunchTemplateFn func(*ec2.CreateLaunchTemplateInput) (*ec2.CreateLaunchTemplateOutput, error)
|
||||
|
||||
CreateFleetParams []*ec2.CreateFleetInput
|
||||
CreateFleetFn func(*ec2.CreateFleetInput) (*ec2.CreateFleetOutput, error)
|
||||
|
||||
CreateTagsParams []*ec2.CreateTagsInput
|
||||
CreateTagsFn func(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)
|
||||
|
||||
DescribeInstancesParams []*ec2.DescribeInstancesInput
|
||||
DescribeInstancesFn func(input *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error)
|
||||
}
|
||||
|
||||
func (m *runSpotEC2ConnMock) CreateLaunchTemplate(req *ec2.CreateLaunchTemplateInput) (*ec2.CreateLaunchTemplateOutput, error) {
|
||||
m.CreateLaunchTemplateParams = append(m.CreateLaunchTemplateParams, req)
|
||||
resp, err := m.CreateLaunchTemplateFn(req)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (m *runSpotEC2ConnMock) CreateFleet(req *ec2.CreateFleetInput) (*ec2.CreateFleetOutput, error) {
|
||||
m.CreateFleetParams = append(m.CreateFleetParams, req)
|
||||
if m.CreateFleetFn != nil {
|
||||
resp, err := m.CreateFleetFn(req)
|
||||
return resp, err
|
||||
} else {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (m *runSpotEC2ConnMock) DescribeInstances(req *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) {
|
||||
m.DescribeInstancesParams = append(m.DescribeInstancesParams, req)
|
||||
if m.DescribeInstancesFn != nil {
|
||||
resp, err := m.DescribeInstancesFn(req)
|
||||
return resp, err
|
||||
} else {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (m *runSpotEC2ConnMock) CreateTags(req *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
|
||||
m.CreateTagsParams = append(m.CreateTagsParams, req)
|
||||
if m.CreateTagsFn != nil {
|
||||
resp, err := m.CreateTagsFn(req)
|
||||
return resp, err
|
||||
} else {
|
||||
return nil, nil
|
||||
}
|
||||
}
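// Illustrative sketch (not part of the original file): the *Fn hooks above let a
// test inject API failures while the wrapper methods keep recording every request.
// A hypothetical test exercising that mechanism directly could look like this:
func TestRunSpotEC2ConnMock_InjectedError(t *testing.T) {
	conn := &runSpotEC2ConnMock{
		CreateFleetFn: func(*ec2.CreateFleetInput) (*ec2.CreateFleetOutput, error) {
			return nil, fmt.Errorf("simulated CreateFleet failure")
		},
	}

	// The wrapper records the request before delegating to the hook.
	if _, err := conn.CreateFleet(&ec2.CreateFleetInput{}); err == nil {
		t.Fatal("expected the injected CreateFleet error")
	}
	if len(conn.CreateFleetParams) != 1 {
		t.Fatalf("expected 1 recorded CreateFleet call, got %d", len(conn.CreateFleetParams))
	}
}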
|
||||
|
||||
func defaultEc2Mock(instanceId, spotRequestId, volumeId, launchTemplateId *string) *runSpotEC2ConnMock {
|
||||
instance := &ec2.Instance{
|
||||
InstanceId: instanceId,
|
||||
SpotInstanceRequestId: spotRequestId,
|
||||
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
|
||||
{
|
||||
Ebs: &ec2.EbsInstanceBlockDevice{
|
||||
VolumeId: volumeId,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return &runSpotEC2ConnMock{
|
||||
CreateLaunchTemplateFn: func(in *ec2.CreateLaunchTemplateInput) (*ec2.CreateLaunchTemplateOutput, error) {
|
||||
return &ec2.CreateLaunchTemplateOutput{
|
||||
LaunchTemplate: &ec2.LaunchTemplate{
|
||||
LaunchTemplateId: launchTemplateId,
|
||||
},
|
||||
Warning: nil,
|
||||
}, nil
|
||||
},
|
||||
CreateFleetFn: func(*ec2.CreateFleetInput) (*ec2.CreateFleetOutput, error) {
|
||||
return &ec2.CreateFleetOutput{
|
||||
Errors: nil,
|
||||
FleetId: nil,
|
||||
Instances: []*ec2.CreateFleetInstance{
|
||||
{
|
||||
InstanceIds: []*string{instanceId},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
DescribeInstancesFn: func(input *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) {
|
||||
return &ec2.DescribeInstancesOutput{
|
||||
NextToken: nil,
|
||||
Reservations: []*ec2.Reservation{
|
||||
{
|
||||
Instances: []*ec2.Instance{instance},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
instanceId := aws.String("test-instance-id")
|
||||
spotRequestId := aws.String("spot-id")
|
||||
volumeId := aws.String("volume-id")
|
||||
launchTemplateId := aws.String("launchTemplateId")
|
||||
ec2Mock := defaultEc2Mock(instanceId, spotRequestId, volumeId, launchTemplateId)
|
||||
|
||||
uiMock := packersdk.TestUi(t)
|
||||
|
||||
state := tStateSpot()
|
||||
state.Put("ec2", ec2Mock)
|
||||
state.Put("ui", uiMock)
|
||||
state.Put("source_image", testImage())
|
||||
|
||||
stepRunSpotInstance := getBasicStep()
|
||||
stepRunSpotInstance.Tags["Name"] = "Packer Builder"
|
||||
stepRunSpotInstance.Tags["test-tag"] = "test-value"
|
||||
stepRunSpotInstance.SpotTags = map[string]string{
|
||||
"spot-tag": "spot-tag-value",
|
||||
}
|
||||
stepRunSpotInstance.VolumeTags = map[string]string{
|
||||
"volume-tag": "volume-tag-value",
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
action := stepRunSpotInstance.Run(ctx, state)
|
||||
|
||||
if err := state.Get("error"); err != nil {
|
||||
t.Fatalf("should not error, but: %v", err)
|
||||
}
|
||||
|
||||
if action != multistep.ActionContinue {
|
||||
t.Fatalf("shoul continue, but: %v", action)
|
||||
}
|
||||
|
||||
if len(ec2Mock.CreateLaunchTemplateParams) != 1 {
|
||||
t.Fatalf("createLaunchTemplate should be invoked once, but invoked %v", len(ec2Mock.CreateLaunchTemplateParams))
|
||||
}
|
||||
launchTemplateName := ec2Mock.CreateLaunchTemplateParams[0].LaunchTemplateName
|
||||
|
||||
if len(ec2Mock.CreateLaunchTemplateParams[0].TagSpecifications) != 1 {
|
||||
t.Fatalf("exactly one launch template tag specification expected")
|
||||
}
|
||||
if *ec2Mock.CreateLaunchTemplateParams[0].TagSpecifications[0].ResourceType != "launch-template" {
|
||||
t.Fatalf("resource type 'launch-template' expected")
|
||||
}
|
||||
if len(ec2Mock.CreateLaunchTemplateParams[0].TagSpecifications[0].Tags) != 1 {
|
||||
t.Fatalf("1 launch template tag expected")
|
||||
}
|
||||
|
||||
nameTag := ec2Mock.CreateLaunchTemplateParams[0].TagSpecifications[0].Tags[0]
|
||||
if *nameTag.Key != "spot-tag" || *nameTag.Value != "spot-tag-value" {
|
||||
t.Fatalf("expected spot-tag: spot-tag-value")
|
||||
}
|
||||
|
||||
if len(ec2Mock.CreateFleetParams) != 1 {
|
||||
t.Fatalf("createFleet should be invoked once, but invoked %v", len(ec2Mock.CreateLaunchTemplateParams))
|
||||
}
|
||||
if *ec2Mock.CreateFleetParams[0].TargetCapacitySpecification.DefaultTargetCapacityType != "spot" {
|
||||
t.Fatalf("capacity type should be spot")
|
||||
}
|
||||
if *ec2Mock.CreateFleetParams[0].TargetCapacitySpecification.TotalTargetCapacity != 1 {
|
||||
t.Fatalf("target capacity should be 1")
|
||||
}
|
||||
if len(ec2Mock.CreateFleetParams[0].LaunchTemplateConfigs) != 1 {
|
||||
t.Fatalf("exactly one launch config template expected")
|
||||
}
|
||||
if *ec2Mock.CreateFleetParams[0].LaunchTemplateConfigs[0].LaunchTemplateSpecification.LaunchTemplateName != *launchTemplateName {
|
||||
t.Fatalf("launchTemplateName should match in createLaunchTemplate and createFleet requests")
|
||||
}
|
||||
|
||||
if len(ec2Mock.DescribeInstancesParams) != 1 {
|
||||
t.Fatalf("describeInstancesParams should be invoked once, but invoked %v", len(ec2Mock.DescribeInstancesParams))
|
||||
}
|
||||
if *ec2Mock.DescribeInstancesParams[0].InstanceIds[0] != *instanceId {
|
||||
t.Fatalf("instanceId should match from createFleet response")
|
||||
}
|
||||
|
||||
uiMock.Say(fmt.Sprintf("%v", ec2Mock.CreateTagsParams))
|
||||
if len(ec2Mock.CreateTagsParams) != 3 {
|
||||
t.Fatalf("createTags should be invoked 3 times")
|
||||
}
|
||||
if len(ec2Mock.CreateTagsParams[0].Resources) != 1 || *ec2Mock.CreateTagsParams[0].Resources[0] != *spotRequestId {
|
||||
t.Fatalf("should create tags for spot request")
|
||||
}
|
||||
if len(ec2Mock.CreateTagsParams[1].Resources) != 1 || *ec2Mock.CreateTagsParams[1].Resources[0] != *instanceId {
|
||||
t.Fatalf("should create tags for instance")
|
||||
}
|
||||
if len(ec2Mock.CreateTagsParams[2].Resources) != 1 || *ec2Mock.CreateTagsParams[2].Resources[0] != *volumeId {
|
||||
t.Fatalf("should create tags for volume")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRun_NoSpotTags(t *testing.T) {
|
||||
instanceId := aws.String("test-instance-id")
|
||||
spotRequestId := aws.String("spot-id")
|
||||
volumeId := aws.String("volume-id")
|
||||
launchTemplateId := aws.String("lt-id")
|
||||
ec2Mock := defaultEc2Mock(instanceId, spotRequestId, volumeId, launchTemplateId)
|
||||
|
||||
uiMock := packersdk.TestUi(t)
|
||||
|
||||
state := tStateSpot()
|
||||
state.Put("ec2", ec2Mock)
|
||||
state.Put("ui", uiMock)
|
||||
state.Put("source_image", testImage())
|
||||
|
||||
stepRunSpotInstance := getBasicStep()
|
||||
stepRunSpotInstance.Tags["Name"] = "Packer Builder"
|
||||
stepRunSpotInstance.Tags["test-tag"] = "test-value"
|
||||
stepRunSpotInstance.VolumeTags = map[string]string{
|
||||
"volume-tag": "volume-tag-value",
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
action := stepRunSpotInstance.Run(ctx, state)
|
||||
|
||||
if err := state.Get("error"); err != nil {
|
||||
t.Fatalf("should not error, but: %v", err)
|
||||
}
|
||||
|
||||
if action != multistep.ActionContinue {
|
||||
t.Fatalf("shoul continue, but: %v", action)
|
||||
}
|
||||
|
||||
if len(ec2Mock.CreateLaunchTemplateParams[0].TagSpecifications) != 0 {
|
||||
t.Fatalf("0 launch template tags expected")
|
||||
}
|
||||
}
|
|
@ -1,230 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/uuid"
|
||||
)
|
||||
|
||||
type StepSecurityGroup struct {
|
||||
CommConfig *communicator.Config
|
||||
SecurityGroupFilter SecurityGroupFilterOptions
|
||||
SecurityGroupIds []string
|
||||
TemporarySGSourceCidrs []string
|
||||
SkipSSHRuleCreation bool
|
||||
|
||||
createdGroupId string
|
||||
}
|
||||
|
||||
func (s *StepSecurityGroup) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
vpcId := state.Get("vpc_id").(string)
|
||||
|
||||
if len(s.SecurityGroupIds) > 0 {
|
||||
_, err := ec2conn.DescribeSecurityGroups(
|
||||
&ec2.DescribeSecurityGroupsInput{
|
||||
GroupIds: aws.StringSlice(s.SecurityGroupIds),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Couldn't find specified security group: %s", err)
|
||||
log.Printf("[DEBUG] %s", err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
log.Printf("Using specified security groups: %v", s.SecurityGroupIds)
|
||||
state.Put("securityGroupIds", s.SecurityGroupIds)
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
if !s.SecurityGroupFilter.Empty() {
|
||||
|
||||
params := &ec2.DescribeSecurityGroupsInput{}
|
||||
if vpcId != "" {
|
||||
s.SecurityGroupFilter.Filters["vpc-id"] = vpcId
|
||||
}
|
||||
params.Filters = buildEc2Filters(s.SecurityGroupFilter.Filters)
|
||||
|
||||
log.Printf("Using SecurityGroup Filters %v", params)
|
||||
|
||||
sgResp, err := ec2conn.DescribeSecurityGroups(params)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Couldn't find security groups for filter: %s", err)
|
||||
log.Printf("[DEBUG] %s", err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
securityGroupIds := []string{}
|
||||
for _, sg := range sgResp.SecurityGroups {
|
||||
securityGroupIds = append(securityGroupIds, *sg.GroupId)
|
||||
}
|
||||
|
||||
ui.Message(fmt.Sprintf("Found Security Group(s): %s", strings.Join(securityGroupIds, ", ")))
|
||||
state.Put("securityGroupIds", securityGroupIds)
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Create the group
|
||||
groupName := fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
|
||||
ui.Say(fmt.Sprintf("Creating temporary security group for this instance: %s", groupName))
|
||||
group := &ec2.CreateSecurityGroupInput{
|
||||
GroupName: &groupName,
|
||||
Description: aws.String("Temporary group for Packer"),
|
||||
}
|
||||
|
||||
group.VpcId = &vpcId
|
||||
|
||||
groupResp, err := ec2conn.CreateSecurityGroup(group)
|
||||
if err != nil {
|
||||
ui.Error(err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Set the group ID so we can delete it later
|
||||
s.createdGroupId = *groupResp.GroupId
|
||||
|
||||
// Wait for the security group to become available for authorizing
|
||||
log.Printf("[DEBUG] Waiting for temporary security group: %s", s.createdGroupId)
|
||||
err = waitUntilSecurityGroupExists(ec2conn,
|
||||
&ec2.DescribeSecurityGroupsInput{
|
||||
GroupIds: []*string{aws.String(s.createdGroupId)},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Timed out waiting for security group %s: %s", s.createdGroupId, err)
|
||||
log.Printf("[DEBUG] %s", err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Found security group %s", s.createdGroupId)
|
||||
|
||||
// map the list of temporary security group CIDRs bundled with config to
|
||||
// types expected by EC2.
|
||||
groupIpRanges := []*ec2.IpRange{}
|
||||
for _, cidr := range s.TemporarySGSourceCidrs {
|
||||
ipRange := ec2.IpRange{
|
||||
CidrIp: aws.String(cidr),
|
||||
}
|
||||
groupIpRanges = append(groupIpRanges, &ipRange)
|
||||
}
|
||||
|
||||
// Set some state data for use in future steps
|
||||
state.Put("securityGroupIds", []string{s.createdGroupId})
|
||||
|
||||
if s.SkipSSHRuleCreation {
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
port := s.CommConfig.Port()
|
||||
// Authorize access for the provided port within the security group
|
||||
groupRules := &ec2.AuthorizeSecurityGroupIngressInput{
|
||||
GroupId: groupResp.GroupId,
|
||||
IpPermissions: []*ec2.IpPermission{
|
||||
{
|
||||
FromPort: aws.Int64(int64(port)),
|
||||
ToPort: aws.Int64(int64(port)),
|
||||
IpRanges: groupIpRanges,
|
||||
IpProtocol: aws.String("tcp"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf(
|
||||
"Authorizing access to port %d from %v in the temporary security groups...",
|
||||
port, s.TemporarySGSourceCidrs),
|
||||
)
|
||||
_, err = ec2conn.AuthorizeSecurityGroupIngress(groupRules)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error authorizing temporary security group: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepSecurityGroup) Cleanup(state multistep.StateBag) {
|
||||
if s.createdGroupId == "" {
|
||||
return
|
||||
}
|
||||
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
ui.Say("Deleting temporary security group...")
|
||||
|
||||
var err error
|
||||
for i := 0; i < 5; i++ {
|
||||
_, err = ec2conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{GroupId: &s.createdGroupId})
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
||||
log.Printf("Error deleting security group: %s", err)
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error cleaning up security group. Please delete the group manually:"+
|
||||
" err: %s; security group ID: %s", err, s.createdGroupId))
|
||||
}
|
||||
}
|
||||
|
||||
func waitUntilSecurityGroupExists(c *ec2.EC2, input *ec2.DescribeSecurityGroupsInput) error {
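	// Descriptive note (added comment): this waiter polls DescribeSecurityGroups with a
	// constant 5-second delay for up to 40 attempts, succeeding once at least one group
	// is returned and retrying on the two "not found" error codes while the newly
	// created group propagates.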
|
||||
ctx := aws.BackgroundContext()
|
||||
w := request.Waiter{
|
||||
Name: "DescribeSecurityGroups",
|
||||
MaxAttempts: 40,
|
||||
Delay: request.ConstantWaiterDelay(5 * time.Second),
|
||||
Acceptors: []request.WaiterAcceptor{
|
||||
{
|
||||
State: request.SuccessWaiterState,
|
||||
Matcher: request.PathWaiterMatch,
|
||||
Argument: "length(SecurityGroups[]) > `0`",
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
State: request.RetryWaiterState,
|
||||
Matcher: request.ErrorWaiterMatch,
|
||||
Argument: "",
|
||||
Expected: "InvalidGroup.NotFound",
|
||||
},
|
||||
{
|
||||
State: request.RetryWaiterState,
|
||||
Matcher: request.ErrorWaiterMatch,
|
||||
Argument: "",
|
||||
Expected: "InvalidSecurityGroupID.NotFound",
|
||||
},
|
||||
},
|
||||
Logger: c.Config.Logger,
|
||||
NewRequest: func(opts []request.Option) (*request.Request, error) {
|
||||
var inCpy *ec2.DescribeSecurityGroupsInput
|
||||
if input != nil {
|
||||
tmp := *input
|
||||
inCpy = &tmp
|
||||
}
|
||||
req, _ := c.DescribeSecurityGroupsRequest(inCpy)
|
||||
req.SetContext(ctx)
|
||||
req.ApplyOptions(opts...)
|
||||
return req, nil
|
||||
},
|
||||
}
|
||||
return w.WaitWithContext(ctx)
|
||||
}
|
|
@ -1,29 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
)
|
||||
|
||||
// &awscommon.StepSetGeneratedData{
|
||||
// GeneratedData: generatedData,
|
||||
// },
|
||||
|
||||
type StepSetGeneratedData struct {
|
||||
GeneratedData *packerbuilderdata.GeneratedData
|
||||
}
|
||||
|
||||
func (s *StepSetGeneratedData) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
|
||||
extractBuildInfo(*ec2conn.Config.Region, state, s.GeneratedData)
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepSetGeneratedData) Cleanup(state multistep.StateBag) {
|
||||
// No cleanup...
|
||||
}
|
|
@ -1,92 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
confighelper "github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
)
|
||||
|
||||
// StepSourceAMIInfo extracts critical information from the source AMI
|
||||
// that is used throughout the AMI creation process.
|
||||
//
|
||||
// Produces:
|
||||
// source_image *ec2.Image - the source AMI info
|
||||
type StepSourceAMIInfo struct {
|
||||
SourceAmi string
|
||||
EnableAMISriovNetSupport bool
|
||||
EnableAMIENASupport confighelper.Trilean
|
||||
AMIVirtType string
|
||||
AmiFilters AmiFilterOptions
|
||||
}
|
||||
|
||||
type imageSort []*ec2.Image
|
||||
|
||||
func (a imageSort) Len() int { return len(a) }
|
||||
func (a imageSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a imageSort) Less(i, j int) bool {
|
||||
itime, _ := time.Parse(time.RFC3339, *a[i].CreationDate)
|
||||
jtime, _ := time.Parse(time.RFC3339, *a[j].CreationDate)
|
||||
return itime.Unix() < jtime.Unix()
|
||||
}
|
||||
|
||||
// Returns the most recent AMI out of a slice of images.
|
||||
func mostRecentAmi(images []*ec2.Image) *ec2.Image {
|
||||
sortedImages := images
|
||||
sort.Sort(imageSort(sortedImages))
|
||||
return sortedImages[len(sortedImages)-1]
|
||||
}
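// Illustrative sketch (not in the original source): mostRecentAmi sorts by the
// RFC3339 CreationDate and returns the last element, so for example:
//
//	images := []*ec2.Image{
//		{ImageId: aws.String("ami-older"), CreationDate: aws.String("2020-01-01T00:00:00Z")},
//		{ImageId: aws.String("ami-newer"), CreationDate: aws.String("2021-06-01T00:00:00Z")},
//	}
//	latest := mostRecentAmi(images) // aws.StringValue(latest.ImageId) == "ami-newer"
//
// Note that the slice is sorted in place; the image IDs above are made up for the
// example, and aws.String/aws.StringValue are the usual aws-sdk-go helpers.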
|
||||
|
||||
func (s *StepSourceAMIInfo) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
params := &ec2.DescribeImagesInput{}
|
||||
|
||||
if s.SourceAmi != "" {
|
||||
params.ImageIds = []*string{&s.SourceAmi}
|
||||
}
|
||||
|
||||
image, err := s.AmiFilters.GetFilteredImage(params, ec2conn)
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
ui.Message(fmt.Sprintf("Found Image ID: %s", *image.ImageId))
|
||||
|
||||
// Enhanced Networking can only be enabled on HVM AMIs.
|
||||
// See http://goo.gl/icuXh5
|
||||
if s.EnableAMIENASupport.True() || s.EnableAMISriovNetSupport {
|
||||
err = s.canEnableEnhancedNetworking(image)
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
|
||||
state.Put("source_image", image)
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepSourceAMIInfo) Cleanup(multistep.StateBag) {}
|
||||
|
||||
func (s *StepSourceAMIInfo) canEnableEnhancedNetworking(image *ec2.Image) error {
|
||||
if s.AMIVirtType == "hvm" {
|
||||
return nil
|
||||
}
|
||||
if s.AMIVirtType != "" {
|
||||
return fmt.Errorf("Cannot enable enhanced networking, AMIVirtType '%s' is not HVM", s.AMIVirtType)
|
||||
}
|
||||
if *image.VirtualizationType != "hvm" {
|
||||
return fmt.Errorf("Cannot enable enhanced networking, source AMI '%s' is not HVM", s.SourceAmi)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,43 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestStepSourceAmiInfo_PVImage(t *testing.T) {
|
||||
err := new(StepSourceAMIInfo).canEnableEnhancedNetworking(&ec2.Image{
|
||||
VirtualizationType: aws.String("paravirtual"),
|
||||
})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestStepSourceAmiInfo_HVMImage(t *testing.T) {
|
||||
err := new(StepSourceAMIInfo).canEnableEnhancedNetworking(&ec2.Image{
|
||||
VirtualizationType: aws.String("hvm"),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestStepSourceAmiInfo_PVImageWithAMIVirtPV(t *testing.T) {
|
||||
stepSourceAMIInfo := StepSourceAMIInfo{
|
||||
AMIVirtType: "paravirtual",
|
||||
}
|
||||
err := stepSourceAMIInfo.canEnableEnhancedNetworking(&ec2.Image{
|
||||
VirtualizationType: aws.String("paravirtual"),
|
||||
})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestStepSourceAmiInfo_PVImageWithAMIVirtHVM(t *testing.T) {
|
||||
stepSourceAMIInfo := StepSourceAMIInfo{
|
||||
AMIVirtType: "hvm",
|
||||
}
|
||||
err := stepSourceAMIInfo.canEnableEnhancedNetworking(&ec2.Image{
|
||||
VirtualizationType: aws.String("paravirtual"),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
}
|
|
@ -1,93 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/retry"
|
||||
"github.com/hashicorp/packer/builder/amazon/common/awserrors"
|
||||
)
|
||||
|
||||
type StepStopEBSBackedInstance struct {
|
||||
PollingConfig *AWSPollingConfig
|
||||
Skip bool
|
||||
DisableStopInstance bool
|
||||
}
|
||||
|
||||
func (s *StepStopEBSBackedInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
instance := state.Get("instance").(*ec2.Instance)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
// Skip when it is a spot instance
|
||||
if s.Skip {
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
if !s.DisableStopInstance {
|
||||
// Stop the instance so we can create an AMI from it
|
||||
ui.Say("Stopping the source instance...")
|
||||
|
||||
// Amazon EC2 API follows an eventual consistency model.
|
||||
|
||||
// This means that if you run a command to modify or describe a resource
|
||||
// that you just created, its ID might not have propagated throughout
|
||||
// the system, and you will get an error responding that the resource
|
||||
// does not exist.
|
||||
|
||||
// Work around this by retrying a few times, up to about 5 minutes.
|
||||
err := retry.Config{Tries: 6, ShouldRetry: func(error) bool {
|
||||
if awserrors.Matches(err, "InvalidInstanceID.NotFound", "") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
},
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 10 * time.Second, MaxBackoff: 60 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(ctx, func(ctx context.Context) error {
|
||||
ui.Message("Stopping instance")
|
||||
|
||||
_, err = ec2conn.StopInstances(&ec2.StopInstancesInput{
|
||||
InstanceIds: []*string{instance.InstanceId},
|
||||
})
|
||||
|
||||
return err
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error stopping instance: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
} else {
|
||||
ui.Say("Automatic instance stop disabled. Please stop instance manually.")
|
||||
}
|
||||
|
||||
// Wait for the instance to actually stop
|
||||
ui.Say("Waiting for the instance to stop...")
|
||||
err = ec2conn.WaitUntilInstanceStoppedWithContext(ctx,
|
||||
&ec2.DescribeInstancesInput{
|
||||
InstanceIds: []*string{instance.InstanceId},
|
||||
},
|
||||
s.PollingConfig.getWaiterOptions()...)
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error waiting for instance to stop: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepStopEBSBackedInstance) Cleanup(multistep.StateBag) {
|
||||
// No cleanup...
|
||||
}
|
|
@ -1,44 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
type TagMap map[string]string
|
||||
type EC2Tags []*ec2.Tag
|
||||
|
||||
func (t EC2Tags) Report(ui packersdk.Ui) {
|
||||
for _, tag := range t {
|
||||
ui.Message(fmt.Sprintf("Adding tag: \"%s\": \"%s\"",
|
||||
aws.StringValue(tag.Key), aws.StringValue(tag.Value)))
|
||||
}
|
||||
}
|
||||
|
||||
func (t TagMap) EC2Tags(ictx interpolate.Context, region string, state multistep.StateBag) (EC2Tags, error) {
|
||||
var ec2Tags []*ec2.Tag
|
||||
generatedData := packerbuilderdata.GeneratedData{State: state}
|
||||
ictx.Data = extractBuildInfo(region, state, &generatedData)
|
||||
|
||||
for key, value := range t {
|
||||
interpolatedKey, err := interpolate.Render(key, &ictx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error processing tag: %s:%s - %s", key, value, err)
|
||||
}
|
||||
interpolatedValue, err := interpolate.Render(value, &ictx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error processing tag: %s:%s - %s", key, value, err)
|
||||
}
|
||||
ec2Tags = append(ec2Tags, &ec2.Tag{
|
||||
Key: aws.String(interpolatedKey),
|
||||
Value: aws.String(interpolatedValue),
|
||||
})
|
||||
}
|
||||
return ec2Tags, nil
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
func isalphanumeric(b byte) bool {
|
||||
if '0' <= b && b <= '9' {
|
||||
return true
|
||||
}
|
||||
if 'a' <= b && b <= 'z' {
|
||||
return true
|
||||
}
|
||||
if 'A' <= b && b <= 'Z' {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Clean up AMI name by replacing invalid characters with "-"
|
||||
// For allowed characters see docs for Name parameter
|
||||
// at http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateImage.html
|
||||
func templateCleanAMIName(s string) string {
|
||||
allowed := []byte{'(', ')', '[', ']', ' ', '.', '/', '-', '\'', '@', '_'}
|
||||
b := []byte(s)
|
||||
newb := make([]byte, len(b))
|
||||
for i, c := range b {
|
||||
if isalphanumeric(c) || bytes.IndexByte(allowed, c) != -1 {
|
||||
newb[i] = c
|
||||
} else {
|
||||
newb[i] = '-'
|
||||
}
|
||||
}
|
||||
return string(newb[:])
|
||||
}
|
||||
|
||||
var TemplateFuncs = template.FuncMap{
|
||||
"clean_resource_name": templateCleanAMIName,
|
||||
}
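// Illustrative sketch (not part of the original file): TemplateFuncs exposes the
// cleaner as `clean_resource_name` inside templates. A hypothetical direct call to
// the underlying function:
//
//	templateCleanAMIName("packer build 2021:01*01") // "packer build 2021-01-01"
//
// since ':' and '*' are not in the allowed set and are replaced with '-', while
// spaces and alphanumeric characters are kept.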
|
|
@ -1,16 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAMITemplatePrepare_clean(t *testing.T) {
|
||||
origName := "AMZamz09()./-_:&^ $%[]#'@"
|
||||
expected := "AMZamz09()./-_--- --[]-'@"
|
||||
|
||||
name := templateCleanAMIName(origName)
|
||||
|
||||
if name != expected {
|
||||
t.Fatalf("template names do not match: expected %s got %s\n", expected, name)
|
||||
}
|
||||
}
|
|
@ -1,46 +0,0 @@
|
|||
package amazon_acc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||
)
|
||||
|
||||
type AWSHelper struct {
|
||||
Region string
|
||||
AMIName string
|
||||
}
|
||||
|
||||
func (a *AWSHelper) CleanUpAmi() error {
|
||||
accessConfig := &awscommon.AccessConfig{}
|
||||
session, err := accessConfig.Session()
|
||||
if err != nil {
|
||||
return fmt.Errorf("AWSAMICleanUp: Unable to create aws session %s", err.Error())
|
||||
}
|
||||
|
||||
regionconn := ec2.New(session.Copy(&aws.Config{
|
||||
Region: aws.String(a.Region),
|
||||
}))
|
||||
|
||||
resp, err := regionconn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
Owners: aws.StringSlice([]string{"self"}),
|
||||
Filters: []*ec2.Filter{{
|
||||
Name: aws.String("name"),
|
||||
Values: aws.StringSlice([]string{a.AMIName}),
|
||||
}}})
|
||||
if err != nil {
|
||||
return fmt.Errorf("AWSAMICleanUp: Unable to find Image %s: %s", a.AMIName, err.Error())
|
||||
}
|
||||
|
||||
if resp != nil && len(resp.Images) > 0 {
|
||||
_, err = regionconn.DeregisterImage(&ec2.DeregisterImageInput{
|
||||
ImageId: resp.Images[0].ImageId,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("AWSAMICleanUp: Unable to Deregister Image %s", err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,59 +0,0 @@
|
|||
package amazon_acc
|
||||
|
||||
// This is the code necessary for running the provisioner acceptance tests.
|
||||
// It provides the builder config and cleans up the created resources.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
amazonebsbuilder "github.com/hashicorp/packer/builder/amazon/ebs"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
type AmazonEBSAccTest struct{}
|
||||
|
||||
func (s *AmazonEBSAccTest) GetConfigs() (map[string]string, error) {
|
||||
fixtures := map[string]string{
|
||||
"linux": "amazon-ebs.txt",
|
||||
"windows": "amazon-ebs_windows.txt",
|
||||
}
|
||||
|
||||
configs := make(map[string]string)
|
||||
|
||||
for distro, fixture := range fixtures {
|
||||
fileName := fixture
|
||||
filePath := filepath.Join("../../builder/amazon/ebs/acceptance/test-fixtures/", fileName)
|
||||
config, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Expected to find %s", filePath)
|
||||
}
|
||||
defer config.Close()
|
||||
|
||||
file, err := ioutil.ReadAll(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to read %s", filePath)
|
||||
}
|
||||
|
||||
configs[distro] = string(file)
|
||||
|
||||
}
|
||||
return configs, nil
|
||||
}
|
||||
|
||||
func (s *AmazonEBSAccTest) CleanUp() error {
|
||||
helper := AWSHelper{
|
||||
Region: "us-east-1",
|
||||
AMIName: "packer-acc-test",
|
||||
}
|
||||
return helper.CleanUpAmi()
|
||||
}
|
||||
|
||||
func (s *AmazonEBSAccTest) GetBuilderStore() packersdk.MapOfBuilder {
|
||||
return packersdk.MapOfBuilder{
|
||||
"amazon-ebs": func() (packersdk.Builder, error) { return &amazonebsbuilder.Builder{}, nil },
|
||||
}
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
{
|
||||
"type": "amazon-ebs",
|
||||
"ami_name": "packer-acc-test",
|
||||
"instance_type": "m1.small",
|
||||
"region": "us-east-1",
|
||||
"ssh_username": "ubuntu",
|
||||
"source_ami": "ami-0568456c",
|
||||
"force_deregister" : true,
|
||||
"tags": {
|
||||
"packer-test": "true"
|
||||
}
|
||||
}
|
|
@ -1,23 +0,0 @@
|
|||
{
|
||||
"type": "amazon-ebs",
|
||||
"region": "us-east-1",
|
||||
"instance_type": "t2.micro",
|
||||
"source_ami_filter": {
|
||||
"filters": {
|
||||
"virtualization-type": "hvm",
|
||||
"name": "*Windows_Server-2012-R2*English-64Bit-Base*",
|
||||
"root-device-type": "ebs"
|
||||
},
|
||||
"most_recent": true,
|
||||
"owners": "amazon"
|
||||
},
|
||||
"ami_name": "packer-acc-test",
|
||||
"user_data_file": "../../builder/amazon/ebs/acceptance/test-fixtures/scripts/bootstrap_win.txt",
|
||||
"communicator": "winrm",
|
||||
"winrm_username": "Administrator",
|
||||
"winrm_password": "SuperS3cr3t!!!!",
|
||||
"force_deregister" : true,
|
||||
"tags": {
|
||||
"packer-test": "true"
|
||||
}
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
<powershell>
|
||||
# Set administrator password
|
||||
net user Administrator SuperS3cr3t!!!!
|
||||
wmic useraccount where "name='Administrator'" set PasswordExpires=FALSE
|
||||
|
||||
# First, make sure WinRM can't be connected to
|
||||
netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new enable=yes action=block
|
||||
|
||||
# Delete any existing WinRM listeners
|
||||
winrm delete winrm/config/listener?Address=*+Transport=HTTP 2>$Null
|
||||
winrm delete winrm/config/listener?Address=*+Transport=HTTPS 2>$Null
|
||||
|
||||
# Disable group policies which block basic authentication and unencrypted login
|
||||
|
||||
Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Client -Name AllowBasic -Value 1
|
||||
Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Client -Name AllowUnencryptedTraffic -Value 1
|
||||
Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Service -Name AllowBasic -Value 1
|
||||
Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Service -Name AllowUnencryptedTraffic -Value 1
|
||||
|
||||
|
||||
# Create a new WinRM listener and configure
|
||||
winrm create winrm/config/listener?Address=*+Transport=HTTP
|
||||
winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="0"}'
|
||||
winrm set winrm/config '@{MaxTimeoutms="7200000"}'
|
||||
winrm set winrm/config/service '@{AllowUnencrypted="true"}'
|
||||
winrm set winrm/config/service '@{MaxConcurrentOperationsPerUser="12000"}'
|
||||
winrm set winrm/config/service/auth '@{Basic="true"}'
|
||||
winrm set winrm/config/client/auth '@{Basic="true"}'
|
||||
|
||||
# Configure UAC to allow privilege elevation in remote shells
|
||||
$Key = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System'
|
||||
$Setting = 'LocalAccountTokenFilterPolicy'
|
||||
Set-ItemProperty -Path $Key -Name $Setting -Value 1 -Force
|
||||
|
||||
# Configure and restart the WinRM Service; Enable the required firewall exception
|
||||
Stop-Service -Name WinRM
|
||||
Set-Service -Name WinRM -StartupType Automatic
|
||||
netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new action=allow localip=any remoteip=any
|
||||
Start-Service -Name WinRM
|
||||
</powershell>
|
|
@ -1,381 +0,0 @@
|
|||
//go:generate struct-markdown
|
||||
//go:generate mapstructure-to-hcl2 -type Config
|
||||
|
||||
// The amazonebs package contains a packersdk.Builder implementation that
|
||||
// builds AMIs for Amazon EC2.
|
||||
//
|
||||
// In general, there are two types of AMIs that can be created: ebs-backed or
|
||||
// instance-store. This builder _only_ builds ebs-backed images.
|
||||
package ebs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||
)
|
||||
|
||||
// The unique ID for this builder
|
||||
const BuilderId = "mitchellh.amazonebs"
|
||||
|
||||
type Config struct {
|
||||
common.PackerConfig `mapstructure:",squash"`
|
||||
awscommon.AccessConfig `mapstructure:",squash"`
|
||||
awscommon.AMIConfig `mapstructure:",squash"`
|
||||
awscommon.RunConfig `mapstructure:",squash"`
|
||||
// If true, Packer will not create the AMI. Useful for setting to `true`
|
||||
// during a build test stage. Default `false`.
|
||||
AMISkipCreateImage bool `mapstructure:"skip_create_ami" required:"false"`
|
||||
// Add one or more block device mappings to the AMI. These will be attached
|
||||
// when booting a new instance from your AMI. To add a block device during
|
||||
// the Packer build see `launch_block_device_mappings` below. Your options
|
||||
// here may vary depending on the type of VM you use. See the
|
||||
// [BlockDevices](#block-devices-configuration) documentation for fields.
|
||||
AMIMappings awscommon.BlockDevices `mapstructure:"ami_block_device_mappings" required:"false"`
|
||||
// Add one or more block devices before the Packer build starts. If you add
|
||||
// instance store volumes or EBS volumes in addition to the root device
|
||||
// volume, the created AMI will contain block device mapping information
|
||||
// for those volumes. Amazon creates snapshots of the source instance's
|
||||
// root volume and any other EBS volumes described here. When you launch an
|
||||
// instance from this new AMI, the instance automatically launches with
|
||||
// these additional volumes, and will restore them from snapshots taken
|
||||
// from the source instance. See the
|
||||
// [BlockDevices](#block-devices-configuration) documentation for fields.
|
||||
LaunchMappings awscommon.BlockDevices `mapstructure:"launch_block_device_mappings" required:"false"`
|
||||
// Tags to apply to the volumes that are *launched* to create the AMI.
|
||||
// These tags are *not* applied to the resulting AMI unless they're
|
||||
// duplicated in `tags`. This is a [template
|
||||
// engine](/docs/templates/legacy_json_templates/engine), see [Build template
|
||||
// data](#build-template-data) for more information.
|
||||
VolumeRunTags map[string]string `mapstructure:"run_volume_tags"`
|
||||
// Same as [`run_volume_tags`](#run_volume_tags) but defined as a singular
|
||||
// block containing a `name` and a `value` field. In HCL2 mode the
|
||||
// [`dynamic_block`](https://packer.io/docs/templates/hcl_templates/expressions.html#dynamic-blocks)
|
||||
// will allow you to create those programmatically.
|
||||
VolumeRunTag config.NameValues `mapstructure:"run_volume_tag" required:"false"`
|
||||
// Relevant only to Windows guests: If you set this flag, we'll add clauses
|
||||
// to the launch_block_device_mappings that make sure ephemeral drives
|
||||
// don't show up in the EC2 console. If you launched from the EC2 console,
|
||||
// you'd get this automatically, but the SDK does not provide this service.
|
||||
// For more information, see
|
||||
// https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/InstanceStorage.html.
|
||||
// Because we don't validate the OS type of your guest, it is up to you to
|
||||
// make sure you don't set this for *nix guests; behavior may be
|
||||
// unpredictable.
|
||||
NoEphemeral bool `mapstructure:"no_ephemeral" required:"false"`
|
||||
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
type Builder struct {
|
||||
config Config
|
||||
runner multistep.Runner
|
||||
}
|
||||
|
||||
func (b *Builder) ConfigSpec() hcldec.ObjectSpec { return b.config.FlatMapstructure().HCL2Spec() }
|
||||
|
||||
func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
|
||||
b.config.ctx.Funcs = awscommon.TemplateFuncs
|
||||
err := config.Decode(&b.config, &config.DecodeOpts{
|
||||
PluginType: BuilderId,
|
||||
Interpolate: true,
|
||||
InterpolateContext: &b.config.ctx,
|
||||
InterpolateFilter: &interpolate.RenderFilter{
|
||||
Exclude: []string{
|
||||
"ami_description",
|
||||
"run_tags",
|
||||
"run_tag",
|
||||
"run_volume_tags",
|
||||
"run_volume_tag",
|
||||
"spot_tags",
|
||||
"spot_tag",
|
||||
"snapshot_tags",
|
||||
"snapshot_tag",
|
||||
"tags",
|
||||
"tag",
|
||||
},
|
||||
},
|
||||
}, raws...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if b.config.PackerConfig.PackerForce {
|
||||
b.config.AMIForceDeregister = true
|
||||
}
|
||||
|
||||
// Accumulate any errors
|
||||
var errs *packersdk.MultiError
|
||||
var warns []string
|
||||
|
||||
errs = packersdk.MultiErrorAppend(errs, b.config.VolumeRunTag.CopyOn(&b.config.VolumeRunTags)...)
|
||||
|
||||
errs = packersdk.MultiErrorAppend(errs, b.config.AccessConfig.Prepare()...)
|
||||
errs = packersdk.MultiErrorAppend(errs,
|
||||
b.config.AMIConfig.Prepare(&b.config.AccessConfig, &b.config.ctx)...)
|
||||
errs = packersdk.MultiErrorAppend(errs, b.config.AMIMappings.Prepare(&b.config.ctx)...)
|
||||
errs = packersdk.MultiErrorAppend(errs, b.config.LaunchMappings.Prepare(&b.config.ctx)...)
|
||||
errs = packersdk.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)
|
||||
|
||||
if b.config.IsSpotInstance() && (b.config.AMIENASupport.True() || b.config.AMISriovNetSupport) {
|
||||
errs = packersdk.MultiErrorAppend(errs,
|
||||
fmt.Errorf("Spot instances do not support modification, which is required "+
|
||||
"when either `ena_support` or `sriov_support` are set. Please ensure "+
|
||||
"you use an AMI that already has either SR-IOV or ENA enabled."))
|
||||
}
|
||||
|
||||
if b.config.RunConfig.SpotPriceAutoProduct != "" {
|
||||
warns = append(warns, "spot_price_auto_product is deprecated and no "+
|
||||
"longer necessary for Packer builds. In future versions of "+
|
||||
"Packer, inclusion of spot_price_auto_product will error your "+
|
||||
"builds. Please take a look at our current documentation to "+
|
||||
"understand how Packer requests Spot instances.")
|
||||
}
|
||||
|
||||
if errs != nil && len(errs.Errors) > 0 {
|
||||
return nil, warns, errs
|
||||
}
|
||||
|
||||
packersdk.LogSecretFilter.Set(b.config.AccessKey, b.config.SecretKey, b.config.Token)
|
||||
|
||||
generatedData := awscommon.GetGeneratedDataList()
|
||||
return generatedData, warns, nil
|
||||
}
|
||||
|
||||
func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook) (packersdk.Artifact, error) {
|
||||
|
||||
session, err := b.config.Session()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ec2conn := ec2.New(session)
|
||||
iam := iam.New(session)
|
||||
// Setup the state bag and initial state for the steps
|
||||
state := new(multistep.BasicStateBag)
|
||||
state.Put("config", &b.config)
|
||||
state.Put("access_config", &b.config.AccessConfig)
|
||||
state.Put("ami_config", &b.config.AMIConfig)
|
||||
state.Put("ec2", ec2conn)
|
||||
state.Put("iam", iam)
|
||||
state.Put("awsSession", session)
|
||||
state.Put("hook", hook)
|
||||
state.Put("ui", ui)
|
||||
generatedData := &packerbuilderdata.GeneratedData{State: state}
|
||||
|
||||
var instanceStep multistep.Step
|
||||
|
||||
if b.config.IsSpotInstance() {
|
||||
instanceStep = &awscommon.StepRunSpotInstance{
|
||||
PollingConfig: b.config.PollingConfig,
|
||||
AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
|
||||
LaunchMappings: b.config.LaunchMappings,
|
||||
BlockDurationMinutes: b.config.BlockDurationMinutes,
|
||||
Ctx: b.config.ctx,
|
||||
Comm: &b.config.RunConfig.Comm,
|
||||
Debug: b.config.PackerDebug,
|
||||
EbsOptimized: b.config.EbsOptimized,
|
||||
ExpectedRootDevice: "ebs",
|
||||
InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
|
||||
InstanceType: b.config.InstanceType,
|
||||
Region: *ec2conn.Config.Region,
|
||||
SourceAMI: b.config.SourceAmi,
|
||||
SpotPrice: b.config.SpotPrice,
|
||||
SpotTags: b.config.SpotTags,
|
||||
Tags: b.config.RunTags,
|
||||
SpotInstanceTypes: b.config.SpotInstanceTypes,
|
||||
UserData: b.config.UserData,
|
||||
UserDataFile: b.config.UserDataFile,
|
||||
VolumeTags: b.config.VolumeRunTags,
|
||||
NoEphemeral: b.config.NoEphemeral,
|
||||
}
|
||||
} else {
|
||||
instanceStep = &awscommon.StepRunSourceInstance{
|
||||
PollingConfig: b.config.PollingConfig,
|
||||
AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
|
||||
LaunchMappings: b.config.LaunchMappings,
|
||||
Comm: &b.config.RunConfig.Comm,
|
||||
Ctx: b.config.ctx,
|
||||
Debug: b.config.PackerDebug,
|
||||
EbsOptimized: b.config.EbsOptimized,
|
||||
EnableT2Unlimited: b.config.EnableT2Unlimited,
|
||||
ExpectedRootDevice: "ebs",
|
||||
InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
|
||||
InstanceType: b.config.InstanceType,
|
||||
IsRestricted: b.config.IsChinaCloud() || b.config.IsGovCloud(),
|
||||
SourceAMI: b.config.SourceAmi,
|
||||
Tags: b.config.RunTags,
|
||||
Tenancy: b.config.Tenancy,
|
||||
UserData: b.config.UserData,
|
||||
UserDataFile: b.config.UserDataFile,
|
||||
VolumeTags: b.config.VolumeRunTags,
|
||||
NoEphemeral: b.config.NoEphemeral,
|
||||
}
|
||||
}
|
||||
|
||||
// Build the steps
|
||||
steps := []multistep.Step{
|
||||
&awscommon.StepPreValidate{
|
||||
DestAmiName: b.config.AMIName,
|
||||
ForceDeregister: b.config.AMIForceDeregister,
|
||||
AMISkipBuildRegion: b.config.AMISkipBuildRegion,
|
||||
VpcId: b.config.VpcId,
|
||||
SubnetId: b.config.SubnetId,
|
||||
HasSubnetFilter: !b.config.SubnetFilter.Empty(),
|
||||
},
|
||||
&awscommon.StepSourceAMIInfo{
|
||||
SourceAmi: b.config.SourceAmi,
|
||||
EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
|
||||
EnableAMIENASupport: b.config.AMIENASupport,
|
||||
AmiFilters: b.config.SourceAmiFilter,
|
||||
AMIVirtType: b.config.AMIVirtType,
|
||||
},
|
||||
&awscommon.StepNetworkInfo{
|
||||
VpcId: b.config.VpcId,
|
||||
VpcFilter: b.config.VpcFilter,
|
||||
SecurityGroupIds: b.config.SecurityGroupIds,
|
||||
SecurityGroupFilter: b.config.SecurityGroupFilter,
|
||||
SubnetId: b.config.SubnetId,
|
||||
SubnetFilter: b.config.SubnetFilter,
|
||||
AvailabilityZone: b.config.AvailabilityZone,
|
||||
},
|
||||
&awscommon.StepKeyPair{
|
||||
Debug: b.config.PackerDebug,
|
||||
Comm: &b.config.RunConfig.Comm,
|
||||
DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName),
|
||||
},
|
||||
&awscommon.StepSecurityGroup{
|
||||
SecurityGroupFilter: b.config.SecurityGroupFilter,
|
||||
SecurityGroupIds: b.config.SecurityGroupIds,
|
||||
CommConfig: &b.config.RunConfig.Comm,
|
||||
TemporarySGSourceCidrs: b.config.TemporarySGSourceCidrs,
|
||||
SkipSSHRuleCreation: b.config.SSMAgentEnabled(),
|
||||
},
|
||||
&awscommon.StepIamInstanceProfile{
|
||||
IamInstanceProfile: b.config.IamInstanceProfile,
|
||||
SkipProfileValidation: b.config.SkipProfileValidation,
|
||||
TemporaryIamInstanceProfilePolicyDocument: b.config.TemporaryIamInstanceProfilePolicyDocument,
|
||||
},
|
||||
&awscommon.StepCleanupVolumes{
|
||||
LaunchMappings: b.config.LaunchMappings,
|
||||
},
|
||||
instanceStep,
|
||||
&awscommon.StepGetPassword{
|
||||
Debug: b.config.PackerDebug,
|
||||
Comm: &b.config.RunConfig.Comm,
|
||||
Timeout: b.config.WindowsPasswordTimeout,
|
||||
BuildName: b.config.PackerBuildName,
|
||||
},
|
||||
&awscommon.StepCreateSSMTunnel{
|
||||
AWSSession: session,
|
||||
Region: *ec2conn.Config.Region,
|
||||
PauseBeforeSSM: b.config.PauseBeforeSSM,
|
||||
LocalPortNumber: b.config.SessionManagerPort,
|
||||
RemotePortNumber: b.config.Comm.Port(),
|
||||
SSMAgentEnabled: b.config.SSMAgentEnabled(),
|
||||
},
|
||||
&communicator.StepConnect{
|
||||
Config: &b.config.RunConfig.Comm,
|
||||
Host: awscommon.SSHHost(
|
||||
ec2conn,
|
||||
b.config.SSHInterface,
|
||||
b.config.Comm.Host(),
|
||||
),
|
||||
SSHPort: awscommon.Port(
|
||||
b.config.SSHInterface,
|
||||
b.config.Comm.Port(),
|
||||
),
|
||||
SSHConfig: b.config.RunConfig.Comm.SSHConfigFunc(),
|
||||
},
|
||||
&awscommon.StepSetGeneratedData{
|
||||
GeneratedData: generatedData,
|
||||
},
|
||||
&commonsteps.StepProvision{},
|
||||
&commonsteps.StepCleanupTempKeys{
|
||||
Comm: &b.config.RunConfig.Comm,
|
||||
},
|
||||
&awscommon.StepStopEBSBackedInstance{
|
||||
PollingConfig: b.config.PollingConfig,
|
||||
Skip: b.config.IsSpotInstance(),
|
||||
DisableStopInstance: b.config.DisableStopInstance,
|
||||
},
|
||||
&awscommon.StepModifyEBSBackedInstance{
|
||||
EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
|
||||
EnableAMIENASupport: b.config.AMIENASupport,
|
||||
},
|
||||
&awscommon.StepDeregisterAMI{
|
||||
AccessConfig: &b.config.AccessConfig,
|
||||
ForceDeregister: b.config.AMIForceDeregister,
|
||||
ForceDeleteSnapshot: b.config.AMIForceDeleteSnapshot,
|
||||
AMIName: b.config.AMIName,
|
||||
Regions: b.config.AMIRegions,
|
||||
},
|
||||
&stepCreateAMI{
|
||||
AMISkipCreateImage: b.config.AMISkipCreateImage,
|
||||
AMISkipBuildRegion: b.config.AMISkipBuildRegion,
|
||||
PollingConfig: b.config.PollingConfig,
|
||||
},
|
||||
&awscommon.StepAMIRegionCopy{
|
||||
AccessConfig: &b.config.AccessConfig,
|
||||
Regions: b.config.AMIRegions,
|
||||
AMIKmsKeyId: b.config.AMIKmsKeyId,
|
||||
RegionKeyIds: b.config.AMIRegionKMSKeyIDs,
|
||||
EncryptBootVolume: b.config.AMIEncryptBootVolume,
|
||||
Name: b.config.AMIName,
|
||||
OriginalRegion: *ec2conn.Config.Region,
|
||||
AMISkipCreateImage: b.config.AMISkipCreateImage,
|
||||
AMISkipBuildRegion: b.config.AMISkipBuildRegion,
|
||||
},
|
||||
&awscommon.StepModifyAMIAttributes{
|
||||
AMISkipCreateImage: b.config.AMISkipCreateImage,
|
||||
Description: b.config.AMIDescription,
|
||||
Users: b.config.AMIUsers,
|
||||
Groups: b.config.AMIGroups,
|
||||
ProductCodes: b.config.AMIProductCodes,
|
||||
SnapshotUsers: b.config.SnapshotUsers,
|
||||
SnapshotGroups: b.config.SnapshotGroups,
|
||||
Ctx: b.config.ctx,
|
||||
GeneratedData: generatedData,
|
||||
},
|
||||
&awscommon.StepCreateTags{
|
||||
AMISkipCreateImage: b.config.AMISkipCreateImage,
|
||||
Tags: b.config.AMITags,
|
||||
SnapshotTags: b.config.SnapshotTags,
|
||||
Ctx: b.config.ctx,
|
||||
},
|
||||
}
|
||||
|
||||
// Run!
|
||||
b.runner = commonsteps.NewRunner(steps, b.config.PackerConfig, ui)
|
||||
b.runner.Run(ctx, state)
|
||||
// If there was an error, return that
|
||||
if rawErr, ok := state.GetOk("error"); ok {
|
||||
return nil, rawErr.(error)
|
||||
}
|
||||
|
||||
// If there are no AMIs, then just return
|
||||
if _, ok := state.GetOk("amis"); !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Build the artifact and return it
|
||||
artifact := &awscommon.Artifact{
|
||||
Amis: state.Get("amis").(map[string]string),
|
||||
BuilderIdValue: BuilderId,
|
||||
Session: session,
|
||||
StateData: map[string]interface{}{"generated_data": state.Get("generated_data")},
|
||||
}
|
||||
|
||||
return artifact, nil
|
||||
}
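// Illustrative sketch (not part of the original file): a minimal programmatic use of
// this builder, assuming valid AWS credentials are available in the environment. The
// concrete values mirror the acceptance fixture shown earlier and are examples only.
//
//	b := &Builder{}
//	_, warns, err := b.Prepare(map[string]interface{}{
//		"ami_name":      "packer-example",
//		"instance_type": "t2.micro",
//		"region":        "us-east-1",
//		"source_ami":    "ami-0568456c",
//		"ssh_username":  "ubuntu",
//	})
//	// handle warns/err, then call b.Run(ctx, ui, hook) with a packersdk.Ui and Hook.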
|
|
@ -1,303 +0,0 @@
|
|||
// Code generated by "mapstructure-to-hcl2 -type Config"; DO NOT EDIT.
|
||||
|
||||
package ebs
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer/builder/amazon/common"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// FlatConfig is an auto-generated flat version of Config.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatConfig struct {
|
||||
PackerBuildName *string `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"`
|
||||
PackerBuilderType *string `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"`
|
||||
PackerCoreVersion *string `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"`
|
||||
PackerDebug *bool `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"`
|
||||
PackerForce *bool `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"`
|
||||
PackerOnError *string `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"`
|
||||
PackerUserVars map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"`
|
||||
PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"`
|
||||
AccessKey *string `mapstructure:"access_key" required:"true" cty:"access_key" hcl:"access_key"`
|
||||
AssumeRole *common.FlatAssumeRoleConfig `mapstructure:"assume_role" required:"false" cty:"assume_role" hcl:"assume_role"`
|
||||
CustomEndpointEc2 *string `mapstructure:"custom_endpoint_ec2" required:"false" cty:"custom_endpoint_ec2" hcl:"custom_endpoint_ec2"`
|
||||
CredsFilename *string `mapstructure:"shared_credentials_file" required:"false" cty:"shared_credentials_file" hcl:"shared_credentials_file"`
|
||||
DecodeAuthZMessages *bool `mapstructure:"decode_authorization_messages" required:"false" cty:"decode_authorization_messages" hcl:"decode_authorization_messages"`
|
||||
InsecureSkipTLSVerify *bool `mapstructure:"insecure_skip_tls_verify" required:"false" cty:"insecure_skip_tls_verify" hcl:"insecure_skip_tls_verify"`
|
||||
MaxRetries *int `mapstructure:"max_retries" required:"false" cty:"max_retries" hcl:"max_retries"`
|
||||
MFACode *string `mapstructure:"mfa_code" required:"false" cty:"mfa_code" hcl:"mfa_code"`
|
||||
ProfileName *string `mapstructure:"profile" required:"false" cty:"profile" hcl:"profile"`
|
||||
RawRegion *string `mapstructure:"region" required:"true" cty:"region" hcl:"region"`
|
||||
SecretKey *string `mapstructure:"secret_key" required:"true" cty:"secret_key" hcl:"secret_key"`
|
||||
SkipMetadataApiCheck *bool `mapstructure:"skip_metadata_api_check" cty:"skip_metadata_api_check" hcl:"skip_metadata_api_check"`
|
||||
SkipCredsValidation *bool `mapstructure:"skip_credential_validation" cty:"skip_credential_validation" hcl:"skip_credential_validation"`
|
||||
Token *string `mapstructure:"token" required:"false" cty:"token" hcl:"token"`
|
||||
VaultAWSEngine *common.FlatVaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false" cty:"vault_aws_engine" hcl:"vault_aws_engine"`
|
||||
PollingConfig *common.FlatAWSPollingConfig `mapstructure:"aws_polling" required:"false" cty:"aws_polling" hcl:"aws_polling"`
|
||||
AMIName *string `mapstructure:"ami_name" required:"true" cty:"ami_name" hcl:"ami_name"`
|
||||
AMIDescription *string `mapstructure:"ami_description" required:"false" cty:"ami_description" hcl:"ami_description"`
|
||||
AMIVirtType *string `mapstructure:"ami_virtualization_type" required:"false" cty:"ami_virtualization_type" hcl:"ami_virtualization_type"`
|
||||
AMIUsers []string `mapstructure:"ami_users" required:"false" cty:"ami_users" hcl:"ami_users"`
|
||||
AMIGroups []string `mapstructure:"ami_groups" required:"false" cty:"ami_groups" hcl:"ami_groups"`
|
||||
AMIProductCodes []string `mapstructure:"ami_product_codes" required:"false" cty:"ami_product_codes" hcl:"ami_product_codes"`
|
||||
AMIRegions []string `mapstructure:"ami_regions" required:"false" cty:"ami_regions" hcl:"ami_regions"`
|
||||
AMISkipRegionValidation *bool `mapstructure:"skip_region_validation" required:"false" cty:"skip_region_validation" hcl:"skip_region_validation"`
|
||||
AMITags map[string]string `mapstructure:"tags" required:"false" cty:"tags" hcl:"tags"`
|
||||
AMITag []config.FlatKeyValue `mapstructure:"tag" required:"false" cty:"tag" hcl:"tag"`
|
||||
AMIENASupport *bool `mapstructure:"ena_support" required:"false" cty:"ena_support" hcl:"ena_support"`
|
||||
AMISriovNetSupport *bool `mapstructure:"sriov_support" required:"false" cty:"sriov_support" hcl:"sriov_support"`
|
||||
AMIForceDeregister *bool `mapstructure:"force_deregister" required:"false" cty:"force_deregister" hcl:"force_deregister"`
|
||||
AMIForceDeleteSnapshot *bool `mapstructure:"force_delete_snapshot" required:"false" cty:"force_delete_snapshot" hcl:"force_delete_snapshot"`
|
||||
AMIEncryptBootVolume *bool `mapstructure:"encrypt_boot" required:"false" cty:"encrypt_boot" hcl:"encrypt_boot"`
|
||||
AMIKmsKeyId *string `mapstructure:"kms_key_id" required:"false" cty:"kms_key_id" hcl:"kms_key_id"`
|
||||
AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids" required:"false" cty:"region_kms_key_ids" hcl:"region_kms_key_ids"`
|
||||
AMISkipBuildRegion *bool `mapstructure:"skip_save_build_region" cty:"skip_save_build_region" hcl:"skip_save_build_region"`
|
||||
SnapshotTags map[string]string `mapstructure:"snapshot_tags" required:"false" cty:"snapshot_tags" hcl:"snapshot_tags"`
|
||||
SnapshotTag []config.FlatKeyValue `mapstructure:"snapshot_tag" required:"false" cty:"snapshot_tag" hcl:"snapshot_tag"`
|
||||
SnapshotUsers []string `mapstructure:"snapshot_users" required:"false" cty:"snapshot_users" hcl:"snapshot_users"`
|
||||
SnapshotGroups []string `mapstructure:"snapshot_groups" required:"false" cty:"snapshot_groups" hcl:"snapshot_groups"`
|
||||
AssociatePublicIpAddress *bool `mapstructure:"associate_public_ip_address" required:"false" cty:"associate_public_ip_address" hcl:"associate_public_ip_address"`
|
||||
AvailabilityZone *string `mapstructure:"availability_zone" required:"false" cty:"availability_zone" hcl:"availability_zone"`
|
||||
BlockDurationMinutes *int64 `mapstructure:"block_duration_minutes" required:"false" cty:"block_duration_minutes" hcl:"block_duration_minutes"`
|
||||
DisableStopInstance *bool `mapstructure:"disable_stop_instance" required:"false" cty:"disable_stop_instance" hcl:"disable_stop_instance"`
|
||||
EbsOptimized *bool `mapstructure:"ebs_optimized" required:"false" cty:"ebs_optimized" hcl:"ebs_optimized"`
|
||||
EnableT2Unlimited *bool `mapstructure:"enable_t2_unlimited" required:"false" cty:"enable_t2_unlimited" hcl:"enable_t2_unlimited"`
|
||||
IamInstanceProfile *string `mapstructure:"iam_instance_profile" required:"false" cty:"iam_instance_profile" hcl:"iam_instance_profile"`
|
||||
SkipProfileValidation *bool `mapstructure:"skip_profile_validation" required:"false" cty:"skip_profile_validation" hcl:"skip_profile_validation"`
|
||||
TemporaryIamInstanceProfilePolicyDocument *common.FlatPolicyDocument `mapstructure:"temporary_iam_instance_profile_policy_document" required:"false" cty:"temporary_iam_instance_profile_policy_document" hcl:"temporary_iam_instance_profile_policy_document"`
|
||||
InstanceInitiatedShutdownBehavior *string `mapstructure:"shutdown_behavior" required:"false" cty:"shutdown_behavior" hcl:"shutdown_behavior"`
|
||||
InstanceType *string `mapstructure:"instance_type" required:"true" cty:"instance_type" hcl:"instance_type"`
|
||||
SecurityGroupFilter *common.FlatSecurityGroupFilterOptions `mapstructure:"security_group_filter" required:"false" cty:"security_group_filter" hcl:"security_group_filter"`
|
||||
RunTags map[string]string `mapstructure:"run_tags" required:"false" cty:"run_tags" hcl:"run_tags"`
|
||||
RunTag []config.FlatKeyValue `mapstructure:"run_tag" required:"false" cty:"run_tag" hcl:"run_tag"`
|
||||
SecurityGroupId *string `mapstructure:"security_group_id" required:"false" cty:"security_group_id" hcl:"security_group_id"`
|
||||
SecurityGroupIds []string `mapstructure:"security_group_ids" required:"false" cty:"security_group_ids" hcl:"security_group_ids"`
|
||||
SourceAmi *string `mapstructure:"source_ami" required:"true" cty:"source_ami" hcl:"source_ami"`
|
||||
SourceAmiFilter *common.FlatAmiFilterOptions `mapstructure:"source_ami_filter" required:"false" cty:"source_ami_filter" hcl:"source_ami_filter"`
|
||||
SpotInstanceTypes []string `mapstructure:"spot_instance_types" required:"false" cty:"spot_instance_types" hcl:"spot_instance_types"`
|
||||
SpotPrice *string `mapstructure:"spot_price" required:"false" cty:"spot_price" hcl:"spot_price"`
|
||||
SpotPriceAutoProduct *string `mapstructure:"spot_price_auto_product" required:"false" undocumented:"true" cty:"spot_price_auto_product" hcl:"spot_price_auto_product"`
|
||||
SpotTags map[string]string `mapstructure:"spot_tags" required:"false" cty:"spot_tags" hcl:"spot_tags"`
|
||||
SpotTag []config.FlatKeyValue `mapstructure:"spot_tag" required:"false" cty:"spot_tag" hcl:"spot_tag"`
|
||||
SubnetFilter *common.FlatSubnetFilterOptions `mapstructure:"subnet_filter" required:"false" cty:"subnet_filter" hcl:"subnet_filter"`
|
||||
SubnetId *string `mapstructure:"subnet_id" required:"false" cty:"subnet_id" hcl:"subnet_id"`
|
||||
Tenancy *string `mapstructure:"tenancy" required:"false" cty:"tenancy" hcl:"tenancy"`
|
||||
TemporarySGSourceCidrs []string `mapstructure:"temporary_security_group_source_cidrs" required:"false" cty:"temporary_security_group_source_cidrs" hcl:"temporary_security_group_source_cidrs"`
|
||||
UserData *string `mapstructure:"user_data" required:"false" cty:"user_data" hcl:"user_data"`
|
||||
UserDataFile *string `mapstructure:"user_data_file" required:"false" cty:"user_data_file" hcl:"user_data_file"`
|
||||
VpcFilter *common.FlatVpcFilterOptions `mapstructure:"vpc_filter" required:"false" cty:"vpc_filter" hcl:"vpc_filter"`
|
||||
VpcId *string `mapstructure:"vpc_id" required:"false" cty:"vpc_id" hcl:"vpc_id"`
|
||||
WindowsPasswordTimeout *string `mapstructure:"windows_password_timeout" required:"false" cty:"windows_password_timeout" hcl:"windows_password_timeout"`
|
||||
Type *string `mapstructure:"communicator" cty:"communicator" hcl:"communicator"`
|
||||
PauseBeforeConnect *string `mapstructure:"pause_before_connecting" cty:"pause_before_connecting" hcl:"pause_before_connecting"`
|
||||
SSHHost *string `mapstructure:"ssh_host" cty:"ssh_host" hcl:"ssh_host"`
|
||||
SSHPort *int `mapstructure:"ssh_port" cty:"ssh_port" hcl:"ssh_port"`
|
||||
SSHUsername *string `mapstructure:"ssh_username" cty:"ssh_username" hcl:"ssh_username"`
|
||||
SSHPassword *string `mapstructure:"ssh_password" cty:"ssh_password" hcl:"ssh_password"`
|
||||
SSHKeyPairName *string `mapstructure:"ssh_keypair_name" undocumented:"true" cty:"ssh_keypair_name" hcl:"ssh_keypair_name"`
|
||||
SSHTemporaryKeyPairName *string `mapstructure:"temporary_key_pair_name" undocumented:"true" cty:"temporary_key_pair_name" hcl:"temporary_key_pair_name"`
|
||||
SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"`
|
||||
SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" hcl:"temporary_key_pair_bits"`
|
||||
SSHCiphers []string `mapstructure:"ssh_ciphers" cty:"ssh_ciphers" hcl:"ssh_ciphers"`
|
||||
SSHClearAuthorizedKeys *bool `mapstructure:"ssh_clear_authorized_keys" cty:"ssh_clear_authorized_keys" hcl:"ssh_clear_authorized_keys"`
|
||||
SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms" cty:"ssh_key_exchange_algorithms" hcl:"ssh_key_exchange_algorithms"`
|
||||
SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" undocumented:"true" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"`
|
||||
SSHCertificateFile *string `mapstructure:"ssh_certificate_file" cty:"ssh_certificate_file" hcl:"ssh_certificate_file"`
|
||||
SSHPty *bool `mapstructure:"ssh_pty" cty:"ssh_pty" hcl:"ssh_pty"`
|
||||
SSHTimeout *string `mapstructure:"ssh_timeout" cty:"ssh_timeout" hcl:"ssh_timeout"`
|
||||
SSHWaitTimeout *string `mapstructure:"ssh_wait_timeout" undocumented:"true" cty:"ssh_wait_timeout" hcl:"ssh_wait_timeout"`
|
||||
SSHAgentAuth *bool `mapstructure:"ssh_agent_auth" undocumented:"true" cty:"ssh_agent_auth" hcl:"ssh_agent_auth"`
|
||||
SSHDisableAgentForwarding *bool `mapstructure:"ssh_disable_agent_forwarding" cty:"ssh_disable_agent_forwarding" hcl:"ssh_disable_agent_forwarding"`
|
||||
SSHHandshakeAttempts *int `mapstructure:"ssh_handshake_attempts" cty:"ssh_handshake_attempts" hcl:"ssh_handshake_attempts"`
|
||||
SSHBastionHost *string `mapstructure:"ssh_bastion_host" cty:"ssh_bastion_host" hcl:"ssh_bastion_host"`
|
||||
SSHBastionPort *int `mapstructure:"ssh_bastion_port" cty:"ssh_bastion_port" hcl:"ssh_bastion_port"`
|
||||
SSHBastionAgentAuth *bool `mapstructure:"ssh_bastion_agent_auth" cty:"ssh_bastion_agent_auth" hcl:"ssh_bastion_agent_auth"`
|
||||
SSHBastionUsername *string `mapstructure:"ssh_bastion_username" cty:"ssh_bastion_username" hcl:"ssh_bastion_username"`
|
||||
SSHBastionPassword *string `mapstructure:"ssh_bastion_password" cty:"ssh_bastion_password" hcl:"ssh_bastion_password"`
|
||||
SSHBastionInteractive *bool `mapstructure:"ssh_bastion_interactive" cty:"ssh_bastion_interactive" hcl:"ssh_bastion_interactive"`
|
||||
SSHBastionPrivateKeyFile *string `mapstructure:"ssh_bastion_private_key_file" cty:"ssh_bastion_private_key_file" hcl:"ssh_bastion_private_key_file"`
|
||||
SSHBastionCertificateFile *string `mapstructure:"ssh_bastion_certificate_file" cty:"ssh_bastion_certificate_file" hcl:"ssh_bastion_certificate_file"`
|
||||
SSHFileTransferMethod *string `mapstructure:"ssh_file_transfer_method" cty:"ssh_file_transfer_method" hcl:"ssh_file_transfer_method"`
|
||||
SSHProxyHost *string `mapstructure:"ssh_proxy_host" cty:"ssh_proxy_host" hcl:"ssh_proxy_host"`
|
||||
SSHProxyPort *int `mapstructure:"ssh_proxy_port" cty:"ssh_proxy_port" hcl:"ssh_proxy_port"`
|
||||
SSHProxyUsername *string `mapstructure:"ssh_proxy_username" cty:"ssh_proxy_username" hcl:"ssh_proxy_username"`
|
||||
SSHProxyPassword *string `mapstructure:"ssh_proxy_password" cty:"ssh_proxy_password" hcl:"ssh_proxy_password"`
|
||||
SSHKeepAliveInterval *string `mapstructure:"ssh_keep_alive_interval" cty:"ssh_keep_alive_interval" hcl:"ssh_keep_alive_interval"`
|
||||
SSHReadWriteTimeout *string `mapstructure:"ssh_read_write_timeout" cty:"ssh_read_write_timeout" hcl:"ssh_read_write_timeout"`
|
||||
SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels" cty:"ssh_remote_tunnels" hcl:"ssh_remote_tunnels"`
|
||||
SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels" cty:"ssh_local_tunnels" hcl:"ssh_local_tunnels"`
|
||||
SSHPublicKey []byte `mapstructure:"ssh_public_key" undocumented:"true" cty:"ssh_public_key" hcl:"ssh_public_key"`
|
||||
SSHPrivateKey []byte `mapstructure:"ssh_private_key" undocumented:"true" cty:"ssh_private_key" hcl:"ssh_private_key"`
|
||||
WinRMUser *string `mapstructure:"winrm_username" cty:"winrm_username" hcl:"winrm_username"`
|
||||
WinRMPassword *string `mapstructure:"winrm_password" cty:"winrm_password" hcl:"winrm_password"`
|
||||
WinRMHost *string `mapstructure:"winrm_host" cty:"winrm_host" hcl:"winrm_host"`
|
||||
WinRMNoProxy *bool `mapstructure:"winrm_no_proxy" cty:"winrm_no_proxy" hcl:"winrm_no_proxy"`
|
||||
WinRMPort *int `mapstructure:"winrm_port" cty:"winrm_port" hcl:"winrm_port"`
|
||||
WinRMTimeout *string `mapstructure:"winrm_timeout" cty:"winrm_timeout" hcl:"winrm_timeout"`
|
||||
WinRMUseSSL *bool `mapstructure:"winrm_use_ssl" cty:"winrm_use_ssl" hcl:"winrm_use_ssl"`
|
||||
WinRMInsecure *bool `mapstructure:"winrm_insecure" cty:"winrm_insecure" hcl:"winrm_insecure"`
|
||||
WinRMUseNTLM *bool `mapstructure:"winrm_use_ntlm" cty:"winrm_use_ntlm" hcl:"winrm_use_ntlm"`
|
||||
SSHInterface *string `mapstructure:"ssh_interface" cty:"ssh_interface" hcl:"ssh_interface"`
|
||||
PauseBeforeSSM *string `mapstructure:"pause_before_ssm" cty:"pause_before_ssm" hcl:"pause_before_ssm"`
|
||||
SessionManagerPort *int `mapstructure:"session_manager_port" cty:"session_manager_port" hcl:"session_manager_port"`
|
||||
AMISkipCreateImage *bool `mapstructure:"skip_create_ami" required:"false" cty:"skip_create_ami" hcl:"skip_create_ami"`
|
||||
AMIMappings []common.FlatBlockDevice `mapstructure:"ami_block_device_mappings" required:"false" cty:"ami_block_device_mappings" hcl:"ami_block_device_mappings"`
|
||||
LaunchMappings []common.FlatBlockDevice `mapstructure:"launch_block_device_mappings" required:"false" cty:"launch_block_device_mappings" hcl:"launch_block_device_mappings"`
|
||||
VolumeRunTags map[string]string `mapstructure:"run_volume_tags" cty:"run_volume_tags" hcl:"run_volume_tags"`
|
||||
VolumeRunTag []config.FlatNameValue `mapstructure:"run_volume_tag" required:"false" cty:"run_volume_tag" hcl:"run_volume_tag"`
|
||||
NoEphemeral *bool `mapstructure:"no_ephemeral" required:"false" cty:"no_ephemeral" hcl:"no_ephemeral"`
|
||||
}

// FlatMapstructure returns a new FlatConfig.
// FlatConfig is an auto-generated flat version of Config.
// Where the contents of fields with a `mapstructure:,squash` tag are bubbled up.
func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
return new(FlatConfig)
}

// HCL2Spec returns the hcl spec of a Config.
// This spec is used by HCL to read the fields of Config.
// The decoded values from this spec will then be applied to a FlatConfig.
func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
s := map[string]hcldec.Spec{
"packer_build_name": &hcldec.AttrSpec{Name: "packer_build_name", Type: cty.String, Required: false},
|
||||
"packer_builder_type": &hcldec.AttrSpec{Name: "packer_builder_type", Type: cty.String, Required: false},
|
||||
"packer_core_version": &hcldec.AttrSpec{Name: "packer_core_version", Type: cty.String, Required: false},
|
||||
"packer_debug": &hcldec.AttrSpec{Name: "packer_debug", Type: cty.Bool, Required: false},
|
||||
"packer_force": &hcldec.AttrSpec{Name: "packer_force", Type: cty.Bool, Required: false},
|
||||
"packer_on_error": &hcldec.AttrSpec{Name: "packer_on_error", Type: cty.String, Required: false},
|
||||
"packer_user_variables": &hcldec.AttrSpec{Name: "packer_user_variables", Type: cty.Map(cty.String), Required: false},
|
||||
"packer_sensitive_variables": &hcldec.AttrSpec{Name: "packer_sensitive_variables", Type: cty.List(cty.String), Required: false},
|
||||
"access_key": &hcldec.AttrSpec{Name: "access_key", Type: cty.String, Required: false},
|
||||
"assume_role": &hcldec.BlockSpec{TypeName: "assume_role", Nested: hcldec.ObjectSpec((*common.FlatAssumeRoleConfig)(nil).HCL2Spec())},
|
||||
"custom_endpoint_ec2": &hcldec.AttrSpec{Name: "custom_endpoint_ec2", Type: cty.String, Required: false},
|
||||
"shared_credentials_file": &hcldec.AttrSpec{Name: "shared_credentials_file", Type: cty.String, Required: false},
|
||||
"decode_authorization_messages": &hcldec.AttrSpec{Name: "decode_authorization_messages", Type: cty.Bool, Required: false},
|
||||
"insecure_skip_tls_verify": &hcldec.AttrSpec{Name: "insecure_skip_tls_verify", Type: cty.Bool, Required: false},
|
||||
"max_retries": &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false},
|
||||
"mfa_code": &hcldec.AttrSpec{Name: "mfa_code", Type: cty.String, Required: false},
|
||||
"profile": &hcldec.AttrSpec{Name: "profile", Type: cty.String, Required: false},
|
||||
"region": &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: false},
|
||||
"secret_key": &hcldec.AttrSpec{Name: "secret_key", Type: cty.String, Required: false},
|
||||
"skip_metadata_api_check": &hcldec.AttrSpec{Name: "skip_metadata_api_check", Type: cty.Bool, Required: false},
|
||||
"skip_credential_validation": &hcldec.AttrSpec{Name: "skip_credential_validation", Type: cty.Bool, Required: false},
|
||||
"token": &hcldec.AttrSpec{Name: "token", Type: cty.String, Required: false},
|
||||
"vault_aws_engine": &hcldec.BlockSpec{TypeName: "vault_aws_engine", Nested: hcldec.ObjectSpec((*common.FlatVaultAWSEngineOptions)(nil).HCL2Spec())},
|
||||
"aws_polling": &hcldec.BlockSpec{TypeName: "aws_polling", Nested: hcldec.ObjectSpec((*common.FlatAWSPollingConfig)(nil).HCL2Spec())},
|
||||
"ami_name": &hcldec.AttrSpec{Name: "ami_name", Type: cty.String, Required: false},
|
||||
"ami_description": &hcldec.AttrSpec{Name: "ami_description", Type: cty.String, Required: false},
|
||||
"ami_virtualization_type": &hcldec.AttrSpec{Name: "ami_virtualization_type", Type: cty.String, Required: false},
|
||||
"ami_users": &hcldec.AttrSpec{Name: "ami_users", Type: cty.List(cty.String), Required: false},
|
||||
"ami_groups": &hcldec.AttrSpec{Name: "ami_groups", Type: cty.List(cty.String), Required: false},
|
||||
"ami_product_codes": &hcldec.AttrSpec{Name: "ami_product_codes", Type: cty.List(cty.String), Required: false},
|
||||
"ami_regions": &hcldec.AttrSpec{Name: "ami_regions", Type: cty.List(cty.String), Required: false},
|
||||
"skip_region_validation": &hcldec.AttrSpec{Name: "skip_region_validation", Type: cty.Bool, Required: false},
|
||||
"tags": &hcldec.AttrSpec{Name: "tags", Type: cty.Map(cty.String), Required: false},
|
||||
"tag": &hcldec.BlockListSpec{TypeName: "tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
|
||||
"ena_support": &hcldec.AttrSpec{Name: "ena_support", Type: cty.Bool, Required: false},
|
||||
"sriov_support": &hcldec.AttrSpec{Name: "sriov_support", Type: cty.Bool, Required: false},
|
||||
"force_deregister": &hcldec.AttrSpec{Name: "force_deregister", Type: cty.Bool, Required: false},
|
||||
"force_delete_snapshot": &hcldec.AttrSpec{Name: "force_delete_snapshot", Type: cty.Bool, Required: false},
|
||||
"encrypt_boot": &hcldec.AttrSpec{Name: "encrypt_boot", Type: cty.Bool, Required: false},
|
||||
"kms_key_id": &hcldec.AttrSpec{Name: "kms_key_id", Type: cty.String, Required: false},
|
||||
"region_kms_key_ids": &hcldec.AttrSpec{Name: "region_kms_key_ids", Type: cty.Map(cty.String), Required: false},
|
||||
"skip_save_build_region": &hcldec.AttrSpec{Name: "skip_save_build_region", Type: cty.Bool, Required: false},
|
||||
"snapshot_tags": &hcldec.AttrSpec{Name: "snapshot_tags", Type: cty.Map(cty.String), Required: false},
|
||||
"snapshot_tag": &hcldec.BlockListSpec{TypeName: "snapshot_tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
|
||||
"snapshot_users": &hcldec.AttrSpec{Name: "snapshot_users", Type: cty.List(cty.String), Required: false},
|
||||
"snapshot_groups": &hcldec.AttrSpec{Name: "snapshot_groups", Type: cty.List(cty.String), Required: false},
|
||||
"associate_public_ip_address": &hcldec.AttrSpec{Name: "associate_public_ip_address", Type: cty.Bool, Required: false},
|
||||
"availability_zone": &hcldec.AttrSpec{Name: "availability_zone", Type: cty.String, Required: false},
|
||||
"block_duration_minutes": &hcldec.AttrSpec{Name: "block_duration_minutes", Type: cty.Number, Required: false},
|
||||
"disable_stop_instance": &hcldec.AttrSpec{Name: "disable_stop_instance", Type: cty.Bool, Required: false},
|
||||
"ebs_optimized": &hcldec.AttrSpec{Name: "ebs_optimized", Type: cty.Bool, Required: false},
|
||||
"enable_t2_unlimited": &hcldec.AttrSpec{Name: "enable_t2_unlimited", Type: cty.Bool, Required: false},
|
||||
"iam_instance_profile": &hcldec.AttrSpec{Name: "iam_instance_profile", Type: cty.String, Required: false},
|
||||
"skip_profile_validation": &hcldec.AttrSpec{Name: "skip_profile_validation", Type: cty.Bool, Required: false},
|
||||
"temporary_iam_instance_profile_policy_document": &hcldec.BlockSpec{TypeName: "temporary_iam_instance_profile_policy_document", Nested: hcldec.ObjectSpec((*common.FlatPolicyDocument)(nil).HCL2Spec())},
|
||||
"shutdown_behavior": &hcldec.AttrSpec{Name: "shutdown_behavior", Type: cty.String, Required: false},
|
||||
"instance_type": &hcldec.AttrSpec{Name: "instance_type", Type: cty.String, Required: false},
|
||||
"security_group_filter": &hcldec.BlockSpec{TypeName: "security_group_filter", Nested: hcldec.ObjectSpec((*common.FlatSecurityGroupFilterOptions)(nil).HCL2Spec())},
|
||||
"run_tags": &hcldec.AttrSpec{Name: "run_tags", Type: cty.Map(cty.String), Required: false},
|
||||
"run_tag": &hcldec.BlockListSpec{TypeName: "run_tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
|
||||
"security_group_id": &hcldec.AttrSpec{Name: "security_group_id", Type: cty.String, Required: false},
|
||||
"security_group_ids": &hcldec.AttrSpec{Name: "security_group_ids", Type: cty.List(cty.String), Required: false},
|
||||
"source_ami": &hcldec.AttrSpec{Name: "source_ami", Type: cty.String, Required: false},
|
||||
"source_ami_filter": &hcldec.BlockSpec{TypeName: "source_ami_filter", Nested: hcldec.ObjectSpec((*common.FlatAmiFilterOptions)(nil).HCL2Spec())},
|
||||
"spot_instance_types": &hcldec.AttrSpec{Name: "spot_instance_types", Type: cty.List(cty.String), Required: false},
|
||||
"spot_price": &hcldec.AttrSpec{Name: "spot_price", Type: cty.String, Required: false},
|
||||
"spot_price_auto_product": &hcldec.AttrSpec{Name: "spot_price_auto_product", Type: cty.String, Required: false},
|
||||
"spot_tags": &hcldec.AttrSpec{Name: "spot_tags", Type: cty.Map(cty.String), Required: false},
|
||||
"spot_tag": &hcldec.BlockListSpec{TypeName: "spot_tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
|
||||
"subnet_filter": &hcldec.BlockSpec{TypeName: "subnet_filter", Nested: hcldec.ObjectSpec((*common.FlatSubnetFilterOptions)(nil).HCL2Spec())},
|
||||
"subnet_id": &hcldec.AttrSpec{Name: "subnet_id", Type: cty.String, Required: false},
|
||||
"tenancy": &hcldec.AttrSpec{Name: "tenancy", Type: cty.String, Required: false},
|
||||
"temporary_security_group_source_cidrs": &hcldec.AttrSpec{Name: "temporary_security_group_source_cidrs", Type: cty.List(cty.String), Required: false},
|
||||
"user_data": &hcldec.AttrSpec{Name: "user_data", Type: cty.String, Required: false},
|
||||
"user_data_file": &hcldec.AttrSpec{Name: "user_data_file", Type: cty.String, Required: false},
|
||||
"vpc_filter": &hcldec.BlockSpec{TypeName: "vpc_filter", Nested: hcldec.ObjectSpec((*common.FlatVpcFilterOptions)(nil).HCL2Spec())},
|
||||
"vpc_id": &hcldec.AttrSpec{Name: "vpc_id", Type: cty.String, Required: false},
|
||||
"windows_password_timeout": &hcldec.AttrSpec{Name: "windows_password_timeout", Type: cty.String, Required: false},
|
||||
"communicator": &hcldec.AttrSpec{Name: "communicator", Type: cty.String, Required: false},
|
||||
"pause_before_connecting": &hcldec.AttrSpec{Name: "pause_before_connecting", Type: cty.String, Required: false},
|
||||
"ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false},
|
||||
"ssh_port": &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false},
|
||||
"ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false},
|
||||
"ssh_password": &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false},
|
||||
"ssh_keypair_name": &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false},
|
||||
"temporary_key_pair_name": &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false},
|
||||
"temporary_key_pair_type": &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false},
|
||||
"temporary_key_pair_bits": &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false},
|
||||
"ssh_ciphers": &hcldec.AttrSpec{Name: "ssh_ciphers", Type: cty.List(cty.String), Required: false},
|
||||
"ssh_clear_authorized_keys": &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false},
|
||||
"ssh_key_exchange_algorithms": &hcldec.AttrSpec{Name: "ssh_key_exchange_algorithms", Type: cty.List(cty.String), Required: false},
|
||||
"ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false},
|
||||
"ssh_certificate_file": &hcldec.AttrSpec{Name: "ssh_certificate_file", Type: cty.String, Required: false},
|
||||
"ssh_pty": &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false},
|
||||
"ssh_timeout": &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false},
|
||||
"ssh_wait_timeout": &hcldec.AttrSpec{Name: "ssh_wait_timeout", Type: cty.String, Required: false},
|
||||
"ssh_agent_auth": &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false},
|
||||
"ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false},
|
||||
"ssh_handshake_attempts": &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false},
|
||||
"ssh_bastion_host": &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false},
|
||||
"ssh_bastion_port": &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false},
|
||||
"ssh_bastion_agent_auth": &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false},
|
||||
"ssh_bastion_username": &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false},
|
||||
"ssh_bastion_password": &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false},
|
||||
"ssh_bastion_interactive": &hcldec.AttrSpec{Name: "ssh_bastion_interactive", Type: cty.Bool, Required: false},
|
||||
"ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false},
|
||||
"ssh_bastion_certificate_file": &hcldec.AttrSpec{Name: "ssh_bastion_certificate_file", Type: cty.String, Required: false},
|
||||
"ssh_file_transfer_method": &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false},
|
||||
"ssh_proxy_host": &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false},
|
||||
"ssh_proxy_port": &hcldec.AttrSpec{Name: "ssh_proxy_port", Type: cty.Number, Required: false},
|
||||
"ssh_proxy_username": &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: cty.String, Required: false},
|
||||
"ssh_proxy_password": &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false},
|
||||
"ssh_keep_alive_interval": &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false},
|
||||
"ssh_read_write_timeout": &hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false},
|
||||
"ssh_remote_tunnels": &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false},
|
||||
"ssh_local_tunnels": &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false},
|
||||
"ssh_public_key": &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false},
|
||||
"ssh_private_key": &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false},
|
||||
"winrm_username": &hcldec.AttrSpec{Name: "winrm_username", Type: cty.String, Required: false},
|
||||
"winrm_password": &hcldec.AttrSpec{Name: "winrm_password", Type: cty.String, Required: false},
|
||||
"winrm_host": &hcldec.AttrSpec{Name: "winrm_host", Type: cty.String, Required: false},
|
||||
"winrm_no_proxy": &hcldec.AttrSpec{Name: "winrm_no_proxy", Type: cty.Bool, Required: false},
|
||||
"winrm_port": &hcldec.AttrSpec{Name: "winrm_port", Type: cty.Number, Required: false},
|
||||
"winrm_timeout": &hcldec.AttrSpec{Name: "winrm_timeout", Type: cty.String, Required: false},
|
||||
"winrm_use_ssl": &hcldec.AttrSpec{Name: "winrm_use_ssl", Type: cty.Bool, Required: false},
|
||||
"winrm_insecure": &hcldec.AttrSpec{Name: "winrm_insecure", Type: cty.Bool, Required: false},
|
||||
"winrm_use_ntlm": &hcldec.AttrSpec{Name: "winrm_use_ntlm", Type: cty.Bool, Required: false},
|
||||
"ssh_interface": &hcldec.AttrSpec{Name: "ssh_interface", Type: cty.String, Required: false},
|
||||
"pause_before_ssm": &hcldec.AttrSpec{Name: "pause_before_ssm", Type: cty.String, Required: false},
|
||||
"session_manager_port": &hcldec.AttrSpec{Name: "session_manager_port", Type: cty.Number, Required: false},
|
||||
"skip_create_ami": &hcldec.AttrSpec{Name: "skip_create_ami", Type: cty.Bool, Required: false},
|
||||
"ami_block_device_mappings": &hcldec.BlockListSpec{TypeName: "ami_block_device_mappings", Nested: hcldec.ObjectSpec((*common.FlatBlockDevice)(nil).HCL2Spec())},
|
||||
"launch_block_device_mappings": &hcldec.BlockListSpec{TypeName: "launch_block_device_mappings", Nested: hcldec.ObjectSpec((*common.FlatBlockDevice)(nil).HCL2Spec())},
|
||||
"run_volume_tags": &hcldec.AttrSpec{Name: "run_volume_tags", Type: cty.Map(cty.String), Required: false},
|
||||
"run_volume_tag": &hcldec.BlockListSpec{TypeName: "run_volume_tag", Nested: hcldec.ObjectSpec((*config.FlatNameValue)(nil).HCL2Spec())},
|
||||
"no_ephemeral": &hcldec.AttrSpec{Name: "no_ephemeral", Type: cty.Bool, Required: false},
|
||||
}
|
||||
return s
|
||||
}
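A generated spec map like the one above is what the HCL2 front end hands to hcldec when it reads a builder block; the decoded values are then applied to the FlatConfig, as the doc comment notes. The following standalone sketch (not code from this commit; the two attributes, the HCL snippet, and the file name are made up for illustration) shows the general pattern: wrap the map in an hcldec.ObjectSpec and decode an hcl.Body into a cty.Value.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclparse"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte(`
instance_type = "t3.micro"
source_ami    = "ami-12345678"
`)
	file, diags := hclparse.NewParser().ParseHCL(src, "example.hcl")
	if diags.HasErrors() {
		panic(diags)
	}

	// Stand-in for the generated map; the real one covers every builder option.
	spec := hcldec.ObjectSpec{
		"instance_type": &hcldec.AttrSpec{Name: "instance_type", Type: cty.String, Required: false},
		"source_ami":    &hcldec.AttrSpec{Name: "source_ami", Type: cty.String, Required: false},
	}

	val, diags := hcldec.Decode(file.Body, spec, nil)
	if diags.HasErrors() {
		panic(diags)
	}
	fmt.Println(val.GetAttr("instance_type").AsString()) // t3.micro
}

The step that copies the decoded value into the FlatConfig itself is omitted here.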

@ -1,396 +0,0 @@
/*
Deregister the test image with
aws ec2 deregister-image --image-id $(aws ec2 describe-images --output text --filters "Name=name,Values=packer-test-packer-test-dereg" --query 'Images[*].{ID:ImageId}')
*/
package ebs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
builderT "github.com/hashicorp/packer-plugin-sdk/acctest"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer/builder/amazon/common"
|
||||
)
|
||||
|
||||
func TestBuilderAcc_basic(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderAccBasic,
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderAcc_regionCopy(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderAccRegionCopy,
|
||||
Check: checkRegionCopy([]string{"us-east-1", "us-west-2"}),
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderAcc_forceDeregister(t *testing.T) {
|
||||
// Build the same AMI name twice, with force_deregister on the second run
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: buildForceDeregisterConfig("false", "dereg"),
|
||||
SkipArtifactTeardown: true,
|
||||
})
|
||||
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: buildForceDeregisterConfig("true", "dereg"),
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderAcc_forceDeleteSnapshot(t *testing.T) {
|
||||
amiName := "packer-test-dereg"
|
||||
|
||||
// Build the same AMI name twice, with force_delete_snapshot on the second run
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: buildForceDeleteSnapshotConfig("false", amiName),
|
||||
SkipArtifactTeardown: true,
|
||||
})
|
||||
|
||||
// Get image data by AMI name
|
||||
ec2conn, _ := testEC2Conn()
|
||||
describeInput := &ec2.DescribeImagesInput{Filters: []*ec2.Filter{
|
||||
{
|
||||
Name: aws.String("name"),
|
||||
Values: []*string{aws.String(amiName)},
|
||||
},
|
||||
}}
|
||||
ec2conn.WaitUntilImageExists(describeInput)
|
||||
imageResp, _ := ec2conn.DescribeImages(describeInput)
|
||||
image := imageResp.Images[0]
|
||||
|
||||
// Get snapshot ids for image
|
||||
snapshotIds := []*string{}
|
||||
for _, device := range image.BlockDeviceMappings {
|
||||
if device.Ebs != nil && device.Ebs.SnapshotId != nil {
|
||||
snapshotIds = append(snapshotIds, device.Ebs.SnapshotId)
|
||||
}
|
||||
}
|
||||
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: buildForceDeleteSnapshotConfig("true", amiName),
|
||||
Check: checkSnapshotsDeleted(snapshotIds),
|
||||
})
|
||||
}
|
||||
|
||||
func checkSnapshotsDeleted(snapshotIds []*string) builderT.TestCheckFunc {
|
||||
return func(artifacts []packersdk.Artifact) error {
|
||||
// Verify the snapshots are gone
|
||||
ec2conn, _ := testEC2Conn()
|
||||
snapshotResp, _ := ec2conn.DescribeSnapshots(
|
||||
&ec2.DescribeSnapshotsInput{SnapshotIds: snapshotIds},
|
||||
)
|
||||
|
||||
if len(snapshotResp.Snapshots) > 0 {
|
||||
return fmt.Errorf("Snapshots weren't successfully deleted by `force_delete_snapshot`")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilderAcc_amiSharing(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccSharingPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: buildSharingConfig(os.Getenv("TESTACC_AWS_ACCOUNT_ID")),
|
||||
Check: checkAMISharing(2, os.Getenv("TESTACC_AWS_ACCOUNT_ID"), "all"),
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderAcc_encryptedBoot(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderAccEncrypted,
|
||||
Check: checkBootEncrypted(),
|
||||
})
|
||||
}
|
||||
|
||||
func checkAMISharing(count int, uid, group string) builderT.TestCheckFunc {
|
||||
return func(artifacts []packersdk.Artifact) error {
|
||||
if len(artifacts) > 1 {
|
||||
return fmt.Errorf("more than 1 artifact")
|
||||
}
|
||||
|
||||
// Get the actual *Artifact pointer so we can access the AMIs directly
|
||||
artifactRaw := artifacts[0]
|
||||
artifact, ok := artifactRaw.(*common.Artifact)
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown artifact: %#v", artifactRaw)
|
||||
}
|
||||
|
||||
// describe the image, get block devices with a snapshot
|
||||
ec2conn, _ := testEC2Conn()
|
||||
imageResp, err := ec2conn.DescribeImageAttribute(&ec2.DescribeImageAttributeInput{
|
||||
Attribute: aws.String("launchPermission"),
|
||||
ImageId: aws.String(artifact.Amis["us-east-1"]),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error retrieving Image Attributes for AMI Artifact (%#v) in AMI Sharing Test: %s", artifact, err)
|
||||
}
|
||||
|
||||
// Launch permissions do not include the AMI owner itself, so the expected
// count covers only the ami_users and ami_groups added explicitly.
|
||||
if len(imageResp.LaunchPermissions) != count {
|
||||
return fmt.Errorf("Error in Image Attributes, expected (%d) Launch Permissions, got (%d)", count, len(imageResp.LaunchPermissions))
|
||||
}
|
||||
|
||||
userFound := false
|
||||
for _, lp := range imageResp.LaunchPermissions {
|
||||
if lp.UserId != nil && uid == *lp.UserId {
|
||||
userFound = true
|
||||
}
|
||||
}
|
||||
|
||||
if !userFound {
|
||||
return fmt.Errorf("Error in Image Attributes, expected User ID (%s) to have Launch Permissions, but was not found", uid)
|
||||
}
|
||||
|
||||
groupFound := false
|
||||
for _, lp := range imageResp.LaunchPermissions {
|
||||
if lp.Group != nil && group == *lp.Group {
|
||||
groupFound = true
|
||||
}
|
||||
}
|
||||
|
||||
if !groupFound {
|
||||
return fmt.Errorf("Error in Image Attributes, expected Group ID (%s) to have Launch Permissions, but was not found", group)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkRegionCopy(regions []string) builderT.TestCheckFunc {
|
||||
return func(artifacts []packersdk.Artifact) error {
|
||||
if len(artifacts) > 1 {
|
||||
return fmt.Errorf("more than 1 artifact")
|
||||
}
|
||||
|
||||
// Get the actual *Artifact pointer so we can access the AMIs directly
|
||||
artifactRaw := artifacts[0]
|
||||
artifact, ok := artifactRaw.(*common.Artifact)
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown artifact: %#v", artifactRaw)
|
||||
}
|
||||
|
||||
// Verify that we copied to only the regions given
|
||||
regionSet := make(map[string]struct{})
|
||||
for _, r := range regions {
|
||||
regionSet[r] = struct{}{}
|
||||
}
|
||||
for r := range artifact.Amis {
|
||||
if _, ok := regionSet[r]; !ok {
|
||||
return fmt.Errorf("unknown region: %s", r)
|
||||
}
|
||||
|
||||
delete(regionSet, r)
|
||||
}
|
||||
if len(regionSet) > 0 {
|
||||
return fmt.Errorf("didn't copy to: %#v", regionSet)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkBootEncrypted() builderT.TestCheckFunc {
|
||||
return func(artifacts []packersdk.Artifact) error {
|
||||
|
||||
// Get the actual *Artifact pointer so we can access the AMIs directly
|
||||
artifactRaw := artifacts[0]
|
||||
artifact, ok := artifactRaw.(*common.Artifact)
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown artifact: %#v", artifactRaw)
|
||||
}
|
||||
|
||||
// describe the image, get block devices with a snapshot
|
||||
ec2conn, _ := testEC2Conn()
|
||||
imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
ImageIds: []*string{aws.String(artifact.Amis["us-east-1"])},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error retrieving Image Attributes for AMI (%s) in AMI Encrypted Boot Test: %s", artifact, err)
|
||||
}
|
||||
|
||||
image := imageResp.Images[0] // Only requested a single AMI ID
|
||||
|
||||
rootDeviceName := image.RootDeviceName
|
||||
|
||||
for _, bd := range image.BlockDeviceMappings {
|
||||
if *bd.DeviceName == *rootDeviceName {
|
||||
if !*bd.Ebs.Encrypted {
|
||||
return fmt.Errorf("volume not encrypted: %s", *bd.Ebs.SnapshotId)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilderAcc_SessionManagerInterface(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderAccSessionManagerInterface,
|
||||
})
|
||||
}
|
||||
|
||||
func testAccPreCheck(t *testing.T) {
|
||||
}
|
||||
|
||||
func testAccSharingPreCheck(t *testing.T) {
|
||||
if v := os.Getenv("TESTACC_AWS_ACCOUNT_ID"); v == "" {
|
||||
t.Fatal("TESTACC_AWS_ACCOUNT_ID must be set for acceptance tests")
|
||||
}
|
||||
}
|
||||
|
||||
func testEC2Conn() (*ec2.EC2, error) {
|
||||
access := &common.AccessConfig{RawRegion: "us-east-1"}
|
||||
session, err := access.Session()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ec2.New(session), nil
|
||||
}
|
||||
|
||||
const testBuilderAccBasic = `
|
||||
{
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"instance_type": "m3.medium",
|
||||
"source_ami": "ami-76b2a71e",
|
||||
"ssh_username": "ubuntu",
|
||||
"ami_name": "packer-test {{timestamp}}"
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
const testBuilderAccRegionCopy = `
|
||||
{
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"instance_type": "m3.medium",
|
||||
"source_ami": "ami-76b2a71e",
|
||||
"ssh_username": "ubuntu",
|
||||
"ami_name": "packer-test {{timestamp}}",
|
||||
"ami_regions": ["us-east-1", "us-west-2"]
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
const testBuilderAccForceDeregister = `
|
||||
{
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"instance_type": "m3.medium",
|
||||
"source_ami": "ami-76b2a71e",
|
||||
"ssh_username": "ubuntu",
|
||||
"force_deregister": "%s",
|
||||
"ami_name": "%s"
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
const testBuilderAccForceDeleteSnapshot = `
|
||||
{
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"instance_type": "m3.medium",
|
||||
"source_ami": "ami-76b2a71e",
|
||||
"ssh_username": "ubuntu",
|
||||
"force_deregister": "%s",
|
||||
"force_delete_snapshot": "%s",
|
||||
"ami_name": "%s"
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
const testBuilderAccSharing = `
|
||||
{
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"instance_type": "m3.medium",
|
||||
"source_ami": "ami-76b2a71e",
|
||||
"ssh_username": "ubuntu",
|
||||
"ami_name": "packer-test {{timestamp}}",
|
||||
"ami_users":["%s"],
|
||||
"ami_groups":["all"]
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
const testBuilderAccEncrypted = `
|
||||
{
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"instance_type": "m3.medium",
|
||||
"source_ami":"ami-c15bebaa",
|
||||
"ssh_username": "ubuntu",
|
||||
"ami_name": "packer-enc-test {{timestamp}}",
|
||||
"encrypt_boot": true
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
const testBuilderAccSessionManagerInterface = `
|
||||
{
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"instance_type": "m3.medium",
|
||||
"source_ami_filter": {
|
||||
"filters": {
|
||||
"virtualization-type": "hvm",
|
||||
"name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
|
||||
"root-device-type": "ebs"
|
||||
},
|
||||
"owners": [
|
||||
"099720109477"
|
||||
],
|
||||
"most_recent": true
|
||||
},
|
||||
"ssh_username": "ubuntu",
|
||||
"ssh_interface": "session_manager",
|
||||
"iam_instance_profile": "SSMInstanceProfile",
|
||||
"ami_name": "packer-ssm-test-{{timestamp}}"
|
||||
}]
|
||||
}
|
||||
`
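For reference, a source_ami_filter block like the one in the template above boils down to a DescribeImages call with the same filters and owners, keeping the newest image when most_recent is true. A rough standalone sketch of that lookup follows; it is not the builder's actual implementation, and region/credential handling is simplified.

package main

import (
	"fmt"
	"sort"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	conn := ec2.New(sess)

	out, err := conn.DescribeImages(&ec2.DescribeImagesInput{
		Owners: []*string{aws.String("099720109477")},
		Filters: []*ec2.Filter{
			{Name: aws.String("virtualization-type"), Values: []*string{aws.String("hvm")}},
			{Name: aws.String("name"), Values: []*string{aws.String("ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*")}},
			{Name: aws.String("root-device-type"), Values: []*string{aws.String("ebs")}},
		},
	})
	if err != nil {
		panic(err)
	}

	// most_recent: CreationDate is an RFC3339 string, so lexical order matches
	// chronological order; sort ascending and take the last image.
	images := out.Images
	sort.Slice(images, func(i, j int) bool {
		return aws.StringValue(images[i].CreationDate) < aws.StringValue(images[j].CreationDate)
	})
	if len(images) > 0 {
		fmt.Println(aws.StringValue(images[len(images)-1].ImageId))
	}
}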
|
||||
|
||||
func buildForceDeregisterConfig(val, name string) string {
|
||||
return fmt.Sprintf(testBuilderAccForceDeregister, val, name)
|
||||
}
|
||||
|
||||
func buildForceDeleteSnapshotConfig(val, name string) string {
|
||||
return fmt.Sprintf(testBuilderAccForceDeleteSnapshot, val, val, name)
|
||||
}
|
||||
|
||||
func buildSharingConfig(val string) string {
|
||||
return fmt.Sprintf(testBuilderAccSharing, val)
|
||||
}

@ -1,166 +0,0 @@
package ebs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
func testConfig() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"access_key": "foo",
|
||||
"secret_key": "bar",
|
||||
"source_ami": "foo",
|
||||
"instance_type": "foo",
|
||||
"region": "us-east-1",
|
||||
"ssh_username": "root",
|
||||
"ami_name": "foo",
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilder_ImplementsBuilder(t *testing.T) {
|
||||
var raw interface{}
|
||||
raw = &Builder{}
|
||||
if _, ok := raw.(packersdk.Builder); !ok {
|
||||
t.Fatalf("Builder should be a builder")
|
||||
}
|
||||
}
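The runtime type check above can also be written as a compile-time assertion (var _ packersdk.Builder = (*Builder)(nil)), which fails the build instead of a test. A toy, self-contained sketch of that idiom with made-up types, since the real Builder lives in the removed package:

package main

import "fmt"

// Toy stand-ins for packersdk.Builder and the EBS builder, only to show the
// compile-time interface assertion idiom.
type Builder interface {
	Run() error
}

type ebsBuilder struct{}

func (b *ebsBuilder) Run() error { return nil }

// Fails to compile if *ebsBuilder stops satisfying Builder.
var _ Builder = (*ebsBuilder)(nil)

func main() { fmt.Println("ok") }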
|
||||
|
||||
func TestBuilder_Prepare_BadType(t *testing.T) {
|
||||
b := &Builder{}
|
||||
c := map[string]interface{}{
|
||||
"access_key": []string{},
|
||||
}
|
||||
|
||||
_, warnings, err := b.Prepare(c)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatalf("prepare should fail")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilderPrepare_AMIName(t *testing.T) {
|
||||
var b Builder
|
||||
config := testConfig()
|
||||
|
||||
// Test good
|
||||
config["ami_name"] = "foo"
|
||||
_, warnings, err := b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
// Test bad
|
||||
config["ami_name"] = "foo {{"
|
||||
b = Builder{}
|
||||
_, warnings, err = b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
|
||||
// Test bad
|
||||
delete(config, "ami_name")
|
||||
b = Builder{}
|
||||
_, warnings, err = b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilderPrepare_InvalidKey(t *testing.T) {
|
||||
var b Builder
|
||||
config := testConfig()
|
||||
|
||||
// Add a random key
|
||||
config["i_should_not_be_valid"] = true
|
||||
_, warnings, err := b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilderPrepare_InvalidShutdownBehavior(t *testing.T) {
|
||||
var b Builder
|
||||
config := testConfig()
|
||||
|
||||
// Test good
|
||||
config["shutdown_behavior"] = "terminate"
|
||||
_, warnings, err := b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
// Test good
|
||||
config["shutdown_behavior"] = "stop"
|
||||
_, warnings, err = b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
// Test bad
|
||||
config["shutdown_behavior"] = "foobar"
|
||||
_, warnings, err = b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilderPrepare_ReturnGeneratedData(t *testing.T) {
|
||||
var b Builder
|
||||
config := testConfig()
|
||||
|
||||
generatedData, warnings, err := b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
if len(generatedData) == 0 {
t.Fatalf("Generated data should not be empty")
}
if generatedData[0] != "SourceAMIName" {
|
||||
t.Fatalf("Generated data should contain SourceAMIName")
|
||||
}
|
||||
if generatedData[1] != "BuildRegion" {
|
||||
t.Fatalf("Generated data should contain BuildRegion")
|
||||
}
|
||||
if generatedData[2] != "SourceAMI" {
|
||||
t.Fatalf("Generated data should contain SourceAMI")
|
||||
}
|
||||
if generatedData[3] != "SourceAMICreationDate" {
|
||||
t.Fatalf("Generated data should contain SourceAMICreationDate")
|
||||
}
|
||||
if generatedData[4] != "SourceAMIOwner" {
|
||||
t.Fatalf("Generated data should contain SourceAMIOwner")
|
||||
}
|
||||
if generatedData[5] != "SourceAMIOwnerName" {
|
||||
t.Fatalf("Generated data should contain SourceAMIOwnerName")
|
||||
}
|
||||
}

@ -1,200 +0,0 @@
package ebs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/random"
|
||||
"github.com/hashicorp/packer-plugin-sdk/retry"
|
||||
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||
"github.com/hashicorp/packer/builder/amazon/common/awserrors"
|
||||
)
|
||||
|
||||
type stepCreateAMI struct {
|
||||
PollingConfig *awscommon.AWSPollingConfig
|
||||
image *ec2.Image
|
||||
AMISkipCreateImage bool
|
||||
AMISkipBuildRegion bool
|
||||
}
|
||||
|
||||
func (s *stepCreateAMI) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
config := state.Get("config").(*Config)
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
instance := state.Get("instance").(*ec2.Instance)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
if s.AMISkipCreateImage {
|
||||
ui.Say("Skipping AMI creation...")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Create the image
|
||||
amiName := config.AMIName
|
||||
state.Put("intermediary_image", false)
|
||||
if config.AMIEncryptBootVolume.True() || s.AMISkipBuildRegion {
|
||||
state.Put("intermediary_image", true)
|
||||
|
||||
// From AWS SDK docs: You can encrypt a copy of an unencrypted snapshot,
|
||||
// but you cannot use it to create an unencrypted copy of an encrypted
|
||||
// snapshot. Your default CMK for EBS is used unless you specify a
|
||||
// non-default key using KmsKeyId.
|
||||
|
||||
// If encrypt_boot is nil or true, we need to create a temporary image
|
||||
// so that in step_region_copy, we can copy it with the correct
|
||||
// encryption
|
||||
amiName = random.AlphaNum(7)
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Creating AMI %s from instance %s", amiName, *instance.InstanceId))
|
||||
createOpts := &ec2.CreateImageInput{
|
||||
InstanceId: instance.InstanceId,
|
||||
Name: &amiName,
|
||||
BlockDeviceMappings: config.AMIMappings.BuildEC2BlockDeviceMappings(),
|
||||
}
|
||||
|
||||
var createResp *ec2.CreateImageOutput
|
||||
var err error
|
||||
|
||||
// Create a timeout for the CreateImage call.
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute*15)
|
||||
defer cancel()
|
||||
|
||||
err = retry.Config{
|
||||
Tries: 0,
|
||||
ShouldRetry: func(err error) bool {
|
||||
if awserrors.Matches(err, "InvalidParameterValue", "Instance is not in state") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
},
|
||||
RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
|
||||
}.Run(timeoutCtx, func(ctx context.Context) error {
|
||||
createResp, err = ec2conn.CreateImage(createOpts)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating AMI: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Set the AMI ID in the state
|
||||
ui.Message(fmt.Sprintf("AMI: %s", *createResp.ImageId))
|
||||
amis := make(map[string]string)
|
||||
amis[*ec2conn.Config.Region] = *createResp.ImageId
|
||||
state.Put("amis", amis)
|
||||
|
||||
// Wait for the image to become ready
|
||||
ui.Say("Waiting for AMI to become ready...")
|
||||
if waitErr := s.PollingConfig.WaitUntilAMIAvailable(ctx, ec2conn, *createResp.ImageId); waitErr != nil {
|
||||
// waitErr should get bubbled up if the issue is a wait timeout
|
||||
err := fmt.Errorf("Error waiting for AMI: %s", waitErr)
|
||||
imResp, imerr := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{createResp.ImageId}})
|
||||
if imerr != nil {
|
||||
// If there's a failure describing images, bubble that error up too, but don't erase the waitErr.
|
||||
log.Printf("DescribeImages call was unable to determine reason waiting for AMI failed: %s", imerr)
|
||||
err = fmt.Errorf("Unknown error waiting for AMI; %s. DescribeImages returned an error: %s", waitErr, imerr)
|
||||
}
|
||||
if imResp != nil && len(imResp.Images) > 0 {
|
||||
// Finally, if there's a stateReason, store that with the wait err
|
||||
image := imResp.Images[0]
|
||||
if image != nil {
|
||||
stateReason := image.StateReason
|
||||
if stateReason != nil {
|
||||
err = fmt.Errorf("Error waiting for AMI: %s. DescribeImages returned the state reason: %s", waitErr, stateReason)
|
||||
}
|
||||
}
|
||||
}
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
imagesResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{createResp.ImageId}})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error searching for AMI: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
s.image = imagesResp.Images[0]
|
||||
|
||||
snapshots := make(map[string][]string)
|
||||
for _, blockDeviceMapping := range imagesResp.Images[0].BlockDeviceMappings {
|
||||
if blockDeviceMapping.Ebs != nil && blockDeviceMapping.Ebs.SnapshotId != nil {
|
||||
|
||||
snapshots[*ec2conn.Config.Region] = append(snapshots[*ec2conn.Config.Region], *blockDeviceMapping.Ebs.SnapshotId)
|
||||
}
|
||||
}
|
||||
state.Put("snapshots", snapshots)
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
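The CreateImage call above wraps the SDK call in the packer-plugin-sdk retry helper plus a context deadline. Below is a self-contained sketch of that retry-with-deadline pattern; the "not ready" error and the attempt counter are made up and stand in for the "Instance is not in state" case handled above.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/hashicorp/packer-plugin-sdk/retry"
)

var errNotReady = errors.New("resource not ready yet")

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	attempts := 0
	err := retry.Config{
		// Tries: 0, as in the step above, defers to the context deadline.
		Tries: 0,
		ShouldRetry: func(err error) bool {
			// Only the transient error is worth retrying.
			return errors.Is(err, errNotReady)
		},
		RetryDelay: (&retry.Backoff{InitialBackoff: 100 * time.Millisecond, MaxBackoff: 1 * time.Second, Multiplier: 2}).Linear,
	}.Run(ctx, func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			return errNotReady
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}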
|
||||
|
||||
func (s *stepCreateAMI) Cleanup(state multistep.StateBag) {
|
||||
if s.image == nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, cancelled := state.GetOk(multistep.StateCancelled)
|
||||
_, halted := state.GetOk(multistep.StateHalted)
|
||||
if !cancelled && !halted {
|
||||
return
|
||||
}
|
||||
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
ui.Say("Deregistering the AMI and deleting associated snapshots because " +
|
||||
"of cancellation, or error...")
|
||||
|
||||
resp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
ImageIds: []*string{s.image.ImageId},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error describing AMI: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Deregister each image returned above, then delete its snapshots.
|
||||
for _, i := range resp.Images {
|
||||
_, err := ec2conn.DeregisterImage(&ec2.DeregisterImageInput{
|
||||
ImageId: i.ImageId,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error deregistering existing AMI: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return
|
||||
}
|
||||
ui.Say(fmt.Sprintf("Deregistered AMI id: %s", *i.ImageId))
|
||||
|
||||
// Delete snapshot(s) by image
|
||||
for _, b := range i.BlockDeviceMappings {
|
||||
if b.Ebs != nil && aws.StringValue(b.Ebs.SnapshotId) != "" {
|
||||
_, err := ec2conn.DeleteSnapshot(&ec2.DeleteSnapshotInput{
|
||||
SnapshotId: b.Ebs.SnapshotId,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error deleting existing snapshot: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return
|
||||
}
|
||||
ui.Say(fmt.Sprintf("Deleted snapshot: %s", *b.Ebs.SnapshotId))
|
||||
}
|
||||
}
|
||||
}
|
||||
}

@ -1,137 +0,0 @@
package ebs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
builderT "github.com/hashicorp/packer-plugin-sdk/acctest"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer/builder/amazon/common"
|
||||
)
|
||||
|
||||
type TFBuilder struct {
|
||||
Type string `json:"type"`
|
||||
Region string `json:"region"`
|
||||
SourceAmi string `json:"source_ami"`
|
||||
InstanceType string `json:"instance_type"`
|
||||
SshUsername string `json:"ssh_username"`
|
||||
AmiName string `json:"ami_name"`
|
||||
Tags map[string]string `json:"tags"`
|
||||
SnapshotTags map[string]string `json:"snapshot_tags"`
|
||||
}
|
||||
|
||||
type TFConfig struct {
|
||||
Builders []TFBuilder `json:"builders"`
|
||||
}
|
||||
|
||||
func TestBuilderTagsAcc_basic(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderTagsAccBasic,
|
||||
Check: checkTags(),
|
||||
})
|
||||
}
|
||||
|
||||
func checkTags() builderT.TestCheckFunc {
|
||||
return func(artifacts []packersdk.Artifact) error {
|
||||
if len(artifacts) > 1 {
|
||||
return fmt.Errorf("more than 1 artifact")
|
||||
}
|
||||
|
||||
config := TFConfig{}
|
||||
json.Unmarshal([]byte(testBuilderTagsAccBasic), &config)
|
||||
tags := config.Builders[0].Tags
|
||||
snapshotTags := config.Builders[0].SnapshotTags
|
||||
|
||||
// Get the actual *Artifact pointer so we can access the AMIs directly
|
||||
artifactRaw := artifacts[0]
|
||||
artifact, ok := artifactRaw.(*common.Artifact)
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown artifact: %#v", artifactRaw)
|
||||
}
|
||||
|
||||
// Describe the image, get block devices with a snapshot
|
||||
ec2conn, _ := testEC2Conn()
|
||||
imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
ImageIds: []*string{aws.String(artifact.Amis["us-east-1"])},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error retrieving details for AMI Artifact (%#v) in Tags Test: %s", artifact, err)
|
||||
}
|
||||
|
||||
if len(imageResp.Images) == 0 {
|
||||
return fmt.Errorf("No images found for AMI Artifact (%#v) in Tags Test: %s", artifact, err)
|
||||
}
|
||||
|
||||
image := imageResp.Images[0]
|
||||
|
||||
// Check only those with a Snapshot ID, i.e. not Ephemeral
|
||||
var snapshots []*string
|
||||
for _, device := range image.BlockDeviceMappings {
|
||||
if device.Ebs != nil && device.Ebs.SnapshotId != nil {
|
||||
snapshots = append(snapshots, device.Ebs.SnapshotId)
|
||||
}
|
||||
}
|
||||
|
||||
// Grab matching snapshot info
|
||||
resp, err := ec2conn.DescribeSnapshots(&ec2.DescribeSnapshotsInput{
|
||||
SnapshotIds: snapshots,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error retrieving Snapshots for AMI Artifact (%#v) in Tags Test: %s", artifact, err)
|
||||
}
|
||||
|
||||
if len(resp.Snapshots) == 0 {
|
||||
return fmt.Errorf("No Snapshots found for AMI Artifact (%#v) in Tags Test", artifact)
|
||||
}
|
||||
|
||||
// Grab the snapshots, check the tags
|
||||
for _, s := range resp.Snapshots {
|
||||
expected := len(tags)
|
||||
for _, t := range s.Tags {
|
||||
for key, value := range tags {
|
||||
if val, ok := snapshotTags[key]; ok && val == *t.Value {
|
||||
expected--
|
||||
} else if key == *t.Key && value == *t.Value {
|
||||
expected--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if expected > 0 {
|
||||
return fmt.Errorf("Not all tags found")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
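checkTags above verifies tags by decrementing an expected counter inside nested loops. An equivalent, arguably clearer map-based comparison is sketched below with a stand-in tag struct; the real type is ec2.Tag with *string fields, so this is illustrative only.

package main

import "fmt"

type ec2Tag struct{ Key, Value string }

// tagsMatch reports whether every expected key/value pair appears in actual.
func tagsMatch(expected map[string]string, actual []ec2Tag) bool {
	got := make(map[string]string, len(actual))
	for _, t := range actual {
		got[t.Key] = t.Value
	}
	for k, v := range expected {
		if got[k] != v {
			return false
		}
	}
	return true
}

func main() {
	expected := map[string]string{"OS_Version": "Ubuntu", "Release": "Latest"}
	actual := []ec2Tag{{"OS_Version", "Ubuntu"}, {"Release", "Latest"}, {"Name", "Foobar"}}
	fmt.Println(tagsMatch(expected, actual)) // true
}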
|
||||
|
||||
const testBuilderTagsAccBasic = `
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"source_ami": "ami-9eaa1cf6",
|
||||
"instance_type": "t2.micro",
|
||||
"ssh_username": "ubuntu",
|
||||
"ami_name": "packer-tags-testing-{{timestamp}}",
|
||||
"tags": {
|
||||
"OS_Version": "Ubuntu",
|
||||
"Release": "Latest",
|
||||
"Name": "Bleep"
|
||||
},
|
||||
"snapshot_tags": {
|
||||
"Name": "Foobar"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
`

@ -1,58 +0,0 @@
//go:generate struct-markdown

package ebssurrogate
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||
)
|
||||
|
||||
type BlockDevice struct {
|
||||
awscommon.BlockDevice `mapstructure:",squash"`
|
||||
|
||||
// If true, this block device will not be snapshotted and the created AMI
|
||||
// will not contain block device mapping information for this volume. If
|
||||
// false, the block device will be mapped into the final created AMI. Set
|
||||
// this option to true if you need a block device mounted in the surrogate
|
||||
// AMI but not in the final created AMI.
|
||||
OmitFromArtifact bool `mapstructure:"omit_from_artifact"`
|
||||
}
|
||||
|
||||
type BlockDevices []BlockDevice
|
||||
|
||||
func (bds BlockDevices) Common() []awscommon.BlockDevice {
|
||||
res := []awscommon.BlockDevice{}
|
||||
for _, bd := range bds {
|
||||
res = append(res, bd.BlockDevice)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (bds BlockDevices) BuildEC2BlockDeviceMappings() []*ec2.BlockDeviceMapping {
|
||||
var blockDevices []*ec2.BlockDeviceMapping
|
||||
|
||||
for _, blockDevice := range bds {
|
||||
blockDevices = append(blockDevices, blockDevice.BuildEC2BlockDeviceMapping())
|
||||
}
|
||||
return blockDevices
|
||||
}
|
||||
|
||||
func (bds BlockDevices) Prepare(ctx *interpolate.Context) (errs []error) {
|
||||
for _, block := range bds {
|
||||
if err := block.Prepare(ctx); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
func (b BlockDevices) GetOmissions() map[string]bool {
|
||||
omitMap := make(map[string]bool)
|
||||
|
||||
for _, blockDevice := range b {
|
||||
omitMap[blockDevice.DeviceName] = blockDevice.OmitFromArtifact
|
||||
}
|
||||
|
||||
return omitMap
|
||||
}
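GetOmissions returns a device-name to omit-flag map that the surrogate builder consults when deciding which block devices to leave out of the final AMI. A hedged sketch of how such a map can be applied; FilterOmitted and the mapping struct are illustrative names, not code from this package.

package main

import "fmt"

type mapping struct {
	DeviceName string
	SnapshotID string
}

// FilterOmitted drops every mapping whose device name is flagged in omit.
func FilterOmitted(mappings []mapping, omit map[string]bool) []mapping {
	kept := make([]mapping, 0, len(mappings))
	for _, m := range mappings {
		if omit[m.DeviceName] {
			continue // omitted from the artifact
		}
		kept = append(kept, m)
	}
	return kept
}

func main() {
	omit := map[string]bool{"/dev/xvda": false, "/dev/xvdb": true}
	all := []mapping{{"/dev/xvda", "snap-1"}, {"/dev/xvdb", "snap-2"}}
	fmt.Println(FilterOmitted(all, omit)) // [{/dev/xvda snap-1}]
}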

@ -1,413 +0,0 @@
//go:generate struct-markdown
//go:generate mapstructure-to-hcl2 -type Config,RootBlockDevice,BlockDevice

// The ebssurrogate package contains a packersdk.Builder implementation that
// builds a new EBS-backed AMI using an ephemeral instance.
package ebssurrogate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||
)
|
||||
|
||||
const BuilderId = "mitchellh.amazon.ebssurrogate"
|
||||
|
||||
type Config struct {
|
||||
common.PackerConfig `mapstructure:",squash"`
|
||||
awscommon.AccessConfig `mapstructure:",squash"`
|
||||
awscommon.RunConfig `mapstructure:",squash"`
|
||||
awscommon.AMIConfig `mapstructure:",squash"`
|
||||
|
||||
// Add one or more block device mappings to the AMI. These will be attached
|
||||
// when booting a new instance from your AMI. To add a block device during
|
||||
// the Packer build see `launch_block_device_mappings` below. Your options
|
||||
// here may vary depending on the type of VM you use. See the
|
||||
// [BlockDevices](#block-devices-configuration) documentation for fields.
|
||||
AMIMappings awscommon.BlockDevices `mapstructure:"ami_block_device_mappings" required:"false"`
|
||||
// Add one or more block devices before the Packer build starts. If you add
|
||||
// instance store volumes or EBS volumes in addition to the root device
|
||||
// volume, the created AMI will contain block device mapping information
|
||||
// for those volumes. Amazon creates snapshots of the source instance's
|
||||
// root volume and any other EBS volumes described here. When you launch an
|
||||
// instance from this new AMI, the instance automatically launches with
|
||||
// these additional volumes, and will restore them from snapshots taken
|
||||
// from the source instance. See the
|
||||
// [BlockDevices](#block-devices-configuration) documentation for fields.
|
||||
LaunchMappings BlockDevices `mapstructure:"launch_block_device_mappings" required:"false"`
|
||||
// A block device mapping describing the root device of the AMI. This looks
|
||||
// like the mappings in `ami_block_device_mapping`, except with an
|
||||
// additional field:
|
||||
//
|
||||
// - `source_device_name` (string) - The device name of the block device on
|
||||
// the source instance to be used as the root device for the AMI. This
|
||||
// must correspond to a block device in `launch_block_device_mapping`.
|
||||
RootDevice RootBlockDevice `mapstructure:"ami_root_device" required:"true"`
|
||||
// Tags to apply to the volumes that are *launched* to create the AMI.
|
||||
// These tags are *not* applied to the resulting AMI unless they're
|
||||
// duplicated in `tags`. This is a [template
|
||||
// engine](/docs/templates/legacy_json_templates/engine), see [Build template
|
||||
// data](#build-template-data) for more information.
|
||||
VolumeRunTags map[string]string `mapstructure:"run_volume_tags"`
|
||||
// Same as [`run_volume_tags`](#run_volume_tags) but defined as a singular
|
||||
// block containing a `name` and a `value` field. In HCL2 mode the
|
||||
// [`dynamic_block`](https://packer.io/docs/templates/hcl_templates/expressions.html#dynamic-blocks)
|
||||
// will allow you to create those programatically.
|
||||
VolumeRunTag config.NameValues `mapstructure:"run_volume_tag" required:"false"`
|
||||
// what architecture to use when registering the
|
||||
// final AMI; valid options are "x86_64" or "arm64". Defaults to "x86_64".
|
||||
Architecture string `mapstructure:"ami_architecture" required:"false"`
|
||||
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
type Builder struct {
|
||||
config Config
|
||||
runner multistep.Runner
|
||||
}
|
||||
|
||||
func (b *Builder) ConfigSpec() hcldec.ObjectSpec { return b.config.FlatMapstructure().HCL2Spec() }
|
||||
|
||||
func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
|
||||
b.config.ctx.Funcs = awscommon.TemplateFuncs
|
||||
err := config.Decode(&b.config, &config.DecodeOpts{
|
||||
PluginType: BuilderId,
|
||||
Interpolate: true,
|
||||
InterpolateContext: &b.config.ctx,
|
||||
InterpolateFilter: &interpolate.RenderFilter{
|
||||
Exclude: []string{
|
||||
"ami_description",
|
||||
"run_tags",
|
||||
"run_tag",
|
||||
"run_volume_tags",
|
||||
"run_volume_tag",
|
||||
"snapshot_tags",
|
||||
"snapshot_tag",
|
||||
"spot_tags",
|
||||
"spot_tag",
|
||||
"tags",
|
||||
"tag",
|
||||
},
|
||||
},
|
||||
}, raws...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if b.config.PackerConfig.PackerForce {
|
||||
b.config.AMIForceDeregister = true
|
||||
}
|
||||
|
||||
// Accumulate any errors
|
||||
var errs *packersdk.MultiError
|
||||
var warns []string
|
||||
errs = packersdk.MultiErrorAppend(errs, b.config.VolumeRunTag.CopyOn(&b.config.VolumeRunTags)...)
|
||||
|
||||
errs = packersdk.MultiErrorAppend(errs, b.config.AccessConfig.Prepare()...)
|
||||
errs = packersdk.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)
|
||||
errs = packersdk.MultiErrorAppend(errs,
|
||||
b.config.AMIConfig.Prepare(&b.config.AccessConfig, &b.config.ctx)...)
|
||||
errs = packersdk.MultiErrorAppend(errs, b.config.AMIMappings.Prepare(&b.config.ctx)...)
|
||||
errs = packersdk.MultiErrorAppend(errs, b.config.LaunchMappings.Prepare(&b.config.ctx)...)
|
||||
errs = packersdk.MultiErrorAppend(errs, b.config.RootDevice.Prepare(&b.config.ctx)...)
|
||||
|
||||
if b.config.AMIVirtType == "" {
|
||||
errs = packersdk.MultiErrorAppend(errs, errors.New("ami_virtualization_type is required."))
|
||||
}
|
||||
|
||||
foundRootVolume := false
|
||||
for _, launchDevice := range b.config.LaunchMappings {
|
||||
if launchDevice.DeviceName == b.config.RootDevice.SourceDeviceName {
|
||||
foundRootVolume = true
|
||||
if launchDevice.OmitFromArtifact {
|
||||
errs = packersdk.MultiErrorAppend(errs, fmt.Errorf("You cannot set \"omit_from_artifact\": \"true\" for the root volume."))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !foundRootVolume {
|
||||
errs = packersdk.MultiErrorAppend(errs, fmt.Errorf("no volume with name '%s' is found", b.config.RootDevice.SourceDeviceName))
|
||||
}
|
||||
|
||||
if b.config.RunConfig.SpotPriceAutoProduct != "" {
|
||||
warns = append(warns, "spot_price_auto_product is deprecated and no "+
|
||||
"longer necessary for Packer builds. In future versions of "+
|
||||
"Packer, inclusion of spot_price_auto_product will error your "+
|
||||
"builds. Please take a look at our current documentation to "+
|
||||
"understand how Packer requests Spot instances.")
|
||||
}
|
||||
|
||||
if b.config.Architecture == "" {
|
||||
b.config.Architecture = "x86_64"
|
||||
}
|
||||
valid := false
|
||||
for _, validArch := range []string{"x86_64", "arm64"} {
|
||||
if validArch == b.config.Architecture {
|
||||
valid = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !valid {
|
||||
errs = packersdk.MultiErrorAppend(errs, errors.New(`The only valid ami_architecture values are "x86_64" and "arm64"`))
|
||||
}
|
||||
if errs != nil && len(errs.Errors) > 0 {
|
||||
return nil, warns, errs
|
||||
}
|
||||
|
||||
packersdk.LogSecretFilter.Set(b.config.AccessKey, b.config.SecretKey, b.config.Token)
|
||||
|
||||
generatedData := awscommon.GetGeneratedDataList()
|
||||
return generatedData, warns, nil
|
||||
}
|
||||
|
||||
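Prepare enforces that ami_root_device.source_device_name names one of the launch_block_device_mappings and that the matching mapping is not omitted from the artifact. A test-style sketch of the second rule, not taken from the removed test file (the test name and device names are illustrative; testConfig is the helper defined in the package's unit tests further below):

package ebssurrogate

import (
	"testing"

	"github.com/hashicorp/packer/builder/amazon/common"
)

func TestPrepare_RejectsOmittedRootVolume(t *testing.T) {
	var b Builder
	b.config.AMIVirtType = "hvm"
	b.config.RootDevice = RootBlockDevice{SourceDeviceName: "/dev/xvdf", DeviceName: "/dev/xvda"}
	b.config.LaunchMappings = BlockDevices{
		{BlockDevice: common.BlockDevice{DeviceName: "/dev/xvdf"}, OmitFromArtifact: true},
	}

	cfg := testConfig() // reused from the package's unit tests below
	cfg["ami_name"] = "omitted-root-volume-test"

	if _, _, err := b.Prepare(cfg); err == nil {
		t.Fatal("expected Prepare to reject omit_from_artifact on the root volume")
	}
}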
func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook) (packersdk.Artifact, error) {
	session, err := b.config.Session()
	if err != nil {
		return nil, err
	}

	ec2conn := ec2.New(session)
	iam := iam.New(session)

	// Setup the state bag and initial state for the steps
	state := new(multistep.BasicStateBag)
	state.Put("config", &b.config)
	state.Put("access_config", &b.config.AccessConfig)
	state.Put("ami_config", &b.config.AMIConfig)
	state.Put("ec2", ec2conn)
	state.Put("iam", iam)
	state.Put("awsSession", session)
	state.Put("hook", hook)
	state.Put("ui", ui)
	generatedData := &packerbuilderdata.GeneratedData{State: state}

	var instanceStep multistep.Step

	if b.config.IsSpotInstance() {
		instanceStep = &awscommon.StepRunSpotInstance{
			PollingConfig: b.config.PollingConfig,
			AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
			LaunchMappings: b.config.LaunchMappings,
			BlockDurationMinutes: b.config.BlockDurationMinutes,
			Ctx: b.config.ctx,
			Comm: &b.config.RunConfig.Comm,
			Debug: b.config.PackerDebug,
			EbsOptimized: b.config.EbsOptimized,
			ExpectedRootDevice: "ebs",
			InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
			InstanceType: b.config.InstanceType,
			Region: *ec2conn.Config.Region,
			SourceAMI: b.config.SourceAmi,
			SpotPrice: b.config.SpotPrice,
			SpotInstanceTypes: b.config.SpotInstanceTypes,
			SpotTags: b.config.SpotTags,
			Tags: b.config.RunTags,
			UserData: b.config.UserData,
			UserDataFile: b.config.UserDataFile,
			VolumeTags: b.config.VolumeRunTags,
		}
	} else {
		instanceStep = &awscommon.StepRunSourceInstance{
			PollingConfig: b.config.PollingConfig,
			AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
			LaunchMappings: b.config.LaunchMappings,
			Comm: &b.config.RunConfig.Comm,
			Ctx: b.config.ctx,
			Debug: b.config.PackerDebug,
			EbsOptimized: b.config.EbsOptimized,
			EnableT2Unlimited: b.config.EnableT2Unlimited,
			ExpectedRootDevice: "ebs",
			InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
			InstanceType: b.config.InstanceType,
			IsRestricted: b.config.IsChinaCloud() || b.config.IsGovCloud(),
			SourceAMI: b.config.SourceAmi,
			Tags: b.config.RunTags,
			Tenancy: b.config.Tenancy,
			UserData: b.config.UserData,
			UserDataFile: b.config.UserDataFile,
			VolumeTags: b.config.VolumeRunTags,
		}
	}

	amiDevices := b.config.AMIMappings.BuildEC2BlockDeviceMappings()
	launchDevices := b.config.LaunchMappings.BuildEC2BlockDeviceMappings()

	// Build the steps
	steps := []multistep.Step{
		&awscommon.StepPreValidate{
			DestAmiName: b.config.AMIName,
			ForceDeregister: b.config.AMIForceDeregister,
			AMISkipBuildRegion: b.config.AMISkipBuildRegion,
			VpcId: b.config.VpcId,
			SubnetId: b.config.SubnetId,
			HasSubnetFilter: !b.config.SubnetFilter.Empty(),
		},
		&awscommon.StepSourceAMIInfo{
			SourceAmi: b.config.SourceAmi,
			EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
			EnableAMIENASupport: b.config.AMIENASupport,
			AmiFilters: b.config.SourceAmiFilter,
			AMIVirtType: b.config.AMIVirtType,
		},
		&awscommon.StepNetworkInfo{
			VpcId: b.config.VpcId,
			VpcFilter: b.config.VpcFilter,
			SecurityGroupIds: b.config.SecurityGroupIds,
			SecurityGroupFilter: b.config.SecurityGroupFilter,
			SubnetId: b.config.SubnetId,
			SubnetFilter: b.config.SubnetFilter,
			AvailabilityZone: b.config.AvailabilityZone,
		},
		&awscommon.StepKeyPair{
			Debug: b.config.PackerDebug,
			Comm: &b.config.RunConfig.Comm,
			DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName),
		},
		&awscommon.StepSecurityGroup{
			SecurityGroupFilter: b.config.SecurityGroupFilter,
			SecurityGroupIds: b.config.SecurityGroupIds,
			CommConfig: &b.config.RunConfig.Comm,
			TemporarySGSourceCidrs: b.config.TemporarySGSourceCidrs,
			SkipSSHRuleCreation: b.config.SSMAgentEnabled(),
		},
		&awscommon.StepIamInstanceProfile{
			IamInstanceProfile: b.config.IamInstanceProfile,
			SkipProfileValidation: b.config.SkipProfileValidation,
			TemporaryIamInstanceProfilePolicyDocument: b.config.TemporaryIamInstanceProfilePolicyDocument,
		},
		&awscommon.StepCleanupVolumes{
			LaunchMappings: b.config.LaunchMappings.Common(),
		},
		instanceStep,
		&awscommon.StepGetPassword{
			Debug: b.config.PackerDebug,
			Comm: &b.config.RunConfig.Comm,
			Timeout: b.config.WindowsPasswordTimeout,
			BuildName: b.config.PackerBuildName,
		},
		&awscommon.StepCreateSSMTunnel{
			AWSSession: session,
			Region: *ec2conn.Config.Region,
			PauseBeforeSSM: b.config.PauseBeforeSSM,
			LocalPortNumber: b.config.SessionManagerPort,
			RemotePortNumber: b.config.Comm.Port(),
			SSMAgentEnabled: b.config.SSMAgentEnabled(),
		},
		&communicator.StepConnect{
			Config: &b.config.RunConfig.Comm,
			Host: awscommon.SSHHost(
				ec2conn,
				b.config.SSHInterface,
				b.config.Comm.Host(),
			),
			SSHPort: awscommon.Port(
				b.config.SSHInterface,
				b.config.Comm.Port(),
			),
			SSHConfig: b.config.RunConfig.Comm.SSHConfigFunc(),
		},
		&awscommon.StepSetGeneratedData{
			GeneratedData: generatedData,
		},
		&commonsteps.StepProvision{},
		&commonsteps.StepCleanupTempKeys{
			Comm: &b.config.RunConfig.Comm,
		},
		&awscommon.StepStopEBSBackedInstance{
			PollingConfig: b.config.PollingConfig,
			Skip: b.config.IsSpotInstance(),
			DisableStopInstance: b.config.DisableStopInstance,
		},
		&awscommon.StepModifyEBSBackedInstance{
			Skip: b.config.IsSpotInstance(),
			EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
			EnableAMIENASupport: b.config.AMIENASupport,
		},
		&StepSnapshotVolumes{
			PollingConfig: b.config.PollingConfig,
			LaunchDevices: launchDevices,
			SnapshotOmitMap: b.config.LaunchMappings.GetOmissions(),
			SnapshotTags: b.config.SnapshotTags,
			Ctx: b.config.ctx,
		},
		&awscommon.StepDeregisterAMI{
			AccessConfig: &b.config.AccessConfig,
			ForceDeregister: b.config.AMIForceDeregister,
			ForceDeleteSnapshot: b.config.AMIForceDeleteSnapshot,
			AMIName: b.config.AMIName,
			Regions: b.config.AMIRegions,
		},
		&StepRegisterAMI{
			RootDevice: b.config.RootDevice,
			AMIDevices: amiDevices,
			LaunchDevices: launchDevices,
			EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
			EnableAMIENASupport: b.config.AMIENASupport,
			Architecture: b.config.Architecture,
			LaunchOmitMap: b.config.LaunchMappings.GetOmissions(),
			AMISkipBuildRegion: b.config.AMISkipBuildRegion,
			PollingConfig: b.config.PollingConfig,
		},
		&awscommon.StepAMIRegionCopy{
			AccessConfig: &b.config.AccessConfig,
			Regions: b.config.AMIRegions,
			AMIKmsKeyId: b.config.AMIKmsKeyId,
			RegionKeyIds: b.config.AMIRegionKMSKeyIDs,
			EncryptBootVolume: b.config.AMIEncryptBootVolume,
			Name: b.config.AMIName,
			OriginalRegion: *ec2conn.Config.Region,
			AMISkipBuildRegion: b.config.AMISkipBuildRegion,
		},
		&awscommon.StepModifyAMIAttributes{
			Description: b.config.AMIDescription,
			Users: b.config.AMIUsers,
			Groups: b.config.AMIGroups,
			ProductCodes: b.config.AMIProductCodes,
			SnapshotUsers: b.config.SnapshotUsers,
			SnapshotGroups: b.config.SnapshotGroups,
			Ctx: b.config.ctx,
			GeneratedData: generatedData,
		},
		&awscommon.StepCreateTags{
			Tags: b.config.AMITags,
			SnapshotTags: b.config.SnapshotTags,
			Ctx: b.config.ctx,
		},
	}

	// Run!
	b.runner = commonsteps.NewRunner(steps, b.config.PackerConfig, ui)
	b.runner.Run(ctx, state)

	// If there was an error, return that
	if rawErr, ok := state.GetOk("error"); ok {
		return nil, rawErr.(error)
	}

	if amis, ok := state.GetOk("amis"); ok {
		// Build the artifact and return it
		artifact := &awscommon.Artifact{
			Amis: amis.(map[string]string),
			BuilderIdValue: BuilderId,
			Session: session,
			StateData: map[string]interface{}{"generated_data": state.Get("generated_data")},
		}

		return artifact, nil
	}

	return nil, nil
}
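The omissions map produced by GetOmissions is handed to StepSnapshotVolumes and StepRegisterAMI above; the filtering those steps perform amounts to something like the following sketch (the helper name is hypothetical and the real steps live in separate files of this package):

package ebssurrogate

import "github.com/aws/aws-sdk-go/service/ec2"

// dropOmittedDevices returns only the block device mappings whose device name
// is not marked true in the omissions map.
func dropOmittedDevices(devices []*ec2.BlockDeviceMapping, omit map[string]bool) []*ec2.BlockDeviceMapping {
	kept := make([]*ec2.BlockDeviceMapping, 0, len(devices))
	for _, d := range devices {
		if d.DeviceName != nil && omit[*d.DeviceName] {
			continue // mounted on the surrogate instance, but left out of the AMI
		}
		kept = append(kept, d)
	}
	return kept
}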
@ -1,381 +0,0 @@
|
|||
// Code generated by "mapstructure-to-hcl2 -type Config,RootBlockDevice,BlockDevice"; DO NOT EDIT.
|
||||
|
||||
package ebssurrogate
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer/builder/amazon/common"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// FlatBlockDevice is an auto-generated flat version of BlockDevice.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatBlockDevice struct {
|
||||
DeleteOnTermination *bool `mapstructure:"delete_on_termination" required:"false" cty:"delete_on_termination" hcl:"delete_on_termination"`
|
||||
DeviceName *string `mapstructure:"device_name" required:"false" cty:"device_name" hcl:"device_name"`
|
||||
Encrypted *bool `mapstructure:"encrypted" required:"false" cty:"encrypted" hcl:"encrypted"`
|
||||
IOPS *int64 `mapstructure:"iops" required:"false" cty:"iops" hcl:"iops"`
|
||||
NoDevice *bool `mapstructure:"no_device" required:"false" cty:"no_device" hcl:"no_device"`
|
||||
SnapshotId *string `mapstructure:"snapshot_id" required:"false" cty:"snapshot_id" hcl:"snapshot_id"`
|
||||
Throughput *int64 `mapstructure:"throughput" required:"false" cty:"throughput" hcl:"throughput"`
|
||||
VirtualName *string `mapstructure:"virtual_name" required:"false" cty:"virtual_name" hcl:"virtual_name"`
|
||||
VolumeType *string `mapstructure:"volume_type" required:"false" cty:"volume_type" hcl:"volume_type"`
|
||||
VolumeSize *int64 `mapstructure:"volume_size" required:"false" cty:"volume_size" hcl:"volume_size"`
|
||||
KmsKeyId *string `mapstructure:"kms_key_id" required:"false" cty:"kms_key_id" hcl:"kms_key_id"`
|
||||
OmitFromArtifact *bool `mapstructure:"omit_from_artifact" cty:"omit_from_artifact" hcl:"omit_from_artifact"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatBlockDevice.
|
||||
// FlatBlockDevice is an auto-generated flat version of BlockDevice.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*BlockDevice) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatBlockDevice)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a BlockDevice.
|
||||
// This spec is used by HCL to read the fields of BlockDevice.
|
||||
// The decoded values from this spec will then be applied to a FlatBlockDevice.
|
||||
func (*FlatBlockDevice) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"delete_on_termination": &hcldec.AttrSpec{Name: "delete_on_termination", Type: cty.Bool, Required: false},
|
||||
"device_name": &hcldec.AttrSpec{Name: "device_name", Type: cty.String, Required: false},
|
||||
"encrypted": &hcldec.AttrSpec{Name: "encrypted", Type: cty.Bool, Required: false},
|
||||
"iops": &hcldec.AttrSpec{Name: "iops", Type: cty.Number, Required: false},
|
||||
"no_device": &hcldec.AttrSpec{Name: "no_device", Type: cty.Bool, Required: false},
|
||||
"snapshot_id": &hcldec.AttrSpec{Name: "snapshot_id", Type: cty.String, Required: false},
|
||||
"throughput": &hcldec.AttrSpec{Name: "throughput", Type: cty.Number, Required: false},
|
||||
"virtual_name": &hcldec.AttrSpec{Name: "virtual_name", Type: cty.String, Required: false},
|
||||
"volume_type": &hcldec.AttrSpec{Name: "volume_type", Type: cty.String, Required: false},
|
||||
"volume_size": &hcldec.AttrSpec{Name: "volume_size", Type: cty.Number, Required: false},
|
||||
"kms_key_id": &hcldec.AttrSpec{Name: "kms_key_id", Type: cty.String, Required: false},
|
||||
"omit_from_artifact": &hcldec.AttrSpec{Name: "omit_from_artifact", Type: cty.Bool, Required: false},
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// FlatConfig is an auto-generated flat version of Config.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatConfig struct {
|
||||
PackerBuildName *string `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"`
|
||||
PackerBuilderType *string `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"`
|
||||
PackerCoreVersion *string `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"`
|
||||
PackerDebug *bool `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"`
|
||||
PackerForce *bool `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"`
|
||||
PackerOnError *string `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"`
|
||||
PackerUserVars map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"`
|
||||
PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"`
|
||||
AccessKey *string `mapstructure:"access_key" required:"true" cty:"access_key" hcl:"access_key"`
|
||||
AssumeRole *common.FlatAssumeRoleConfig `mapstructure:"assume_role" required:"false" cty:"assume_role" hcl:"assume_role"`
|
||||
CustomEndpointEc2 *string `mapstructure:"custom_endpoint_ec2" required:"false" cty:"custom_endpoint_ec2" hcl:"custom_endpoint_ec2"`
|
||||
CredsFilename *string `mapstructure:"shared_credentials_file" required:"false" cty:"shared_credentials_file" hcl:"shared_credentials_file"`
|
||||
DecodeAuthZMessages *bool `mapstructure:"decode_authorization_messages" required:"false" cty:"decode_authorization_messages" hcl:"decode_authorization_messages"`
|
||||
InsecureSkipTLSVerify *bool `mapstructure:"insecure_skip_tls_verify" required:"false" cty:"insecure_skip_tls_verify" hcl:"insecure_skip_tls_verify"`
|
||||
MaxRetries *int `mapstructure:"max_retries" required:"false" cty:"max_retries" hcl:"max_retries"`
|
||||
MFACode *string `mapstructure:"mfa_code" required:"false" cty:"mfa_code" hcl:"mfa_code"`
|
||||
ProfileName *string `mapstructure:"profile" required:"false" cty:"profile" hcl:"profile"`
|
||||
RawRegion *string `mapstructure:"region" required:"true" cty:"region" hcl:"region"`
|
||||
SecretKey *string `mapstructure:"secret_key" required:"true" cty:"secret_key" hcl:"secret_key"`
|
||||
SkipMetadataApiCheck *bool `mapstructure:"skip_metadata_api_check" cty:"skip_metadata_api_check" hcl:"skip_metadata_api_check"`
|
||||
SkipCredsValidation *bool `mapstructure:"skip_credential_validation" cty:"skip_credential_validation" hcl:"skip_credential_validation"`
|
||||
Token *string `mapstructure:"token" required:"false" cty:"token" hcl:"token"`
|
||||
VaultAWSEngine *common.FlatVaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false" cty:"vault_aws_engine" hcl:"vault_aws_engine"`
|
||||
PollingConfig *common.FlatAWSPollingConfig `mapstructure:"aws_polling" required:"false" cty:"aws_polling" hcl:"aws_polling"`
|
||||
AssociatePublicIpAddress *bool `mapstructure:"associate_public_ip_address" required:"false" cty:"associate_public_ip_address" hcl:"associate_public_ip_address"`
|
||||
AvailabilityZone *string `mapstructure:"availability_zone" required:"false" cty:"availability_zone" hcl:"availability_zone"`
|
||||
BlockDurationMinutes *int64 `mapstructure:"block_duration_minutes" required:"false" cty:"block_duration_minutes" hcl:"block_duration_minutes"`
|
||||
DisableStopInstance *bool `mapstructure:"disable_stop_instance" required:"false" cty:"disable_stop_instance" hcl:"disable_stop_instance"`
|
||||
EbsOptimized *bool `mapstructure:"ebs_optimized" required:"false" cty:"ebs_optimized" hcl:"ebs_optimized"`
|
||||
EnableT2Unlimited *bool `mapstructure:"enable_t2_unlimited" required:"false" cty:"enable_t2_unlimited" hcl:"enable_t2_unlimited"`
|
||||
IamInstanceProfile *string `mapstructure:"iam_instance_profile" required:"false" cty:"iam_instance_profile" hcl:"iam_instance_profile"`
|
||||
SkipProfileValidation *bool `mapstructure:"skip_profile_validation" required:"false" cty:"skip_profile_validation" hcl:"skip_profile_validation"`
|
||||
TemporaryIamInstanceProfilePolicyDocument *common.FlatPolicyDocument `mapstructure:"temporary_iam_instance_profile_policy_document" required:"false" cty:"temporary_iam_instance_profile_policy_document" hcl:"temporary_iam_instance_profile_policy_document"`
|
||||
InstanceInitiatedShutdownBehavior *string `mapstructure:"shutdown_behavior" required:"false" cty:"shutdown_behavior" hcl:"shutdown_behavior"`
|
||||
InstanceType *string `mapstructure:"instance_type" required:"true" cty:"instance_type" hcl:"instance_type"`
|
||||
SecurityGroupFilter *common.FlatSecurityGroupFilterOptions `mapstructure:"security_group_filter" required:"false" cty:"security_group_filter" hcl:"security_group_filter"`
|
||||
RunTags map[string]string `mapstructure:"run_tags" required:"false" cty:"run_tags" hcl:"run_tags"`
|
||||
RunTag []config.FlatKeyValue `mapstructure:"run_tag" required:"false" cty:"run_tag" hcl:"run_tag"`
|
||||
SecurityGroupId *string `mapstructure:"security_group_id" required:"false" cty:"security_group_id" hcl:"security_group_id"`
|
||||
SecurityGroupIds []string `mapstructure:"security_group_ids" required:"false" cty:"security_group_ids" hcl:"security_group_ids"`
|
||||
SourceAmi *string `mapstructure:"source_ami" required:"true" cty:"source_ami" hcl:"source_ami"`
|
||||
SourceAmiFilter *common.FlatAmiFilterOptions `mapstructure:"source_ami_filter" required:"false" cty:"source_ami_filter" hcl:"source_ami_filter"`
|
||||
SpotInstanceTypes []string `mapstructure:"spot_instance_types" required:"false" cty:"spot_instance_types" hcl:"spot_instance_types"`
|
||||
SpotPrice *string `mapstructure:"spot_price" required:"false" cty:"spot_price" hcl:"spot_price"`
|
||||
SpotPriceAutoProduct *string `mapstructure:"spot_price_auto_product" required:"false" undocumented:"true" cty:"spot_price_auto_product" hcl:"spot_price_auto_product"`
|
||||
SpotTags map[string]string `mapstructure:"spot_tags" required:"false" cty:"spot_tags" hcl:"spot_tags"`
|
||||
SpotTag []config.FlatKeyValue `mapstructure:"spot_tag" required:"false" cty:"spot_tag" hcl:"spot_tag"`
|
||||
SubnetFilter *common.FlatSubnetFilterOptions `mapstructure:"subnet_filter" required:"false" cty:"subnet_filter" hcl:"subnet_filter"`
|
||||
SubnetId *string `mapstructure:"subnet_id" required:"false" cty:"subnet_id" hcl:"subnet_id"`
|
||||
Tenancy *string `mapstructure:"tenancy" required:"false" cty:"tenancy" hcl:"tenancy"`
|
||||
TemporarySGSourceCidrs []string `mapstructure:"temporary_security_group_source_cidrs" required:"false" cty:"temporary_security_group_source_cidrs" hcl:"temporary_security_group_source_cidrs"`
|
||||
UserData *string `mapstructure:"user_data" required:"false" cty:"user_data" hcl:"user_data"`
|
||||
UserDataFile *string `mapstructure:"user_data_file" required:"false" cty:"user_data_file" hcl:"user_data_file"`
|
||||
VpcFilter *common.FlatVpcFilterOptions `mapstructure:"vpc_filter" required:"false" cty:"vpc_filter" hcl:"vpc_filter"`
|
||||
VpcId *string `mapstructure:"vpc_id" required:"false" cty:"vpc_id" hcl:"vpc_id"`
|
||||
WindowsPasswordTimeout *string `mapstructure:"windows_password_timeout" required:"false" cty:"windows_password_timeout" hcl:"windows_password_timeout"`
|
||||
Type *string `mapstructure:"communicator" cty:"communicator" hcl:"communicator"`
|
||||
PauseBeforeConnect *string `mapstructure:"pause_before_connecting" cty:"pause_before_connecting" hcl:"pause_before_connecting"`
|
||||
SSHHost *string `mapstructure:"ssh_host" cty:"ssh_host" hcl:"ssh_host"`
|
||||
SSHPort *int `mapstructure:"ssh_port" cty:"ssh_port" hcl:"ssh_port"`
|
||||
SSHUsername *string `mapstructure:"ssh_username" cty:"ssh_username" hcl:"ssh_username"`
|
||||
SSHPassword *string `mapstructure:"ssh_password" cty:"ssh_password" hcl:"ssh_password"`
|
||||
SSHKeyPairName *string `mapstructure:"ssh_keypair_name" undocumented:"true" cty:"ssh_keypair_name" hcl:"ssh_keypair_name"`
|
||||
SSHTemporaryKeyPairName *string `mapstructure:"temporary_key_pair_name" undocumented:"true" cty:"temporary_key_pair_name" hcl:"temporary_key_pair_name"`
|
||||
SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"`
|
||||
SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" hcl:"temporary_key_pair_bits"`
|
||||
SSHCiphers []string `mapstructure:"ssh_ciphers" cty:"ssh_ciphers" hcl:"ssh_ciphers"`
|
||||
SSHClearAuthorizedKeys *bool `mapstructure:"ssh_clear_authorized_keys" cty:"ssh_clear_authorized_keys" hcl:"ssh_clear_authorized_keys"`
|
||||
SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms" cty:"ssh_key_exchange_algorithms" hcl:"ssh_key_exchange_algorithms"`
|
||||
SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" undocumented:"true" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"`
|
||||
SSHCertificateFile *string `mapstructure:"ssh_certificate_file" cty:"ssh_certificate_file" hcl:"ssh_certificate_file"`
|
||||
SSHPty *bool `mapstructure:"ssh_pty" cty:"ssh_pty" hcl:"ssh_pty"`
|
||||
SSHTimeout *string `mapstructure:"ssh_timeout" cty:"ssh_timeout" hcl:"ssh_timeout"`
|
||||
SSHWaitTimeout *string `mapstructure:"ssh_wait_timeout" undocumented:"true" cty:"ssh_wait_timeout" hcl:"ssh_wait_timeout"`
|
||||
SSHAgentAuth *bool `mapstructure:"ssh_agent_auth" undocumented:"true" cty:"ssh_agent_auth" hcl:"ssh_agent_auth"`
|
||||
SSHDisableAgentForwarding *bool `mapstructure:"ssh_disable_agent_forwarding" cty:"ssh_disable_agent_forwarding" hcl:"ssh_disable_agent_forwarding"`
|
||||
SSHHandshakeAttempts *int `mapstructure:"ssh_handshake_attempts" cty:"ssh_handshake_attempts" hcl:"ssh_handshake_attempts"`
|
||||
SSHBastionHost *string `mapstructure:"ssh_bastion_host" cty:"ssh_bastion_host" hcl:"ssh_bastion_host"`
|
||||
SSHBastionPort *int `mapstructure:"ssh_bastion_port" cty:"ssh_bastion_port" hcl:"ssh_bastion_port"`
|
||||
SSHBastionAgentAuth *bool `mapstructure:"ssh_bastion_agent_auth" cty:"ssh_bastion_agent_auth" hcl:"ssh_bastion_agent_auth"`
|
||||
SSHBastionUsername *string `mapstructure:"ssh_bastion_username" cty:"ssh_bastion_username" hcl:"ssh_bastion_username"`
|
||||
SSHBastionPassword *string `mapstructure:"ssh_bastion_password" cty:"ssh_bastion_password" hcl:"ssh_bastion_password"`
|
||||
SSHBastionInteractive *bool `mapstructure:"ssh_bastion_interactive" cty:"ssh_bastion_interactive" hcl:"ssh_bastion_interactive"`
|
||||
SSHBastionPrivateKeyFile *string `mapstructure:"ssh_bastion_private_key_file" cty:"ssh_bastion_private_key_file" hcl:"ssh_bastion_private_key_file"`
|
||||
SSHBastionCertificateFile *string `mapstructure:"ssh_bastion_certificate_file" cty:"ssh_bastion_certificate_file" hcl:"ssh_bastion_certificate_file"`
|
||||
SSHFileTransferMethod *string `mapstructure:"ssh_file_transfer_method" cty:"ssh_file_transfer_method" hcl:"ssh_file_transfer_method"`
|
||||
SSHProxyHost *string `mapstructure:"ssh_proxy_host" cty:"ssh_proxy_host" hcl:"ssh_proxy_host"`
|
||||
SSHProxyPort *int `mapstructure:"ssh_proxy_port" cty:"ssh_proxy_port" hcl:"ssh_proxy_port"`
|
||||
SSHProxyUsername *string `mapstructure:"ssh_proxy_username" cty:"ssh_proxy_username" hcl:"ssh_proxy_username"`
|
||||
SSHProxyPassword *string `mapstructure:"ssh_proxy_password" cty:"ssh_proxy_password" hcl:"ssh_proxy_password"`
|
||||
SSHKeepAliveInterval *string `mapstructure:"ssh_keep_alive_interval" cty:"ssh_keep_alive_interval" hcl:"ssh_keep_alive_interval"`
|
||||
SSHReadWriteTimeout *string `mapstructure:"ssh_read_write_timeout" cty:"ssh_read_write_timeout" hcl:"ssh_read_write_timeout"`
|
||||
SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels" cty:"ssh_remote_tunnels" hcl:"ssh_remote_tunnels"`
|
||||
SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels" cty:"ssh_local_tunnels" hcl:"ssh_local_tunnels"`
|
||||
SSHPublicKey []byte `mapstructure:"ssh_public_key" undocumented:"true" cty:"ssh_public_key" hcl:"ssh_public_key"`
|
||||
SSHPrivateKey []byte `mapstructure:"ssh_private_key" undocumented:"true" cty:"ssh_private_key" hcl:"ssh_private_key"`
|
||||
WinRMUser *string `mapstructure:"winrm_username" cty:"winrm_username" hcl:"winrm_username"`
|
||||
WinRMPassword *string `mapstructure:"winrm_password" cty:"winrm_password" hcl:"winrm_password"`
|
||||
WinRMHost *string `mapstructure:"winrm_host" cty:"winrm_host" hcl:"winrm_host"`
|
||||
WinRMNoProxy *bool `mapstructure:"winrm_no_proxy" cty:"winrm_no_proxy" hcl:"winrm_no_proxy"`
|
||||
WinRMPort *int `mapstructure:"winrm_port" cty:"winrm_port" hcl:"winrm_port"`
|
||||
WinRMTimeout *string `mapstructure:"winrm_timeout" cty:"winrm_timeout" hcl:"winrm_timeout"`
|
||||
WinRMUseSSL *bool `mapstructure:"winrm_use_ssl" cty:"winrm_use_ssl" hcl:"winrm_use_ssl"`
|
||||
WinRMInsecure *bool `mapstructure:"winrm_insecure" cty:"winrm_insecure" hcl:"winrm_insecure"`
|
||||
WinRMUseNTLM *bool `mapstructure:"winrm_use_ntlm" cty:"winrm_use_ntlm" hcl:"winrm_use_ntlm"`
|
||||
SSHInterface *string `mapstructure:"ssh_interface" cty:"ssh_interface" hcl:"ssh_interface"`
|
||||
PauseBeforeSSM *string `mapstructure:"pause_before_ssm" cty:"pause_before_ssm" hcl:"pause_before_ssm"`
|
||||
SessionManagerPort *int `mapstructure:"session_manager_port" cty:"session_manager_port" hcl:"session_manager_port"`
|
||||
AMIName *string `mapstructure:"ami_name" required:"true" cty:"ami_name" hcl:"ami_name"`
|
||||
AMIDescription *string `mapstructure:"ami_description" required:"false" cty:"ami_description" hcl:"ami_description"`
|
||||
AMIVirtType *string `mapstructure:"ami_virtualization_type" required:"false" cty:"ami_virtualization_type" hcl:"ami_virtualization_type"`
|
||||
AMIUsers []string `mapstructure:"ami_users" required:"false" cty:"ami_users" hcl:"ami_users"`
|
||||
AMIGroups []string `mapstructure:"ami_groups" required:"false" cty:"ami_groups" hcl:"ami_groups"`
|
||||
AMIProductCodes []string `mapstructure:"ami_product_codes" required:"false" cty:"ami_product_codes" hcl:"ami_product_codes"`
|
||||
AMIRegions []string `mapstructure:"ami_regions" required:"false" cty:"ami_regions" hcl:"ami_regions"`
|
||||
AMISkipRegionValidation *bool `mapstructure:"skip_region_validation" required:"false" cty:"skip_region_validation" hcl:"skip_region_validation"`
|
||||
AMITags map[string]string `mapstructure:"tags" required:"false" cty:"tags" hcl:"tags"`
|
||||
AMITag []config.FlatKeyValue `mapstructure:"tag" required:"false" cty:"tag" hcl:"tag"`
|
||||
AMIENASupport *bool `mapstructure:"ena_support" required:"false" cty:"ena_support" hcl:"ena_support"`
|
||||
AMISriovNetSupport *bool `mapstructure:"sriov_support" required:"false" cty:"sriov_support" hcl:"sriov_support"`
|
||||
AMIForceDeregister *bool `mapstructure:"force_deregister" required:"false" cty:"force_deregister" hcl:"force_deregister"`
|
||||
AMIForceDeleteSnapshot *bool `mapstructure:"force_delete_snapshot" required:"false" cty:"force_delete_snapshot" hcl:"force_delete_snapshot"`
|
||||
AMIEncryptBootVolume *bool `mapstructure:"encrypt_boot" required:"false" cty:"encrypt_boot" hcl:"encrypt_boot"`
|
||||
AMIKmsKeyId *string `mapstructure:"kms_key_id" required:"false" cty:"kms_key_id" hcl:"kms_key_id"`
|
||||
AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids" required:"false" cty:"region_kms_key_ids" hcl:"region_kms_key_ids"`
|
||||
AMISkipBuildRegion *bool `mapstructure:"skip_save_build_region" cty:"skip_save_build_region" hcl:"skip_save_build_region"`
|
||||
SnapshotTags map[string]string `mapstructure:"snapshot_tags" required:"false" cty:"snapshot_tags" hcl:"snapshot_tags"`
|
||||
SnapshotTag []config.FlatKeyValue `mapstructure:"snapshot_tag" required:"false" cty:"snapshot_tag" hcl:"snapshot_tag"`
|
||||
SnapshotUsers []string `mapstructure:"snapshot_users" required:"false" cty:"snapshot_users" hcl:"snapshot_users"`
|
||||
SnapshotGroups []string `mapstructure:"snapshot_groups" required:"false" cty:"snapshot_groups" hcl:"snapshot_groups"`
|
||||
AMIMappings []common.FlatBlockDevice `mapstructure:"ami_block_device_mappings" required:"false" cty:"ami_block_device_mappings" hcl:"ami_block_device_mappings"`
|
||||
LaunchMappings []FlatBlockDevice `mapstructure:"launch_block_device_mappings" required:"false" cty:"launch_block_device_mappings" hcl:"launch_block_device_mappings"`
|
||||
RootDevice *FlatRootBlockDevice `mapstructure:"ami_root_device" required:"true" cty:"ami_root_device" hcl:"ami_root_device"`
|
||||
VolumeRunTags map[string]string `mapstructure:"run_volume_tags" cty:"run_volume_tags" hcl:"run_volume_tags"`
|
||||
VolumeRunTag []config.FlatNameValue `mapstructure:"run_volume_tag" required:"false" cty:"run_volume_tag" hcl:"run_volume_tag"`
|
||||
Architecture *string `mapstructure:"ami_architecture" required:"false" cty:"ami_architecture" hcl:"ami_architecture"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatConfig.
|
||||
// FlatConfig is an auto-generated flat version of Config.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatConfig)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a Config.
|
||||
// This spec is used by HCL to read the fields of Config.
|
||||
// The decoded values from this spec will then be applied to a FlatConfig.
|
||||
func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"packer_build_name": &hcldec.AttrSpec{Name: "packer_build_name", Type: cty.String, Required: false},
|
||||
"packer_builder_type": &hcldec.AttrSpec{Name: "packer_builder_type", Type: cty.String, Required: false},
|
||||
"packer_core_version": &hcldec.AttrSpec{Name: "packer_core_version", Type: cty.String, Required: false},
|
||||
"packer_debug": &hcldec.AttrSpec{Name: "packer_debug", Type: cty.Bool, Required: false},
|
||||
"packer_force": &hcldec.AttrSpec{Name: "packer_force", Type: cty.Bool, Required: false},
|
||||
"packer_on_error": &hcldec.AttrSpec{Name: "packer_on_error", Type: cty.String, Required: false},
|
||||
"packer_user_variables": &hcldec.AttrSpec{Name: "packer_user_variables", Type: cty.Map(cty.String), Required: false},
|
||||
"packer_sensitive_variables": &hcldec.AttrSpec{Name: "packer_sensitive_variables", Type: cty.List(cty.String), Required: false},
|
||||
"access_key": &hcldec.AttrSpec{Name: "access_key", Type: cty.String, Required: false},
|
||||
"assume_role": &hcldec.BlockSpec{TypeName: "assume_role", Nested: hcldec.ObjectSpec((*common.FlatAssumeRoleConfig)(nil).HCL2Spec())},
|
||||
"custom_endpoint_ec2": &hcldec.AttrSpec{Name: "custom_endpoint_ec2", Type: cty.String, Required: false},
|
||||
"shared_credentials_file": &hcldec.AttrSpec{Name: "shared_credentials_file", Type: cty.String, Required: false},
|
||||
"decode_authorization_messages": &hcldec.AttrSpec{Name: "decode_authorization_messages", Type: cty.Bool, Required: false},
|
||||
"insecure_skip_tls_verify": &hcldec.AttrSpec{Name: "insecure_skip_tls_verify", Type: cty.Bool, Required: false},
|
||||
"max_retries": &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false},
|
||||
"mfa_code": &hcldec.AttrSpec{Name: "mfa_code", Type: cty.String, Required: false},
|
||||
"profile": &hcldec.AttrSpec{Name: "profile", Type: cty.String, Required: false},
|
||||
"region": &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: false},
|
||||
"secret_key": &hcldec.AttrSpec{Name: "secret_key", Type: cty.String, Required: false},
|
||||
"skip_metadata_api_check": &hcldec.AttrSpec{Name: "skip_metadata_api_check", Type: cty.Bool, Required: false},
|
||||
"skip_credential_validation": &hcldec.AttrSpec{Name: "skip_credential_validation", Type: cty.Bool, Required: false},
|
||||
"token": &hcldec.AttrSpec{Name: "token", Type: cty.String, Required: false},
|
||||
"vault_aws_engine": &hcldec.BlockSpec{TypeName: "vault_aws_engine", Nested: hcldec.ObjectSpec((*common.FlatVaultAWSEngineOptions)(nil).HCL2Spec())},
|
||||
"aws_polling": &hcldec.BlockSpec{TypeName: "aws_polling", Nested: hcldec.ObjectSpec((*common.FlatAWSPollingConfig)(nil).HCL2Spec())},
|
||||
"associate_public_ip_address": &hcldec.AttrSpec{Name: "associate_public_ip_address", Type: cty.Bool, Required: false},
|
||||
"availability_zone": &hcldec.AttrSpec{Name: "availability_zone", Type: cty.String, Required: false},
|
||||
"block_duration_minutes": &hcldec.AttrSpec{Name: "block_duration_minutes", Type: cty.Number, Required: false},
|
||||
"disable_stop_instance": &hcldec.AttrSpec{Name: "disable_stop_instance", Type: cty.Bool, Required: false},
|
||||
"ebs_optimized": &hcldec.AttrSpec{Name: "ebs_optimized", Type: cty.Bool, Required: false},
|
||||
"enable_t2_unlimited": &hcldec.AttrSpec{Name: "enable_t2_unlimited", Type: cty.Bool, Required: false},
|
||||
"iam_instance_profile": &hcldec.AttrSpec{Name: "iam_instance_profile", Type: cty.String, Required: false},
|
||||
"skip_profile_validation": &hcldec.AttrSpec{Name: "skip_profile_validation", Type: cty.Bool, Required: false},
|
||||
"temporary_iam_instance_profile_policy_document": &hcldec.BlockSpec{TypeName: "temporary_iam_instance_profile_policy_document", Nested: hcldec.ObjectSpec((*common.FlatPolicyDocument)(nil).HCL2Spec())},
|
||||
"shutdown_behavior": &hcldec.AttrSpec{Name: "shutdown_behavior", Type: cty.String, Required: false},
|
||||
"instance_type": &hcldec.AttrSpec{Name: "instance_type", Type: cty.String, Required: false},
|
||||
"security_group_filter": &hcldec.BlockSpec{TypeName: "security_group_filter", Nested: hcldec.ObjectSpec((*common.FlatSecurityGroupFilterOptions)(nil).HCL2Spec())},
|
||||
"run_tags": &hcldec.AttrSpec{Name: "run_tags", Type: cty.Map(cty.String), Required: false},
|
||||
"run_tag": &hcldec.BlockListSpec{TypeName: "run_tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
|
||||
"security_group_id": &hcldec.AttrSpec{Name: "security_group_id", Type: cty.String, Required: false},
|
||||
"security_group_ids": &hcldec.AttrSpec{Name: "security_group_ids", Type: cty.List(cty.String), Required: false},
|
||||
"source_ami": &hcldec.AttrSpec{Name: "source_ami", Type: cty.String, Required: false},
|
||||
"source_ami_filter": &hcldec.BlockSpec{TypeName: "source_ami_filter", Nested: hcldec.ObjectSpec((*common.FlatAmiFilterOptions)(nil).HCL2Spec())},
|
||||
"spot_instance_types": &hcldec.AttrSpec{Name: "spot_instance_types", Type: cty.List(cty.String), Required: false},
|
||||
"spot_price": &hcldec.AttrSpec{Name: "spot_price", Type: cty.String, Required: false},
|
||||
"spot_price_auto_product": &hcldec.AttrSpec{Name: "spot_price_auto_product", Type: cty.String, Required: false},
|
||||
"spot_tags": &hcldec.AttrSpec{Name: "spot_tags", Type: cty.Map(cty.String), Required: false},
|
||||
"spot_tag": &hcldec.BlockListSpec{TypeName: "spot_tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
|
||||
"subnet_filter": &hcldec.BlockSpec{TypeName: "subnet_filter", Nested: hcldec.ObjectSpec((*common.FlatSubnetFilterOptions)(nil).HCL2Spec())},
|
||||
"subnet_id": &hcldec.AttrSpec{Name: "subnet_id", Type: cty.String, Required: false},
|
||||
"tenancy": &hcldec.AttrSpec{Name: "tenancy", Type: cty.String, Required: false},
|
||||
"temporary_security_group_source_cidrs": &hcldec.AttrSpec{Name: "temporary_security_group_source_cidrs", Type: cty.List(cty.String), Required: false},
|
||||
"user_data": &hcldec.AttrSpec{Name: "user_data", Type: cty.String, Required: false},
|
||||
"user_data_file": &hcldec.AttrSpec{Name: "user_data_file", Type: cty.String, Required: false},
|
||||
"vpc_filter": &hcldec.BlockSpec{TypeName: "vpc_filter", Nested: hcldec.ObjectSpec((*common.FlatVpcFilterOptions)(nil).HCL2Spec())},
|
||||
"vpc_id": &hcldec.AttrSpec{Name: "vpc_id", Type: cty.String, Required: false},
|
||||
"windows_password_timeout": &hcldec.AttrSpec{Name: "windows_password_timeout", Type: cty.String, Required: false},
|
||||
"communicator": &hcldec.AttrSpec{Name: "communicator", Type: cty.String, Required: false},
|
||||
"pause_before_connecting": &hcldec.AttrSpec{Name: "pause_before_connecting", Type: cty.String, Required: false},
|
||||
"ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false},
|
||||
"ssh_port": &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false},
|
||||
"ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false},
|
||||
"ssh_password": &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false},
|
||||
"ssh_keypair_name": &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false},
|
||||
"temporary_key_pair_name": &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false},
|
||||
"temporary_key_pair_type": &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false},
|
||||
"temporary_key_pair_bits": &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false},
|
||||
"ssh_ciphers": &hcldec.AttrSpec{Name: "ssh_ciphers", Type: cty.List(cty.String), Required: false},
|
||||
"ssh_clear_authorized_keys": &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false},
|
||||
"ssh_key_exchange_algorithms": &hcldec.AttrSpec{Name: "ssh_key_exchange_algorithms", Type: cty.List(cty.String), Required: false},
|
||||
"ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false},
|
||||
"ssh_certificate_file": &hcldec.AttrSpec{Name: "ssh_certificate_file", Type: cty.String, Required: false},
|
||||
"ssh_pty": &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false},
|
||||
"ssh_timeout": &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false},
|
||||
"ssh_wait_timeout": &hcldec.AttrSpec{Name: "ssh_wait_timeout", Type: cty.String, Required: false},
|
||||
"ssh_agent_auth": &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false},
|
||||
"ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false},
|
||||
"ssh_handshake_attempts": &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false},
|
||||
"ssh_bastion_host": &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false},
|
||||
"ssh_bastion_port": &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false},
|
||||
"ssh_bastion_agent_auth": &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false},
|
||||
"ssh_bastion_username": &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false},
|
||||
"ssh_bastion_password": &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false},
|
||||
"ssh_bastion_interactive": &hcldec.AttrSpec{Name: "ssh_bastion_interactive", Type: cty.Bool, Required: false},
|
||||
"ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false},
|
||||
"ssh_bastion_certificate_file": &hcldec.AttrSpec{Name: "ssh_bastion_certificate_file", Type: cty.String, Required: false},
|
||||
"ssh_file_transfer_method": &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false},
|
||||
"ssh_proxy_host": &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false},
|
||||
"ssh_proxy_port": &hcldec.AttrSpec{Name: "ssh_proxy_port", Type: cty.Number, Required: false},
|
||||
"ssh_proxy_username": &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: cty.String, Required: false},
|
||||
"ssh_proxy_password": &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false},
|
||||
"ssh_keep_alive_interval": &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false},
|
||||
"ssh_read_write_timeout": &hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false},
|
||||
"ssh_remote_tunnels": &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false},
|
||||
"ssh_local_tunnels": &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false},
|
||||
"ssh_public_key": &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false},
|
||||
"ssh_private_key": &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false},
|
||||
"winrm_username": &hcldec.AttrSpec{Name: "winrm_username", Type: cty.String, Required: false},
|
||||
"winrm_password": &hcldec.AttrSpec{Name: "winrm_password", Type: cty.String, Required: false},
|
||||
"winrm_host": &hcldec.AttrSpec{Name: "winrm_host", Type: cty.String, Required: false},
|
||||
"winrm_no_proxy": &hcldec.AttrSpec{Name: "winrm_no_proxy", Type: cty.Bool, Required: false},
|
||||
"winrm_port": &hcldec.AttrSpec{Name: "winrm_port", Type: cty.Number, Required: false},
|
||||
"winrm_timeout": &hcldec.AttrSpec{Name: "winrm_timeout", Type: cty.String, Required: false},
|
||||
"winrm_use_ssl": &hcldec.AttrSpec{Name: "winrm_use_ssl", Type: cty.Bool, Required: false},
|
||||
"winrm_insecure": &hcldec.AttrSpec{Name: "winrm_insecure", Type: cty.Bool, Required: false},
|
||||
"winrm_use_ntlm": &hcldec.AttrSpec{Name: "winrm_use_ntlm", Type: cty.Bool, Required: false},
|
||||
"ssh_interface": &hcldec.AttrSpec{Name: "ssh_interface", Type: cty.String, Required: false},
|
||||
"pause_before_ssm": &hcldec.AttrSpec{Name: "pause_before_ssm", Type: cty.String, Required: false},
|
||||
"session_manager_port": &hcldec.AttrSpec{Name: "session_manager_port", Type: cty.Number, Required: false},
|
||||
"ami_name": &hcldec.AttrSpec{Name: "ami_name", Type: cty.String, Required: false},
|
||||
"ami_description": &hcldec.AttrSpec{Name: "ami_description", Type: cty.String, Required: false},
|
||||
"ami_virtualization_type": &hcldec.AttrSpec{Name: "ami_virtualization_type", Type: cty.String, Required: false},
|
||||
"ami_users": &hcldec.AttrSpec{Name: "ami_users", Type: cty.List(cty.String), Required: false},
|
||||
"ami_groups": &hcldec.AttrSpec{Name: "ami_groups", Type: cty.List(cty.String), Required: false},
|
||||
"ami_product_codes": &hcldec.AttrSpec{Name: "ami_product_codes", Type: cty.List(cty.String), Required: false},
|
||||
"ami_regions": &hcldec.AttrSpec{Name: "ami_regions", Type: cty.List(cty.String), Required: false},
|
||||
"skip_region_validation": &hcldec.AttrSpec{Name: "skip_region_validation", Type: cty.Bool, Required: false},
|
||||
"tags": &hcldec.AttrSpec{Name: "tags", Type: cty.Map(cty.String), Required: false},
|
||||
"tag": &hcldec.BlockListSpec{TypeName: "tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
|
||||
"ena_support": &hcldec.AttrSpec{Name: "ena_support", Type: cty.Bool, Required: false},
|
||||
"sriov_support": &hcldec.AttrSpec{Name: "sriov_support", Type: cty.Bool, Required: false},
|
||||
"force_deregister": &hcldec.AttrSpec{Name: "force_deregister", Type: cty.Bool, Required: false},
|
||||
"force_delete_snapshot": &hcldec.AttrSpec{Name: "force_delete_snapshot", Type: cty.Bool, Required: false},
|
||||
"encrypt_boot": &hcldec.AttrSpec{Name: "encrypt_boot", Type: cty.Bool, Required: false},
|
||||
"kms_key_id": &hcldec.AttrSpec{Name: "kms_key_id", Type: cty.String, Required: false},
|
||||
"region_kms_key_ids": &hcldec.AttrSpec{Name: "region_kms_key_ids", Type: cty.Map(cty.String), Required: false},
|
||||
"skip_save_build_region": &hcldec.AttrSpec{Name: "skip_save_build_region", Type: cty.Bool, Required: false},
|
||||
"snapshot_tags": &hcldec.AttrSpec{Name: "snapshot_tags", Type: cty.Map(cty.String), Required: false},
|
||||
"snapshot_tag": &hcldec.BlockListSpec{TypeName: "snapshot_tag", Nested: hcldec.ObjectSpec((*config.FlatKeyValue)(nil).HCL2Spec())},
|
||||
"snapshot_users": &hcldec.AttrSpec{Name: "snapshot_users", Type: cty.List(cty.String), Required: false},
|
||||
"snapshot_groups": &hcldec.AttrSpec{Name: "snapshot_groups", Type: cty.List(cty.String), Required: false},
|
||||
"ami_block_device_mappings": &hcldec.BlockListSpec{TypeName: "ami_block_device_mappings", Nested: hcldec.ObjectSpec((*common.FlatBlockDevice)(nil).HCL2Spec())},
|
||||
"launch_block_device_mappings": &hcldec.BlockListSpec{TypeName: "launch_block_device_mappings", Nested: hcldec.ObjectSpec((*FlatBlockDevice)(nil).HCL2Spec())},
|
||||
"ami_root_device": &hcldec.BlockSpec{TypeName: "ami_root_device", Nested: hcldec.ObjectSpec((*FlatRootBlockDevice)(nil).HCL2Spec())},
|
||||
"run_volume_tags": &hcldec.AttrSpec{Name: "run_volume_tags", Type: cty.Map(cty.String), Required: false},
|
||||
"run_volume_tag": &hcldec.BlockListSpec{TypeName: "run_volume_tag", Nested: hcldec.ObjectSpec((*config.FlatNameValue)(nil).HCL2Spec())},
|
||||
"ami_architecture": &hcldec.AttrSpec{Name: "ami_architecture", Type: cty.String, Required: false},
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// FlatRootBlockDevice is an auto-generated flat version of RootBlockDevice.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatRootBlockDevice struct {
|
||||
SourceDeviceName *string `mapstructure:"source_device_name" cty:"source_device_name" hcl:"source_device_name"`
|
||||
DeviceName *string `mapstructure:"device_name" required:"false" cty:"device_name" hcl:"device_name"`
|
||||
DeleteOnTermination *bool `mapstructure:"delete_on_termination" required:"false" cty:"delete_on_termination" hcl:"delete_on_termination"`
|
||||
IOPS *int64 `mapstructure:"iops" required:"false" cty:"iops" hcl:"iops"`
|
||||
VolumeType *string `mapstructure:"volume_type" required:"false" cty:"volume_type" hcl:"volume_type"`
|
||||
VolumeSize *int64 `mapstructure:"volume_size" required:"false" cty:"volume_size" hcl:"volume_size"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatRootBlockDevice.
|
||||
// FlatRootBlockDevice is an auto-generated flat version of RootBlockDevice.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*RootBlockDevice) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatRootBlockDevice)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a RootBlockDevice.
|
||||
// This spec is used by HCL to read the fields of RootBlockDevice.
|
||||
// The decoded values from this spec will then be applied to a FlatRootBlockDevice.
|
||||
func (*FlatRootBlockDevice) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"source_device_name": &hcldec.AttrSpec{Name: "source_device_name", Type: cty.String, Required: false},
|
||||
"device_name": &hcldec.AttrSpec{Name: "device_name", Type: cty.String, Required: false},
|
||||
"delete_on_termination": &hcldec.AttrSpec{Name: "delete_on_termination", Type: cty.Bool, Required: false},
|
||||
"iops": &hcldec.AttrSpec{Name: "iops", Type: cty.Number, Required: false},
|
||||
"volume_type": &hcldec.AttrSpec{Name: "volume_type", Type: cty.String, Required: false},
|
||||
"volume_size": &hcldec.AttrSpec{Name: "volume_size", Type: cty.Number, Required: false},
|
||||
}
|
||||
return s
|
||||
}
|
|
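The generated FlatConfig/HCL2Spec pair above is what lets HCL2 templates decode into this builder's configuration. A rough, standalone sketch of how such a spec is consumed (the two-attribute spec and the template snippet are invented for illustration; in the real builder the spec comes from (*FlatConfig)(nil).HCL2Spec()):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclparse"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte("ami_name = \"example\"\nami_architecture = \"arm64\"\n")
	f, diags := hclparse.NewParser().ParseHCL(src, "example.pkr.hcl")
	if diags.HasErrors() {
		panic(diags)
	}
	// A tiny hand-written spec standing in for the generated one.
	spec := hcldec.ObjectSpec{
		"ami_name":         &hcldec.AttrSpec{Name: "ami_name", Type: cty.String},
		"ami_architecture": &hcldec.AttrSpec{Name: "ami_architecture", Type: cty.String},
	}
	val, diags := hcldec.Decode(f.Body, spec, nil)
	if diags.HasErrors() {
		panic(diags)
	}
	fmt.Println(val.GetAttr("ami_architecture").AsString()) // "arm64"
}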
@@ -1,110 +0,0 @@
package ebssurrogate

import (
	"testing"

	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"

	"github.com/hashicorp/packer/builder/amazon/common"
)

func testConfig() map[string]interface{} {
	return map[string]interface{}{
		"access_key":    "foo",
		"secret_key":    "bar",
		"source_ami":    "foo",
		"instance_type": "foo",
		"region":        "us-east-1",
		"ssh_username":  "root",
	}
}

func TestBuilder_ImplementsBuilder(t *testing.T) {
	var raw interface{}
	raw = &Builder{}
	if _, ok := raw.(packersdk.Builder); !ok {
		t.Fatal("Builder should be a builder")
	}
}

func TestBuilder_Prepare_BadType(t *testing.T) {
	b := &Builder{}
	c := map[string]interface{}{
		"access_key": []string{},
	}

	_, warnings, err := b.Prepare(c)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("prepare should fail")
	}
}

func TestBuilderPrepare_InvalidKey(t *testing.T) {
	var b Builder
	config := testConfig()

	// Add a random key
	config["i_should_not_be_valid"] = true
	_, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}
}

func TestBuilderPrepare_ReturnGeneratedData(t *testing.T) {
	var b Builder
	// Basic configuration
	b.config.RootDevice = RootBlockDevice{
		SourceDeviceName: "device name",
		DeviceName:       "device name",
	}
	b.config.LaunchMappings = BlockDevices{
		BlockDevice{
			BlockDevice: common.BlockDevice{
				DeviceName: "device name",
			},
			OmitFromArtifact: false,
		},
	}
	b.config.AMIVirtType = "type"
	config := testConfig()
	config["ami_name"] = "name"

	generatedData, warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}
	if len(generatedData) == 0 {
		t.Fatalf("Generated data should not be empty")
	}
	if generatedData[0] != "SourceAMIName" {
		t.Fatalf("Generated data should contain SourceAMIName")
	}
	if generatedData[1] != "BuildRegion" {
		t.Fatalf("Generated data should contain BuildRegion")
	}
	if generatedData[2] != "SourceAMI" {
		t.Fatalf("Generated data should contain SourceAMI")
	}
	if generatedData[3] != "SourceAMICreationDate" {
		t.Fatalf("Generated data should contain SourceAMICreationDate")
	}
	if generatedData[4] != "SourceAMIOwner" {
		t.Fatalf("Generated data should contain SourceAMIOwner")
	}
	if generatedData[5] != "SourceAMIOwnerName" {
		t.Fatalf("Generated data should contain SourceAMIOwnerName")
	}
}
@@ -1,66 +0,0 @@
//go:generate struct-markdown

package ebssurrogate

import (
	"errors"

	"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
)

type RootBlockDevice struct {
	SourceDeviceName string `mapstructure:"source_device_name"`
	// The device name exposed to the instance (for
	// example, /dev/sdh or xvdh). Required for every device in the block
	// device mapping.
	DeviceName string `mapstructure:"device_name" required:"false"`
	// Indicates whether the EBS volume is
	// deleted on instance termination. Default false. NOTE: If this
	// value is not explicitly set to true and volumes are not cleaned up by
	// an alternative method, additional volumes will accumulate after every
	// build.
	DeleteOnTermination bool `mapstructure:"delete_on_termination" required:"false"`
	// The number of I/O operations per second (IOPS) that
	// the volume supports. See the documentation on
	// IOPs
	// for more information
	IOPS int64 `mapstructure:"iops" required:"false"`
	// The volume type. gp2 for General Purpose
	// (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for
	// Throughput Optimized HDD, sc1 for Cold HDD, and standard for
	// Magnetic volumes.
	VolumeType string `mapstructure:"volume_type" required:"false"`
	// The size of the volume, in GiB. Required if
	// not specifying a snapshot_id.
	VolumeSize int64 `mapstructure:"volume_size" required:"false"`
}

func (c *RootBlockDevice) Prepare(ctx *interpolate.Context) []error {
	var errs []error

	if c.SourceDeviceName == "" {
		errs = append(errs, errors.New("source_device_name for the root_device must be specified"))
	}

	if c.DeviceName == "" {
		errs = append(errs, errors.New("device_name for the root_device must be specified"))
	}

	if c.VolumeType == "gp2" && c.IOPS != 0 {
		errs = append(errs, errors.New("iops may not be specified for a gp2 volume"))
	}

	if c.IOPS < 0 {
		errs = append(errs, errors.New("iops must be greater than 0"))
	}

	if c.VolumeSize < 0 {
		errs = append(errs, errors.New("volume_size must be greater than 0"))
	}

	if len(errs) > 0 {
		return errs
	}

	return nil
}
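The validation rules in `Prepare` above are easy to exercise in isolation. The following sketch uses a simplified stand-in struct, not the removed package's type, to show which `ami_root_device` combinations the documented rules reject.

package main

import (
	"errors"
	"fmt"
)

// rootDevice is a simplified stand-in for the removed RootBlockDevice type.
type rootDevice struct {
	SourceDeviceName string
	DeviceName       string
	IOPS             int64
	VolumeType       string
	VolumeSize       int64
}

// validate mirrors the checks performed by Prepare above.
func (c rootDevice) validate() []error {
	var errs []error
	if c.SourceDeviceName == "" {
		errs = append(errs, errors.New("source_device_name for the root_device must be specified"))
	}
	if c.DeviceName == "" {
		errs = append(errs, errors.New("device_name for the root_device must be specified"))
	}
	if c.VolumeType == "gp2" && c.IOPS != 0 {
		errs = append(errs, errors.New("iops may not be specified for a gp2 volume"))
	}
	return errs
}

func main() {
	// A gp2 root volume with explicit IOPS trips the gp2/iops rule.
	bad := rootDevice{
		SourceDeviceName: "/dev/xvdf",
		DeviceName:       "/dev/xvda",
		VolumeType:       "gp2",
		IOPS:             3000,
		VolumeSize:       10,
	}
	for _, err := range bad.validate() {
		fmt.Println("config error:", err)
	}
}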
@@ -1,175 +0,0 @@
package ebssurrogate

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/hashicorp/packer-plugin-sdk/multistep"
	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
	"github.com/hashicorp/packer-plugin-sdk/random"
	confighelper "github.com/hashicorp/packer-plugin-sdk/template/config"
	awscommon "github.com/hashicorp/packer/builder/amazon/common"
)

// StepRegisterAMI creates the AMI.
type StepRegisterAMI struct {
	PollingConfig            *awscommon.AWSPollingConfig
	RootDevice               RootBlockDevice
	AMIDevices               []*ec2.BlockDeviceMapping
	LaunchDevices            []*ec2.BlockDeviceMapping
	EnableAMIENASupport      confighelper.Trilean
	EnableAMISriovNetSupport bool
	Architecture             string
	image                    *ec2.Image
	LaunchOmitMap            map[string]bool
	AMISkipBuildRegion       bool
}

func (s *StepRegisterAMI) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(*Config)
	ec2conn := state.Get("ec2").(*ec2.EC2)
	snapshotIds := state.Get("snapshot_ids").(map[string]string)
	ui := state.Get("ui").(packersdk.Ui)

	ui.Say("Registering the AMI...")

	blockDevices := s.combineDevices(snapshotIds)

	// Create the image
	amiName := config.AMIName
	state.Put("intermediary_image", false)
	if config.AMIEncryptBootVolume.True() || s.AMISkipBuildRegion {
		state.Put("intermediary_image", true)

		// From AWS SDK docs: You can encrypt a copy of an unencrypted snapshot,
		// but you cannot use it to create an unencrypted copy of an encrypted
		// snapshot. Your default CMK for EBS is used unless you specify a
		// non-default key using KmsKeyId.

		// If encrypt_boot is nil or true, we need to create a temporary image
		// so that in step_region_copy, we can copy it with the correct
		// encryption
		amiName = random.AlphaNum(7)
	}

	registerOpts := &ec2.RegisterImageInput{
		Name:                &amiName,
		Architecture:        aws.String(s.Architecture),
		RootDeviceName:      aws.String(s.RootDevice.DeviceName),
		VirtualizationType: aws.String(config.AMIVirtType),
		BlockDeviceMappings: blockDevices,
	}

	if s.EnableAMISriovNetSupport {
		// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
		// As of February 2017, this applies to C3, C4, D2, I2, R3, and M4 (excluding m4.16xlarge)
		registerOpts.SriovNetSupport = aws.String("simple")
	}
	if s.EnableAMIENASupport.True() {
		// Set EnaSupport to true
		// As of February 2017, this applies to C5, I3, P2, R4, X1, and m4.16xlarge
		registerOpts.EnaSupport = aws.Bool(true)
	}
	registerResp, err := ec2conn.RegisterImage(registerOpts)
	if err != nil {
		state.Put("error", fmt.Errorf("Error registering AMI: %s", err))
		ui.Error(state.Get("error").(error).Error())
		return multistep.ActionHalt
	}

	// Set the AMI ID in the state
	ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageId))
	amis := make(map[string]string)
	amis[*ec2conn.Config.Region] = *registerResp.ImageId
	state.Put("amis", amis)

	// Wait for the image to become ready
	ui.Say("Waiting for AMI to become ready...")
	if err := s.PollingConfig.WaitUntilAMIAvailable(ctx, ec2conn, *registerResp.ImageId); err != nil {
		err := fmt.Errorf("Error waiting for AMI: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	imagesResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{registerResp.ImageId}})
	if err != nil {
		err := fmt.Errorf("Error searching for AMI: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	s.image = imagesResp.Images[0]

	snapshots := make(map[string][]string)
	for _, blockDeviceMapping := range imagesResp.Images[0].BlockDeviceMappings {
		if blockDeviceMapping.Ebs != nil && blockDeviceMapping.Ebs.SnapshotId != nil {
			snapshots[*ec2conn.Config.Region] = append(snapshots[*ec2conn.Config.Region], *blockDeviceMapping.Ebs.SnapshotId)
		}
	}
	state.Put("snapshots", snapshots)

	return multistep.ActionContinue
}

func (s *StepRegisterAMI) Cleanup(state multistep.StateBag) {
	if s.image == nil {
		return
	}

	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)
	if !cancelled && !halted {
		return
	}

	ec2conn := state.Get("ec2").(*ec2.EC2)
	ui := state.Get("ui").(packersdk.Ui)

	ui.Say("Deregistering the AMI because of cancellation or error...")
	deregisterOpts := &ec2.DeregisterImageInput{ImageId: s.image.ImageId}
	if _, err := ec2conn.DeregisterImage(deregisterOpts); err != nil {
		ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", err))
		return
	}
}

func (s *StepRegisterAMI) combineDevices(snapshotIds map[string]string) []*ec2.BlockDeviceMapping {
	devices := map[string]*ec2.BlockDeviceMapping{}

	for _, device := range s.AMIDevices {
		devices[*device.DeviceName] = device
	}

	// Devices in launch_block_device_mappings override any with
	// the same name in ami_block_device_mappings, except for the
	// one designated as the root device in ami_root_device
	for _, device := range s.LaunchDevices {
		// Skip devices we've flagged for omission
		omit, ok := s.LaunchOmitMap[*device.DeviceName]
		if ok && omit {
			continue
		}
		snapshotId, ok := snapshotIds[*device.DeviceName]
		if ok {
			device.Ebs.SnapshotId = aws.String(snapshotId)
			// Block devices with snapshot inherit
			// encryption settings from the snapshot
			device.Ebs.Encrypted = nil
			device.Ebs.KmsKeyId = nil
		}
		if *device.DeviceName == s.RootDevice.SourceDeviceName {
			device.DeviceName = aws.String(s.RootDevice.DeviceName)
		}
		devices[*device.DeviceName] = device
	}

	blockDevices := []*ec2.BlockDeviceMapping{}
	for _, device := range devices {
		blockDevices = append(blockDevices, device)
	}
	return blockDevices
}
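The override/rename semantics described in the `combineDevices` comment can be seen with a small standalone merge over `ec2.BlockDeviceMapping` values. The helper below reimplements the merge for illustration only; it is not the builder's own function.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// merge illustrates the behaviour described above: launch devices override
// AMI devices of the same name, and the launch device whose name matches
// sourceName is renamed to rootName.
func merge(amiDevices, launchDevices []*ec2.BlockDeviceMapping, sourceName, rootName string) []*ec2.BlockDeviceMapping {
	devices := map[string]*ec2.BlockDeviceMapping{}
	for _, d := range amiDevices {
		devices[*d.DeviceName] = d
	}
	for _, d := range launchDevices {
		if *d.DeviceName == sourceName {
			d.DeviceName = aws.String(rootName)
		}
		devices[*d.DeviceName] = d
	}
	out := []*ec2.BlockDeviceMapping{}
	for _, d := range devices {
		out = append(out, d)
	}
	return out
}

func main() {
	ami := []*ec2.BlockDeviceMapping{
		{DeviceName: aws.String("/dev/xvdg"), Ebs: &ec2.EbsBlockDevice{}},
	}
	launch := []*ec2.BlockDeviceMapping{
		{DeviceName: aws.String("/dev/xvdf"), Ebs: &ec2.EbsBlockDevice{}}, // becomes /dev/xvda
	}
	for _, d := range merge(ami, launch, "/dev/xvdf", "/dev/xvda") {
		fmt.Println(*d.DeviceName)
	}
}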
@@ -1,249 +0,0 @@
package ebssurrogate

import (
	"reflect"
	"sort"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/hashicorp/packer/builder/amazon/common"
)

const sourceDeviceName = "/dev/xvdf"
const rootDeviceName = "/dev/xvda"

func newStepRegisterAMI(amiDevices, launchDevices []*ec2.BlockDeviceMapping) *StepRegisterAMI {
	return &StepRegisterAMI{
		RootDevice: RootBlockDevice{
			SourceDeviceName:    sourceDeviceName,
			DeviceName:          rootDeviceName,
			DeleteOnTermination: true,
			VolumeType:          "ebs",
			VolumeSize:          10,
		},
		AMIDevices:    amiDevices,
		LaunchDevices: launchDevices,
		PollingConfig: new(common.AWSPollingConfig),
	}
}

func sorted(devices []*ec2.BlockDeviceMapping) []*ec2.BlockDeviceMapping {
	sort.SliceStable(devices, func(i, j int) bool {
		return *devices[i].DeviceName < *devices[j].DeviceName
	})
	return devices
}

func TestStepRegisterAmi_combineDevices(t *testing.T) {
	cases := []struct {
		snapshotIds   map[string]string
		amiDevices    []*ec2.BlockDeviceMapping
		launchDevices []*ec2.BlockDeviceMapping
		allDevices    []*ec2.BlockDeviceMapping
	}{
		{
			snapshotIds:   map[string]string{},
			amiDevices:    []*ec2.BlockDeviceMapping{},
			launchDevices: []*ec2.BlockDeviceMapping{},
			allDevices:    []*ec2.BlockDeviceMapping{},
		},
		{
			snapshotIds: map[string]string{},
			amiDevices:  []*ec2.BlockDeviceMapping{},
			launchDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs:        &ec2.EbsBlockDevice{},
					DeviceName: aws.String(sourceDeviceName),
				},
			},
			allDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs:        &ec2.EbsBlockDevice{},
					DeviceName: aws.String(rootDeviceName),
				},
			},
		},
		{
			// Minimal single device
			snapshotIds: map[string]string{
				sourceDeviceName: "snap-0123456789abcdef1",
			},
			amiDevices: []*ec2.BlockDeviceMapping{},
			launchDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs:        &ec2.EbsBlockDevice{},
					DeviceName: aws.String(sourceDeviceName),
				},
			},
			allDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs: &ec2.EbsBlockDevice{
						SnapshotId: aws.String("snap-0123456789abcdef1"),
					},
					DeviceName: aws.String(rootDeviceName),
				},
			},
		},
		{
			// Single launch device with AMI device
			snapshotIds: map[string]string{
				sourceDeviceName: "snap-0123456789abcdef1",
			},
			amiDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs:        &ec2.EbsBlockDevice{},
					DeviceName: aws.String("/dev/xvdg"),
				},
			},
			launchDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs:        &ec2.EbsBlockDevice{},
					DeviceName: aws.String(sourceDeviceName),
				},
			},
			allDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs: &ec2.EbsBlockDevice{
						SnapshotId: aws.String("snap-0123456789abcdef1"),
					},
					DeviceName: aws.String(rootDeviceName),
				},
				{
					Ebs:        &ec2.EbsBlockDevice{},
					DeviceName: aws.String("/dev/xvdg"),
				},
			},
		},
		{
			// Multiple launch devices
			snapshotIds: map[string]string{
				sourceDeviceName: "snap-0123456789abcdef1",
				"/dev/xvdg":      "snap-0123456789abcdef2",
			},
			amiDevices: []*ec2.BlockDeviceMapping{},
			launchDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs:        &ec2.EbsBlockDevice{},
					DeviceName: aws.String(sourceDeviceName),
				},
				{
					Ebs:        &ec2.EbsBlockDevice{},
					DeviceName: aws.String("/dev/xvdg"),
				},
			},
			allDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs: &ec2.EbsBlockDevice{
						SnapshotId: aws.String("snap-0123456789abcdef1"),
					},
					DeviceName: aws.String(rootDeviceName),
				},
				{
					Ebs: &ec2.EbsBlockDevice{
						SnapshotId: aws.String("snap-0123456789abcdef2"),
					},
					DeviceName: aws.String("/dev/xvdg"),
				},
			},
		},
		{
			// Multiple launch devices with encryption
			snapshotIds: map[string]string{
				sourceDeviceName: "snap-0123456789abcdef1",
				"/dev/xvdg":      "snap-0123456789abcdef2",
			},
			amiDevices: []*ec2.BlockDeviceMapping{},
			launchDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs: &ec2.EbsBlockDevice{
						Encrypted: aws.Bool(true),
					},
					DeviceName: aws.String(sourceDeviceName),
				},
				{
					Ebs: &ec2.EbsBlockDevice{
						Encrypted: aws.Bool(true),
					},
					DeviceName: aws.String("/dev/xvdg"),
				},
			},
			allDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs: &ec2.EbsBlockDevice{
						SnapshotId: aws.String("snap-0123456789abcdef1"),
						// Encrypted: true stripped from snapshotted devices
					},
					DeviceName: aws.String(rootDeviceName),
				},
				{
					Ebs: &ec2.EbsBlockDevice{
						SnapshotId: aws.String("snap-0123456789abcdef2"),
					},
					DeviceName: aws.String("/dev/xvdg"),
				},
			},
		},
		{
			// Multiple launch devices and AMI devices with encryption
			snapshotIds: map[string]string{
				sourceDeviceName: "snap-0123456789abcdef1",
				"/dev/xvdg":      "snap-0123456789abcdef2",
			},
			amiDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs: &ec2.EbsBlockDevice{
						Encrypted: aws.Bool(true),
						KmsKeyId:  aws.String("keyId"),
					},
					// Source device name can be used in AMI devices
					// since launch device of same name gets renamed
					// to root device name
					DeviceName: aws.String(sourceDeviceName),
				},
			},
			launchDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs: &ec2.EbsBlockDevice{
						Encrypted: aws.Bool(true),
					},
					DeviceName: aws.String(sourceDeviceName),
				},
				{
					Ebs: &ec2.EbsBlockDevice{
						Encrypted: aws.Bool(true),
					},
					DeviceName: aws.String("/dev/xvdg"),
				},
			},
			allDevices: []*ec2.BlockDeviceMapping{
				{
					Ebs: &ec2.EbsBlockDevice{
						Encrypted: aws.Bool(true),
						KmsKeyId:  aws.String("keyId"),
					},
					DeviceName: aws.String(sourceDeviceName),
				},
				{
					Ebs: &ec2.EbsBlockDevice{
						SnapshotId: aws.String("snap-0123456789abcdef1"),
					},
					DeviceName: aws.String(rootDeviceName),
				},
				{
					Ebs: &ec2.EbsBlockDevice{
						SnapshotId: aws.String("snap-0123456789abcdef2"),
					},
					DeviceName: aws.String("/dev/xvdg"),
				},
			},
		},
	}
	for _, tc := range cases {
		stepRegisterAmi := newStepRegisterAMI(tc.amiDevices, tc.launchDevices)
		allDevices := stepRegisterAmi.combineDevices(tc.snapshotIds)
		if !reflect.DeepEqual(sorted(allDevices), sorted(tc.allDevices)) {
			t.Fatalf("Unexpected output from combineDevices")
		}
	}
}
@@ -1,146 +0,0 @@
package ebssurrogate

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/packer-plugin-sdk/multistep"
	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
	"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
	awscommon "github.com/hashicorp/packer/builder/amazon/common"
)

// StepSnapshotVolumes creates snapshots of the created volumes.
//
// Produces:
//   snapshot_ids map[string]string - IDs of the created snapshots
type StepSnapshotVolumes struct {
	PollingConfig   *awscommon.AWSPollingConfig
	LaunchDevices   []*ec2.BlockDeviceMapping
	snapshotIds     map[string]string
	snapshotMutex   sync.Mutex
	SnapshotOmitMap map[string]bool
	SnapshotTags    map[string]string
	Ctx             interpolate.Context
}

func (s *StepSnapshotVolumes) snapshotVolume(ctx context.Context, deviceName string, state multistep.StateBag) error {
	ec2conn := state.Get("ec2").(*ec2.EC2)
	ui := state.Get("ui").(packersdk.Ui)
	instance := state.Get("instance").(*ec2.Instance)

	var volumeId string
	for _, volume := range instance.BlockDeviceMappings {
		if *volume.DeviceName == deviceName {
			volumeId = *volume.Ebs.VolumeId
		}
	}
	if volumeId == "" {
		return fmt.Errorf("Volume ID for device %s not found", deviceName)
	}

	ui.Say("Creating snapshot tags")
	snapshotTags, err := awscommon.TagMap(s.SnapshotTags).EC2Tags(s.Ctx, *ec2conn.Config.Region, state)
	if err != nil {
		state.Put("error", err)
		ui.Error(err.Error())
		return err
	}
	snapshotTags.Report(ui)

	ui.Say(fmt.Sprintf("Creating snapshot of EBS Volume %s...", volumeId))
	description := fmt.Sprintf("Packer: %s", time.Now().String())

	// Collect tags for tagging on resource creation
	var tagSpecs []*ec2.TagSpecification

	if len(snapshotTags) > 0 {
		snapTags := &ec2.TagSpecification{
			ResourceType: aws.String("snapshot"),
			Tags:         snapshotTags,
		}

		tagSpecs = append(tagSpecs, snapTags)
	}

	createSnapResp, err := ec2conn.CreateSnapshot(&ec2.CreateSnapshotInput{
		VolumeId:          &volumeId,
		Description:       &description,
		TagSpecifications: tagSpecs,
	})
	if err != nil {
		return err
	}

	// Set the snapshot ID so we can delete it later
	s.snapshotMutex.Lock()
	s.snapshotIds[deviceName] = *createSnapResp.SnapshotId
	s.snapshotMutex.Unlock()

	// Wait for snapshot to be created
	err = s.PollingConfig.WaitUntilSnapshotDone(ctx, ec2conn, *createSnapResp.SnapshotId)
	return err
}

func (s *StepSnapshotVolumes) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packersdk.Ui)

	s.snapshotIds = map[string]string{}

	var wg sync.WaitGroup
	var errs *multierror.Error
	for _, device := range s.LaunchDevices {
		// Skip devices we've flagged for omission
		omit, ok := s.SnapshotOmitMap[*device.DeviceName]
		if ok && omit {
			continue
		}

		wg.Add(1)
		go func(device *ec2.BlockDeviceMapping) {
			defer wg.Done()
			if err := s.snapshotVolume(ctx, *device.DeviceName, state); err != nil {
				errs = multierror.Append(errs, err)
			}
		}(device)
	}

	wg.Wait()

	if errs != nil {
		state.Put("error", errs)
		ui.Error(errs.Error())
		return multistep.ActionHalt
	}

	state.Put("snapshot_ids", s.snapshotIds)
	return multistep.ActionContinue
}

func (s *StepSnapshotVolumes) Cleanup(state multistep.StateBag) {
	if len(s.snapshotIds) == 0 {
		return
	}

	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)

	if cancelled || halted {
		ec2conn := state.Get("ec2").(*ec2.EC2)
		ui := state.Get("ui").(packersdk.Ui)
		ui.Say("Removing snapshots since we cancelled or halted...")
		s.snapshotMutex.Lock()
		for _, snapshotId := range s.snapshotIds {
			_, err := ec2conn.DeleteSnapshot(&ec2.DeleteSnapshotInput{SnapshotId: &snapshotId})
			if err != nil {
				ui.Error(fmt.Sprintf("Error: %s", err))
			}
		}
		s.snapshotMutex.Unlock()
	}
}
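The snapshotting step above fans one goroutine out per launch device and collects any failures before putting `snapshot_ids` into the state bag. The general pattern, with mutex-guarded error and result collection, looks roughly like this standalone sketch (simulated work, no AWS calls, device names are placeholders):

package main

import (
	"fmt"
	"sync"
)

func main() {
	devices := []string{"/dev/xvdf", "/dev/xvdg"}

	var (
		wg          sync.WaitGroup
		mu          sync.Mutex
		errs        []error
		snapshotIds = map[string]string{}
	)

	for i, device := range devices {
		wg.Add(1)
		go func(i int, device string) {
			defer wg.Done()

			// Stand-in for CreateSnapshot + WaitUntilSnapshotDone.
			if device == "/dev/xvdg" {
				mu.Lock()
				errs = append(errs, fmt.Errorf("snapshot of %s failed", device))
				mu.Unlock()
				return
			}

			mu.Lock()
			snapshotIds[device] = fmt.Sprintf("snap-%06d", i)
			mu.Unlock()
		}(i, device)
	}
	wg.Wait()

	if len(errs) > 0 {
		fmt.Println("errors:", errs)
		return
	}
	fmt.Println(snapshotIds)
}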
@@ -1,94 +0,0 @@
package ebsvolume

import (
	"fmt"
	"log"
	"sort"
	"strings"

	"github.com/aws/aws-sdk-go/service/ec2"
	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)

// map of region to list of volume IDs
type EbsVolumes map[string][]string

// Artifact is an artifact implementation that contains the built EBS volumes.
type Artifact struct {
	// A map of regions to EBS Volume IDs.
	Volumes EbsVolumes

	// BuilderIdValue is the unique ID for the builder that created this artifact
	BuilderIdValue string

	// StateData should store data such as GeneratedData
	// to be shared with post-processors
	StateData map[string]interface{}

	// EC2 connection for performing API operations.
	Conn *ec2.EC2
}

func (a *Artifact) BuilderId() string {
	return a.BuilderIdValue
}

func (*Artifact) Files() []string {
	// We have no files
	return nil
}

// idList returns a sorted list of region:ID pairs
func (a *Artifact) idList() []string {
	parts := make([]string, 0, len(a.Volumes))
	for region, volumeIDs := range a.Volumes {
		for _, volumeID := range volumeIDs {
			parts = append(parts, fmt.Sprintf("%s:%s", region, volumeID))
		}
	}

	sort.Strings(parts)
	return parts
}

func (a *Artifact) Id() string {
	return strings.Join(a.idList(), ",")
}

func (a *Artifact) String() string {
	return fmt.Sprintf("EBS Volumes were created:\n\n%s", strings.Join(a.idList(), "\n"))
}

func (a *Artifact) State(name string) interface{} {
	if _, ok := a.StateData[name]; ok {
		return a.StateData[name]
	}
	return nil
}

func (a *Artifact) Destroy() error {
	errors := make([]error, 0)

	for region, volumeIDs := range a.Volumes {
		for _, volumeID := range volumeIDs {
			log.Printf("Deregistering Volume ID (%s) from region (%s)", volumeID, region)

			input := &ec2.DeleteVolumeInput{
				VolumeId: &volumeID,
			}
			if _, err := a.Conn.DeleteVolume(input); err != nil {
				errors = append(errors, err)
			}
		}
	}

	if len(errors) > 0 {
		if len(errors) == 1 {
			return errors[0]
		} else {
			return &packersdk.MultiError{Errors: errors}
		}
	}

	return nil
}
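The artifact's `Id()` is just the sorted `region:volume-id` pairs joined with commas, which a few standalone lines reproduce (illustration only, not the package's code; the volume IDs are made up):

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Same shape as EbsVolumes: region -> volume IDs.
	volumes := map[string][]string{
		"us-east-1": {"vol-0aaa", "vol-0bbb"},
		"us-west-2": {"vol-0ccc"},
	}

	parts := make([]string, 0, len(volumes))
	for region, ids := range volumes {
		for _, id := range ids {
			parts = append(parts, fmt.Sprintf("%s:%s", region, id))
		}
	}
	sort.Strings(parts)

	// us-east-1:vol-0aaa,us-east-1:vol-0bbb,us-west-2:vol-0ccc
	fmt.Println(strings.Join(parts, ","))
}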
@@ -1,29 +0,0 @@
package ebsvolume

import "testing"

func TestArtifactState(t *testing.T) {
	expectedData := "this is the data"
	artifact := &Artifact{
		StateData: map[string]interface{}{"state_data": expectedData},
	}

	// Valid state
	result := artifact.State("state_data")
	if result != expectedData {
		t.Fatalf("Bad: State data was %s instead of %s", result, expectedData)
	}

	// Invalid state
	result = artifact.State("invalid_key")
	if result != nil {
		t.Fatalf("Bad: State should be nil for invalid state data name")
	}

	// Nil StateData should not fail and should return nil
	artifact = &Artifact{}
	result = artifact.State("key")
	if result != nil {
		t.Fatalf("Bad: State should be nil for nil StateData")
	}
}
@@ -1,48 +0,0 @@
//go:generate struct-markdown

package ebsvolume

import (
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/hashicorp/packer-plugin-sdk/template/config"
	"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
	awscommon "github.com/hashicorp/packer/builder/amazon/common"
)

type BlockDevice struct {
	awscommon.BlockDevice `mapstructure:",squash"`
	// Key/value pair tags to apply to the volume. These are retained after the builder
	// completes. This is a [template engine](/docs/templates/legacy_json_templates/engine), see
	// [Build template data](#build-template-data) for more information.
	Tags map[string]string `mapstructure:"tags" required:"false"`
	// Same as [`tags`](#tags) but defined as a singular repeatable block
	// containing a `key` and a `value` field. In HCL2 mode the
	// [`dynamic_block`](/docs/templates/hcl_templates/expressions#dynamic-blocks)
	// will allow you to create those programmatically.
	Tag config.KeyValues `mapstructure:"tag" required:"false"`
}

type BlockDevices []BlockDevice

func (bds BlockDevices) BuildEC2BlockDeviceMappings() []*ec2.BlockDeviceMapping {
	var blockDevices []*ec2.BlockDeviceMapping

	for _, blockDevice := range bds {
		blockDevices = append(blockDevices, blockDevice.BuildEC2BlockDeviceMapping())
	}
	return blockDevices
}

func (bds BlockDevices) Prepare(ctx *interpolate.Context) (errs []error) {
	for _, block := range bds {
		errs = append(errs, block.Tag.CopyOn(&block.Tags)...)

		if err := block.Prepare(ctx); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}
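For reference on the `tag`/`tags` duality handled by `Prepare` above: `config.KeyValues.CopyOn` folds the repeatable-block form into the plain map. A rough sketch, assuming the `KeyValue{Key, Value}` shape of the SDK's `config` package as it is used above:

package main

import (
	"fmt"

	"github.com/hashicorp/packer-plugin-sdk/template/config"
)

func main() {
	// Tags given as the plural map form (`tags`).
	tags := map[string]string{"Name": "build-volume"}

	// The same idea expressed as repeatable `tag` blocks.
	tagBlocks := config.KeyValues{
		{Key: "Team", Value: "infra"},
	}

	// CopyOn merges the block form into the map, as Prepare does above.
	if errs := tagBlocks.CopyOn(&tags); len(errs) > 0 {
		fmt.Println("errors:", errs)
		return
	}
	fmt.Println(tags) // map[Name:build-volume Team:infra]
}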