Merge branch 'master' into website-docs-windows-restart

This commit is contained in: commit 1cca06e5bf

@@ -1,8 +1,12 @@
_Please read these instructions before submitting_

**DELETE THIS TEMPLATE BEFORE SUBMITTING**

_Only use Github issues to report bugs or feature requests, see
https://www.packer.io/community.html_

For example, _Timeouts waiting for SSH/WinRM_ are generally not bugs within packer and are better addressed by the mailing list. Ask on the mailing list if you are unsure.

If you are planning to open a pull-request just open the pull-request instead of making an issue first.

FOR FEATURES:

@@ -19,3 +23,4 @@ Describe the problem and include the following information:

Please paste this in a gist https://gist.github.com
- The _simplest example template and scripts_ needed to reproduce the bug.
Include these in your gist https://gist.github.com

@@ -4,8 +4,8 @@ Describe the change you are making here!

Please include tests. Check out these examples:

- https://github.com/hashicorp/packer/blob/master/builder/virtualbox/common/ssh_config_test.go#L19-L37
- https://github.com/hashicorp/packer/blob/master/post-processor/compress/post-processor_test.go#L153-L182

If your PR resolves any open issue(s), please indicate them like this so they will be closed when your PR is merged:

@@ -10,6 +10,7 @@
test/.env
*~
*.received.*
*.swp

website/.bundle
website/vendor

@@ -21,3 +22,5 @@ packer-test*.log

.idea/
*.iml
Thumbs.db
/packer.exe

@@ -6,8 +6,9 @@ sudo: false
language: go

go:
  - 1.7.x
  - 1.8.x
  - 1.x

install:
  - make deps

@@ -21,5 +22,3 @@ branches:

matrix:
  fast_finish: true

CHANGELOG.md

@@ -1,33 +1,557 @@

## UNRELEASED


## 1.2.0 (February 9, 2018)

### BACKWARDS INCOMPATIBILITIES:

* 3rd party plugins: We have moved internal dependencies, meaning your 3rd party plugins will no longer compile (however existing builds will still work fine); the work to fix them is minimal and documented in GH-5810. [GH-5810]
* builder/amazon: The `ssh_private_ip` option has been removed. Instead, please use `"ssh_interface": "private"`. A fixer has been written for this, which can be invoked with `packer fix` (see the sketch after this list). [GH-5876]
* builder/openstack: Extension support has been removed. To use OpenStack builder with the OpenStack Newton (Oct 2016) or earlier, we recommend you use Packer v1.1.2 or earlier version.
* core: Affects Windows guests: User variables containing Powershell special characters no longer need to be escaped. [GH-5376]
* provisioner/file: We've made destination semantics more consistent across the various communicators. In general, if the destination is a directory, files will be uploaded into the directory instead of failing. This mirrors the behavior of `rsync`. There's a chance some users might be depending on the previous buggy behavior, so it's worth ensuring your configuration is correct. [GH-5426]
* provisioner/powershell: Regression from v1.1.1 forcing extra escaping of environment variables in the non-elevated provisioner has been fixed. [GH-5515] [GH-5872]
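
The `ssh_private_ip` migration above can be automated with the built-in fixer. A minimal sketch, assuming a template named `template.json` (the output file name is arbitrary):

```
# `packer fix` reads a template, rewrites deprecated keys such as
# ssh_private_ip to their replacements, and prints the result to stdout.
packer fix template.json > template_fixed.json

# Review the rewritten template before replacing the original.
diff template.json template_fixed.json
```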

### IMPROVEMENTS:

* **New builder:** `ncloud` for building server images using the NAVER Cloud Platform. [GH-5791]
* **New builder:** `oci-classic` for building new custom images for use with Oracle Cloud Infrastructure Classic Compute. [GH-5819]
* **New builder:** `scaleway` - The Scaleway Packer builder is able to create new images for use with Scaleway BareMetal and Virtual cloud server. [GH-4770]
* builder/amazon: Add `kms_key_id` option to block device mappings. [GH-5774]
* builder/amazon: Add `skip_metadata_api_check` option to skip consulting the amazon metadata service. [GH-5764]
* builder/amazon: Add Paris region (eu-west-3) [GH-5718]
* builder/amazon: Give better error messages if we have trouble during authentication. [GH-5764]
* builder/amazon: Remove Session Token (STS) from being shown in the log. [GH-5665]
* builder/amazon: Replace `InstanceStatusOK` check with `InstanceReady`. This reduces build times universally while still working for all instance types. [GH-5678]
* builder/amazon: Report which authentication provider we're using. [GH-5764]
* builder/amazon: Timeout early if metadata service can't be reached. [GH-5764]
* builder/amazon: Warn during prepare if we didn't get both an access key and a secret key when we were expecting one. [GH-5762]
* builder/azure: Add validation for incorrect VHD URLs [GH-5695]
* builder/docker: Remove credentials from being shown in the log. [GH-5666]
* builder/google: Support specifying licenses for images. [GH-5842]
* builder/hyper-v: Allow MAC address specification. [GH-5709]
* builder/hyper-v: New option to use differential disks and Inline disk creation to improve build time and reduce disk usage [GH-5631]
* builder/qemu: Add Intel HAXM support to QEMU builder [GH-5738]
* builder/triton: Triton RBAC is now supported. [GH-5741]
* builder/triton: Updated triton-go dependencies, allowing better error handling. [GH-5795]
* builder/vmware-iso: Add support for cdrom and disk adapter types. [GH-3417]
* builder/vmware-iso: Add support for setting network type and network adapter type. [GH-3417]
* builder/vmware-iso: Add support for usb/serial/parallel ports. [GH-3417]
* builder/vmware-iso: Add support for virtual soundcards. [GH-3417]
* builder/vmware-iso: More reliably retrieve the guest networking configuration. [GH-3417]
* builder/vmware: Add support for "super" key in `boot_command`. [GH-5681]
* communicator/ssh: Add session-level keep-alives [GH-5830]
* communicator/ssh: Detect dead connections. [GH-4709]
* core: Gracefully clean up resources on SIGTERM. [GH-5318]
* core: Improved error logging in floppy file handling. [GH-5802]
* core: Improved support for downloading and validating a uri containing a Windows UNC path or a relative file:// scheme. [GH-2906]
* post-processor/amazon-import: Allow user to specify role name in amazon-import [GH-5817]
* post-processor/docker: Remove credentials from being shown in the log. [GH-5666]
* post-processor/google-export: Synchronize credential semantics with the Google builder. [GH-4148]
* post-processor/vagrant: Add vagrant post-processor support for Google [GH-5732]
* post-processor/vsphere-template: Now accepts artifacts from the vSphere post-processor. [GH-5380]
* provisioner/amazon: Use Amazon SDK's InstanceRunning waiter instead of InstanceStatusOK waiter [GH-5773]
* provisioner/ansible: Improve user retrieval. [GH-5758]
* provisioner/chef: Add support for 'trusted_certs_dir' chef-client configuration option [GH-5790]
* provisioner/chef: Added Policyfile support to chef-client provisioner. [GH-5831]

### BUG FIXES:

* builder/alicloud-ecs: Attach keypair before starting instance in alicloud builder [GH-5739]
* builder/amazon: Fix tagging support when building in us-gov/china. [GH-5841]
* builder/amazon: NewSession now inherits MaxRetries and other settings. [GH-5719]
* builder/virtualbox: Fix interpolation ordering so that edge cases around guest_additions_url are handled correctly [GH-5757]
* builder/virtualbox: Fix regression affecting users running Packer on a Windows host that kept Packer from finding Virtualbox guest additions if Packer ran on a different drive from the one where the guest additions were stored. [GH-5761]
* builder/vmware: Fix case where artifacts might not be cleaned up correctly. [GH-5835]
* builder/vmware: Fixed file handle leak that may have caused race conditions in vmware builder [GH-5767]
* communicator/ssh: Add deadline to SSH connection to prevent Packer hangs after script provisioner reboots vm [GH-4684]
* communicator/winrm: Fix issue copying empty directories. [GH-5763]
* provisioner/ansible-local: Fix support for `--extra-vars` in `extra_arguments`. [GH-5703]
* provisioner/ansible-remote: Fixes an error where Packer's private key can be overridden by inherited `ansible_ssh_private_key` options. [GH-5869]
* provisioner/ansible: The "default extra variables" feature added in Packer v1.0.1 caused the ansible-local provisioner to fail when an --extra-vars argument was specified in the extra_arguments configuration option; this has been fixed. [GH-5335]
* provisioner/powershell: Regression from v1.1.1 forcing extra escaping of environment variables in the non-elevated provisioner has been fixed. [GH-5515] [GH-5872]


## 1.1.3 (December 8, 2017)

### IMPROVEMENTS:

* builder/alicloud-ecs: Add security token support and set TLS handshake timeout through environment variable. [GH-5641]
* builder/amazon: Add a new parameter `ssh_interface`. Valid values include `public_ip`, `private_ip`, `public_dns` or `private_dns`. [GH-5630]
* builder/azure: Add sanity checks for resource group names [GH-5599]
* builder/azure: Allow users to specify an existing resource group to use, instead of creating a new one for every run. [GH-5548]
* builder/hyper-v: Add support for differencing disk. [GH-5458]
* builder/vmware-iso: Improve logging of network errors. [GH-5456]
* core: Add new `packer_version` template engine. [GH-5619]
* core: Improve logic checking for downloaded ISOs in case where user has provided more than one URL in `iso_urls` [GH-5632]
* provisioner/ansible-local: Add ability to clean staging directory. [GH-5618]

### BUG FIXES:

* builder/amazon: Allow `region` to appear in `ami_regions`. [GH-5660]
* builder/amazon: `C5` instance types now build more reliably. [GH-5678]
* builder/amazon: Correctly set AWS region if given in template along with a profile. [GH-5676]
* builder/amazon: Prevent `sriov_support` and `ena_support` from being used with spot instances, which would cause a build failure. [GH-5679]
* builder/hyper-v: Fix interpolation context for user variables in `boot_command` [GH-5547]
* builder/qemu: Set default disk size to 40960 MB to prevent boot failures. [GH-5588]
* builder/vmware: Correctly detect Windows boot on vmware workstation. [GH-5672]
* core: Fix windows path regression when downloading ISOs. [GH-5591]
* provisioner/chef: Fix chef installs on Windows. [GH-5649]


## 1.1.2 (November 15, 2017)

### IMPROVEMENTS:

* builder/amazon: Correctly deregister AMIs when `force_deregister` is set. [GH-5525]
* builder/digitalocean: Add `ipv6` option to enable on droplet. [GH-5534]
* builder/docker: Add `aws_profile` option to control the aws profile for ECR. [GH-5470]
* builder/google: Add `clean_image_name` template engine. [GH-5463]
* builder/google: Allow selecting container optimized images. [GH-5576]
* builder/google: Interpolate network and subnetwork values, rather than relying on an API call that packer may not have permission for. [GH-5343]
* builder/hyper-v: Add `disk_additional_size` option to allow for up to 64 additional disks. [GH-5491]
* builder/hyper-v: Also disable automatic checkpoints for gen 2 VMs. [GH-5517]
* builder/lxc: Add new `publish_properties` field to set image properties. [GH-5475]
* builder/lxc: Add three new configuration option categories to LXC builder: `create_options`, `start_options`, and `attach_options`. [GH-5530]
* builder/triton: Add `source_machine_image_filter` option to select an image ID based on a variety of parameters. [GH-5538]
* builder/virtualbox-ovf: Error during prepare if source path doesn't exist. [GH-5573]
* builder/virtualbox-ovf: Retry while removing VM to solve for transient errors. [GH-5512]
* communicator/ssh: Add socks 5 proxy support. [GH-5439]
* core/iso_config: Support relative paths in checksum file. [GH-5578]
* core: Rewrite vagrantfile code to make cross-platform development easier. [GH-5539]
* post-processor/docker-push: Add `aws_profile` option to control the aws profile for ECR. [GH-5470]
* post-processor/vsphere: Properly capture `ovftool` output. [GH-5499]

### BUG FIXES:

* builder/amazon: Add a delay option to security group waiter. [GH-5536]
* builder/amazon: Fix regressions relating to spot instances and EBS volumes. [GH-5495]
* builder/amazon: Set region from profile, if profile is set, rather than being overridden by metadata. [GH-5562]
* builder/docker: Remove `login_email`, which no longer exists in the docker client. [GH-5511]
* builder/hyperv: Fix admin check that was causing powershell failures. [GH-5510]
* builder/oracle: Defaulting of OCI builder region will first check the packer template and the OCI config file. [GH-5407]
* builder/triton: Fix a bug where partially created images can be reported as complete. [GH-5566]
* post-processor/vsphere: Use the vm disk path information to re-create the vmx datastore path. [GH-5567]
* provisioner/windows-restart: Wait for restart no longer endlessly loops if user specifies a custom restart check command. [GH-5563]


## 1.1.1 (October 13, 2017)

### IMPROVEMENTS:

* **New builder:** `hyperv-vmcx` for building images from existing VMs. [GH-4944] [GH-5444]
* builder/amazon-instance: Add `.Token` as a variable in the `BundleUploadCommand` template. [GH-5288]
* builder/amazon: Add `temporary_security_group_source_cidr` option to control ingress to source instances. [GH-5384]
* builder/amazon: Output AMI Name during prevalidation. [GH-5389]
* builder/amazon: Support template functions in tag keys. [GH-5381]
* builder/amazon: Tag volumes on creation instead of as a separate step. [GH-5417]
* builder/docker: Add option to set `--user` flag when running `exec`. [GH-5406]
* builder/docker: Set file owner to container user when uploading. Can be disabled by setting `fix_upload_owner` to `false`. [GH-5422]
* builder/googlecompute: Support setting labels on the resulting image. [GH-5356]
* builder/hyper-v: Add `vhd_temp_path` option to control where the VHD resides while it's being provisioned. [GH-5206]
* builder/hyper-v: Allow vhd or vhdx source images instead of just ISO. [GH-4944] [GH-5444]
* builder/hyper-v: Disable automatic checkpoints. [GH-5374]
* builder/virtualbox-ovf: Add `keep_registered` option. [GH-5336]
* builder/vmware: Add `disable_vnc` option to prevent VNC connections from being made. [GH-5436]
* core: Releases will now be built for ppc64le.
* post-processor/vagrant: When building from a builder/hyper-v artifact, link instead of copy when available. [GH-5207]


### BUG FIXES:

* builder/cloudstack: Fix panic if build is aborted. [GH-5388]
* builder/hyper-v: Respect `enable_dynamic_memory` flag. [GH-5363]
* builder/puppet-masterless: Make sure directories created with sudo are writable by the packer user. [GH-5351]
* provisioner/chef-solo: Fix issue installing chef-solo on Windows. [GH-5357]
* provisioner/powershell: Fix issue setting environment variables by writing them to a file, instead of the command line. [GH-5345]
* provisioner/powershell: Fix issue where powershell scripts could hang. [GH-5082]
* provisioner/powershell: Fix Powershell progress stream leak to stderr for normal and elevated commands. [GH-5365]
* provisioner/puppet-masterless: Fix bug where `puppet_bin_dir` wasn't being respected. [GH-5340]
* provisioner/puppet: Fix setting facter vars on Windows. [GH-5341]

## 1.1.0 (September 12, 2017)

### IMPROVEMENTS:

* builder/alicloud: Update alicloud go sdk and enable multi sites support for alicloud [GH-5219]
* builder/amazon: Upgrade aws-sdk-go to 1.10.14, add tags at instance run time. [GH-5196]
* builder/azure: Add object_id to windows_custom_image.json. [GH-5285]
* builder/azure: Add support for storage account for managed images. [GH-5244]
* builder/azure: Update pkcs12 package. [GH-5301]
* builder/cloudstack: Add support for Security Groups. [GH-5175]
* builder/docker: Uploading files and directories now use docker cp. [GH-5273] [GH-5333]
* builder/googlecompute: Add `labels` option for labeling launched instances. [GH-5308]
* builder/googlecompute: Add support for accelerator api. [GH-5137]
* builder/profitbricks: added support for Cloud API v4. [GH-5233]
* builder/vmware-esxi: Remote builds now respect `output_directory` [GH-4592]
* builder/vmware: Set artifact ID to `VMName`. [GH-5187]
* core: Build solaris binary by default. [GH-5268] [GH-5248]
* core: Remove LGPL dependencies. [GH-5262]
* provisioner/puppet: Add `guest_os_type` option to add support for Windows. [GH-5252]
* provisioner/salt-masterless: Also use sudo to clean up if we used sudo to install. [GH-5240]

### BACKWARDS INCOMPATIBILITIES:

* builder/amazon: Changes way that AMI artifacts are printed out after build, aligning them to builder. Could affect output parsing. [GH-5281]
* builder/amazon: Split `enhanced_networking` into `sriov_support` and `ena_support` to support finer grained control. Use `packer fix <template.json>` to automatically update your template to use `ena_support` where previously there was only `enhanced_networking`. Make sure to also add `sriov_support` if you need that feature, and to ensure `ena_support` is what you intended to be in your template. [GH-5284]
* builder/cloudstack: Setup temporary SSH keypair; backwards incompatible in the uncommon case that the source image allowed SSH auth with password but not with keypair. [GH-5174]
* communicator/ssh: Renamed `ssh_disable_agent` to `ssh_disable_agent_forwarding`. Need to run fixer on packer configs that use `ssh_disable_agent`. [GH-5024]
* communicator: Preserve left-sided white-space in remote command output. Make sure any scripts that parse this output can handle the new whitespace before upgrading. [GH-5167]
* provisioner/shell: Set default for `ExpectDisconnect` to `false`. If your script causes the connection to be reset, you should set this to `true` to prevent errors. [GH-5283]

### BUG FIXES:

* builder/amazon: `force_deregister` works in all regions, not just original region. [GH-5250]
* builder/docker: Directory uploads now use the same semantics as the rest of the communicators. [GH-5333]
* builder/vmware: Fix timestamp in default VMName. [GH-5274]
* builder/winrm: WinRM now waits to make sure commands can run successfully before considering itself connected. [GH-5300]
* core: Fix issue where some builders wouldn't respect `-on-error` behavior. [GH-5297]
* provisioner/windows-restart: The first powershell provisioner after a restart now works. [GH-5272]

### FEATURES:

* **New builder**: Oracle Cloud Infrastructure (OCI) builder for creating custom images. [GH-4554]
* **New builder:** `lxc` for building lxc images. [GH-3523]
* **New builder:** `lxd` for building lxd images. [GH-3625]
* **New post-processor**: vSphere Template post-processor to be used with vmware-iso builder enabling user to mark a VM as a template. [GH-5114]


## 1.0.4 (August 11, 2017)

### IMPROVEMENTS:

* builder/alicloud: Increase polling timeout. [GH-5148]
* builder/azure: Add `private_virtual_network_with_public_ip` option to optionally obtain a public IP. [GH-5222]
* builder/googlecompute: use a more portable method of obtaining zone. [GH-5192]
* builder/hyperv: Properly interpolate user variables in template. [GH-5184]
* builder/parallels: Remove soon to be removed --vmtype flag in createvm. [GH-5172]
* contrib: add json files to zsh completion. [GH-5195]

### BUG FIXES:

* builder/amazon: Don't delete snapshots we didn't create. [GH-5211]
* builder/amazon: fix builds when using the null communicator. [GH-5217]
* builder/docker: Correctly handle case when uploading an empty directory. [GH-5234]
* command/push: Don't push variables if they are unspecified. Reverts to behavior in 1.0.1. [GH-5235]
* command/push: fix handling of symlinks. [GH-5226]
* core: Strip query parameters from ISO URLs when checking against a checksum file. [GH-5181]
* provisioner/ansible-remote: Fix issue where packer could hang communicating with ansible-remote. [GH-5146]

## 1.0.3 (July 17, 2017)

### IMPROVEMENTS:

* builder/azure: Update to latest Azure SDK, enabling support for managed disks. [GH-4511]
* builder/cloudstack: Add default cidr_list [ 0.0.0.0/0 ]. [GH-5125]
* builder/cloudstack: Add support for ssh_agent_auth. [GH-5130]
* builder/cloudstack: Add support for using a HTTP server. [GH-5017]
* builder/cloudstack: Allow reading api_url, api_key, and secret_key from env vars. [GH-5124]
* builder/cloudstack: Make expunge optional and improve logging output. [GH-5099]
* builder/googlecompute: Allow using URL's for network and subnetwork. [GH-5035]
* builder/hyperv: Add support for floppy_dirs with hyperv-iso builder.
* builder/hyperv: Add support for override of system %temp% path.
* core: Experimental Android ARM support. [GH-5111]
* post-processor/atlas: Disallow packer push of vagrant.box artifacts to atlas. [GH-4780]
* postprocessor/atlas: Disallow pushing vagrant.box artifacts now that Vagrant cloud is live. [GH-4780]

### BUG FIXES:

* builder/amazon: Fix panic that happens if ami_block_device_mappings is empty. [GH-5059]
* builder/azure: Write private SSH to file in debug mode. [GH-5070] [GH-5074]
* builder/cloudstack: Properly report back errors. [GH-5103] [GH-5123]
* builder/docker: Fix windows filepath in docker-toolbox call. [GH-4887]
* builder/hyperv: Use SID to verify membership in Admin group, fixing for non-English users. [GH-5022]
* builder/hyperv: Verify membership in the group Hyper-V Administrators by SID, not name. [GH-5022]
* builder/openstack: Update gophercloud version, fixing builds > 1 hr long. [GH-5046]
* builder/parallels: Skip missing paths when looking for unnecessary files. [GH-5058]
* builder/vmware-esxi: Fix VNC port discovery default timeout. [GH-5051]
* communicator/ssh: Add ProvisionerTypes to communicator tests, resolving panic [GH-5116]
* communicator/ssh: Resolve race condition that sometimes truncates ssh provisioner stdout [GH-4719]
* post-processor/checksum: Fix interpolation of "output". [GH-5112]
* push: Push vars in packer config, not just those set from command line and in var-file. [GH-5101]

## 1.0.2 (June 21, 2017)

### BUG FIXES:

* communicator/ssh: Fix truncated stdout from remote ssh provisioner. [GH-5050]
* builder/amazon: Fix bugs related to stop instance command. [GH-4719]
* communicator/ssh: Fix ssh connection errors. [GH-5038]
* core: Remove logging that shouldn't be there when running commands. [GH-5042]
* provisioner/shell: Fix bug where scripts were being run under `sh`. [GH-5043]

### IMPROVEMENTS:

* provisioner/windows-restart: make it clear that timeouts come from the provisioner, not winrm. [GH-5040]


## 1.0.1 (June 19, 2017)

### IMPROVEMENTS:

* builder/amazon: Allow amis to be copied to other regions, encrypted with custom KMS keys. [GH-4948]
* builder/amazon: Allow configuration of api endpoint to support api-compatible cloud providers. [GH-4896]
* builder/amazon: Fix regex used for ami name validation [GH-4902]
* builder/amazon: Look up vpc from subnet id if no vpc was specified. [GH-4879]
* builder/amazon: Print temporary security group name to the UI. [GH-4997]
* builder/amazon: Support Assume Role with MFA and ECS Task Roles. Also updates to a newer version of aws-sdk-go. [GH-4996]
* builder/amazon: Use retry logic when creating instance tags. [GH-4876]
* builder/amazon: Validate ami name. [GH-4762]
* builder/azure: Add build output to artifact. [GH-4953]
* builder/azure: Use disk URI as artifact ID. [GH-4981]
* builder/digitalocean: Added support for monitoring. [GH-4782]
* builder/digitalocean: Support for copying snapshot to other regions. [GH-4893]
* builder/hyper-v: Remove the check for administrator rights when sending key strokes to Hyper-V. [GH-4687]
* builder/openstack: Fix private key error message to match documentation [GH-4898]
* builder/null: Support SSH agent auth [GH-4956]
* builder/openstack: Add ssh agent support. [GH-4655]
* builder/openstack: Support client x509 certificates. [GH-4921]
* builder/parallels-iso: Configuration of disk type, plain or expanding. [GH-4621]
* builder/triton: An SSH agent can be used to authenticate requests, making `triton_key_material` optional. [GH-4838]
* builder/triton: If no source machine networks are specified, instances are started on the default public and internal networks. [GH-4838]
* builder/virtualbox: Add sata port count configuration option. [GH-4699]
* builder/virtualbox: Don't add port forwarding when using "none" communicator. [GH-4960]
* builder/vmware: Add option to remove interfaces from the vmx. [GH-4927]
* builder/vmware: Properly remove mounted CDs on OS X. [GH-4810]
* builder/vmware: VNC probe timeout is configurable. [GH-4919]
* command/push: add `-sensitive` flag to mark pushed vars as sensitive. [GH-4970]
* command/push: Vagrant support in Terraform Enterprise is deprecated. [GH-4950]
* communicator/ssh: Add ssh agent support for bastion connections. [GH-4940]
* communicator/winrm: Add NTLM authentication support. [GH-4979]
* communicator/winrm: Add support for file downloads. [GH-4748]
* core: add telemetry for better product support. [GH-5015]
* core: Build binaries for arm64 [GH-4892]
* post-processor/amazon-import: Add support for `license_type`. [GH-4634]
* post-processor/vagrant-cloud: Get vagrant cloud token from environment. [GH-4982]
* provisioner/ansible-local: Add extra-vars `packer_build_name`, `packer_builder_type`, and `packer_http_addr`. [GH-4821]
* provisioner/ansible: Add `inventory_directory` option to control where to place the generated inventory file. [GH-4760]
* provisioner/ansible: Add `skip_version_check` flag for when ansible will be installed from a prior provisioner. [GH-4983]
* provisioner/ansible: Add extra-vars `packer_build_name` and `packer_builder_type`. [GH-4821]
* provisioner/chef-solo: Add option to select Chef version. [GH-4791]
* provisioner/salt: Add salt bin directory configuration. [GH-5009]
* provisioner/salt: Add support for grains. [GH-4961]
* provisioner/shell: Use `env` to set environment variables to support freebsd out of the box. [GH-4909]
* website/docs: Clarify language, improve formatting. [GH-4866]
* website/docs: Update docker metadata fields that can be changed. [GH-4867]


### BUG FIXES:

* builder/amazon-ebssurrogate: Use ami device settings when creating the AMI. [GH-4972]
* builder/amazon: don't try to delete extra volumes during clean up. [GH-4930]
* builder/amazon: fix `force_delete_snapshot` when the launch instance has extra volumes. [GH-4931]
* builder/amazon: Only delete temporary key if we created one. [GH-4850]
* builder/azure: Replace calls to panic with error returns. [GH-4846]
* communicator/winrm: Use KeepAlive to keep long-running connections open. [GH-4952]
* core: Correctly reject config files which have junk after valid json. [GH-4906]
* post-processor/checksum: fix crash when invalid checksum is used. [GH-4812]
* post-processor/vagrant-cloud: don't read files to upload in to memory first. [GH-5005]
* post-processor/vagrant-cloud: only upload once under normal conditions. [GH-5008]
* provisioner/ansible-local: Correctly set the default staging directory under Windows. [GH-4792]

### FEATURES:

* **New builder:** `alicloud-ecs` for building Alicloud ECS images. [GH-4619]

## 1.0.0 (April 4, 2017)

### BUG FIXES:

* builder/amazon: Fix b/c issue by reporting again the tags we create. [GH-4704]
* builder/amazon: Fix crash in `step_region_copy`. [GH-4642]
* builder/googlecompute: Correct values for `on_host_maintenance`. [GH-4643]
* builder/googlecompute: Use "default" service account. [GH-4749]
* builder/hyper-v: Don't wait for shutdown_command to return. [GH-4691]
* builder/virtualbox: fix `none` communicator by allowing skipping upload of version file. [GH-4678]
* builder/virtualbox: retry removing floppy controller. [GH-4705]
* communicator/ssh: don't return error if we can't close connection. [GH-4741]
* communicator/ssh: fix nil pointer error. [GH-4690]
* core: fix version number
* core: Invoking packer `--help` or `--version` now exits with status 0. [GH-4723]
* core: show correct step name when debugging. [GH-4672]
* communicator/winrm: Directory uploads behave more like scp. [GH-4438]

### IMPROVEMENTS:

* builder/amazon-chroot: Ability to give an empty list in `copy_files` to prevent the default `/etc/resolv.conf` file from being copied. If `copy_files` isn't given at all, the default behavior remains. [GH-4708]
* builder/amazon: set force_deregister to true on -force. [GH-4649]
* builder/amazon: validate ssh key name/file. [GH-4665]
* builder/ansible: Clearer error message when we have problems getting the ansible version. [GH-4694]
* builder/hyper-v: validate output dir in step, not in config. [GH-4645]
* More diligently try to complete azure-setup.sh. [GH-4752]
* website: fix display on ios devices. [GH-4618]

## 0.12.3 (March 1, 2017)

@@ -41,42 +565,43 @@

### IMPROVEMENTS:

* builder/amazon-chroot: support encrypted boot volume. [GH-4584]
* builder/amazon: Add BuildRegion and SourceAMI template variables. [GH-4399]
* builder/amazon: Change EC2 Windows password timeout to 20 minutes. [GH-4590]
* builder/amazon: enable ena when `enhanced_networking` is set. [GH-4578]
* builder/azure:: add two new config variables for temp_compute_name and temp_resource_group_name. [GH-4468]
* builder/docker: create export dir if needed. [GH-4439]
* builder/googlecompute: Add `on_host_maintenance` option. [GH-4544]
* builder/openstack: add reuse_ips option to try to re-use existing IPs. [GH-4564]
* builder/vmware-esxi: try for longer to connect to vnc port. [GH-4480] [GH-4610]
* builder/vmware: allow extra options for ovftool. [GH-4536]
* builder/vmware: don't cache ip address so we know if it changes. [GH-4532]
* communicator/docker: preserve file mode. [GH-4443]
* communicator/ssh: Use SSH agent when enabled for bastion step. [GH-4598]
* communicator/winrm: support ProxyFromEnvironment. [GH-4463]
* core: don't show ui color if we're not colorized. [GH-4525]
* core: make VNC links clickable in terminal. [GH-4497] [GH-4498]
* docs: add community page. [GH-4550]
* post-processor/amazon-import: support AMI attributes on import [GH-4216]
* post-processor/docker-import: print stderr on docker import failure. [GH-4529]

### BUG FIXES:

* builder/amazon-ebsvolume: Fix interpolation of block_device. [GH-4464]
* builder/amazon: Fix ssh agent authentication. [GH-4597]
* builder/docker: Don't force tag if using a docker version that doesn't support it. [GH-4560]
* builder/googlecompute: fix bug when creating image from custom image_family. [GH-4518]
* builder/virtualbox: remove guest additions before saving image. [GH-4496]
* core: always check for an error first when walking a path. [GH-4467]
* core: update crypto/ssh lib to fix large file uploads. [GH-4546]
* provisioner/chef-client: only upload knife config if we're cleaning. [GH-4534]


## 0.12.2 (January 20, 2017)

@@ -89,12 +614,13 @@

* builder/hyperv-iso: add `iso_target_extension` option. [GH-4294]
* builder/openstack: Add support for instance metadata. [GH-4361]
* builder/openstack: Attempt to use existing floating IPs before allocating a new one. [GH-4357]
* builder/parallels-iso: add `iso_target_extension` option. [GH-4294]
* builder/qemu: add `iso_target_extension` option. [GH-4294]
* builder/qemu: add `use_default_display` option for osx compatibility. [GH-4293]
* builder/qemu: Detect input disk image format during copy/convert. [GH-4343]
* builder/virtualbox-iso: add `iso_target_extension` option. [GH-4294]
* builder/virtualbox: add `skip_export` option to skip exporting the VM after build completes. [GH-4339]

@@ -106,23 +632,22 @@

* builder/vmware: Try to use `ip address` to find host IP. [GH-4411]
* common/step_http\_server: set `PACKER_HTTP_ADDR` env var for accessing http server from inside builder. [GH-4409]
* provisioner/powershell: Allow equals sign in value of environment variables. [GH-4328]
* provisioner/puppet-server: Add default facts. [GH-4286]

### BUG FIXES:

* builder/amazon-chroot: Panic in AMI region copy step. [GH-4341]
* builder/amazon: Crashes when new EBS vols are used. [GH-4308]
* builder/amazon: Fix crash in amazon-instance. [GH-4372]
* builder/amazon: fix run volume tagging [GH-4420]
* builder/amazon: fix when using non-existent security\_group\_id. [GH-4425]
* builder/amazon: Properly error if we don't have the ec2:DescribeSecurityGroups permission. [GH-4304]
* builder/amazon: Properly wait for security group to exist. [GH-4369]
* builder/docker: Fix crash when performing log in to ECR with an invalid URL. [GH-4385]
* builder/openstack: fix for finding resource by ID. [GH-4301]
* builder/qemu: Explicitly set WinRMPort for StepConnect. [GH-4321]
* builder/virtualbox: Explicitly set WinRMPort for StepConnect. [GH-4321]

@@ -133,8 +658,7 @@

* command/push: Don't interpolate variables when pushing. [GH-4389]
* common/step_http_server: make port range inclusive. [GH-4398]
* communicator/winrm: update winrm client, resolving `MaxMemoryPerShellMB` errors and properly error logging instead of panicking. [GH-4412] [GH-4424]
* provider/windows-shell: Allows equals sign in env var value. [GH-4423]

## 0.12.1 (December 15, 2016)

@@ -146,9 +670,9 @@

  "packer fix template.json" to migrate a template. [GH-4285]
* builder/openstack: No longer supports the `api_key` option for rackspace. [GH-4283]
* post-processor/manifest: Changed `filename` field to be `output`, to be more consistent with other post-processors. `packer fix` will fix this for you. [GH-4192]
* post-processor/shell-local: Now runs per-builder instead of per-file. The filename is no longer passed in as an argument to the script, but instead needs to be gleaned from the manifest post-processor. [GH-4189]

@@ -0,0 +1,28 @@

* @hashicorp/packer

# builders

/builder/alicloud/ dongxiao.zzh@alibaba-inc.com
/builder/amazon/ebssurrogate/ @jen20
/builder/amazon/ebsvolume/ @jen20
/builder/azure/ @boumenot
/builder/hyperv/ @taliesins
/builder/lxc/ @ChrisLundquist
/builder/lxd/ @ChrisLundquist
/builder/oneandone/ @jasmingacic
/builder/oracle/ @prydie @owainlewis
/builder/profitbricks/ @jasmingacic
/builder/triton/ @jen20 @sean-
/builder/ncloud/ @YuSungDuk
/builder/scaleway/ @dimtion @edouardb

# provisioners

/provisioner/ansible/ @bhcleek
/provisioner/converge/ @stevendborrelli

# post-processors

/post-processor/alicloud-import/ dongxiao.zzh@alibaba-inc.com
/post-processor/checksum/ v.tolstov@selfip.ru
/post-processor/googlecompute-export/ crunkleton@google.com
/post-processor/vsphere-template/ nelson@bennu.cl

CONTRIBUTING.md
@@ -1,46 +1,46 @@

# Contributing to Packer

**First:** if you're unsure or afraid of _anything_, just ask or submit the issue or pull request anyways. You won't be yelled at for giving your best effort. The worst that can happen is that you'll be politely asked to change something. We appreciate any sort of contributions, and don't want a wall of rules to get in the way of that.

However, for those individuals who want a bit more guidance on the best way to contribute to the project, read on. This document will cover what we're looking for. By addressing all the points we're looking for, it raises the chances we can quickly merge or address your contributions.

## Issues

### Reporting an Issue

* Make sure you test against the latest released version. It is possible we already fixed the bug you're experiencing.

* Run the command with debug output with the environment variable `PACKER_LOG`. For example: `PACKER_LOG=1 packer build template.json`. Take the _entire_ output and create a [gist](https://gist.github.com) for linking to in your issue. Packer should strip sensitive keys from the output, but take a look through just in case. (A capture sketch follows this list.)

* Provide a reproducible test case. If a contributor can't reproduce an issue, then it dramatically lowers the chances it'll get fixed. And in some cases, the issue will eventually be closed.

* Respond promptly to any questions made by the Packer team to your issue. Stale issues will be closed.
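
One way to capture that debug output for a gist — a sketch, assuming your Packer build supports the `PACKER_LOG_PATH` environment variable and using an arbitrary log file name:

```
# Write the full debug log to a file instead of the terminal, then paste the
# file contents into a gist for the issue.
PACKER_LOG=1 PACKER_LOG_PATH=packer-debug.log packer build template.json
```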

### Issue Lifecycle

1. The issue is reported.

2. The issue is verified and categorized by a Packer collaborator. Categorization is done via tags. For example, bugs are marked as "bugs" and easy fixes are marked as "easy".

3. Unless it is critical, the issue is left for a period of time (sometimes many weeks), giving outside contributors a chance to address the issue.

4. The issue is addressed in a pull request or commit. The issue will be referenced in the commit message so that the code that fixes it is clearly

@@ -50,86 +50,108 @@ it raises the chances we can quickly merge or address your contributions.

## Setting up Go to work on Packer

If you have never worked with Go before, you will have to complete the following steps in order to be able to compile and test Packer. These instructions target POSIX-like environments (Mac OS X, Linux, Cygwin, etc.) so you may need to adjust them for Windows or other shells. (A condensed sketch of these steps follows the list.)

1. [Download](https://golang.org/dl) and install Go. The instructions below are for go 1.7. Earlier versions of Go are no longer supported.

2. Set and export the `GOPATH` environment variable and update your `PATH`. For example, you can add the following to your `.bash_profile` (or comparable shell startup scripts):

   ```
   export GOPATH=$HOME/go
   export PATH=$PATH:$GOPATH/bin
   ```

3. Download the Packer source (and its dependencies) by running `go get github.com/hashicorp/packer`. This will download the Packer source to `$GOPATH/src/github.com/hashicorp/packer`.

4. When working on Packer, first `cd $GOPATH/src/github.com/hashicorp/packer` so you can run `make` and easily access other files. Run `make help` to get information about make targets.

5. Make your changes to the Packer source. You can run `make` in `$GOPATH/src/github.com/hashicorp/packer` to run tests and build the Packer binary. Any compilation errors will be shown when the binaries are rebuilding. If you don't have `make` you can simply run `go build -o bin/packer .` from the project root.

6. After building Packer successfully, use `$GOPATH/src/github.com/hashicorp/packer/bin/packer` to build a machine and verify your changes work. For instance: `$GOPATH/src/github.com/hashicorp/packer/bin/packer build template.json`.

7. If everything works well and the tests pass, run `go fmt` on your code before submitting a pull-request.
|
||||||
|
|
||||||
### Opening an Pull Request
|
### Opening an Pull Request
|
||||||
|
|
||||||
When you are ready to open a pull-request, you will need to [fork packer](https://github.com/mitchellh/packer#fork-destination-box), push your changes to your fork, and then open a pull-request.
|
When you are ready to open a pull-request, you will need to
|
||||||
|
[fork Packer](https://github.com/hashicorp/packer#fork-destination-box), push
|
||||||
|
your changes to your fork, and then open a pull-request.
|
||||||
|
|
||||||
For example, my github username is `cbednarski` so I would do the following:
|
For example, my github username is `cbednarski`, so I would do the following:
|
||||||
|
|
||||||
|
```
|
||||||
git checkout -b f-my-feature
|
git checkout -b f-my-feature
|
||||||
// develop a patch
|
# Develop a patch.
|
||||||
git push https://github.com/cbednarski/packer f-my-feature
|
git push https://github.com/cbednarski/Packer f-my-feature
|
||||||
|
```
|
||||||
|
|
||||||
From there, open your fork in your browser to open a new pull-request.
|
From there, open your fork in your browser to open a new pull-request.
|
||||||
|
|
||||||
**Note** Go infers package names from their filepaths. This means `go build` will break if you `git clone` your fork instead of using `go get` on the main packer project.
|
**Note:** Go infers package names from their file paths. This means `go build`
|
||||||
|
will break if you `git clone` your fork instead of using `go get` on the main
|
||||||
|
Packer project.
|
||||||
|
|
||||||
### Tips for Working on Packer
|
### Tips for Working on Packer
|
||||||
|
|
||||||
#### Working on forks
|
#### Working on forks
|
||||||
|
|
||||||
The easiest way to work on a fork is to set it as a remote of the packer project. After following the steps in "Setting up Go to work on Packer":
|
The easiest way to work on a fork is to set it as a remote of the Packer
|
||||||
|
project. After following the steps in "Setting up Go to work on Packer":
|
||||||
|
|
||||||
1. Navigate to $GOPATH/src/github.com/mitchellh/packer
|
1. Navigate to `$GOPATH/src/github.com/hashicorp/packer`
|
||||||
2. Add the remote `git remote add <name of remote> <github url of fork>`. For example `git remote add mwhooker https://github.com/mwhooker/packer.git`.
|
2. Add the remote by running
|
||||||
|
`git remote add <name of remote> <github url of fork>`. For example:
|
||||||
|
`git remote add mwhooker https://github.com/mwhooker/packer.git`.
|
||||||
3. Checkout a feature branch: `git checkout -b new-feature`
|
3. Checkout a feature branch: `git checkout -b new-feature`
|
||||||
4. Make changes
|
4. Make changes
|
||||||
5. (Optional) Push your changes to the fork: `git push -u <name of remote> new-feature`
|
5. (Optional) Push your changes to the fork:
|
||||||
|
`git push -u <name of remote> new-feature`
|
||||||
|
|
||||||
This way you can push to your fork to create a PR, but the code on disk still lives in the spot where the go cli tools are expecting to find it.
|
This way you can push to your fork to create a PR, but the code on disk still
|
||||||
|
lives in the spot where the go cli tools are expecting to find it.
|
||||||
|
|
||||||
#### Govendor
|
#### Govendor
|
||||||
|
|
||||||
If you are submitting a change that requires new or updated dependencies, please include them in `vendor/vendor.json` and in the `vendor/` folder. This helps everything get tested properly in CI.
|
If you are submitting a change that requires new or updated dependencies, please
|
||||||
|
include them in `vendor/vendor.json` and in the `vendor/` folder. This helps
|
||||||
|
everything get tested properly in CI.
|
||||||
|
|
||||||
Note that you will need to use [govendor](https://github.com/kardianos/govendor) to do this. This step is recommended but not required; if you don't use govendor please indicate in your PR which dependencies have changed and to what versions.
|
Note that you will need to use [govendor](https://github.com/kardianos/govendor)
|
||||||
|
to do this. This step is recommended but not required; if you don't use govendor
|
||||||
|
please indicate in your PR which dependencies have changed and to what versions.
|
||||||
|
|
||||||
Use `govendor fetch <project>` to add dependencies to the project. See
|
Use `govendor fetch <project>` to add dependencies to the project. See
|
||||||
[govendor quick
|
[govendor quick start](https://github.com/kardianos/govendor#quick-start-also-see-the-faq)
|
||||||
start](https://github.com/kardianos/govendor#quick-start-also-see-the-faq) for
|
for examples.
|
||||||
examples.
|
|
||||||
|
|
||||||
Please only apply the minimal vendor changes to get your PR to work. Packer does not attempt to track the latest version for each dependency.
|
Please only apply the minimal vendor changes to get your PR to work. Packer does
|
||||||
|
not attempt to track the latest version for each dependency.
|
||||||
|
|
||||||
#### Running Unit Tests
|
#### Running Unit Tests
|
||||||
|
|
||||||
You can run tests for individual packages using commands like this:
|
You can run tests for individual packages using commands like this:
|
||||||
|
|
||||||
$ make test TEST=./builder/amazon/...
|
```
|
||||||
|
make test TEST=./builder/amazon/...
|
||||||
|
```
|
||||||
|
|
||||||
#### Running Acceptance Tests
|
#### Running Acceptance Tests
|
||||||
|
|
||||||
|
@ -137,21 +159,34 @@ Packer has [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing)
|
||||||
for various builders. These typically require an API key (AWS, GCE), or
|
for various builders. These typically require an API key (AWS, GCE), or
|
||||||
additional software to be installed on your computer (VirtualBox, VMware).
|
additional software to be installed on your computer (VirtualBox, VMware).
|
||||||
|
|
||||||
If you're working on a new builder or builder feature and want verify it is functioning (and also hasn't broken anything else), we recommend running the
|
If you're working on a new builder or builder feature and want verify it is
|
||||||
|
functioning (and also hasn't broken anything else), we recommend running the
|
||||||
acceptance tests.
|
acceptance tests.
|
||||||
|
|
||||||
**Warning:** The acceptance tests create/destroy/modify *real resources*, which
|
**Warning:** The acceptance tests create/destroy/modify _real resources_, which
|
||||||
may incur costs for real money. In the presence of a bug, it is possible that resources may be left behind, which can cost money even though you were not using them. We recommend running tests in an account used only for that purpose so it is easy to see if there are any dangling resources, and so production resources are not accidentally destroyed or overwritten during testing.
|
may incur costs for real money. In the presence of a bug, it is possible that
|
||||||
|
resources may be left behind, which can cost money even though you were not
|
||||||
|
using them. We recommend running tests in an account used only for that purpose
|
||||||
|
so it is easy to see if there are any dangling resources, and so production
|
||||||
|
resources are not accidentally destroyed or overwritten during testing.
|
||||||
|
|
||||||
To run the acceptance tests, invoke `make testacc`:
|
To run the acceptance tests, invoke `make testacc`:
|
||||||
|
|
||||||
$ make testacc TEST=./builder/amazon/ebs
|
```
|
||||||
|
make testacc TEST=./builder/amazon/ebs
|
||||||
...
|
...
|
||||||
|
```
|
||||||
|
|
||||||
The `TEST` variable lets you narrow the scope of the acceptance tests to a
|
The `TEST` variable lets you narrow the scope of the acceptance tests to a
|
||||||
specific package / folder. The `TESTARGS` variable is recommended to filter
|
specific package / folder. The `TESTARGS` variable is recommended to filter down
|
||||||
down to a specific resource to test, since testing all of them at once can
|
to a specific resource to test, since testing all of them at once can sometimes
|
||||||
sometimes take a very long time.
|
take a very long time.
|
||||||
|
|
||||||
|
To run only a specific test, use the `-run` argument:
|
||||||
|
|
||||||
|
```
|
||||||
|
make testacc TEST=./builder/amazon/ebs TESTARGS="-run TestBuilderAcc_forceDeleteSnapshot"
|
||||||
|
```
|
||||||
|
|
||||||
Acceptance tests typically require other environment variables to be set for
|
Acceptance tests typically require other environment variables to be set for
|
||||||
things such as API tokens and keys. Each test should error and tell you which
|
things such as API tokens and keys. Each test should error and tell you which
|
||||||
|
|
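In practice that check is a small helper at the top of the acceptance test file. The sketch below is only an illustration of the pattern, with hypothetical `EXAMPLE_*` variable names; the real names depend on the builder under test:

```
package example

import (
	"os"
	"testing"
)

// testAccPreCheck fails the acceptance test early when the credentials it
// needs are not present in the environment.
func testAccPreCheck(t *testing.T) {
	// EXAMPLE_ACCESS_KEY / EXAMPLE_SECRET_KEY are placeholder names.
	if v := os.Getenv("EXAMPLE_ACCESS_KEY"); v == "" {
		t.Fatal("EXAMPLE_ACCESS_KEY must be set for acceptance tests")
	}
	if v := os.Getenv("EXAMPLE_SECRET_KEY"); v == "" {
		t.Fatal("EXAMPLE_SECRET_KEY must be set for acceptance tests")
	}
}
```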
Makefile

@ -5,6 +5,17 @@ GITSHA:=$(shell git rev-parse HEAD)
# Get the current local branch name from git (if we can, this may be blank)
GITBRANCH:=$(shell git symbolic-ref --short HEAD 2>/dev/null)
GOFMT_FILES?=$$(find . -not -path "./vendor/*" -name "*.go")
GOOS=$(shell go env GOOS)
GOARCH=$(shell go env GOARCH)
GOPATH=$(shell go env GOPATH)

# Get the git commit
GIT_DIRTY=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true)
GIT_COMMIT=$(shell git rev-parse --short HEAD)
GIT_IMPORT=github.com/hashicorp/packer/version
GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY)

export GOLDFLAGS

default: deps generate test dev

@ -13,32 +24,37 @@ ci: deps test
release: deps test releasebin package ## Build a release build

bin: deps ## Build debug/test build
	@go get github.com/mitchellh/gox
	@echo "WARN: 'make bin' is for debug / test builds only. Use 'make release' for release builds."
	@sh -c "$(CURDIR)/scripts/build.sh"

releasebin: deps
	@go get github.com/mitchellh/gox
	@grep 'const VersionPrerelease = "dev"' version/version.go > /dev/null ; if [ $$? -eq 0 ]; then \
		echo "ERROR: You must remove prerelease tags from version/version.go prior to release."; \
		exit 1; \
	fi
	@sh -c "$(CURDIR)/scripts/build.sh"

package:
	$(if $(VERSION),,@echo 'VERSION= needed to release; Use make package skip compilation'; exit 1)
	@sh -c "$(CURDIR)/scripts/dist.sh $(VERSION)"

deps:
	@go get golang.org/x/tools/cmd/stringer
	@go get github.com/kardianos/govendor
	@govendor sync

dev: deps ## Build and install a development build
	@grep 'const VersionPrerelease = ""' version/version.go > /dev/null ; if [ $$? -eq 0 ]; then \
		echo "ERROR: You must add prerelease tags to version/version.go prior to making a dev build."; \
		exit 1; \
	fi
	@mkdir -p pkg/$(GOOS)_$(GOARCH)
	@mkdir -p bin
	@go install -ldflags '$(GOLDFLAGS)'
	@cp $(GOPATH)/bin/packer bin/packer
	@cp $(GOPATH)/bin/packer pkg/$(GOOS)_$(GOARCH)

fmt: ## Format Go code
	@gofmt -w -s $(GOFMT_FILES)

@ -46,6 +62,9 @@ fmt: ## Format Go code
fmt-check: ## Check go code formatting
	$(CURDIR)/scripts/gofmtcheck.sh $(GOFMT_FILES)

fmt-docs:
	@find ./website/source/docs -name "*.md" -exec pandoc --wrap auto --columns 79 --atx-headers -s -f "markdown_github+yaml_metadata_block" -t "markdown_github+yaml_metadata_block" {} -o {} \;

# Install js-beautify with npm install -g js-beautify
fmt-examples:
	find examples -name *.json | xargs js-beautify -r -s 2 -n -eol "\n"

@ -72,11 +91,9 @@ testrace: deps ## Test for race conditions
	@go test -race $(TEST) $(TESTARGS) -timeout=2m

updatedeps:
	@echo "INFO: Packer deps are managed by govendor. See CONTRIBUTING.md"

help:
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

.PHONY: bin checkversion ci default deps fmt fmt-docs fmt-examples generate releasebin test testacc testrace updatedeps
README.md

@ -5,9 +5,9 @@
[![GoDoc][godoc-badge]][godoc]
[![GoReportCard][report-badge]][report]

[travis-badge]: https://travis-ci.org/hashicorp/packer.svg?branch=master
[travis]: https://travis-ci.org/hashicorp/packer
[appveyor-badge]: https://ci.appveyor.com/api/projects/status/miavlgnp989e5obc/branch/master?svg=true
[appveyor]: https://ci.appveyor.com/project/hashicorp/packer
[godoc-badge]: https://godoc.org/github.com/mitchellh/packer?status.svg
[godoc]: https://godoc.org/github.com/mitchellh/packer

@ -16,7 +16,7 @@

* Website: https://www.packer.io
* IRC: `#packer-tool` on Freenode
* Mailing list: [Google Groups](https://groups.google.com/forum/#!forum/packer-tool)

Packer is a tool for building identical machine images for multiple platforms
from a single source configuration.

@ -34,9 +34,11 @@ comes out of the box with support for the following platforms:
* Hyper-V
* 1&1
* OpenStack
* Oracle Cloud Infrastructure
* Parallels
* ProfitBricks
* QEMU. Both KVM and Xen images.
* Scaleway
* Triton (Joyent Public Cloud)
* VMware
* VirtualBox

@ -49,16 +51,16 @@ The images that Packer creates can easily be turned into
## Quick Start
Download and install packages and dependencies
```
go get github.com/hashicorp/packer
```

**Note:** There is a great
[introduction and getting started guide](https://www.packer.io/intro)
for those with a bit more patience. Otherwise, the quick start below
will get you up and running quickly, at the sacrifice of not explaining some
key points.

First, [download a pre-built Packer binary](https://www.packer.io/downloads.html)
for your operating system or [compile Packer yourself](CONTRIBUTING.md#setting-up-go-to-work-on-packer).

After Packer is installed, create your first template, which tells Packer

@ -103,8 +105,8 @@ they're run, etc. is up to you.

Comprehensive documentation is viewable on the Packer website:

https://www.packer.io/docs

## Developing Packer

See [CONTRIBUTING.md](https://github.com/hashicorp/packer/blob/master/CONTRIBUTING.md) for best practices and instructions on setting up your development environment to work on Packer.
@ -1,50 +1,84 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

LINUX_BASE_BOX = "bento/ubuntu-16.04"
FREEBSD_BASE_BOX = "jen20/FreeBSD-12.0-CURRENT"

Vagrant.configure(2) do |config|
  # Compilation and development boxes
  config.vm.define "linux", autostart: true, primary: true do |vmCfg|
    vmCfg.vm.box = LINUX_BASE_BOX
    vmCfg.vm.hostname = "linux"
    vmCfg = configureProviders vmCfg,
      cpus: suggestedCPUCores()

    vmCfg.vm.synced_folder ".", "/vagrant", disabled: true
    vmCfg.vm.synced_folder '.',
      '/opt/gopath/src/github.com/hashicorp/packer'

    vmCfg.vm.provision "shell",
      privileged: true,
      inline: 'rm -f /home/vagrant/linux.iso'

    vmCfg.vm.provision "shell",
      privileged: true,
      path: './scripts/vagrant-linux-priv-go.sh'

    vmCfg.vm.provision "shell",
      privileged: true,
      path: './scripts/vagrant-linux-priv-config.sh'

    vmCfg.vm.provision "shell",
      privileged: false,
      path: './scripts/vagrant-linux-unpriv-bootstrap.sh'
  end

  config.vm.define "freebsd", autostart: false, primary: false do |vmCfg|
    vmCfg.vm.box = FREEBSD_BASE_BOX
    vmCfg.vm.hostname = "freebsd"
    vmCfg = configureProviders vmCfg,
      cpus: suggestedCPUCores()

    vmCfg.vm.synced_folder ".", "/vagrant", disabled: true
    vmCfg.vm.synced_folder '.',
      '/opt/gopath/src/github.com/hashicorp/packer',
      type: "nfs",
      bsd__nfs_options: ['noatime']

    vmCfg.vm.provision "shell",
      privileged: true,
      path: './scripts/vagrant-freebsd-priv-config.sh'

    vmCfg.vm.provision "shell",
      privileged: false,
      path: './scripts/vagrant-freebsd-unpriv-bootstrap.sh'
  end
end

def configureProviders(vmCfg, cpus: "2", memory: "2048")
  vmCfg.vm.provider "virtualbox" do |v|
    v.memory = memory
    v.cpus = cpus
  end

  ["vmware_fusion", "vmware_workstation"].each do |p|
    vmCfg.vm.provider p do |v|
      v.enable_vmrun_ip_lookup = false
      v.vmx["memsize"] = memory
      v.vmx["numvcpus"] = cpus
    end
  end

  return vmCfg
end

def suggestedCPUCores()
  case RbConfig::CONFIG['host_os']
  when /darwin/
    Integer(`sysctl -n hw.ncpu`) / 2
  when /linux/
    Integer(`cat /proc/cpuinfo | grep processor | wc -l`) / 2
  else
    2
  end
end
@ -13,7 +13,7 @@ os: Windows Server 2012 R2
environment:
  GOPATH: c:\gopath

clone_folder: c:\gopath\src\github.com\hashicorp\packer

install:
  - set GO15VENDOREXPERIMENT=1

@ -29,9 +29,9 @@ build_script:
  - ps: |
      go.exe test (go.exe list ./... `
        |? { -not $_.Contains('/vendor/') } `
        |? { $_ -ne 'github.com/hashicorp/packer/builder/parallels/common' } `
        |? { $_ -ne 'github.com/hashicorp/packer/common' }`
        |? { $_ -ne 'github.com/hashicorp/packer/provisioner/ansible' })

test: off
@ -0,0 +1,89 @@
package ecs

import (
	"fmt"
	"os"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/template/interpolate"
)

// Config of alicloud
type AlicloudAccessConfig struct {
	AlicloudAccessKey      string `mapstructure:"access_key"`
	AlicloudSecretKey      string `mapstructure:"secret_key"`
	AlicloudRegion         string `mapstructure:"region"`
	AlicloudSkipValidation bool   `mapstructure:"skip_region_validation"`
	SecurityToken          string `mapstructure:"security_token"`
}

// Client for AlicloudClient
func (c *AlicloudAccessConfig) Client() (*ecs.Client, error) {
	if err := c.loadAndValidate(); err != nil {
		return nil, err
	}
	if c.SecurityToken == "" {
		c.SecurityToken = os.Getenv("SECURITY_TOKEN")
	}
	client := ecs.NewECSClientWithSecurityToken(c.AlicloudAccessKey, c.AlicloudSecretKey,
		c.SecurityToken, common.Region(c.AlicloudRegion))

	client.SetBusinessInfo("Packer")
	if _, err := client.DescribeRegions(); err != nil {
		return nil, err
	}
	return client, nil
}

func (c *AlicloudAccessConfig) Prepare(ctx *interpolate.Context) []error {
	var errs []error
	if err := c.Config(); err != nil {
		errs = append(errs, err)
	}

	if c.AlicloudRegion != "" && !c.AlicloudSkipValidation {
		if c.validateRegion() != nil {
			errs = append(errs, fmt.Errorf("Unknown alicloud region: %s", c.AlicloudRegion))
		}
	}

	if len(errs) > 0 {
		return errs
	}

	return nil
}

func (c *AlicloudAccessConfig) Config() error {
	if c.AlicloudAccessKey == "" {
		c.AlicloudAccessKey = os.Getenv("ALICLOUD_ACCESS_KEY")
	}
	if c.AlicloudSecretKey == "" {
		c.AlicloudSecretKey = os.Getenv("ALICLOUD_SECRET_KEY")
	}
	if c.AlicloudAccessKey == "" || c.AlicloudSecretKey == "" {
		return fmt.Errorf("ALICLOUD_ACCESS_KEY and ALICLOUD_SECRET_KEY must be set in template file or environment variables.")
	}
	return nil
}

func (c *AlicloudAccessConfig) loadAndValidate() error {
	if err := c.validateRegion(); err != nil {
		return err
	}

	return nil
}

func (c *AlicloudAccessConfig) validateRegion() error {
	for _, valid := range common.ValidRegions {
		if c.AlicloudRegion == string(valid) {
			return nil
		}
	}

	return fmt.Errorf("Not a valid alicloud region: %s", c.AlicloudRegion)
}
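The access configuration above resolves credentials from the template or from `ALICLOUD_ACCESS_KEY`/`ALICLOUD_SECRET_KEY` in the environment. The following is a minimal sketch, not part of this commit (the helper name is made up); it mirrors how the acceptance tests later in this diff build a client from those environment variables:

```
package ecs

import "log"

// newBeijingClient is a hypothetical helper showing the intended call order:
// resolve credentials, then validate the region and open an ECS client.
func newBeijingClient() {
	access := &AlicloudAccessConfig{AlicloudRegion: "cn-beijing"}
	if err := access.Config(); err != nil {
		log.Fatal(err) // credentials missing
	}
	client, err := access.Client()
	if err != nil {
		log.Fatal(err) // region invalid or API unreachable
	}
	_ = client // ready for DescribeImages, DeleteImage, etc.
}
```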
@ -0,0 +1,44 @@
package ecs

import (
	"testing"
)

func testAlicloudAccessConfig() *AlicloudAccessConfig {
	return &AlicloudAccessConfig{
		AlicloudAccessKey: "ak",
		AlicloudSecretKey: "acs",
	}
}

func TestAlicloudAccessConfigPrepareRegion(t *testing.T) {
	c := testAlicloudAccessConfig()
	c.AlicloudRegion = ""
	if err := c.Prepare(nil); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}

	c.AlicloudRegion = "cn-beijing-3"
	if err := c.Prepare(nil); err == nil {
		t.Fatal("should have error")
	}

	c.AlicloudRegion = "cn-beijing"
	if err := c.Prepare(nil); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}

	c.AlicloudRegion = "unknown"
	if err := c.Prepare(nil); err == nil {
		t.Fatalf("should have err")
	}

	c.AlicloudRegion = "unknown"
	c.AlicloudSkipValidation = true
	if err := c.Prepare(nil); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}
	c.AlicloudSkipValidation = false
}
@ -0,0 +1,135 @@
package ecs

import (
	"fmt"
	"log"
	"sort"
	"strings"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/packer"
)

type Artifact struct {
	// A map of regions to alicloud image IDs.
	AlicloudImages map[string]string

	// BuilderId is the unique ID for the builder that created this alicloud image
	BuilderIdValue string

	// Alicloud connection for performing API calls.
	Client *ecs.Client
}

func (a *Artifact) BuilderId() string {
	return a.BuilderIdValue
}

func (*Artifact) Files() []string {
	// We have no files
	return nil
}

func (a *Artifact) Id() string {
	parts := make([]string, 0, len(a.AlicloudImages))
	for region, ecsImageId := range a.AlicloudImages {
		parts = append(parts, fmt.Sprintf("%s:%s", region, ecsImageId))
	}

	sort.Strings(parts)
	return strings.Join(parts, ",")
}

func (a *Artifact) String() string {
	alicloudImageStrings := make([]string, 0, len(a.AlicloudImages))
	for region, id := range a.AlicloudImages {
		single := fmt.Sprintf("%s: %s", region, id)
		alicloudImageStrings = append(alicloudImageStrings, single)
	}

	sort.Strings(alicloudImageStrings)
	return fmt.Sprintf("Alicloud images were created:\n\n%s", strings.Join(alicloudImageStrings, "\n"))
}

func (a *Artifact) State(name string) interface{} {
	switch name {
	case "atlas.artifact.metadata":
		return a.stateAtlasMetadata()
	default:
		return nil
	}
}

func (a *Artifact) Destroy() error {
	errors := make([]error, 0)

	for region, imageId := range a.AlicloudImages {
		log.Printf("Delete alicloud image ID (%s) from region (%s)", imageId, region)

		// Get alicloud image metadata
		images, _, err := a.Client.DescribeImages(&ecs.DescribeImagesArgs{
			RegionId: common.Region(region),
			ImageId:  imageId})
		if err != nil {
			errors = append(errors, err)
		}
		if len(images) == 0 {
			err := fmt.Errorf("Error retrieving details for alicloud image(%s), no alicloud images found", imageId)
			errors = append(errors, err)
			continue
		}
		// Unshare the image from any shared accounts before destroying it
		sharePermissions, err := a.Client.DescribeImageSharePermission(&ecs.ModifyImageSharePermissionArgs{RegionId: common.Region(region), ImageId: imageId})
		if err != nil {
			errors = append(errors, err)
		}
		accountsNumber := len(sharePermissions.Accounts.Account)
		if accountsNumber > 0 {
			accounts := make([]string, accountsNumber)
			for index, account := range sharePermissions.Accounts.Account {
				accounts[index] = account.AliyunId
			}
			err := a.Client.ModifyImageSharePermission(&ecs.ModifyImageSharePermissionArgs{
				RegionId:      common.Region(region),
				ImageId:       imageId,
				RemoveAccount: accounts,
			})
			if err != nil {
				errors = append(errors, err)
			}
		}
		// Delete alicloud images
		if err := a.Client.DeleteImage(common.Region(region), imageId); err != nil {
			errors = append(errors, err)
		}
		// Delete the snapshots of this image
		for _, diskDevices := range images[0].DiskDeviceMappings.DiskDeviceMapping {
			if err := a.Client.DeleteSnapshot(diskDevices.SnapshotId); err != nil {
				errors = append(errors, err)
			}
		}
	}

	if len(errors) > 0 {
		if len(errors) == 1 {
			return errors[0]
		} else {
			return &packer.MultiError{Errors: errors}
		}
	}

	return nil
}

func (a *Artifact) stateAtlasMetadata() interface{} {
	metadata := make(map[string]string)
	for region, imageId := range a.AlicloudImages {
		k := fmt.Sprintf("region.%s", region)
		metadata[k] = imageId
	}

	return metadata
}
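To make the artifact's string forms concrete, here is a small hedged sketch (same package, not part of the commit, with made-up image IDs) showing what `Id()` and the Atlas metadata state look like for a two-region image map; the comments follow directly from the code above:

```
package ecs

import "fmt"

func exampleArtifactOutput() {
	a := &Artifact{
		AlicloudImages: map[string]string{
			"cn-beijing":  "m-east",
			"cn-hangzhou": "m-west",
		},
	}

	// Regions are sorted and joined as "region:imageId" pairs,
	// e.g. "cn-beijing:m-east,cn-hangzhou:m-west".
	fmt.Println(a.Id())

	// Atlas metadata maps each region to its image ID under a "region." prefix.
	fmt.Println(a.State("atlas.artifact.metadata"))
}
```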
@ -0,0 +1,47 @@
package ecs

import (
	"reflect"
	"testing"

	"github.com/hashicorp/packer/packer"
)

func TestArtifact_Impl(t *testing.T) {
	var _ packer.Artifact = new(Artifact)
}

func TestArtifactId(t *testing.T) {
	expected := `east:foo,west:bar`

	ecsImages := make(map[string]string)
	ecsImages["east"] = "foo"
	ecsImages["west"] = "bar"

	a := &Artifact{
		AlicloudImages: ecsImages,
	}

	result := a.Id()
	if result != expected {
		t.Fatalf("bad: %s", result)
	}
}

func TestArtifactState_atlasMetadata(t *testing.T) {
	a := &Artifact{
		AlicloudImages: map[string]string{
			"east": "foo",
			"west": "bar",
		},
	}

	actual := a.State("atlas.artifact.metadata")
	expected := map[string]string{
		"region.east": "foo",
		"region.west": "bar",
	}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("bad: %#v", actual)
	}
}
@ -0,0 +1,243 @@
// The alicloud ecs package contains a packer.Builder implementation that
// builds ECS images for alicloud.
package ecs

import (
	"fmt"
	"log"

	"github.com/hashicorp/packer/common"
	"github.com/hashicorp/packer/helper/communicator"
	"github.com/hashicorp/packer/helper/config"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
	"github.com/hashicorp/packer/template/interpolate"
)

// The unique ID for this builder
const BuilderId = "alibaba.alicloud"

type Config struct {
	common.PackerConfig  `mapstructure:",squash"`
	AlicloudAccessConfig `mapstructure:",squash"`
	AlicloudImageConfig  `mapstructure:",squash"`
	RunConfig            `mapstructure:",squash"`

	ctx interpolate.Context
}

type Builder struct {
	config Config
	runner multistep.Runner
}

type InstanceNetWork string

const (
	ClassicNet                     = InstanceNetWork("classic")
	VpcNet                         = InstanceNetWork("vpc")
	ALICLOUD_DEFAULT_SHORT_TIMEOUT = 180
	ALICLOUD_DEFAULT_TIMEOUT       = 1800
	ALICLOUD_DEFAULT_LONG_TIMEOUT  = 3600
)

func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
	err := config.Decode(&b.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &b.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{
				"run_command",
			},
		},
	}, raws...)
	b.config.ctx.EnableEnv = true
	if err != nil {
		return nil, err
	}

	// Accumulate any errors
	var errs *packer.MultiError
	errs = packer.MultiErrorAppend(errs, b.config.AlicloudAccessConfig.Prepare(&b.config.ctx)...)
	errs = packer.MultiErrorAppend(errs, b.config.AlicloudImageConfig.Prepare(&b.config.ctx)...)
	errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)

	if errs != nil && len(errs.Errors) > 0 {
		return nil, errs
	}

	log.Println(common.ScrubConfig(b.config, b.config.AlicloudAccessKey, b.config.AlicloudSecretKey))
	return nil, nil
}

func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {

	client, err := b.config.Client()
	if err != nil {
		return nil, err
	}
	state := new(multistep.BasicStateBag)
	state.Put("config", b.config)
	state.Put("client", client)
	state.Put("hook", hook)
	state.Put("ui", ui)
	state.Put("networktype", b.chooseNetworkType())
	var steps []multistep.Step

	// Build the steps
	steps = []multistep.Step{
		&stepPreValidate{
			AlicloudDestImageName: b.config.AlicloudImageName,
			ForceDelete:           b.config.AlicloudImageForceDetele,
		},
		&stepCheckAlicloudSourceImage{
			SourceECSImageId: b.config.AlicloudSourceImage,
		},
		&StepConfigAlicloudKeyPair{
			Debug:                b.config.PackerDebug,
			KeyPairName:          b.config.SSHKeyPairName,
			PrivateKeyFile:       b.config.Comm.SSHPrivateKey,
			TemporaryKeyPairName: b.config.TemporaryKeyPairName,
			SSHAgentAuth:         b.config.Comm.SSHAgentAuth,
			DebugKeyPath:         fmt.Sprintf("ecs_%s.pem", b.config.PackerBuildName),
			RegionId:             b.config.AlicloudRegion,
		},
	}
	if b.chooseNetworkType() == VpcNet {
		steps = append(steps,
			&stepConfigAlicloudVPC{
				VpcId:     b.config.VpcId,
				CidrBlock: b.config.CidrBlock,
				VpcName:   b.config.VpcName,
			},
			&stepConfigAlicloudVSwitch{
				VSwitchId:   b.config.VSwitchId,
				ZoneId:      b.config.ZoneId,
				CidrBlock:   b.config.CidrBlock,
				VSwitchName: b.config.VSwitchName,
			})
	}
	steps = append(steps,
		&stepConfigAlicloudSecurityGroup{
			SecurityGroupId:   b.config.SecurityGroupId,
			SecurityGroupName: b.config.SecurityGroupId,
			RegionId:          b.config.AlicloudRegion,
		},
		&stepCreateAlicloudInstance{
			IOOptimized:             b.config.IOOptimized,
			InstanceType:            b.config.InstanceType,
			UserData:                b.config.UserData,
			UserDataFile:            b.config.UserDataFile,
			RegionId:                b.config.AlicloudRegion,
			InternetChargeType:      b.config.InternetChargeType,
			InternetMaxBandwidthOut: b.config.InternetMaxBandwidthOut,
			InstnaceName:            b.config.InstanceName,
			ZoneId:                  b.config.ZoneId,
		})
	if b.chooseNetworkType() == VpcNet {
		steps = append(steps, &setpConfigAlicloudEIP{
			AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
			RegionId:                 b.config.AlicloudRegion,
			InternetChargeType:       b.config.InternetChargeType,
		})
	} else {
		steps = append(steps, &stepConfigAlicloudPublicIP{
			RegionId: b.config.AlicloudRegion,
		})
	}
	steps = append(steps,
		&stepAttachKeyPar{},
		&stepRunAlicloudInstance{},
		&stepMountAlicloudDisk{},
		&communicator.StepConnect{
			Config: &b.config.RunConfig.Comm,
			Host: SSHHost(
				client,
				b.config.SSHPrivateIp),
			SSHConfig: SSHConfig(
				b.config.RunConfig.Comm.SSHAgentAuth,
				b.config.RunConfig.Comm.SSHUsername,
				b.config.RunConfig.Comm.SSHPassword),
		},
		&common.StepProvision{},
		&stepStopAlicloudInstance{
			ForceStop: b.config.ForceStopInstance,
		},
		&stepDeleteAlicloudImageSnapshots{
			AlicloudImageForceDeteleSnapshots: b.config.AlicloudImageForceDeteleSnapshots,
			AlicloudImageForceDetele:          b.config.AlicloudImageForceDetele,
			AlicloudImageName:                 b.config.AlicloudImageName,
		},
		&stepCreateAlicloudImage{},
		&setpRegionCopyAlicloudImage{
			AlicloudImageDestinationRegions: b.config.AlicloudImageDestinationRegions,
			AlicloudImageDestinationNames:   b.config.AlicloudImageDestinationNames,
			RegionId:                        b.config.AlicloudRegion,
		},
		&setpShareAlicloudImage{
			AlicloudImageShareAccounts:   b.config.AlicloudImageShareAccounts,
			AlicloudImageUNShareAccounts: b.config.AlicloudImageUNShareAccounts,
			RegionId:                     b.config.AlicloudRegion,
		})

	// Run!
	b.runner = common.NewRunner(steps, b.config.PackerConfig, ui)
	b.runner.Run(state)

	// If there was an error, return that
	if rawErr, ok := state.GetOk("error"); ok {
		return nil, rawErr.(error)
	}

	// If there are no ECS images, then just return
	if _, ok := state.GetOk("alicloudimages"); !ok {
		return nil, nil
	}

	// Build the artifact and return it
	artifact := &Artifact{
		AlicloudImages: state.Get("alicloudimages").(map[string]string),
		BuilderIdValue: BuilderId,
		Client:         client,
	}

	return artifact, nil
}

func (b *Builder) Cancel() {
	if b.runner != nil {
		log.Println("Cancelling the step runner...")
		b.runner.Cancel()
	}
}

func (b *Builder) chooseNetworkType() InstanceNetWork {
	if b.isVpcNetRequired() {
		return VpcNet
	} else {
		return ClassicNet
	}
}

func (b *Builder) isVpcNetRequired() bool {
	// UserData and KeyPair only work in a VPC
	return b.isVpcSpecified() || b.isUserDataNeeded() || b.isKeyPairNeeded()
}

func (b *Builder) isVpcSpecified() bool {
	return b.config.VpcId != "" || b.config.VSwitchId != ""
}

func (b *Builder) isUserDataNeeded() bool {
	// Public key setup requires userdata
	if b.config.RunConfig.Comm.SSHPrivateKey != "" {
		return true
	}

	return b.config.UserData != "" || b.config.UserDataFile != ""
}

func (b *Builder) isKeyPairNeeded() bool {
	return b.config.SSHKeyPairName != "" || b.config.TemporaryKeyPairName != ""
}
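For orientation, this is roughly how a builder like the one above is exposed to Packer when built as a standalone plugin. The snippet is a hedged sketch, not part of this commit: it assumes the `packer/plugin` helper package with a `Server`/`RegisterBuilder`/`Serve` API and assumes an import path for this builder; in-tree builders are typically registered through Packer's own plugin command instead.

```
package main

import (
	"github.com/hashicorp/packer/builder/alicloud/ecs"
	"github.com/hashicorp/packer/packer/plugin"
)

func main() {
	// Serve the builder over Packer's plugin RPC interface (assumed API).
	server, err := plugin.Server()
	if err != nil {
		panic(err)
	}
	server.RegisterBuilder(new(ecs.Builder))
	server.Serve()
}
```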
@ -0,0 +1,331 @@
|
||||||
|
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
builderT "github.com/hashicorp/packer/helper/builder/testing"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestBuilderAcc_basic(t *testing.T) {
|
||||||
|
builderT.Test(t, builderT.TestCase{
|
||||||
|
PreCheck: func() {
|
||||||
|
testAccPreCheck(t)
|
||||||
|
},
|
||||||
|
Builder: &Builder{},
|
||||||
|
Template: testBuilderAccBasic,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
//func TestBuilderAcc_windows(t *testing.T) {
|
||||||
|
// builderT.Test(t, builderT.TestCase{
|
||||||
|
// PreCheck: func() {
|
||||||
|
// testAccPreCheck(t)
|
||||||
|
// },
|
||||||
|
// Builder: &Builder{},
|
||||||
|
// Template: testBuilderAccWindows,
|
||||||
|
// })
|
||||||
|
//}
|
||||||
|
|
||||||
|
//func TestBuilderAcc_regionCopy(t *testing.T) {
|
||||||
|
// builderT.Test(t, builderT.TestCase{
|
||||||
|
// PreCheck: func() {
|
||||||
|
// testAccPreCheck(t)
|
||||||
|
// },
|
||||||
|
// Builder: &Builder{},
|
||||||
|
// Template: testBuilderAccRegionCopy,
|
||||||
|
// Check: checkRegionCopy([]string{"cn-hangzhou", "cn-shenzhen"}),
|
||||||
|
// })
|
||||||
|
//}
|
||||||
|
|
||||||
|
func TestBuilderAcc_forceDelete(t *testing.T) {
|
||||||
|
// Build the same alicloud image twice, with ecs_image_force_delete on the second run
|
||||||
|
builderT.Test(t, builderT.TestCase{
|
||||||
|
PreCheck: func() {
|
||||||
|
testAccPreCheck(t)
|
||||||
|
},
|
||||||
|
Builder: &Builder{},
|
||||||
|
Template: buildForceDeregisterConfig("false", "delete"),
|
||||||
|
SkipArtifactTeardown: true,
|
||||||
|
})
|
||||||
|
|
||||||
|
builderT.Test(t, builderT.TestCase{
|
||||||
|
PreCheck: func() {
|
||||||
|
testAccPreCheck(t)
|
||||||
|
},
|
||||||
|
Builder: &Builder{},
|
||||||
|
Template: buildForceDeregisterConfig("true", "delete"),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuilderAcc_ECSImageSharing(t *testing.T) {
|
||||||
|
builderT.Test(t, builderT.TestCase{
|
||||||
|
PreCheck: func() {
|
||||||
|
testAccPreCheck(t)
|
||||||
|
},
|
||||||
|
Builder: &Builder{},
|
||||||
|
Template: testBuilderAccSharing,
|
||||||
|
Check: checkECSImageSharing("1309208528360047"),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuilderAcc_forceDeleteSnapshot(t *testing.T) {
|
||||||
|
destImageName := "delete"
|
||||||
|
|
||||||
|
// Build the same alicloud image name twice, with force_delete_snapshot on the second run
|
||||||
|
builderT.Test(t, builderT.TestCase{
|
||||||
|
PreCheck: func() {
|
||||||
|
testAccPreCheck(t)
|
||||||
|
},
|
||||||
|
Builder: &Builder{},
|
||||||
|
Template: buildForceDeleteSnapshotConfig("false", destImageName),
|
||||||
|
SkipArtifactTeardown: true,
|
||||||
|
})
|
||||||
|
|
||||||
|
// Get image data by image image name
|
||||||
|
client, _ := testAliyunClient()
|
||||||
|
images, _, _ := client.DescribeImages(&ecs.DescribeImagesArgs{
|
||||||
|
ImageName: "packer-test-" + destImageName,
|
||||||
|
RegionId: common.Region("cn-beijing")})
|
||||||
|
|
||||||
|
image := images[0]
|
||||||
|
|
||||||
|
// Get snapshot ids for image
|
||||||
|
snapshotIds := []string{}
|
||||||
|
for _, device := range image.DiskDeviceMappings.DiskDeviceMapping {
|
||||||
|
if device.Device != "" && device.SnapshotId != "" {
|
||||||
|
snapshotIds = append(snapshotIds, device.SnapshotId)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
builderT.Test(t, builderT.TestCase{
|
||||||
|
PreCheck: func() {
|
||||||
|
testAccPreCheck(t)
|
||||||
|
},
|
||||||
|
Builder: &Builder{},
|
||||||
|
Template: buildForceDeleteSnapshotConfig("true", destImageName),
|
||||||
|
Check: checkSnapshotsDeleted(snapshotIds),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkSnapshotsDeleted(snapshotIds []string) builderT.TestCheckFunc {
|
||||||
|
return func(artifacts []packer.Artifact) error {
|
||||||
|
// Verify the snapshots are gone
|
||||||
|
client, _ := testAliyunClient()
|
||||||
|
snapshotResp, _, err := client.DescribeSnapshots(
|
||||||
|
&ecs.DescribeSnapshotsArgs{RegionId: common.Region("cn-beijing"), SnapshotIds: snapshotIds},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Query snapshot failed %v", err)
|
||||||
|
}
|
||||||
|
if len(snapshotResp) > 0 {
|
||||||
|
return fmt.Errorf("Snapshots weren't successfully deleted by " +
|
||||||
|
"`ecs_image_force_delete_snapshots`")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkECSImageSharing(uid string) builderT.TestCheckFunc {
|
||||||
|
return func(artifacts []packer.Artifact) error {
|
||||||
|
if len(artifacts) > 1 {
|
||||||
|
return fmt.Errorf("more than 1 artifact")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the actual *Artifact pointer so we can access the AMIs directly
|
||||||
|
artifactRaw := artifacts[0]
|
||||||
|
artifact, ok := artifactRaw.(*Artifact)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unknown artifact: %#v", artifactRaw)
|
||||||
|
}
|
||||||
|
|
||||||
|
// describe the image, get block devices with a snapshot
|
||||||
|
client, _ := testAliyunClient()
|
||||||
|
imageSharePermissionResponse, err := client.DescribeImageSharePermission(
|
||||||
|
&ecs.ModifyImageSharePermissionArgs{
|
||||||
|
RegionId: "cn-beijing",
|
||||||
|
ImageId: artifact.AlicloudImages["cn-beijing"],
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error retrieving Image Attributes for ECS Image Artifact (%#v) "+
|
||||||
|
"in ECS Image Sharing Test: %s", artifact, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(imageSharePermissionResponse.Accounts.Account) != 1 &&
|
||||||
|
imageSharePermissionResponse.Accounts.Account[0].AliyunId != uid {
|
||||||
|
return fmt.Errorf("share account is incorrect %d",
|
||||||
|
len(imageSharePermissionResponse.Accounts.Account))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkRegionCopy(regions []string) builderT.TestCheckFunc {
|
||||||
|
return func(artifacts []packer.Artifact) error {
|
||||||
|
if len(artifacts) > 1 {
|
||||||
|
return fmt.Errorf("more than 1 artifact")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the actual *Artifact pointer so we can access the AMIs directly
|
||||||
|
artifactRaw := artifacts[0]
|
||||||
|
artifact, ok := artifactRaw.(*Artifact)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unknown artifact: %#v", artifactRaw)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that we copied to only the regions given
|
||||||
|
regionSet := make(map[string]struct{})
|
||||||
|
for _, r := range regions {
|
||||||
|
regionSet[r] = struct{}{}
|
||||||
|
}
|
||||||
|
for r := range artifact.AlicloudImages {
|
||||||
|
if r == "cn-beijing" {
|
||||||
|
delete(regionSet, r)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := regionSet[r]; !ok {
|
||||||
|
return fmt.Errorf("unknown region: %s", r)
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(regionSet, r)
|
||||||
|
}
|
||||||
|
if len(regionSet) > 0 {
|
||||||
|
return fmt.Errorf("didn't copy to: %#v", regionSet)
|
||||||
|
}
|
||||||
|
client, _ := testAliyunClient()
|
||||||
|
for key, value := range artifact.AlicloudImages {
|
||||||
|
client.WaitForImageReady(common.Region(key), value, 1800)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccPreCheck(t *testing.T) {
|
||||||
|
if v := os.Getenv("ALICLOUD_ACCESS_KEY"); v == "" {
|
||||||
|
t.Fatal("ALICLOUD_ACCESS_KEY must be set for acceptance tests")
|
||||||
|
}
|
||||||
|
|
||||||
|
if v := os.Getenv("ALICLOUD_SECRET_KEY"); v == "" {
|
||||||
|
t.Fatal("ALICLOUD_SECRET_KEY must be set for acceptance tests")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAliyunClient() (*ecs.Client, error) {
|
||||||
|
	access := &AlicloudAccessConfig{AlicloudRegion: "cn-beijing"}
	err := access.Config()
	if err != nil {
		return nil, err
	}
	client, err := access.Client()
	if err != nil {
		return nil, err
	}

	return client, nil
}

const testBuilderAccBasic = `
{ "builders": [{
	"type": "test",
	"region": "cn-beijing",
	"instance_type": "ecs.n1.tiny",
	"source_image":"ubuntu_16_0402_64_40G_base_20170222.vhd",
	"io_optimized":"true",
	"ssh_username":"root",
	"image_name": "packer-test_{{timestamp}}"
}]
}`

const testBuilderAccRegionCopy = `
{
	"builders": [{
		"type": "test",
		"region": "cn-beijing",
		"instance_type": "ecs.n1.tiny",
		"source_image":"ubuntu_16_0402_64_40G_base_20170222.vhd",
		"io_optimized":"true",
		"ssh_username":"root",
		"image_name": "packer-test_{{timestamp}}",
		"image_copy_regions": ["cn-hangzhou", "cn-shenzhen"]
	}]
}
`

const testBuilderAccForceDelete = `
{
	"builders": [{
		"type": "test",
		"region": "cn-beijing",
		"instance_type": "ecs.n1.tiny",
		"source_image":"ubuntu_16_0402_64_40G_base_20170222.vhd",
		"io_optimized":"true",
		"ssh_username":"root",
		"image_force_delete": "%s",
		"image_name": "packer-test_%s"
	}]
}
`

const testBuilderAccForceDeleteSnapshot = `
{
	"builders": [{
		"type": "test",
		"region": "cn-beijing",
		"instance_type": "ecs.n1.tiny",
		"source_image":"ubuntu_16_0402_64_40G_base_20170222.vhd",
		"io_optimized":"true",
		"ssh_username":"root",
		"image_force_delete_snapshots": "%s",
		"image_force_delete": "%s",
		"image_name": "packer-test-%s"
	}]
}
`

// share with catsby
const testBuilderAccSharing = `
{
	"builders": [{
		"type": "test",
		"region": "cn-beijing",
		"instance_type": "ecs.n1.tiny",
		"source_image":"ubuntu_16_0402_64_40G_base_20170222.vhd",
		"io_optimized":"true",
		"ssh_username":"root",
		"image_name": "packer-test_{{timestamp}}",
		"image_share_account":["1309208528360047"]
	}]
}
`

func buildForceDeregisterConfig(val, name string) string {
	return fmt.Sprintf(testBuilderAccForceDelete, val, name)
}

func buildForceDeleteSnapshotConfig(val, name string) string {
	return fmt.Sprintf(testBuilderAccForceDeleteSnapshot, val, val, name)
}

const testBuilderAccWindows = `
{ "builders": [{
	"type": "test",
	"region": "cn-beijing",
	"instance_type": "ecs.n1.tiny",
	"source_image":"win2008_64_ent_r2_zh-cn_40G_alibase_20170301.vhd",
	"io_optimized":"true",
	"image_force_delete":"true",
	"communicator": "winrm",
	"winrm_port": 5985,
	"winrm_username": "Administrator",
	"winrm_password": "Test1234",
	"image_name": "packer-test_{{timestamp}}"
}]
}`
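The template constants above are presumably consumed by the acceptance tests defined earlier in this file. As a rough sketch of how one of them is typically wired up, assuming the helper/builder/testing harness (commonly imported as builderT) and its Builder/Template TestCase fields; the harness skips itself unless the PACKER_ACC environment variable is set:

package ecs

import (
	"testing"

	builderT "github.com/hashicorp/packer/helper/builder/testing"
)

// Sketch only: runs the basic template against the real Alicloud API.
// builderT.Test skips the test unless PACKER_ACC is set in the environment.
func TestBuilderAcc_basicSketch(t *testing.T) {
	builderT.Test(t, builderT.TestCase{
		Builder:  &Builder{},
		Template: testBuilderAccBasic,
	})
}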
@@ -0,0 +1,95 @@
package ecs

import (
	"testing"

	"github.com/hashicorp/packer/packer"
)

func testBuilderConfig() map[string]interface{} {
	return map[string]interface{}{
		"access_key": "foo",
		"secret_key": "bar",
		"source_image": "foo",
		"instance_type": "ecs.n1.tiny",
		"region": "cn-beijing",
		"ssh_username": "root",
		"image_name": "foo",
		"io_optimized": true,
	}
}

func TestBuilder_ImplementsBuilder(t *testing.T) {
	var raw interface{}
	raw = &Builder{}
	if _, ok := raw.(packer.Builder); !ok {
		t.Fatalf("Builder should be a builder")
	}
}

func TestBuilder_Prepare_BadType(t *testing.T) {
	b := &Builder{}
	c := map[string]interface{}{
		"access_key": []string{},
	}

	warnings, err := b.Prepare(c)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatalf("prepare should fail")
	}
}

func TestBuilderPrepare_ECSImageName(t *testing.T) {
	var b Builder
	config := testBuilderConfig()

	// Test good
	config["image_name"] = "ecs.n1.tiny"
	warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err != nil {
		t.Fatalf("should not have error: %s", err)
	}

	// Test bad
	config["ecs_image_name"] = "foo {{"
	b = Builder{}
	warnings, err = b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}

	// Test bad
	delete(config, "image_name")
	b = Builder{}
	warnings, err = b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}
}

func TestBuilderPrepare_InvalidKey(t *testing.T) {
	var b Builder
	config := testBuilderConfig()

	// Add a random key
	config["i_should_not_be_valid"] = true
	warnings, err := b.Prepare(config)
	if len(warnings) > 0 {
		t.Fatalf("bad: %#v", warnings)
	}
	if err == nil {
		t.Fatal("should have error")
	}
}
@@ -0,0 +1,100 @@
package ecs

import (
	"fmt"

	"regexp"
	"strings"

	"github.com/denverdino/aliyungo/common"
	"github.com/hashicorp/packer/template/interpolate"
)

type AlicloudDiskDevice struct {
	DiskName string `mapstructure:"disk_name"`
	DiskCategory string `mapstructure:"disk_category"`
	DiskSize int `mapstructure:"disk_size"`
	SnapshotId string `mapstructure:"disk_snapshot_id"`
	Description string `mapstructure:"disk_description"`
	DeleteWithInstance bool `mapstructure:"disk_delete_with_instance"`
	Device string `mapstructure:"disk_device"`
}

type AlicloudDiskDevices struct {
	ECSImagesDiskMappings []AlicloudDiskDevice `mapstructure:"image_disk_mappings"`
}

type AlicloudImageConfig struct {
	AlicloudImageName string `mapstructure:"image_name"`
	AlicloudImageVersion string `mapstructure:"image_version"`
	AlicloudImageDescription string `mapstructure:"image_description"`
	AlicloudImageShareAccounts []string `mapstructure:"image_share_account"`
	AlicloudImageUNShareAccounts []string `mapstructure:"image_unshare_account"`
	AlicloudImageDestinationRegions []string `mapstructure:"image_copy_regions"`
	AlicloudImageDestinationNames []string `mapstructure:"image_copy_names"`
	AlicloudImageForceDetele bool `mapstructure:"image_force_delete"`
	AlicloudImageForceDeteleSnapshots bool `mapstructure:"image_force_delete_snapshots"`
	AlicloudImageForceDeleteInstances bool `mapstructure:"image_force_delete_instances"`
	AlicloudImageSkipRegionValidation bool `mapstructure:"skip_region_validation"`
	AlicloudDiskDevices `mapstructure:",squash"`
}

func (c *AlicloudImageConfig) Prepare(ctx *interpolate.Context) []error {
	var errs []error
	if c.AlicloudImageName == "" {
		errs = append(errs, fmt.Errorf("image_name must be specified"))
	} else if len(c.AlicloudImageName) < 2 || len(c.AlicloudImageName) > 128 {
		errs = append(errs, fmt.Errorf("image_name must be between 2 and 128 characters"))
	} else if strings.HasPrefix(c.AlicloudImageName, "http://") ||
		strings.HasPrefix(c.AlicloudImageName, "https://") {
		errs = append(errs, fmt.Errorf("image_name can't start with 'http://' or 'https://'"))
	}
	reg := regexp.MustCompile("\\s+")
	if reg.FindString(c.AlicloudImageName) != "" {
		errs = append(errs, fmt.Errorf("image_name can't include spaces"))
	}

	if len(c.AlicloudImageDestinationRegions) > 0 {
		regionSet := make(map[string]struct{})
		regions := make([]string, 0, len(c.AlicloudImageDestinationRegions))

		for _, region := range c.AlicloudImageDestinationRegions {
			// If we already saw the region, then don't look again
			if _, ok := regionSet[region]; ok {
				continue
			}

			// Mark that we saw the region
			regionSet[region] = struct{}{}

			if !c.AlicloudImageSkipRegionValidation {
				// Verify the region is real
				if valid := validateRegion(region); valid != nil {
					errs = append(errs, fmt.Errorf("Unknown region: %s", region))
					continue
				}
			}

			regions = append(regions, region)
		}

		c.AlicloudImageDestinationRegions = regions
	}

	if len(errs) > 0 {
		return errs
	}

	return nil
}

func validateRegion(region string) error {

	for _, valid := range common.ValidRegions {
		if region == string(valid) {
			return nil
		}
	}

	return fmt.Errorf("Not a valid alicloud region: %s", region)
}
@@ -0,0 +1,64 @@
package ecs

import (
	"testing"

	"github.com/denverdino/aliyungo/common"
)

func testAlicloudImageConfig() *AlicloudImageConfig {
	return &AlicloudImageConfig{
		AlicloudImageName: "foo",
	}
}

func TestECSImageConfigPrepare_name(t *testing.T) {
	c := testAlicloudImageConfig()
	if err := c.Prepare(nil); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}

	c.AlicloudImageName = ""
	if err := c.Prepare(nil); err == nil {
		t.Fatal("should have error")
	}
}

func TestAMIConfigPrepare_regions(t *testing.T) {
	c := testAlicloudImageConfig()
	c.AlicloudImageDestinationRegions = nil
	if err := c.Prepare(nil); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}

	c.AlicloudImageDestinationRegions = regionsToString()
	if err := c.Prepare(nil); err != nil {
		t.Fatalf("shouldn't have err: %s", err)
	}

	c.AlicloudImageDestinationRegions = []string{"foo"}
	if err := c.Prepare(nil); err == nil {
		t.Fatal("should have error")
	}

	c.AlicloudImageDestinationRegions = []string{"cn-beijing", "cn-hangzhou", "eu-central-1"}
	if err := c.Prepare(nil); err != nil {
		t.Fatalf("bad: %s", err)
	}

	c.AlicloudImageDestinationRegions = []string{"unknow"}
	c.AlicloudImageSkipRegionValidation = true
	if err := c.Prepare(nil); err != nil {
		t.Fatal("shouldn't have error")
	}
	c.AlicloudImageSkipRegionValidation = false
}

func regionsToString() []string {
	var regions []string
	for _, region := range common.ValidRegions {
		regions = append(regions, string(region))
	}
	return regions
}
@@ -0,0 +1,22 @@
package ecs

import (
	"fmt"

	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

func message(state multistep.StateBag, module string) {
	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)

	ui := state.Get("ui").(packer.Ui)

	if cancelled || halted {
		ui.Say(fmt.Sprintf("Deleting %s because of cancellation or error...", module))
	} else {
		ui.Say(fmt.Sprintf("Cleaning up '%s'", module))
	}
}
@@ -0,0 +1,72 @@
package ecs

import (
	"errors"
	"fmt"
	"os"
	"strings"

	"github.com/hashicorp/packer/common/uuid"
	"github.com/hashicorp/packer/helper/communicator"
	"github.com/hashicorp/packer/template/interpolate"
)

type RunConfig struct {
	AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
	ZoneId string `mapstructure:"zone_id"`
	IOOptimized bool `mapstructure:"io_optimized"`
	InstanceType string `mapstructure:"instance_type"`
	Description string `mapstructure:"description"`
	AlicloudSourceImage string `mapstructure:"source_image"`
	ForceStopInstance bool `mapstructure:"force_stop_instance"`
	SecurityGroupId string `mapstructure:"security_group_id"`
	SecurityGroupName string `mapstructure:"security_group_name"`
	UserData string `mapstructure:"user_data"`
	UserDataFile string `mapstructure:"user_data_file"`
	VpcId string `mapstructure:"vpc_id"`
	VpcName string `mapstructure:"vpc_name"`
	CidrBlock string `mapstructure:"vpc_cidr_block"`
	VSwitchId string `mapstructure:"vswitch_id"`
	VSwitchName string `mapstructure:"vswitch_name"`
	InstanceName string `mapstructure:"instance_name"`
	InternetChargeType string `mapstructure:"internet_charge_type"`
	InternetMaxBandwidthOut int `mapstructure:"internet_max_bandwidth_out"`
	TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name"`

	// Communicator settings
	Comm communicator.Config `mapstructure:",squash"`
	SSHKeyPairName string `mapstructure:"ssh_keypair_name"`
	SSHPrivateIp bool `mapstructure:"ssh_private_ip"`
}

func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
	if c.SSHKeyPairName == "" && c.TemporaryKeyPairName == "" &&
		c.Comm.SSHPrivateKey == "" && c.Comm.SSHPassword == "" && c.Comm.WinRMPassword == "" {

		c.TemporaryKeyPairName = fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
	}

	// Validation
	errs := c.Comm.Prepare(ctx)
	if c.AlicloudSourceImage == "" {
		errs = append(errs, errors.New("A source_image must be specified"))
	}

	if strings.TrimSpace(c.AlicloudSourceImage) != c.AlicloudSourceImage {
		errs = append(errs, errors.New("The source_image can't include spaces"))
	}

	if c.InstanceType == "" {
		errs = append(errs, errors.New("An instance_type must be specified"))
	}

	if c.UserData != "" && c.UserDataFile != "" {
		errs = append(errs, fmt.Errorf("Only one of user_data or user_data_file can be specified."))
	} else if c.UserDataFile != "" {
		if _, err := os.Stat(c.UserDataFile); err != nil {
			errs = append(errs, fmt.Errorf("user_data_file not found: %s", c.UserDataFile))
		}
	}

	return errs
}
@@ -0,0 +1,122 @@
package ecs

import (
	"io/ioutil"
	"testing"

	"github.com/hashicorp/packer/helper/communicator"
)

func testConfig() *RunConfig {
	return &RunConfig{
		AlicloudSourceImage: "alicloud_images",
		InstanceType: "ecs.n1.tiny",
		Comm: communicator.Config{
			SSHUsername: "alicloud",
		},
	}
}

func TestRunConfigPrepare(t *testing.T) {
	c := testConfig()
	err := c.Prepare(nil)
	if len(err) > 0 {
		t.Fatalf("err: %s", err)
	}
}

func TestRunConfigPrepare_InstanceType(t *testing.T) {
	c := testConfig()
	c.InstanceType = ""
	if err := c.Prepare(nil); len(err) != 1 {
		t.Fatalf("err: %s", err)
	}
}

func TestRunConfigPrepare_SourceECSImage(t *testing.T) {
	c := testConfig()
	c.AlicloudSourceImage = ""
	if err := c.Prepare(nil); len(err) != 1 {
		t.Fatalf("err: %s", err)
	}
}

func TestRunConfigPrepare_SSHPort(t *testing.T) {
	c := testConfig()
	c.Comm.SSHPort = 0
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}

	if c.Comm.SSHPort != 22 {
		t.Fatalf("invalid value: %d", c.Comm.SSHPort)
	}

	c.Comm.SSHPort = 44
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}

	if c.Comm.SSHPort != 44 {
		t.Fatalf("invalid value: %d", c.Comm.SSHPort)
	}
}

func TestRunConfigPrepare_UserData(t *testing.T) {
	c := testConfig()
	tf, err := ioutil.TempFile("", "packer")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer tf.Close()

	c.UserData = "foo"
	c.UserDataFile = tf.Name()
	if err := c.Prepare(nil); len(err) != 1 {
		t.Fatalf("err: %s", err)
	}
}

func TestRunConfigPrepare_UserDataFile(t *testing.T) {
	c := testConfig()
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}

	c.UserDataFile = "idontexistidontthink"
	if err := c.Prepare(nil); len(err) != 1 {
		t.Fatalf("err: %s", err)
	}

	tf, err := ioutil.TempFile("", "packer")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer tf.Close()

	c.UserDataFile = tf.Name()
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}
}

func TestRunConfigPrepare_TemporaryKeyPairName(t *testing.T) {
	c := testConfig()
	c.TemporaryKeyPairName = ""
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}

	if c.TemporaryKeyPairName == "" {
		t.Fatal("keypair name is empty")
	}

	c.TemporaryKeyPairName = "ssh-key-123"
	if err := c.Prepare(nil); len(err) != 0 {
		t.Fatalf("err: %s", err)
	}

	if c.TemporaryKeyPairName != "ssh-key-123" {
		t.Fatal("keypair name does not match")
	}
}
@@ -0,0 +1,83 @@
package ecs

import (
	"fmt"
	"net"
	"os"
	"time"

	packerssh "github.com/hashicorp/packer/communicator/ssh"
	"github.com/hashicorp/packer/helper/multistep"
	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

var (
	// modified in tests
	sshHostSleepDuration = time.Second
)

type alicloudSSHHelper interface {
}

// SSHHost returns a function that can be given to the SSH communicator
func SSHHost(e alicloudSSHHelper, private bool) func(multistep.StateBag) (string, error) {
	return func(state multistep.StateBag) (string, error) {
		ipAddress := state.Get("ipaddress").(string)
		return ipAddress, nil
	}
}

// SSHConfig returns a function that can be used for the SSH communicator
// config for connecting to the instance created over SSH using the private key
// or password.
func SSHConfig(useAgent bool, username, password string) func(multistep.StateBag) (*ssh.ClientConfig, error) {
	return func(state multistep.StateBag) (*ssh.ClientConfig, error) {
		if useAgent {
			authSock := os.Getenv("SSH_AUTH_SOCK")
			if authSock == "" {
				return nil, fmt.Errorf("SSH_AUTH_SOCK is not set")
			}

			sshAgent, err := net.Dial("unix", authSock)
			if err != nil {
				return nil, fmt.Errorf("Cannot connect to SSH Agent socket %q: %s", authSock, err)
			}

			return &ssh.ClientConfig{
				User: username,
				Auth: []ssh.AuthMethod{
					ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers),
				},
				HostKeyCallback: ssh.InsecureIgnoreHostKey(),
			}, nil
		}

		privateKey, hasKey := state.GetOk("privateKey")
		if hasKey {

			signer, err := ssh.ParsePrivateKey([]byte(privateKey.(string)))
			if err != nil {
				return nil, fmt.Errorf("Error setting up SSH config: %s", err)
			}
			return &ssh.ClientConfig{
				User: username,
				Auth: []ssh.AuthMethod{
					ssh.PublicKeys(signer),
				},
				HostKeyCallback: ssh.InsecureIgnoreHostKey(),
			}, nil

		} else {
			return &ssh.ClientConfig{
				User: username,
				Auth: []ssh.AuthMethod{
					ssh.Password(password),
					ssh.KeyboardInteractive(
						packerssh.PasswordKeyboardInteractive(password)),
				},
				HostKeyCallback: ssh.InsecureIgnoreHostKey(),
			}, nil
		}
	}
}
@@ -0,0 +1,75 @@
package ecs

import (
	"context"
	"fmt"

	"time"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type stepAttachKeyPar struct {
}

func (s *stepAttachKeyPar) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	keyPairName := state.Get("keyPair").(string)
	if keyPairName == "" {
		return multistep.ActionContinue
	}
	ui := state.Get("ui").(packer.Ui)
	client := state.Get("client").(*ecs.Client)
	config := state.Get("config").(Config)
	instance := state.Get("instance").(*ecs.InstanceAttributesType)
	timeoutPoint := time.Now().Add(120 * time.Second)
	for {
		err := client.AttachKeyPair(&ecs.AttachKeyPairArgs{RegionId: common.Region(config.AlicloudRegion),
			KeyPairName: keyPairName, InstanceIds: "[\"" + instance.InstanceId + "\"]"})
		if err != nil {
			e, _ := err.(*common.Error)
			if (!(e.Code == "MissingParameter" || e.Code == "DependencyViolation.WindowsInstance" ||
				e.Code == "InvalidKeyPairName.NotFound" || e.Code == "InvalidRegionId.NotFound")) &&
				time.Now().Before(timeoutPoint) {
				time.Sleep(5 * time.Second)
				continue
			}
			err := fmt.Errorf("Error attaching keypair %s to instance %s: %s",
				keyPairName, instance.InstanceId, err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
		break
	}

	ui.Message(fmt.Sprintf("Attach keypair %s to instance: %s", keyPairName, instance.InstanceId))

	return multistep.ActionContinue
}

func (s *stepAttachKeyPar) Cleanup(state multistep.StateBag) {
	keyPairName := state.Get("keyPair").(string)
	if keyPairName == "" {
		return
	}
	client := state.Get("client").(*ecs.Client)
	config := state.Get("config").(Config)
	ui := state.Get("ui").(packer.Ui)
	instance := state.Get("instance").(*ecs.InstanceAttributesType)

	err := client.DetachKeyPair(&ecs.DetachKeyPairArgs{RegionId: common.Region(config.AlicloudRegion),
		KeyPairName: keyPairName, InstanceIds: "[\"" + instance.InstanceId + "\"]"})
	if err != nil {
		err := fmt.Errorf("Error detaching keypair %s from instance %s: %s", keyPairName,
			instance.InstanceId, err)
		state.Put("error", err)
		ui.Error(err.Error())
		return
	}

	ui.Message(fmt.Sprintf("Detach keypair %s from instance: %s", keyPairName, instance.InstanceId))
}
@@ -0,0 +1,43 @@
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type stepCheckAlicloudSourceImage struct {
	SourceECSImageId string
}

func (s *stepCheckAlicloudSourceImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	config := state.Get("config").(Config)
	ui := state.Get("ui").(packer.Ui)
	images, _, err := client.DescribeImages(&ecs.DescribeImagesArgs{RegionId: common.Region(config.AlicloudRegion),
		ImageId: config.AlicloudSourceImage})
	if err != nil {
		err := fmt.Errorf("Error querying alicloud image: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	if len(images) == 0 {
		err := fmt.Errorf("No alicloud image was found matching filters: %v", config.AlicloudSourceImage)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	ui.Message(fmt.Sprintf("Found image ID: %s", images[0].ImageId))

	state.Put("source_image", &images[0])
	return multistep.ActionContinue
}

func (s *stepCheckAlicloudSourceImage) Cleanup(multistep.StateBag) {}
@@ -0,0 +1,80 @@
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type setpConfigAlicloudEIP struct {
	AssociatePublicIpAddress bool
	RegionId string
	InternetChargeType string
	allocatedId string
}

func (s *setpConfigAlicloudEIP) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	instance := state.Get("instance").(*ecs.InstanceAttributesType)
	ui.Say("Allocating eip")
	ipaddress, allocateId, err := client.AllocateEipAddress(&ecs.AllocateEipAddressArgs{
		RegionId: common.Region(s.RegionId), InternetChargeType: common.InternetChargeType(s.InternetChargeType),
	})
	if err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Error allocating eip: %s", err))
		return multistep.ActionHalt
	}
	s.allocatedId = allocateId
	if err = client.WaitForEip(common.Region(s.RegionId), allocateId,
		ecs.EipStatusAvailable, ALICLOUD_DEFAULT_SHORT_TIMEOUT); err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Error allocating eip: %s", err))
		return multistep.ActionHalt
	}

	if err = client.AssociateEipAddress(allocateId, instance.InstanceId); err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Error binding eip: %s", err))
		return multistep.ActionHalt
	}

	if err = client.WaitForEip(common.Region(s.RegionId), allocateId,
		ecs.EipStatusInUse, ALICLOUD_DEFAULT_SHORT_TIMEOUT); err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Error associating eip: %s", err))
		return multistep.ActionHalt
	}
	ui.Say(fmt.Sprintf("Allocated eip %s", ipaddress))
	state.Put("ipaddress", ipaddress)
	return multistep.ActionContinue
}

func (s *setpConfigAlicloudEIP) Cleanup(state multistep.StateBag) {
	if len(s.allocatedId) == 0 {
		return
	}

	client := state.Get("client").(*ecs.Client)
	instance := state.Get("instance").(*ecs.InstanceAttributesType)
	ui := state.Get("ui").(packer.Ui)

	message(state, "EIP")

	if err := client.UnassociateEipAddress(s.allocatedId, instance.InstanceId); err != nil {
		ui.Say("Failed to unassociate eip.")
	}

	if err := client.WaitForEip(common.Region(s.RegionId), s.allocatedId, ecs.EipStatusAvailable, ALICLOUD_DEFAULT_SHORT_TIMEOUT); err != nil {
		ui.Say("Timeout while unassociating eip.")
	}
	if err := client.ReleaseEipAddress(s.allocatedId); err != nil {
		ui.Say("Failed to release eip.")
	}
}
@@ -0,0 +1,140 @@
package ecs

import (
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"runtime"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type StepConfigAlicloudKeyPair struct {
	Debug bool
	SSHAgentAuth bool
	DebugKeyPath string
	TemporaryKeyPairName string
	KeyPairName string
	PrivateKeyFile string
	RegionId string

	keyName string
}

func (s *StepConfigAlicloudKeyPair) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packer.Ui)

	if s.PrivateKeyFile != "" {
		ui.Say("Using existing SSH private key")
		privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile)
		if err != nil {
			state.Put("error", fmt.Errorf(
				"Error loading configured private key file: %s", err))
			return multistep.ActionHalt
		}

		state.Put("keyPair", s.KeyPairName)
		state.Put("privateKey", string(privateKeyBytes))

		return multistep.ActionContinue
	}

	if s.SSHAgentAuth && s.KeyPairName == "" {
		ui.Say("Using SSH Agent with key pair in source image")
		return multistep.ActionContinue
	}

	if s.SSHAgentAuth && s.KeyPairName != "" {
		ui.Say(fmt.Sprintf("Using SSH Agent for existing key pair %s", s.KeyPairName))
		state.Put("keyPair", s.KeyPairName)
		return multistep.ActionContinue
	}

	if s.TemporaryKeyPairName == "" {
		ui.Say("Not using temporary keypair")
		state.Put("keyPair", "")
		return multistep.ActionContinue
	}

	client := state.Get("client").(*ecs.Client)

	ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.TemporaryKeyPairName))
	keyResp, err := client.CreateKeyPair(&ecs.CreateKeyPairArgs{
		KeyPairName: s.TemporaryKeyPairName,
		RegionId: common.Region(s.RegionId),
	})
	if err != nil {
		state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err))
		return multistep.ActionHalt
	}

	// Set the keyname so we know to delete it later
	s.keyName = s.TemporaryKeyPairName

	// Set some state data for use in future steps
	state.Put("keyPair", s.keyName)
	state.Put("privateKey", keyResp.PrivateKeyBody)

	// If we're in debug mode, output the private key to the working
	// directory.
	if s.Debug {
		ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.DebugKeyPath))
		f, err := os.Create(s.DebugKeyPath)
		if err != nil {
			state.Put("error", fmt.Errorf("Error saving debug key: %s", err))
			return multistep.ActionHalt
		}
		defer f.Close()

		// Write the key out
		if _, err := f.Write([]byte(keyResp.PrivateKeyBody)); err != nil {
			state.Put("error", fmt.Errorf("Error saving debug key: %s", err))
			return multistep.ActionHalt
		}

		// Chmod it so that it is SSH ready
		if runtime.GOOS != "windows" {
			if err := f.Chmod(0600); err != nil {
				state.Put("error", fmt.Errorf("Error setting permissions of debug key: %s", err))
				return multistep.ActionHalt
			}
		}
	}

	return multistep.ActionContinue
}

func (s *StepConfigAlicloudKeyPair) Cleanup(state multistep.StateBag) {
	// If no key name is set, then we never created it, so just return
	// If we used an SSH private key file, do not go about deleting
	// keypairs
	if s.PrivateKeyFile != "" || (s.KeyPairName == "" && s.keyName == "") {
		return
	}

	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)

	// Remove the keypair
	ui.Say("Deleting temporary keypair...")
	err := client.DeleteKeyPairs(&ecs.DeleteKeyPairsArgs{
		RegionId: common.Region(s.RegionId),
		KeyPairNames: "[\"" + s.keyName + "\"]",
	})
	if err != nil {
		ui.Error(fmt.Sprintf(
			"Error cleaning up keypair. Please delete the key manually: %s", s.keyName))
	}

	// Also remove the physical key if we're debugging.
	if s.Debug {
		if err := os.Remove(s.DebugKeyPath); err != nil {
			ui.Error(fmt.Sprintf(
				"Error removing debug key '%s': %s", s.DebugKeyPath, err))
		}
	}
}
@@ -0,0 +1,36 @@
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type stepConfigAlicloudPublicIP struct {
	publicIPAdress string
	RegionId string
}

func (s *stepConfigAlicloudPublicIP) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	instance := state.Get("instance").(*ecs.InstanceAttributesType)

	ipaddress, err := client.AllocatePublicIpAddress(instance.InstanceId)
	if err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Error allocating public ip: %s", err))
		return multistep.ActionHalt
	}
	s.publicIPAdress = ipaddress
	ui.Say(fmt.Sprintf("Allocated public ip address %s.", ipaddress))
	state.Put("ipaddress", ipaddress)
	return multistep.ActionContinue
}

func (s *stepConfigAlicloudPublicIP) Cleanup(state multistep.StateBag) {
}
@@ -0,0 +1,138 @@
package ecs

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type stepConfigAlicloudSecurityGroup struct {
	SecurityGroupId string
	SecurityGroupName string
	Description string
	VpcId string
	RegionId string
	isCreate bool
}

func (s *stepConfigAlicloudSecurityGroup) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	networkType := state.Get("networktype").(InstanceNetWork)

	var securityGroupItems []ecs.SecurityGroupItemType
	var err error
	if len(s.SecurityGroupId) != 0 {
		if networkType == VpcNet {
			vpcId := state.Get("vpcid").(string)
			securityGroupItems, _, err = client.DescribeSecurityGroups(&ecs.DescribeSecurityGroupsArgs{
				VpcId: vpcId,
				RegionId: common.Region(s.RegionId),
			})
		} else {
			securityGroupItems, _, err = client.DescribeSecurityGroups(&ecs.DescribeSecurityGroupsArgs{
				RegionId: common.Region(s.RegionId),
			})
		}

		if err != nil {
			ui.Say(fmt.Sprintf("Failed querying security group: %s", err))
			state.Put("error", err)
			return multistep.ActionHalt
		}
		for _, securityGroupItem := range securityGroupItems {
			if securityGroupItem.SecurityGroupId == s.SecurityGroupId {
				state.Put("securitygroupid", s.SecurityGroupId)
				s.isCreate = false
				return multistep.ActionContinue
			}
		}
		s.isCreate = false
		message := fmt.Sprintf("The specified security group {%s} doesn't exist.", s.SecurityGroupId)
		state.Put("error", errors.New(message))
		ui.Say(message)
		return multistep.ActionHalt
	}
	var securityGroupId string
	ui.Say("Creating security groups...")
	if networkType == VpcNet {
		vpcId := state.Get("vpcid").(string)
		securityGroupId, err = client.CreateSecurityGroup(&ecs.CreateSecurityGroupArgs{
			RegionId: common.Region(s.RegionId),
			SecurityGroupName: s.SecurityGroupName,
			VpcId: vpcId,
		})
	} else {
		securityGroupId, err = client.CreateSecurityGroup(&ecs.CreateSecurityGroupArgs{
			RegionId: common.Region(s.RegionId),
			SecurityGroupName: s.SecurityGroupName,
		})
	}
	if err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Failed creating security group %s.", err))
		return multistep.ActionHalt
	}
	state.Put("securitygroupid", securityGroupId)
	s.isCreate = true
	s.SecurityGroupId = securityGroupId
	err = client.AuthorizeSecurityGroupEgress(&ecs.AuthorizeSecurityGroupEgressArgs{
		SecurityGroupId: securityGroupId,
		RegionId: common.Region(s.RegionId),
		IpProtocol: ecs.IpProtocolAll,
		PortRange: "-1/-1",
		NicType: ecs.NicTypeInternet,
		DestCidrIp: "0.0.0.0/0", //The input parameter "DestGroupId" or "DestCidrIp" cannot be both blank.
	})
	if err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Failed authorizing security group: %s", err))
		return multistep.ActionHalt
	}
	err = client.AuthorizeSecurityGroup(&ecs.AuthorizeSecurityGroupArgs{
		SecurityGroupId: securityGroupId,
		RegionId: common.Region(s.RegionId),
		IpProtocol: ecs.IpProtocolAll,
		PortRange: "-1/-1",
		NicType: ecs.NicTypeInternet,
		SourceCidrIp: "0.0.0.0/0", //The input parameter "SourceGroupId" or "SourceCidrIp" cannot be both blank.
	})
	if err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Failed authorizing security group: %s", err))
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}

func (s *stepConfigAlicloudSecurityGroup) Cleanup(state multistep.StateBag) {
	if !s.isCreate {
		return
	}

	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)

	message(state, "security group")
	timeoutPoint := time.Now().Add(120 * time.Second)
	for {
		if err := client.DeleteSecurityGroup(common.Region(s.RegionId), s.SecurityGroupId); err != nil {
			e, _ := err.(*common.Error)
			if e.Code == "DependencyViolation" && time.Now().Before(timeoutPoint) {
				time.Sleep(5 * time.Second)
				continue
			}
			ui.Error(fmt.Sprintf("Failed to delete security group, it may still be around: %s", err))
			return
		}
		break
	}
}
@@ -0,0 +1,97 @@
package ecs

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type stepConfigAlicloudVPC struct {
	VpcId string
	CidrBlock string //192.168.0.0/16 or 172.16.0.0/16 (default)
	VpcName string
	isCreate bool
}

func (s *stepConfigAlicloudVPC) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(Config)
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)

	if len(s.VpcId) != 0 {
		vpcs, _, err := client.DescribeVpcs(&ecs.DescribeVpcsArgs{
			VpcId: s.VpcId,
			RegionId: common.Region(config.AlicloudRegion),
		})
		if err != nil {
			ui.Say(fmt.Sprintf("Failed querying vpcs: %s", err))
			state.Put("error", err)
			return multistep.ActionHalt
		}
		if len(vpcs) > 0 {
			vpc := vpcs[0]
			state.Put("vpcid", vpc.VpcId)
			s.isCreate = false
			return multistep.ActionContinue
		}
		message := fmt.Sprintf("The specified vpc {%s} doesn't exist.", s.VpcId)
		state.Put("error", errors.New(message))
		ui.Say(message)
		return multistep.ActionHalt
	}
	ui.Say("Creating vpc")
	vpc, err := client.CreateVpc(&ecs.CreateVpcArgs{
		RegionId: common.Region(config.AlicloudRegion),
		CidrBlock: s.CidrBlock,
		VpcName: s.VpcName,
	})
	if err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Failed creating vpc: %s", err))
		return multistep.ActionHalt
	}
	err = client.WaitForVpcAvailable(common.Region(config.AlicloudRegion), vpc.VpcId, ALICLOUD_DEFAULT_SHORT_TIMEOUT)
	if err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Failed waiting for vpc to become available: %s", err))
		return multistep.ActionHalt
	}

	state.Put("vpcid", vpc.VpcId)
	s.isCreate = true
	s.VpcId = vpc.VpcId
	return multistep.ActionContinue
}

func (s *stepConfigAlicloudVPC) Cleanup(state multistep.StateBag) {
	if !s.isCreate {
		return
	}

	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)

	message(state, "VPC")
	timeoutPoint := time.Now().Add(60 * time.Second)
	for {
		if err := client.DeleteVpc(s.VpcId); err != nil {
			e, _ := err.(*common.Error)
			if (e.Code == "DependencyViolation.Instance" || e.Code == "DependencyViolation.RouteEntry" ||
				e.Code == "DependencyViolation.VSwitch" ||
				e.Code == "DependencyViolation.SecurityGroup") && time.Now().Before(timeoutPoint) {
				time.Sleep(1 * time.Second)
				continue
			}
			ui.Error(fmt.Sprintf("Error deleting vpc, it may still be around: %s", err))
			return
		}
		break
	}
}
@@ -0,0 +1,149 @@
package ecs

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type stepConfigAlicloudVSwitch struct {
	VSwitchId string
	ZoneId string
	isCreate bool
	CidrBlock string
	VSwitchName string
}

func (s *stepConfigAlicloudVSwitch) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	vpcId := state.Get("vpcid").(string)
	config := state.Get("config").(Config)

	if len(s.VSwitchId) != 0 {
		vswitchs, _, err := client.DescribeVSwitches(&ecs.DescribeVSwitchesArgs{
			VpcId: vpcId,
			VSwitchId: s.VSwitchId,
			ZoneId: s.ZoneId,
		})
		if err != nil {
			ui.Say(fmt.Sprintf("Failed querying vswitch: %s", err))
			state.Put("error", err)
			return multistep.ActionHalt
		}
		if len(vswitchs) > 0 {
			vswitch := vswitchs[0]
			state.Put("vswitchid", vswitch.VSwitchId)
			s.isCreate = false
			return multistep.ActionContinue
		}
		s.isCreate = false
		message := fmt.Sprintf("The specified vswitch {%s} doesn't exist.", s.VSwitchId)
		state.Put("error", errors.New(message))
		ui.Say(message)
		return multistep.ActionHalt
	}
	if s.ZoneId == "" {

		zones, err := client.DescribeZones(common.Region(config.AlicloudRegion))
		if err != nil {
			ui.Say(fmt.Sprintf("Query for available zones failed: %s", err))
			state.Put("error", err)
			return multistep.ActionHalt
		}
		var instanceTypes []string
		for _, zone := range zones {
			isVSwitchSupported := false
			for _, resourceType := range zone.AvailableResourceCreation.ResourceTypes {
				if resourceType == ecs.ResourceTypeVSwitch {
					isVSwitchSupported = true
				}
			}
			if isVSwitchSupported {
				for _, instanceType := range zone.AvailableInstanceTypes.InstanceTypes {
					if instanceType == config.InstanceType {
						s.ZoneId = zone.ZoneId
						break
					}
					instanceTypes = append(instanceTypes, instanceType)
				}
			}
		}

		if s.ZoneId == "" {
			if len(instanceTypes) > 0 {
				ui.Say(fmt.Sprintf("The instance type %s isn't available in this region."+
					"\n You can either change the instance to one of following: %v \n"+
					"or choose another region.", config.InstanceType, instanceTypes))

				state.Put("error", fmt.Errorf("The instance type %s isn't available in this region."+
					"\n You can either change the instance to one of following: %v \n"+
					"or choose another region.", config.InstanceType, instanceTypes))
				return multistep.ActionHalt
			} else {
				ui.Say(fmt.Sprintf("The instance type %s isn't available in this region."+
					"\n You can change to other regions.", config.InstanceType))

				state.Put("error", fmt.Errorf("The instance type %s isn't available in this region."+
					"\n You can change to other regions.", config.InstanceType))
				return multistep.ActionHalt
			}
		}
	}
	if config.CidrBlock == "" {
		s.CidrBlock = "172.16.0.0/24" //use the default CidrBlock
	}
	ui.Say("Creating vswitch...")
	vswitchId, err := client.CreateVSwitch(&ecs.CreateVSwitchArgs{
		CidrBlock: s.CidrBlock,
		ZoneId: s.ZoneId,
		VpcId: vpcId,
		VSwitchName: s.VSwitchName,
	})
	if err != nil {
		state.Put("error", err)
		ui.Say(fmt.Sprintf("Create vswitch failed %v", err))
		return multistep.ActionHalt
	}
	if err := client.WaitForVSwitchAvailable(vpcId, s.VSwitchId, ALICLOUD_DEFAULT_TIMEOUT); err != nil {
		state.Put("error", err)
		ui.Error(fmt.Sprintf("Timeout waiting for vswitch to become available: %v", err))
		return multistep.ActionHalt
	}
	state.Put("vswitchid", vswitchId)
	s.isCreate = true
	s.VSwitchId = vswitchId
	return multistep.ActionContinue
}

func (s *stepConfigAlicloudVSwitch) Cleanup(state multistep.StateBag) {
	if !s.isCreate {
		return
	}

	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	message(state, "vSwitch")
	timeoutPoint := time.Now().Add(10 * time.Second)
	for {
		if err := client.DeleteVSwitch(s.VSwitchId); err != nil {
			e, _ := err.(*common.Error)
			if (e.Code == "IncorrectVSwitchStatus" || e.Code == "DependencyViolation" ||
				e.Code == "DependencyViolation.HaVip" ||
				e.Code == "IncorretRouteEntryStatus") && time.Now().Before(timeoutPoint) {
				time.Sleep(1 * time.Second)
				continue
			}
			ui.Error(fmt.Sprintf("Error deleting vswitch, it may still be around: %s", err))
			return
		}
		break
	}
}
@@ -0,0 +1,95 @@
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type stepCreateAlicloudImage struct {
	image *ecs.ImageType
}

func (s *stepCreateAlicloudImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(Config)
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)

	// Create the alicloud image
	ui.Say(fmt.Sprintf("Creating image: %s", config.AlicloudImageName))
	var imageId string
	var err error

	instance := state.Get("instance").(*ecs.InstanceAttributesType)
	imageId, err = client.CreateImage(&ecs.CreateImageArgs{
		RegionId: common.Region(config.AlicloudRegion),
		InstanceId: instance.InstanceId,
		ImageName: config.AlicloudImageName,
		ImageVersion: config.AlicloudImageVersion,
		Description: config.AlicloudImageDescription})

	if err != nil {
		err := fmt.Errorf("Error creating image: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	err = client.WaitForImageReady(common.Region(config.AlicloudRegion),
		imageId, ALICLOUD_DEFAULT_LONG_TIMEOUT)
	if err != nil {
		err := fmt.Errorf("Timeout waiting for image to be created: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	images, _, err := client.DescribeImages(&ecs.DescribeImagesArgs{
		RegionId: common.Region(config.AlicloudRegion),
		ImageId: imageId})
	if err != nil {
		err := fmt.Errorf("Error querying created image: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	if len(images) == 0 {
		err := fmt.Errorf("Unable to find created image: %s", imageId)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	s.image = &images[0]

	state.Put("alicloudimage", imageId)
	alicloudImages := make(map[string]string)
	alicloudImages[config.AlicloudRegion] = images[0].ImageId
	state.Put("alicloudimages", alicloudImages)

	return multistep.ActionContinue
}

func (s *stepCreateAlicloudImage) Cleanup(state multistep.StateBag) {
	if s.image == nil {
		return
	}
	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)
	if !cancelled && !halted {
		return
	}

	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	config := state.Get("config").(Config)

	ui.Say("Deleting the image because of cancellation or error...")
	if err := client.DeleteImage(common.Region(config.AlicloudRegion), s.image.ImageId); err != nil {
		ui.Error(fmt.Sprintf("Error deleting image, it may still be around: %s", err))
		return
	}
}
@@ -0,0 +1,164 @@
package ecs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/denverdino/aliyungo/common"
|
||||||
|
"github.com/denverdino/aliyungo/ecs"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
)
|
||||||
|
|
||||||
|
type stepCreateAlicloudInstance struct {
|
||||||
|
IOOptimized bool
|
||||||
|
InstanceType string
|
||||||
|
UserData string
|
||||||
|
UserDataFile string
|
||||||
|
instanceId string
|
||||||
|
RegionId string
|
||||||
|
InternetChargeType string
|
||||||
|
InternetMaxBandwidthOut int
|
||||||
|
InstnaceName string
|
||||||
|
ZoneId string
|
||||||
|
instance *ecs.InstanceAttributesType
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepCreateAlicloudInstance) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
config := state.Get("config").(Config)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
source_image := state.Get("source_image").(*ecs.ImageType)
|
||||||
|
network_type := state.Get("networktype").(InstanceNetWork)
|
||||||
|
securityGroupId := state.Get("securitygroupid").(string)
|
||||||
|
var instanceId string
|
||||||
|
var err error
|
||||||
|
|
||||||
|
ioOptimized := ecs.IoOptimizedNone
|
||||||
|
if s.IOOptimized {
|
||||||
|
ioOptimized = ecs.IoOptimizedOptimized
|
||||||
|
}
|
||||||
|
password := config.Comm.SSHPassword
|
||||||
|
if password == "" && config.Comm.WinRMPassword != "" {
|
||||||
|
password = config.Comm.WinRMPassword
|
||||||
|
}
|
||||||
|
ui.Say("Creating instance.")
|
||||||
|
if network_type == VpcNet {
|
||||||
|
userData, err := s.getUserData(state)
|
||||||
|
if err != nil {
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
vswitchId := state.Get("vswitchid").(string)
|
||||||
|
instanceId, err = client.CreateInstance(&ecs.CreateInstanceArgs{
|
||||||
|
RegionId: common.Region(s.RegionId),
|
||||||
|
ImageId: source_image.ImageId,
|
||||||
|
InstanceType: s.InstanceType,
|
||||||
|
InternetChargeType: common.InternetChargeType(s.InternetChargeType), //"PayByTraffic",
|
||||||
|
InternetMaxBandwidthOut: s.InternetMaxBandwidthOut,
|
||||||
|
UserData: userData,
|
||||||
|
IoOptimized: ioOptimized,
|
||||||
|
VSwitchId: vswitchId,
|
||||||
|
SecurityGroupId: securityGroupId,
|
||||||
|
InstanceName: s.InstnaceName,
|
||||||
|
Password: password,
|
||||||
|
ZoneId: s.ZoneId,
|
||||||
|
DataDisk: diskDeviceToDiskType(config.AlicloudImageConfig.ECSImagesDiskMappings),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error creating instance: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if s.InstanceType == "" {
|
||||||
|
s.InstanceType = "PayByTraffic"
|
||||||
|
}
|
||||||
|
if s.InternetMaxBandwidthOut == 0 {
|
||||||
|
s.InternetMaxBandwidthOut = 5
|
||||||
|
}
|
||||||
|
instanceId, err = client.CreateInstance(&ecs.CreateInstanceArgs{
|
||||||
|
RegionId: common.Region(s.RegionId),
|
||||||
|
ImageId: source_image.ImageId,
|
||||||
|
InstanceType: s.InstanceType,
|
||||||
|
InternetChargeType: common.InternetChargeType(s.InternetChargeType), //"PayByTraffic",
|
||||||
|
InternetMaxBandwidthOut: s.InternetMaxBandwidthOut,
|
||||||
|
IoOptimized: ioOptimized,
|
||||||
|
SecurityGroupId: securityGroupId,
|
||||||
|
InstanceName: s.InstnaceName,
|
||||||
|
Password: password,
|
||||||
|
ZoneId: s.ZoneId,
|
||||||
|
DataDisk: diskDeviceToDiskType(config.AlicloudImageConfig.ECSImagesDiskMappings),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error creating instance: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = client.WaitForInstance(instanceId, ecs.Stopped, ALICLOUD_DEFAULT_TIMEOUT)
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error creating instance: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
instance, err := client.DescribeInstanceAttribute(instanceId)
|
||||||
|
if err != nil {
|
||||||
|
ui.Say(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
s.instance = instance
|
||||||
|
state.Put("instance", instance)
|
||||||
|
|
||||||
|
return multistep.ActionContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepCreateAlicloudInstance) Cleanup(state multistep.StateBag) {
|
||||||
|
if s.instance == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
message(state, "instance")
|
||||||
|
client := state.Get("client").(*ecs.Client)
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
err := client.DeleteInstance(s.instance.InstanceId)
|
||||||
|
if err != nil {
|
||||||
|
ui.Say(fmt.Sprintf("Failed to clean up instance %s: %v", s.instance.InstanceId, err.Error()))
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stepCreateAlicloudInstance) getUserData(state multistep.StateBag) (string, error) {
|
||||||
|
userData := s.UserData
|
||||||
|
if s.UserDataFile != "" {
|
||||||
|
data, err := ioutil.ReadFile(s.UserDataFile)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
userData = string(data)
|
||||||
|
}
|
||||||
|
log.Printf(userData)
|
||||||
|
return userData, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func diskDeviceToDiskType(diskDevices []AlicloudDiskDevice) []ecs.DataDiskType {
|
||||||
|
result := make([]ecs.DataDiskType, len(diskDevices))
|
||||||
|
for _, diskDevice := range diskDevices {
|
||||||
|
result = append(result, ecs.DataDiskType{
|
||||||
|
DiskName: diskDevice.DiskName,
|
||||||
|
Category: ecs.DiskCategory(diskDevice.DiskCategory),
|
||||||
|
Size: diskDevice.DiskSize,
|
||||||
|
SnapshotId: diskDevice.SnapshotId,
|
||||||
|
Description: diskDevice.Description,
|
||||||
|
DeleteWithInstance: diskDevice.DeleteWithInstance,
|
||||||
|
Device: diskDevice.Device,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
|
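`diskDeviceToDiskType` builds its result with `append`, so the slice has to start at length zero. As a side note, a minimal self-contained sketch (hypothetical `item` type, not part of the builder) contrasting the two `make` forms:

```go
package main

import "fmt"

type item struct{ name string }

func main() {
	src := []string{"a", "b"}

	// Length-n allocation: the first len(src) elements are zero values,
	// and append adds the real ones after them.
	padded := make([]item, len(src))
	for _, n := range src {
		padded = append(padded, item{name: n})
	}
	fmt.Println(len(padded), padded) // 4 [{} {} {a} {b}]

	// Zero-length, capacity-n allocation: append fills the slice in place
	// without placeholder zero values.
	compact := make([]item, 0, len(src))
	for _, n := range src {
		compact = append(compact, item{name: n})
	}
	fmt.Println(len(compact), compact) // 2 [{a} {b}]
}
```

With a length-n allocation the appended values land after n zero-valued placeholders, which is the subtle pitfall the capacity-only form avoids.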
@@ -0,0 +1,64 @@
package ecs

import (
	"context"
	"fmt"
	"log"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type stepDeleteAlicloudImageSnapshots struct {
	AlicloudImageForceDetele          bool
	AlicloudImageForceDeteleSnapshots bool
	AlicloudImageName                 string
}

func (s *stepDeleteAlicloudImageSnapshots) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	config := state.Get("config").(Config)
	ui.Say("Deleting image snapshots.")
	// Check for force delete
	if s.AlicloudImageForceDetele {
		images, _, err := client.DescribeImages(&ecs.DescribeImagesArgs{
			RegionId:  common.Region(config.AlicloudRegion),
			ImageName: s.AlicloudImageName,
		})
		if len(images) < 1 {
			return multistep.ActionContinue
		}
		for _, image := range images {
			if image.ImageOwnerAlias != string(ecs.ImageOwnerSelf) {
				log.Printf("You can only delete customized images owned by you, skipping %s ", image.ImageId)
				continue
			}
			err = client.DeleteImage(common.Region(config.AlicloudRegion), image.ImageId)
			if err != nil {
				err := fmt.Errorf("Failed to delete image: %s", err)
				state.Put("error", err)
				ui.Error(err.Error())
				return multistep.ActionHalt
			}
			if s.AlicloudImageForceDeteleSnapshots {
				for _, diskDevice := range image.DiskDeviceMappings.DiskDeviceMapping {
					if err := client.DeleteSnapshot(diskDevice.SnapshotId); err != nil {
						err := fmt.Errorf("Deleting ECS snapshot failed: %s", err)
						state.Put("error", err)
						ui.Error(err.Error())
						return multistep.ActionHalt
					}
				}
			}
		}

	}

	return multistep.ActionContinue
}

func (s *stepDeleteAlicloudImageSnapshots) Cleanup(state multistep.StateBag) {
}
@@ -0,0 +1,72 @@
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type stepMountAlicloudDisk struct {
}

func (s *stepMountAlicloudDisk) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	config := state.Get("config").(Config)
	ui := state.Get("ui").(packer.Ui)
	instance := state.Get("instance").(*ecs.InstanceAttributesType)
	alicloudDiskDevices := config.ECSImagesDiskMappings
	if len(config.ECSImagesDiskMappings) == 0 {
		return multistep.ActionContinue
	}
	ui.Say("Mounting disks.")
	disks, _, err := client.DescribeDisks(&ecs.DescribeDisksArgs{InstanceId: instance.InstanceId,
		RegionId: instance.RegionId})
	if err != nil {
		err := fmt.Errorf("Error querying disks: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	for _, disk := range disks {
		if disk.Status == ecs.DiskStatusAvailable {
			if err := client.AttachDisk(&ecs.AttachDiskArgs{DiskId: disk.DiskId,
				InstanceId: instance.InstanceId,
				Device:     getDevice(&disk, alicloudDiskDevices),
			}); err != nil {
				err := fmt.Errorf("Error mounting disks: %s", err)
				state.Put("error", err)
				ui.Error(err.Error())
				return multistep.ActionHalt
			}
		}
	}
	for _, disk := range disks {
		if err := client.WaitForDisk(instance.RegionId, disk.DiskId, ecs.DiskStatusInUse, ALICLOUD_DEFAULT_SHORT_TIMEOUT); err != nil {
			err := fmt.Errorf("Timeout waiting for mount: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
	}
	ui.Say("Finished mounting disks.")
	return multistep.ActionContinue
}

func (s *stepMountAlicloudDisk) Cleanup(state multistep.StateBag) {

}

func getDevice(disk *ecs.DiskItemType, diskDevices []AlicloudDiskDevice) string {
	if disk.Device != "" {
		return disk.Device
	}
	for _, alicloudDiskDevice := range diskDevices {
		if alicloudDiskDevice.DiskName == disk.DiskName || alicloudDiskDevice.SnapshotId == disk.SourceSnapshotId {
			return alicloudDiskDevice.Device
		}
	}
	return ""
}
@@ -0,0 +1,49 @@
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type stepPreValidate struct {
	AlicloudDestImageName string
	ForceDelete           bool
}

func (s *stepPreValidate) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packer.Ui)
	if s.ForceDelete {
		ui.Say("Force delete flag found, skipping prevalidating image name.")
		return multistep.ActionContinue
	}

	client := state.Get("client").(*ecs.Client)
	config := state.Get("config").(Config)
	ui.Say("Prevalidating image name...")
	images, _, err := client.DescribeImages(&ecs.DescribeImagesArgs{
		ImageName: s.AlicloudDestImageName,
		RegionId:  common.Region(config.AlicloudRegion)})

	if err != nil {
		err := fmt.Errorf("Error querying alicloud image: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	if len(images) > 0 {
		err := fmt.Errorf("Error: name conflicts with an existing alicloud image: %s", images[0].ImageId)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}

func (s *stepPreValidate) Cleanup(multistep.StateBag) {}
@@ -0,0 +1,72 @@
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type setpRegionCopyAlicloudImage struct {
	AlicloudImageDestinationRegions []string
	AlicloudImageDestinationNames   []string
	RegionId                        string
}

func (s *setpRegionCopyAlicloudImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	if len(s.AlicloudImageDestinationRegions) == 0 {
		return multistep.ActionContinue
	}
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	imageId := state.Get("alicloudimage").(string)
	alicloudImages := state.Get("alicloudimages").(map[string]string)
	region := common.Region(s.RegionId)

	numberOfName := len(s.AlicloudImageDestinationNames)
	for index, destinationRegion := range s.AlicloudImageDestinationRegions {
		if destinationRegion == s.RegionId {
			continue
		}
		ecsImageName := ""
		if numberOfName > 0 && index < numberOfName {
			ecsImageName = s.AlicloudImageDestinationNames[index]
		}
		imageId, err := client.CopyImage(
			&ecs.CopyImageArgs{
				RegionId:             region,
				ImageId:              imageId,
				DestinationRegionId:  common.Region(destinationRegion),
				DestinationImageName: ecsImageName,
			})
		if err != nil {
			state.Put("error", err)
			ui.Say(fmt.Sprintf("Error copying images: %s", err))
			return multistep.ActionHalt
		}
		alicloudImages[destinationRegion] = imageId
	}
	return multistep.ActionContinue
}

func (s *setpRegionCopyAlicloudImage) Cleanup(state multistep.StateBag) {
	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)
	if cancelled || halted {
		ui := state.Get("ui").(packer.Ui)
		client := state.Get("client").(*ecs.Client)
		alicloudImages := state.Get("alicloudimages").(map[string]string)
		ui.Say("Stopping image copy because of cancellation or error...")
		for copiedRegionId, copiedImageId := range alicloudImages {
			if copiedRegionId == s.RegionId {
				continue
			}
			if err := client.CancelCopyImage(common.Region(copiedRegionId), copiedImageId); err != nil {
				ui.Say(fmt.Sprintf("Error cancelling copy image: %v", err))
			}
		}
	}
}
@@ -0,0 +1,57 @@
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type stepRunAlicloudInstance struct {
}

func (s *stepRunAlicloudInstance) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	instance := state.Get("instance").(*ecs.InstanceAttributesType)

	err := client.StartInstance(instance.InstanceId)
	if err != nil {
		err := fmt.Errorf("Error starting instance: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	ui.Say("Starting instance.")
	err = client.WaitForInstance(instance.InstanceId, ecs.Running, ALICLOUD_DEFAULT_TIMEOUT)
	if err != nil {
		err := fmt.Errorf("Timeout waiting for instance to start: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}

func (s *stepRunAlicloudInstance) Cleanup(state multistep.StateBag) {
	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)
	if cancelled || halted {
		ui := state.Get("ui").(packer.Ui)
		client := state.Get("client").(*ecs.Client)
		instance := state.Get("instance").(*ecs.InstanceAttributesType)
		instanceAttribute, _ := client.DescribeInstanceAttribute(instance.InstanceId)
		if instanceAttribute.Status == ecs.Starting || instanceAttribute.Status == ecs.Running {
			if err := client.StopInstance(instance.InstanceId, true); err != nil {
				ui.Say(fmt.Sprintf("Error stopping instance %s, it may still be around: %s", instance.InstanceId, err))
				return
			}
			if err := client.WaitForInstance(instance.InstanceId, ecs.Stopped, ALICLOUD_DEFAULT_TIMEOUT); err != nil {
				ui.Say(fmt.Sprintf("Error stopping instance %s, it may still be around: %s", instance.InstanceId, err))
			}
		}
	}
}
@@ -0,0 +1,61 @@
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type setpShareAlicloudImage struct {
	AlicloudImageShareAccounts   []string
	AlicloudImageUNShareAccounts []string
	RegionId                     string
}

func (s *setpShareAlicloudImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	ui := state.Get("ui").(packer.Ui)
	alicloudImages := state.Get("alicloudimages").(map[string]string)
	for copiedRegion, copiedImageId := range alicloudImages {
		err := client.ModifyImageSharePermission(
			&ecs.ModifyImageSharePermissionArgs{
				RegionId:      common.Region(copiedRegion),
				ImageId:       copiedImageId,
				AddAccount:    s.AlicloudImageShareAccounts,
				RemoveAccount: s.AlicloudImageUNShareAccounts,
			})
		if err != nil {
			state.Put("error", err)
			ui.Say(fmt.Sprintf("Failed modifying image share permissions: %s", err))
			return multistep.ActionHalt
		}
	}
	return multistep.ActionContinue
}

func (s *setpShareAlicloudImage) Cleanup(state multistep.StateBag) {
	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)
	if cancelled || halted {
		ui := state.Get("ui").(packer.Ui)
		client := state.Get("client").(*ecs.Client)
		alicloudImages := state.Get("alicloudimages").(map[string]string)
		ui.Say("Restoring image share permission because of cancellation or error...")
		for copiedRegion, copiedImageId := range alicloudImages {
			err := client.ModifyImageSharePermission(
				&ecs.ModifyImageSharePermissionArgs{
					RegionId:      common.Region(copiedRegion),
					ImageId:       copiedImageId,
					AddAccount:    s.AlicloudImageUNShareAccounts,
					RemoveAccount: s.AlicloudImageShareAccounts,
				})
			if err != nil {
				ui.Say(fmt.Sprintf("Restoring image share permission failed: %s", err))
			}
		}
	}
}
@@ -0,0 +1,44 @@
package ecs

import (
	"context"
	"fmt"

	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/packer/helper/multistep"
	"github.com/hashicorp/packer/packer"
)

type stepStopAlicloudInstance struct {
	ForceStop bool
}

func (s *stepStopAlicloudInstance) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	client := state.Get("client").(*ecs.Client)
	instance := state.Get("instance").(*ecs.InstanceAttributesType)
	ui := state.Get("ui").(packer.Ui)

	err := client.StopInstance(instance.InstanceId, s.ForceStop)
	if err != nil {
		err := fmt.Errorf("Error stopping alicloud instance: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	err = client.WaitForInstance(instance.InstanceId, ecs.Stopped, ALICLOUD_DEFAULT_TIMEOUT)
	if err != nil {
		err := fmt.Errorf("Error waiting for alicloud instance to stop: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}

func (s *stepStopAlicloudInstance) Cleanup(multistep.StateBag) {
	// No cleanup...
}
@@ -9,14 +9,13 @@ import (
 	"log"
 	"runtime"
 
-	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	awscommon "github.com/mitchellh/packer/builder/amazon/common"
-	"github.com/mitchellh/packer/common"
-	"github.com/mitchellh/packer/helper/config"
-	"github.com/mitchellh/packer/packer"
-	"github.com/mitchellh/packer/template/interpolate"
+	awscommon "github.com/hashicorp/packer/builder/amazon/common"
+	"github.com/hashicorp/packer/common"
+	"github.com/hashicorp/packer/helper/config"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
+	"github.com/hashicorp/packer/template/interpolate"
 )
 
 // The unique ID for this builder
@@ -122,7 +121,8 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 	var warns []string
 
 	errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)
-	errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(&b.config.ctx)...)
+	errs = packer.MultiErrorAppend(errs,
+		b.config.AMIConfig.Prepare(&b.config.AccessConfig, &b.config.ctx)...)
 
 	for _, mounts := range b.config.ChrootMounts {
 		if len(mounts) != 3 {
@@ -173,7 +173,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 		return warns, errs
 	}
 
-	log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey))
+	log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey, b.config.Token))
 	return warns, nil
 }
 
@@ -182,12 +182,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		return nil, errors.New("The amazon-chroot builder only works on Linux environments.")
 	}
 
-	config, err := b.config.Config()
-	if err != nil {
-		return nil, err
-	}
-
-	session, err := session.NewSession(config)
+	session, err := b.config.Session()
 	if err != nil {
 		return nil, err
 	}
@@ -203,6 +198,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 	state := new(multistep.BasicStateBag)
 	state.Put("config", &b.config)
 	state.Put("ec2", ec2conn)
+	state.Put("awsSession", session)
 	state.Put("hook", hook)
 	state.Put("ui", ui)
 	state.Put("wrappedCommand", CommandWrapper(wrappedCommand))
@@ -220,7 +216,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 	steps = append(steps,
 		&awscommon.StepSourceAMIInfo{
 			SourceAmi:                b.config.SourceAmi,
-			EnhancedNetworking:       b.config.AMIEnhancedNetworking,
+			EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
+			EnableAMIENASupport:      b.config.AMIENASupport,
 			AmiFilters:               b.config.SourceAmiFilter,
 		},
 		&StepCheckRootDevice{},
@@ -251,21 +248,28 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		&StepEarlyCleanup{},
 		&StepSnapshot{},
 		&awscommon.StepDeregisterAMI{
+			AccessConfig:        &b.config.AccessConfig,
 			ForceDeregister:     b.config.AMIForceDeregister,
 			ForceDeleteSnapshot: b.config.AMIForceDeleteSnapshot,
 			AMIName:             b.config.AMIName,
+			Regions:             b.config.AMIRegions,
 		},
 		&StepRegisterAMI{
 			RootVolumeSize:           b.config.RootVolumeSize,
+			EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
+			EnableAMIENASupport:      b.config.AMIENASupport,
 		},
 		&awscommon.StepCreateEncryptedAMICopy{
 			KeyID:             b.config.AMIKmsKeyId,
 			EncryptBootVolume: b.config.AMIEncryptBootVolume,
 			Name:              b.config.AMIName,
+			AMIMappings:       b.config.AMIBlockDevices.AMIMappings,
 		},
 		&awscommon.StepAMIRegionCopy{
 			AccessConfig:      &b.config.AccessConfig,
 			Regions:           b.config.AMIRegions,
+			RegionKeyIds:      b.config.AMIRegionKMSKeyIDs,
+			EncryptBootVolume: b.config.AMIEncryptBootVolume,
 			Name:              b.config.AMIName,
 		},
 		&awscommon.StepModifyAMIAttributes{
@@ -302,7 +306,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 	artifact := &awscommon.Artifact{
 		Amis:           state.Get("amis").(map[string]string),
 		BuilderIdValue: BuilderId,
-		Conn:           ec2conn,
+		Session:        session,
 	}
 
 	return artifact, nil
@@ -3,13 +3,14 @@ package chroot
 import (
 	"testing"
 
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/packer"
 )
 
 func testConfig() map[string]interface{} {
 	return map[string]interface{}{
 		"ami_name":   "foo",
 		"source_ami": "foo",
+		"region":     "us-east-1",
 	}
 }
 
@@ -70,11 +71,16 @@ func TestBuilderPrepare_ChrootMounts(t *testing.T) {
 	if err != nil {
 		t.Errorf("err: %s", err)
 	}
+}
 
+func TestBuilderPrepare_ChrootMountsBadDefaults(t *testing.T) {
+	b := &Builder{}
+	config := testConfig()
+
 	config["chroot_mounts"] = [][]string{
 		{"bad"},
 	}
-	warnings, err = b.Prepare(config)
+	warnings, err := b.Prepare(config)
 	if len(warnings) > 0 {
 		t.Fatalf("bad: %#v", warnings)
 	}
@@ -134,9 +140,14 @@ func TestBuilderPrepare_CopyFiles(t *testing.T) {
 	if len(b.config.CopyFiles) != 1 && b.config.CopyFiles[0] != "/etc/resolv.conf" {
 		t.Errorf("Was expecting default value for copy_files.")
 	}
+}
 
+func TestBuilderPrepare_CopyFilesNoDefault(t *testing.T) {
+	b := &Builder{}
+	config := testConfig()
+
 	config["copy_files"] = []string{}
-	warnings, err = b.Prepare(config)
+	warnings, err := b.Prepare(config)
 	if len(warnings) > 0 {
 		t.Fatalf("bad: %#v", warnings)
 	}
@@ -1,7 +1,7 @@
 package chroot
 
 import (
-	"github.com/mitchellh/multistep"
+	"github.com/hashicorp/packer/helper/multistep"
 )
 
 // Cleanup is an interface that some steps implement for early cleanup.
@@ -12,7 +12,7 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/packer"
 )
 
 // Communicator is a special communicator that works by executing
@@ -1,8 +1,9 @@
 package chroot
 
 import (
-	"github.com/mitchellh/packer/packer"
 	"testing"
+
+	"github.com/hashicorp/packer/packer"
 )
 
 func TestCommunicator_ImplementsCommunicator(t *testing.T) {
@@ -0,0 +1,10 @@
package chroot

import "testing"

func TestDevicePrefixMatch(t *testing.T) {
	/*
		if devicePrefixMatch("nvme0n1") != "" {
		}
	*/
}
@@ -4,7 +4,8 @@ package chroot
 
 import (
 	"os"
-	"syscall"
+
+	"golang.org/x/sys/unix"
 )
 
 // See: http://linux.die.net/include/sys/file.h
@@ -13,7 +14,7 @@ const LOCK_NB = 4
 const LOCK_UN = 8
 
 func lockFile(f *os.File) error {
-	err := syscall.Flock(int(f.Fd()), LOCK_EX)
+	err := unix.Flock(int(f.Fd()), LOCK_EX)
 	if err != nil {
 		return err
 	}
@@ -22,5 +23,5 @@ func lockFile(f *os.File) error {
 }
 
 func unlockFile(f *os.File) error {
-	return syscall.Flock(int(f.Fd()), LOCK_UN)
+	return unix.Flock(int(f.Fd()), LOCK_UN)
 }
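The lock.go hunk above replaces `syscall.Flock` with the `golang.org/x/sys/unix` equivalent. For reference, a minimal standalone sketch of taking and releasing an advisory lock with that package (hypothetical lock path, Unix-like systems only):

```go
package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical lock file path, used only for illustration.
	f, err := os.OpenFile("/tmp/example.lock", os.O_CREATE|os.O_RDWR, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Block until an exclusive advisory lock is acquired.
	if err := unix.Flock(int(f.Fd()), unix.LOCK_EX); err != nil {
		log.Fatal(err)
	}

	// ... critical section ...

	// Release the lock explicitly (closing the file would also drop it).
	if err := unix.Flock(int(f.Fd()), unix.LOCK_UN); err != nil {
		log.Fatal(err)
	}
}
```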
@@ -3,9 +3,9 @@ package chroot
 import (
 	"fmt"
 
-	"github.com/mitchellh/packer/packer"
-	"github.com/mitchellh/packer/post-processor/shell-local"
-	"github.com/mitchellh/packer/template/interpolate"
+	"github.com/hashicorp/packer/packer"
+	"github.com/hashicorp/packer/post-processor/shell-local"
+	"github.com/hashicorp/packer/template/interpolate"
 )
 
 func RunLocalCommands(commands []string, wrappedCommand CommandWrapper, ctx interpolate.Context, ui packer.Ui) error {
@@ -1,15 +1,16 @@
 package chroot
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"strings"
 	"time"
 
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	awscommon "github.com/mitchellh/packer/builder/amazon/common"
-	"github.com/mitchellh/packer/packer"
+	awscommon "github.com/hashicorp/packer/builder/amazon/common"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 // StepAttachVolume attaches the previously created volume to an
@@ -23,7 +24,7 @@ type StepAttachVolume struct {
 	volumeId string
 }
 
-func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepAttachVolume) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	device := state.Get("device").(string)
 	instance := state.Get("instance").(*ec2.Instance)
@@ -1,17 +1,18 @@
 package chroot
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 // StepCheckRootDevice makes sure the root device on the AMI is EBS-backed.
 type StepCheckRootDevice struct{}
 
-func (s *StepCheckRootDevice) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepCheckRootDevice) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	image := state.Get("source_image").(*ec2.Image)
 	ui := state.Get("ui").(packer.Ui)
 
@@ -1,17 +1,18 @@
 package chroot
 
 import (
+	"context"
 	"log"
 
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 // StepChrootProvision provisions the instance within a chroot.
 type StepChrootProvision struct {
 }
 
-func (s *StepChrootProvision) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepChrootProvision) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	hook := state.Get("hook").(packer.Hook)
 	mountPath := state.Get("mount_path").(string)
 	ui := state.Get("ui").(packer.Ui)
@@ -2,11 +2,13 @@ package chroot
 
 import (
 	"bytes"
+	"context"
 	"fmt"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
 	"log"
 	"path/filepath"
+
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 // StepCopyFiles copies some files from the host into the chroot environment.
@@ -18,7 +20,7 @@ type StepCopyFiles struct {
 	files []string
 }
 
-func (s *StepCopyFiles) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepCopyFiles) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	config := state.Get("config").(*Config)
 	mountPath := state.Get("mount_path").(string)
 	ui := state.Get("ui").(packer.Ui)
@@ -1,14 +1,15 @@
 package chroot
 
 import (
+	"context"
 	"fmt"
 	"log"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	awscommon "github.com/mitchellh/packer/builder/amazon/common"
-	"github.com/mitchellh/packer/packer"
+	awscommon "github.com/hashicorp/packer/builder/amazon/common"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 // StepCreateVolume creates a new volume from the snapshot of the root
@@ -21,7 +22,7 @@ type StepCreateVolume struct {
 	RootVolumeSize int64
 }
 
-func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepCreateVolume) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	config := state.Get("config").(*Config)
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	instance := state.Get("instance").(*ec2.Instance)
@@ -1,17 +1,19 @@
 package chroot
 
 import (
+	"context"
 	"fmt"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
 	"log"
+
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 // StepEarlyCleanup performs some of the cleanup steps early in order to
 // prepare for snapshotting and creating an AMI.
 type StepEarlyCleanup struct{}
 
-func (s *StepEarlyCleanup) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepEarlyCleanup) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ui := state.Get("ui").(packer.Ui)
 	cleanupKeys := []string{
 		"copy_files_cleanup",
@@ -1,16 +1,18 @@
 package chroot
 
 import (
+	"context"
 	"fmt"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
 	"log"
+
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 // StepEarlyUnflock unlocks the flock.
 type StepEarlyUnflock struct{}
 
-func (s *StepEarlyUnflock) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepEarlyUnflock) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	cleanup := state.Get("flock_cleanup").(Cleanup)
 	ui := state.Get("ui").(packer.Ui)
 
|
@ -1,12 +1,14 @@
|
||||||
package chroot
|
package chroot
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/mitchellh/multistep"
|
|
||||||
"github.com/mitchellh/packer/packer"
|
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
)
|
)
|
||||||
|
|
||||||
// StepFlock provisions the instance within a chroot.
|
// StepFlock provisions the instance within a chroot.
|
||||||
|
@ -17,7 +19,7 @@ type StepFlock struct {
|
||||||
fh *os.File
|
fh *os.File
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *StepFlock) Run(state multistep.StateBag) multistep.StepAction {
|
func (s *StepFlock) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
ui := state.Get("ui").(packer.Ui)
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
lockfile := "/var/lock/packer-chroot/lock"
|
lockfile := "/var/lock/packer-chroot/lock"
|
||||||
|
|
|
@@ -1,27 +1,31 @@
 package chroot
 
 import (
+	"context"
 	"fmt"
 	"log"
 
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/builder/amazon/common"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 // StepInstanceInfo verifies that this builder is running on an EC2 instance.
 type StepInstanceInfo struct{}
 
-func (s *StepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepInstanceInfo) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
+	session := state.Get("awsSession").(*session.Session)
 	ui := state.Get("ui").(packer.Ui)
 
 	// Get our own instance ID
 	ui.Say("Gathering information about this EC2 instance...")
-	instanceIdBytes, err := common.GetInstanceMetaData("instance-id")
+
+	ec2meta := ec2metadata.New(session)
+	identity, err := ec2meta.GetInstanceIdentityDocument()
 	if err != nil {
-		log.Printf("Error: %s", err)
 		err := fmt.Errorf(
 			"Error retrieving the ID of the instance Packer is running on.\n" +
 				"Please verify Packer is running on a proper AWS EC2 instance.")
@@ -29,12 +33,10 @@ func (s *StepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction {
 		ui.Error(err.Error())
 		return multistep.ActionHalt
 	}
+	log.Printf("Instance ID: %s", identity.InstanceID)
 
-	instanceId := string(instanceIdBytes)
-	log.Printf("Instance ID: %s", instanceId)
-
 	// Query the entire instance metadata
-	instancesResp, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{InstanceIds: []*string{&instanceId}})
+	instancesResp, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{InstanceIds: []*string{&identity.InstanceID}})
 	if err != nil {
 		err := fmt.Errorf("Error getting instance data: %s", err)
 		state.Put("error", err)
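step_instance_info.go now reads the instance identity document through the SDK instead of fetching raw metadata strings. A minimal standalone sketch of those two calls, assuming default session configuration and with error handling reduced to logging:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Only meaningful on an EC2 instance, where the metadata API is reachable.
	sess, err := session.NewSession()
	if err != nil {
		log.Fatal(err)
	}

	ec2meta := ec2metadata.New(sess)
	identity, err := ec2meta.GetInstanceIdentityDocument()
	if err != nil {
		log.Fatalf("not running on EC2 or metadata unavailable: %s", err)
	}

	fmt.Println("instance:", identity.InstanceID, "region:", identity.Region)
}
```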
@@ -2,6 +2,7 @@ package chroot
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"log"
 	"os"
@@ -9,9 +10,9 @@ import (
 	"strings"
 
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
-	"github.com/mitchellh/packer/template/interpolate"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
+	"github.com/hashicorp/packer/template/interpolate"
 )
 
 type mountPathData struct {
@@ -30,7 +31,7 @@ type StepMountDevice struct {
 	mountPath string
 }
 
-func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepMountDevice) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	config := state.Get("config").(*Config)
 	ui := state.Get("ui").(packer.Ui)
 	device := state.Get("device").(string)
@@ -2,12 +2,14 @@ package chroot
 
 import (
 	"bytes"
+	"context"
 	"fmt"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
 	"os"
 	"os/exec"
 	"syscall"
+
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 // StepMountExtra mounts the attached device.
@@ -18,7 +20,7 @@ type StepMountExtra struct {
 	mounts []string
 }
 
-func (s *StepMountExtra) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepMountExtra) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	config := state.Get("config").(*Config)
 	mountPath := state.Get("mount_path").(string)
 	ui := state.Get("ui").(packer.Ui)
@@ -1,8 +1,10 @@
 package chroot
 
 import (
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"context"
+
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 type postMountCommandsData struct {
@@ -16,7 +18,7 @@ type StepPostMountCommands struct {
 	Commands []string
 }
 
-func (s *StepPostMountCommands) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepPostMountCommands) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	config := state.Get("config").(*Config)
 	device := state.Get("device").(string)
 	mountPath := state.Get("mount_path").(string)
@@ -1,8 +1,10 @@
 package chroot
 
 import (
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"context"
+
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 type preMountCommandsData struct {
@@ -14,7 +16,7 @@ type StepPreMountCommands struct {
 	Commands []string
 }
 
-func (s *StepPreMountCommands) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepPreMountCommands) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	config := state.Get("config").(*Config)
 	device := state.Get("device").(string)
 	ui := state.Get("ui").(packer.Ui)
@@ -1,19 +1,20 @@
 package chroot
 
 import (
+	"context"
 	"fmt"
 	"log"
 	"os"
 
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 // StepPrepareDevice finds an available device and sets it.
 type StepPrepareDevice struct {
 }
 
-func (s *StepPrepareDevice) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepPrepareDevice) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	config := state.Get("config").(*Config)
 	ui := state.Get("ui").(packer.Ui)
 
@@ -1,21 +1,24 @@
 package chroot
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	awscommon "github.com/mitchellh/packer/builder/amazon/common"
-	"github.com/mitchellh/packer/packer"
+	awscommon "github.com/hashicorp/packer/builder/amazon/common"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 // StepRegisterAMI creates the AMI.
 type StepRegisterAMI struct {
 	RootVolumeSize           int64
+	EnableAMIENASupport      bool
+	EnableAMISriovNetSupport bool
 }
 
-func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepRegisterAMI) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	config := state.Get("config").(*Config)
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	snapshotId := state.Get("snapshot_id").(string)
@@ -75,11 +78,12 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
 		registerOpts = buildRegisterOpts(config, image, newMappings)
 	}
 
-	if config.AMIEnhancedNetworking {
+	if s.EnableAMISriovNetSupport {
 		// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
 		// As of February 2017, this applies to C3, C4, D2, I2, R3, and M4 (excluding m4.16xlarge)
 		registerOpts.SriovNetSupport = aws.String("simple")
-
+	}
+	if s.EnableAMIENASupport {
 		// Set EnaSupport to true
 		// As of February 2017, this applies to C5, I3, P2, R4, X1, and m4.16xlarge
 		registerOpts.EnaSupport = aws.Bool(true)
|
@ -1,14 +1,15 @@
|
||||||
package chroot
|
package chroot
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/service/ec2"
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
"github.com/mitchellh/multistep"
|
awscommon "github.com/hashicorp/packer/builder/amazon/common"
|
||||||
awscommon "github.com/mitchellh/packer/builder/amazon/common"
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
"github.com/mitchellh/packer/packer"
|
"github.com/hashicorp/packer/packer"
|
||||||
)
|
)
|
||||||
|
|
||||||
// StepSnapshot creates a snapshot of the created volume.
|
// StepSnapshot creates a snapshot of the created volume.
|
||||||
|
@ -19,7 +20,7 @@ type StepSnapshot struct {
|
||||||
snapshotId string
|
snapshotId string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
|
func (s *StepSnapshot) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||||
ui := state.Get("ui").(packer.Ui)
|
ui := state.Get("ui").(packer.Ui)
|
||||||
volumeId := state.Get("volume_id").(string)
|
volumeId := state.Get("volume_id").(string)
|
||||||
|
|
|
@ -1,119 +1,198 @@
|
||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"log"
|
||||||
"net/http"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"unicode"
|
"time"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
|
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
|
||||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
"github.com/mitchellh/packer/template/interpolate"
|
"github.com/hashicorp/go-cleanhttp"
|
||||||
|
"github.com/hashicorp/packer/template/interpolate"
|
||||||
)
|
)
|
||||||
|
|
||||||
// AccessConfig is for common configuration related to AWS access
|
// AccessConfig is for common configuration related to AWS access
|
||||||
type AccessConfig struct {
|
type AccessConfig struct {
|
||||||
AccessKey string `mapstructure:"access_key"`
|
AccessKey string `mapstructure:"access_key"`
|
||||||
SecretKey string `mapstructure:"secret_key"`
|
CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2"`
|
||||||
RawRegion string `mapstructure:"region"`
|
MFACode string `mapstructure:"mfa_code"`
|
||||||
SkipValidation bool `mapstructure:"skip_region_validation"`
|
|
||||||
Token string `mapstructure:"token"`
|
|
||||||
ProfileName string `mapstructure:"profile"`
|
ProfileName string `mapstructure:"profile"`
|
||||||
|
RawRegion string `mapstructure:"region"`
|
||||||
|
SecretKey string `mapstructure:"secret_key"`
|
||||||
|
SkipValidation bool `mapstructure:"skip_region_validation"`
|
||||||
|
SkipMetadataApiCheck bool `mapstructure:"skip_metadata_api_check"`
|
||||||
|
Token string `mapstructure:"token"`
|
||||||
|
session *session.Session
|
||||||
}
|
}
|
||||||
|
|
||||||
// Config returns a valid aws.Config object for access to AWS services, or
|
// Config returns a valid aws.Config object for access to AWS services, or
|
||||||
// an error if the authentication and region couldn't be resolved
|
// an error if the authentication and region couldn't be resolved
|
||||||
func (c *AccessConfig) Config() (*aws.Config, error) {
|
func (c *AccessConfig) Session() (*session.Session, error) {
|
||||||
var creds *credentials.Credentials
|
if c.session != nil {
|
||||||
|
return c.session, nil
|
||||||
|
}
|
||||||
|
|
||||||
region, err := c.Region()
|
// build a chain provider, lazy-evaluated by aws-sdk
|
||||||
if err != nil {
|
providers := []credentials.Provider{
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
config := aws.NewConfig().WithRegion(region).WithMaxRetries(11)
|
|
||||||
if c.ProfileName != "" {
|
|
||||||
profile, err := NewFromProfile(c.ProfileName)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
creds, err = profile.CredentialsFromProfile(config)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
creds = credentials.NewChainCredentials([]credentials.Provider{
|
|
||||||
&credentials.StaticProvider{Value: credentials.Value{
|
&credentials.StaticProvider{Value: credentials.Value{
|
||||||
AccessKeyID: c.AccessKey,
|
AccessKeyID: c.AccessKey,
|
||||||
SecretAccessKey: c.SecretKey,
|
SecretAccessKey: c.SecretKey,
|
||||||
SessionToken: c.Token,
|
SessionToken: c.Token,
|
||||||
}},
|
}},
|
||||||
&credentials.EnvProvider{},
|
&credentials.EnvProvider{},
|
||||||
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
|
&credentials.SharedCredentialsProvider{
|
||||||
&ec2rolecreds.EC2RoleProvider{
|
Filename: "",
|
||||||
Client: ec2metadata.New(session.New(config)),
|
Profile: c.ProfileName,
|
||||||
},
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build isolated HTTP client to avoid issues with globally-shared settings
|
||||||
|
client := cleanhttp.DefaultClient()
|
||||||
|
|
||||||
|
// Keep the default timeout (100ms) low as we don't want to wait in non-EC2 environments
|
||||||
|
client.Timeout = 100 * time.Millisecond
|
||||||
|
|
||||||
|
const userTimeoutEnvVar = "AWS_METADATA_TIMEOUT"
|
||||||
|
userTimeout := os.Getenv(userTimeoutEnvVar)
|
||||||
|
if userTimeout != "" {
|
||||||
|
newTimeout, err := time.ParseDuration(userTimeout)
|
||||||
|
if err == nil {
|
||||||
|
if newTimeout.Nanoseconds() > 0 {
|
||||||
|
client.Timeout = newTimeout
|
||||||
|
} else {
|
||||||
|
log.Printf("[WARN] Non-positive value of %s (%s) is meaningless, ignoring", userTimeoutEnvVar, newTimeout.String())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Printf("[WARN] Error converting %s to time.Duration: %s", userTimeoutEnvVar, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[INFO] Setting AWS metadata API timeout to %s", client.Timeout.String())
|
||||||
|
cfg := &aws.Config{
|
||||||
|
HTTPClient: client,
|
||||||
|
}
|
||||||
|
if !c.SkipMetadataApiCheck {
|
||||||
|
// Real AWS should reply to a simple metadata request.
|
||||||
|
// We check it actually does to ensure something else didn't just
|
||||||
|
// happen to be listening on the same IP:Port
|
||||||
|
metadataClient := ec2metadata.New(session.New(cfg))
|
||||||
|
if metadataClient.Available() {
|
||||||
|
providers = append(providers, &ec2rolecreds.EC2RoleProvider{
|
||||||
|
Client: metadataClient,
|
||||||
})
|
})
|
||||||
|
log.Print("[INFO] AWS EC2 instance detected via default metadata" +
|
||||||
|
" API endpoint, EC2RoleProvider added to the auth chain")
|
||||||
|
} else {
|
||||||
|
log.Printf("[INFO] Ignoring AWS metadata API endpoint " +
|
||||||
|
"as it doesn't return any instance-id")
|
||||||
}
|
}
|
||||||
return config.WithCredentials(creds), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Region returns the aws.Region object for access to AWS services, requesting
|
creds := credentials.NewChainCredentials(providers)
|
||||||
// the region from the instance metadata if possible.
|
cp, err := creds.Get()
|
||||||
func (c *AccessConfig) Region() (string, error) {
|
|
||||||
if c.RawRegion != "" {
|
|
||||||
if !c.SkipValidation {
|
|
||||||
if valid := ValidateRegion(c.RawRegion); !valid {
|
|
||||||
return "", fmt.Errorf("Not a valid region: %s", c.RawRegion)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return c.RawRegion, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
md, err := GetInstanceMetaData("placement/availability-zone")
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
|
||||||
|
return nil, errors.New("No valid credential sources found for AWS Builder. " +
|
||||||
|
"Please see https://www.packer.io/docs/builders/amazon.html#specifying-amazon-credentials " +
|
||||||
|
"for more information on providing credentials for the AWS Builder.")
|
||||||
}
|
}
|
||||||
|
|
||||||
region := strings.TrimRightFunc(string(md), unicode.IsLetter)
|
return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
|
||||||
return region, nil
|
}
|
||||||
|
log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
|
||||||
|
|
||||||
|
config := aws.NewConfig().WithMaxRetries(11).WithCredentialsChainVerboseErrors(true)
|
||||||
|
config = config.WithCredentials(creds)
|
||||||
|
|
||||||
|
if c.RawRegion != "" {
|
||||||
|
config = config.WithRegion(c.RawRegion)
|
||||||
|
} else if region := c.metadataRegion(); region != "" {
|
||||||
|
config = config.WithRegion(region)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.CustomEndpointEc2 != "" {
|
||||||
|
config = config.WithEndpoint(c.CustomEndpointEc2)
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := session.Options{
|
||||||
|
SharedConfigState: session.SharedConfigEnable,
|
||||||
|
Config: *config,
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.MFACode != "" {
|
||||||
|
opts.AssumeRoleTokenProvider = func() (string, error) {
|
||||||
|
return c.MFACode, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if sess, err := session.NewSessionWithOptions(opts); err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else if *sess.Config.Region == "" {
|
||||||
|
return nil, fmt.Errorf("Could not find AWS region, make sure it's set.")
|
||||||
|
} else {
|
||||||
|
log.Printf("Found region %s", *sess.Config.Region)
|
||||||
|
c.session = sess
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.session, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *AccessConfig) SessionRegion() string {
|
||||||
|
if c.session == nil {
|
||||||
|
panic("access config session should be set.")
|
||||||
|
}
|
||||||
|
return aws.StringValue(c.session.Config.Region)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *AccessConfig) IsGovCloud() bool {
|
||||||
|
return strings.HasPrefix(c.SessionRegion(), "us-gov-")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *AccessConfig) IsChinaCloud() bool {
|
||||||
|
return strings.HasPrefix(c.SessionRegion(), "cn-")
|
||||||
|
}
|
||||||
|
|
||||||
|
// metadataRegion returns the region from the metadata service
|
||||||
|
func (c *AccessConfig) metadataRegion() string {
|
||||||
|
|
||||||
|
client := cleanhttp.DefaultClient()
|
||||||
|
|
||||||
|
// Keep the default timeout (100ms) low as we don't want to wait in non-EC2 environments
|
||||||
|
client.Timeout = 100 * time.Millisecond
|
||||||
|
ec2meta := ec2metadata.New(session.New(), &aws.Config{
|
||||||
|
HTTPClient: client,
|
||||||
|
})
|
||||||
|
region, err := ec2meta.Region()
|
||||||
|
if err != nil {
|
||||||
|
log.Println("Error getting region from metadata service, "+
|
||||||
|
"probably because we're not running on AWS.", err)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return region
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error {
|
func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error {
|
||||||
var errs []error
|
var errs []error
|
||||||
|
|
||||||
|
// Either both access and secret key must be set or neither of them should
|
||||||
|
// be.
|
||||||
|
if (len(c.AccessKey) > 0) != (len(c.SecretKey) > 0) {
|
||||||
|
errs = append(errs,
|
||||||
|
fmt.Errorf("`access_key` and `secret_key` must both be either set or not set."))
|
||||||
|
}
|
||||||
|
|
||||||
if c.RawRegion != "" && !c.SkipValidation {
|
if c.RawRegion != "" && !c.SkipValidation {
|
||||||
if valid := ValidateRegion(c.RawRegion); !valid {
|
if valid := ValidateRegion(c.RawRegion); !valid {
|
||||||
errs = append(errs, fmt.Errorf("Unknown region: %s", c.RawRegion))
|
errs = append(errs, fmt.Errorf("Unknown region: %s", c.RawRegion))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(errs) > 0 {
|
|
||||||
return errs
|
return errs
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetInstanceMetaData(path string) (contents []byte, err error) {
|
|
||||||
url := "http://169.254.169.254/latest/meta-data/" + path
|
|
||||||
|
|
||||||
resp, err := http.Get(url)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if resp.StatusCode != 200 {
|
|
||||||
err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
body, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return body, err
|
|
||||||
}
|
|
||||||
|
|
|
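The new `Session()` flow above probes the EC2 metadata endpoint with a 100ms default timeout and lets `AWS_METADATA_TIMEOUT` override it. Below is a minimal standalone sketch of just that override step, under the assumption that a plain `net/http` client stands in for the cleanhttp one; the package layout and function name are illustrative, not part of the change.

```go
package main

import (
	"log"
	"net/http"
	"os"
	"time"
)

// metadataTimeout mirrors the AWS_METADATA_TIMEOUT handling in the diff:
// keep the 100ms default unless the variable parses to a positive duration.
func metadataTimeout() time.Duration {
	timeout := 100 * time.Millisecond
	raw := os.Getenv("AWS_METADATA_TIMEOUT")
	if raw == "" {
		return timeout
	}
	d, err := time.ParseDuration(raw)
	switch {
	case err != nil:
		log.Printf("[WARN] could not parse AWS_METADATA_TIMEOUT=%q: %s", raw, err)
	case d <= 0:
		log.Printf("[WARN] ignoring non-positive AWS_METADATA_TIMEOUT=%q", raw)
	default:
		timeout = d
	}
	return timeout
}

func main() {
	client := &http.Client{Timeout: metadataTimeout()}
	log.Printf("[INFO] metadata API timeout set to %s", client.Timeout)
}
```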
@@ -2,6 +2,9 @@ package common
 
 import (
   "testing"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/session"
 )
 
 func testAccessConfig() *AccessConfig {
@@ -38,3 +41,20 @@ func TestAccessConfigPrepare_Region(t *testing.T) {
   c.SkipValidation = false
 
 }
+
+func TestAccessConfigPrepare_RegionRestrictd(t *testing.T) {
+  c := testAccessConfig()
+
+  // Create a Session with a custom region
+  c.session = session.Must(session.NewSession(&aws.Config{
+    Region: aws.String("us-gov-west-1"),
+  }))
+
+  if err := c.Prepare(nil); err != nil {
+    t.Fatalf("shouldn't have err: %s", err)
+  }
+
+  if !c.IsGovCloud() {
+    t.Fatal("We should be in gov region.")
+  }
+}
@@ -2,8 +2,9 @@ package common
 
 import (
   "fmt"
+  "log"
 
-  "github.com/mitchellh/packer/template/interpolate"
+  "github.com/hashicorp/packer/template/interpolate"
 )
 
 // AMIConfig is for common configuration related to creating AMIs.
@@ -16,23 +17,45 @@ type AMIConfig struct {
   AMIProductCodes []string `mapstructure:"ami_product_codes"`
   AMIRegions []string `mapstructure:"ami_regions"`
   AMISkipRegionValidation bool `mapstructure:"skip_region_validation"`
-  AMITags map[string]string `mapstructure:"tags"`
-  AMIEnhancedNetworking bool `mapstructure:"enhanced_networking"`
+  AMITags TagMap `mapstructure:"tags"`
+  AMIENASupport bool `mapstructure:"ena_support"`
+  AMISriovNetSupport bool `mapstructure:"sriov_support"`
   AMIForceDeregister bool `mapstructure:"force_deregister"`
   AMIForceDeleteSnapshot bool `mapstructure:"force_delete_snapshot"`
   AMIEncryptBootVolume bool `mapstructure:"encrypt_boot"`
   AMIKmsKeyId string `mapstructure:"kms_key_id"`
-  SnapshotTags map[string]string `mapstructure:"snapshot_tags"`
+  AMIRegionKMSKeyIDs map[string]string `mapstructure:"region_kms_key_ids"`
+  SnapshotTags TagMap `mapstructure:"snapshot_tags"`
   SnapshotUsers []string `mapstructure:"snapshot_users"`
   SnapshotGroups []string `mapstructure:"snapshot_groups"`
 }
 
-func (c *AMIConfig) Prepare(ctx *interpolate.Context) []error {
+func stringInSlice(s []string, searchstr string) bool {
+  for _, item := range s {
+    if item == searchstr {
+      return true
+    }
+  }
+  return false
+}
+
+func (c *AMIConfig) Prepare(accessConfig *AccessConfig, ctx *interpolate.Context) []error {
   var errs []error
 
   if c.AMIName == "" {
     errs = append(errs, fmt.Errorf("ami_name must be specified"))
   }
 
+  // Make sure that if we have region_kms_key_ids defined,
+  // the regions in region_kms_key_ids are also in ami_regions
+  if len(c.AMIRegionKMSKeyIDs) > 0 {
+    for kmsKeyRegion := range c.AMIRegionKMSKeyIDs {
+      if !stringInSlice(c.AMIRegions, kmsKeyRegion) {
+        errs = append(errs, fmt.Errorf("Region %s is in region_kms_key_ids but not in ami_regions", kmsKeyRegion))
+      }
+    }
+  }
+
   if len(c.AMIRegions) > 0 {
     regionSet := make(map[string]struct{})
     regions := make([]string, 0, len(c.AMIRegions))
@@ -50,10 +73,22 @@ func (c *AMIConfig) Prepare(ctx *interpolate.Context) []error {
       // Verify the region is real
       if valid := ValidateRegion(region); !valid {
         errs = append(errs, fmt.Errorf("Unknown region: %s", region))
-        continue
       }
     }
 
+      // Make sure that if we have region_kms_key_ids defined,
+      // the regions in ami_regions are also in region_kms_key_ids
+      if len(c.AMIRegionKMSKeyIDs) > 0 {
+        if _, ok := c.AMIRegionKMSKeyIDs[region]; !ok {
+          errs = append(errs, fmt.Errorf("Region %s is in ami_regions but not in region_kms_key_ids", region))
+        }
+      }
+      if (accessConfig != nil) && (region == accessConfig.RawRegion) {
+        // make sure we don't try to copy to the region we originally
+        // create the AMI in.
+        log.Printf("Cannot copy AMI to AWS session region '%s', deleting it from `ami_regions`.", region)
+        continue
+      }
       regions = append(regions, region)
     }
 
@@ -64,9 +99,30 @@ func (c *AMIConfig) Prepare(ctx *interpolate.Context) []error {
     errs = append(errs, fmt.Errorf("Cannot share AMI with encrypted boot volume"))
   }
 
-  if len(c.SnapshotUsers) > 0 && len(c.AMIKmsKeyId) == 0 && c.AMIEncryptBootVolume {
+  if len(c.SnapshotUsers) > 0 {
+    if len(c.AMIKmsKeyId) == 0 && c.AMIEncryptBootVolume {
       errs = append(errs, fmt.Errorf("Cannot share snapshot encrypted with default KMS key"))
     }
+    if len(c.AMIRegionKMSKeyIDs) > 0 {
+      for _, kmsKey := range c.AMIRegionKMSKeyIDs {
+        if len(kmsKey) == 0 {
+          errs = append(errs, fmt.Errorf("Cannot share snapshot encrypted with default KMS key"))
+        }
+      }
+    }
+  }
 
+  if len(c.AMIName) < 3 || len(c.AMIName) > 128 {
+    errs = append(errs, fmt.Errorf("ami_name must be between 3 and 128 characters long"))
+  }
 
+  if c.AMIName != templateCleanAMIName(c.AMIName) {
+    errs = append(errs, fmt.Errorf("AMIName should only contain "+
+      "alphanumeric characters, parentheses (()), square brackets ([]), spaces "+
+      "( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs "+
+      "(@), or underscores(_). You can use the `clean_ami_name` template "+
+      "filter to automatically clean your ami name."))
+  }
 
   if len(errs) > 0 {
     return errs
@@ -11,14 +11,20 @@ func testAMIConfig() *AMIConfig {
   }
 }
 
+func getFakeAccessConfig(region string) *AccessConfig {
+  return &AccessConfig{
+    RawRegion: region,
+  }
+}
+
 func TestAMIConfigPrepare_name(t *testing.T) {
   c := testAMIConfig()
-  if err := c.Prepare(nil); err != nil {
+  if err := c.Prepare(nil, nil); err != nil {
     t.Fatalf("shouldn't have err: %s", err)
   }
 
   c.AMIName = ""
-  if err := c.Prepare(nil); err == nil {
+  if err := c.Prepare(nil, nil); err == nil {
     t.Fatal("should have error")
   }
 }
@@ -26,22 +32,22 @@ func TestAMIConfigPrepare_name(t *testing.T) {
 func TestAMIConfigPrepare_regions(t *testing.T) {
   c := testAMIConfig()
   c.AMIRegions = nil
-  if err := c.Prepare(nil); err != nil {
+  if err := c.Prepare(nil, nil); err != nil {
     t.Fatalf("shouldn't have err: %s", err)
   }
 
   c.AMIRegions = listEC2Regions()
-  if err := c.Prepare(nil); err != nil {
+  if err := c.Prepare(nil, nil); err != nil {
     t.Fatalf("shouldn't have err: %s", err)
   }
 
   c.AMIRegions = []string{"foo"}
-  if err := c.Prepare(nil); err == nil {
+  if err := c.Prepare(nil, nil); err == nil {
     t.Fatal("should have error")
   }
 
   c.AMIRegions = []string{"us-east-1", "us-west-1", "us-east-1"}
-  if err := c.Prepare(nil); err != nil {
+  if err := c.Prepare(nil, nil); err != nil {
     t.Fatalf("bad: %s", err)
   }
 
@@ -52,11 +58,81 @@ func TestAMIConfigPrepare_regions(t *testing.T) {
 
   c.AMIRegions = []string{"custom"}
   c.AMISkipRegionValidation = true
-  if err := c.Prepare(nil); err != nil {
+  if err := c.Prepare(nil, nil); err != nil {
     t.Fatal("shouldn't have error")
   }
   c.AMISkipRegionValidation = false
 
+  c.AMIRegions = []string{"us-east-1", "us-east-2", "us-west-1"}
+  c.AMIRegionKMSKeyIDs = map[string]string{
+    "us-east-1": "123-456-7890",
+    "us-west-1": "789-012-3456",
+    "us-east-2": "456-789-0123",
+  }
+  if err := c.Prepare(nil, nil); err != nil {
+    t.Fatal("shouldn't have error")
+  }
+
+  c.AMIRegions = []string{"us-east-1", "us-east-2", "us-west-1"}
+  c.AMIRegionKMSKeyIDs = map[string]string{
+    "us-east-1": "123-456-7890",
+    "us-west-1": "789-012-3456",
+    "us-east-2": "",
+  }
+  if err := c.Prepare(nil, nil); err != nil {
+    t.Fatal("should have passed; we are able to use default KMS key if not sharing")
+  }
+
+  c.SnapshotUsers = []string{"user-foo", "user-bar"}
+  c.AMIRegions = []string{"us-east-1", "us-east-2", "us-west-1"}
+  c.AMIRegionKMSKeyIDs = map[string]string{
+    "us-east-1": "123-456-7890",
+    "us-west-1": "789-012-3456",
+    "us-east-2": "",
+  }
+  if err := c.Prepare(nil, nil); err == nil {
+    t.Fatal("should have an error b/c can't use default KMS key if sharing")
+  }
+
+  c.AMIRegions = []string{"us-east-1", "us-west-1"}
+  c.AMIRegionKMSKeyIDs = map[string]string{
+    "us-east-1": "123-456-7890",
+    "us-west-1": "789-012-3456",
+    "us-east-2": "456-789-0123",
+  }
+  if err := c.Prepare(nil, nil); err == nil {
+    t.Fatal("should have error b/c theres a region in the key map that isn't in ami_regions")
+  }
+
+  c.AMIRegions = []string{"us-east-1", "us-west-1", "us-east-2"}
+  c.AMIRegionKMSKeyIDs = map[string]string{
+    "us-east-1": "123-456-7890",
+    "us-west-1": "789-012-3456",
+  }
+  if err := c.Prepare(nil, nil); err == nil {
+    t.Fatal("should have error b/c theres a region in in ami_regions that isn't in the key map")
+  }
+
+  c.SnapshotUsers = []string{"foo", "bar"}
+  c.AMIKmsKeyId = "123-abc-456"
+  c.AMIEncryptBootVolume = true
+  c.AMIRegions = []string{"us-east-1", "us-west-1"}
+  c.AMIRegionKMSKeyIDs = map[string]string{
+    "us-east-1": "123-456-7890",
+    "us-west-1": "",
+  }
+  if err := c.Prepare(nil, nil); err == nil {
+    t.Fatal("should have error b/c theres a region in in ami_regions that isn't in the key map")
+  }
+
+  // allow rawregion to exist in ami_regions list.
+  accessConf := getFakeAccessConfig("us-east-1")
+  c.AMIRegions = []string{"us-east-1", "us-west-1", "us-east-2"}
+  c.AMIRegionKMSKeyIDs = nil
+  if err := c.Prepare(accessConf, nil); err != nil {
+    t.Fatal("should allow user to have the raw region in ami_regions")
+  }
+
 }
 
 func TestAMIConfigPrepare_Share_EncryptedBoot(t *testing.T) {
@@ -65,12 +141,46 @@ func TestAMIConfigPrepare_Share_EncryptedBoot(t *testing.T) {
   c.AMIEncryptBootVolume = true
 
   c.AMIKmsKeyId = ""
-  if err := c.Prepare(nil); err == nil {
+  if err := c.Prepare(nil, nil); err == nil {
     t.Fatal("shouldn't be able to share ami with encrypted boot volume")
   }
 
   c.AMIKmsKeyId = "89c3fb9a-de87-4f2a-aedc-fddc5138193c"
-  if err := c.Prepare(nil); err == nil {
+  if err := c.Prepare(nil, nil); err == nil {
     t.Fatal("shouldn't be able to share ami with encrypted boot volume")
   }
 }
+
+func TestAMINameValidation(t *testing.T) {
+  c := testAMIConfig()
+
+  c.AMIName = "aa"
+  if err := c.Prepare(nil, nil); err == nil {
+    t.Fatal("shouldn't be able to have an ami name with less than 3 characters")
+  }
+
+  var longAmiName string
+  for i := 0; i < 129; i++ {
+    longAmiName += "a"
+  }
+  c.AMIName = longAmiName
+  if err := c.Prepare(nil, nil); err == nil {
+    t.Fatal("shouldn't be able to have an ami name with great than 128 characters")
+  }
+
+  c.AMIName = "+aaa"
+  if err := c.Prepare(nil, nil); err == nil {
+    t.Fatal("shouldn't be able to have an ami name with invalid characters")
+  }
+
+  c.AMIName = "fooBAR1()[] ./-'@_"
+  if err := c.Prepare(nil, nil); err != nil {
+    t.Fatal("should be able to use all of the allowed AMI characters")
+  }
+
+  c.AMIName = `xyz-base-2017-04-05-1934`
+  if err := c.Prepare(nil, nil); err != nil {
+    t.Fatalf("expected `xyz-base-2017-04-05-1934` to pass validation.")
+  }
+
+}
@@ -9,7 +9,7 @@ import (
   "github.com/aws/aws-sdk-go/aws"
   "github.com/aws/aws-sdk-go/aws/session"
   "github.com/aws/aws-sdk-go/service/ec2"
-  "github.com/mitchellh/packer/packer"
+  "github.com/hashicorp/packer/packer"
 )
 
 // Artifact is an artifact implementation that contains built AMIs.
@@ -21,7 +21,7 @@ type Artifact struct {
   BuilderIdValue string
 
   // EC2 connection for performing API stuff.
-  Conn *ec2.EC2
+  Session *session.Session
 }
 
 func (a *Artifact) BuilderId() string {
@@ -51,7 +51,7 @@ func (a *Artifact) String() string {
   }
 
   sort.Strings(amiStrings)
-  return fmt.Sprintf("AMIs were created:\n\n%s", strings.Join(amiStrings, "\n"))
+  return fmt.Sprintf("AMIs were created:\n%s\n", strings.Join(amiStrings, "\n"))
 }
 
 func (a *Artifact) State(name string) interface{} {
@@ -69,15 +69,9 @@ func (a *Artifact) Destroy() error {
   for region, imageId := range a.Amis {
     log.Printf("Deregistering image ID (%s) from region (%s)", imageId, region)
 
-    regionConfig := &aws.Config{
-      Credentials: a.Conn.Config.Credentials,
+    regionConn := ec2.New(a.Session, &aws.Config{
       Region: aws.String(region),
-    }
-    session, err := session.NewSession(regionConfig)
-    if err != nil {
-      return err
-    }
-    regionConn := ec2.New(session)
+    })
 
     // Get image metadata
     imageResp, err := regionConn.DescribeImages(&ec2.DescribeImagesInput{
@@ -4,7 +4,7 @@ import (
   "reflect"
   "testing"
 
-  "github.com/mitchellh/packer/packer"
+  "github.com/hashicorp/packer/packer"
 )
 
 func TestArtifact_Impl(t *testing.T) {
@@ -48,9 +48,9 @@ func TestArtifactState_atlasMetadata(t *testing.T) {
 
 func TestArtifactString(t *testing.T) {
   expected := `AMIs were created:
 
 east: foo
-west: bar`
+west: bar
+`
 
   amis := make(map[string]string)
   amis["east"] = "foo"
@@ -1,11 +1,12 @@
 package common
 
 import (
+  "fmt"
   "strings"
 
   "github.com/aws/aws-sdk-go/aws"
   "github.com/aws/aws-sdk-go/service/ec2"
-  "github.com/mitchellh/packer/template/interpolate"
+  "github.com/hashicorp/packer/template/interpolate"
 )
 
 // BlockDevice
@@ -19,6 +20,7 @@ type BlockDevice struct {
   VirtualName string `mapstructure:"virtual_name"`
   VolumeType string `mapstructure:"volume_type"`
   VolumeSize int64 `mapstructure:"volume_size"`
+  KmsKeyId string `mapstructure:"kms_key_id"`
 }
 
 type BlockDevices struct {
@@ -73,6 +75,10 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping {
       ebsBlockDevice.Encrypted = aws.Bool(blockDevice.Encrypted)
     }
 
+    if blockDevice.KmsKeyId != "" {
+      ebsBlockDevice.KmsKeyId = aws.String(blockDevice.KmsKeyId)
+    }
+
     mapping.Ebs = ebsBlockDevice
   }
 
@@ -81,10 +87,29 @@ func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping {
   return blockDevices
 }
 
-func (b *BlockDevices) Prepare(ctx *interpolate.Context) []error {
+func (b *BlockDevice) Prepare(ctx *interpolate.Context) error {
+  // Warn that encrypted must be true when setting kms_key_id
+  if b.KmsKeyId != "" && b.Encrypted == false {
+    return fmt.Errorf("The device %v, must also have `encrypted: "+
+      "true` when setting a kms_key_id.", b.DeviceName)
+  }
   return nil
 }
 
+func (b *BlockDevices) Prepare(ctx *interpolate.Context) (errs []error) {
+  for _, d := range b.AMIMappings {
+    if err := d.Prepare(ctx); err != nil {
+      errs = append(errs, fmt.Errorf("AMIMapping: %s", err.Error()))
+    }
+  }
+  for _, d := range b.LaunchMappings {
+    if err := d.Prepare(ctx); err != nil {
+      errs = append(errs, fmt.Errorf("LaunchMapping: %s", err.Error()))
+    }
+  }
+  return errs
+}
+
 func (b *AMIBlockDevices) BuildAMIDevices() []*ec2.BlockDeviceMapping {
   return buildBlockDevices(b.AMIMappings)
 }
@@ -84,6 +84,27 @@ func TestBlockDevice(t *testing.T) {
       },
     },
   },
+  {
+    Config: &BlockDevice{
+      DeviceName: "/dev/sdb",
+      VolumeType: "gp2",
+      VolumeSize: 8,
+      DeleteOnTermination: true,
+      Encrypted: true,
+      KmsKeyId: "2Fa48a521f-3aff-4b34-a159-376ac5d37812",
+    },
+
+    Result: &ec2.BlockDeviceMapping{
+      DeviceName: aws.String("/dev/sdb"),
+      Ebs: &ec2.EbsBlockDevice{
+        VolumeType: aws.String("gp2"),
+        VolumeSize: aws.Int64(8),
+        DeleteOnTermination: aws.Bool(true),
+        Encrypted: aws.Bool(true),
+        KmsKeyId: aws.String("2Fa48a521f-3aff-4b34-a159-376ac5d37812"),
+      },
+    },
+  },
   {
     Config: &BlockDevice{
       DeviceName: "/dev/sdb",
@@ -1,156 +0,0 @@
-package common
-
-import (
-  "fmt"
-  "os"
-  "path"
-
-  "github.com/aws/aws-sdk-go/aws"
-  "github.com/aws/aws-sdk-go/aws/credentials"
-  "github.com/aws/aws-sdk-go/aws/session"
-  "github.com/aws/aws-sdk-go/service/sts"
-  "github.com/go-ini/ini"
-  "github.com/mitchellh/go-homedir"
-)
-
-type CLIConfig struct {
-  ProfileName string
-  SourceProfile string
-
-  AssumeRoleInput *sts.AssumeRoleInput
-  SourceCredentials *credentials.Credentials
-
-  profileCfg *ini.Section
-  profileCred *ini.Section
-}
-
-// Return a new CLIConfig with stored profile settings
-func NewFromProfile(name string) (*CLIConfig, error) {
-  c := &CLIConfig{}
-  c.AssumeRoleInput = new(sts.AssumeRoleInput)
-  err := c.Prepare(name)
-  if err != nil {
-    return nil, err
-  }
-  sessName, err := c.getSessionName(c.profileCfg.Key("role_session_name").Value())
-  if err != nil {
-    return nil, err
-  }
-  c.AssumeRoleInput.RoleSessionName = aws.String(sessName)
-  arn := c.profileCfg.Key("role_arn").Value()
-  if arn != "" {
-    c.AssumeRoleInput.RoleArn = aws.String(arn)
-  }
-  id := c.profileCfg.Key("external_id").Value()
-  if id != "" {
-    c.AssumeRoleInput.ExternalId = aws.String(id)
-  }
-  c.SourceCredentials = credentials.NewStaticCredentials(
-    c.profileCred.Key("aws_access_key_id").Value(),
-    c.profileCred.Key("aws_secret_access_key").Value(),
-    c.profileCred.Key("aws_session_token").Value(),
-  )
-  return c, nil
-}
-
-// Return AWS Credentials using current profile. Must supply source config.
-func (c *CLIConfig) CredentialsFromProfile(conf *aws.Config) (*credentials.Credentials, error) {
-  // If the profile name is equal to the source profile, there is no role to assume so return
-  // the source credentials as they were captured.
-  if c.ProfileName == c.SourceProfile {
-    return c.SourceCredentials, nil
-  }
-  srcCfg := aws.NewConfig().Copy(conf).WithCredentials(c.SourceCredentials)
-  session, err := session.NewSession(srcCfg)
-  if err != nil {
-    return nil, err
-  }
-  svc := sts.New(session)
-  res, err := svc.AssumeRole(c.AssumeRoleInput)
-  if err != nil {
-    return nil, err
-  }
-  return credentials.NewStaticCredentials(
-    *res.Credentials.AccessKeyId,
-    *res.Credentials.SecretAccessKey,
-    *res.Credentials.SessionToken,
-  ), nil
-}
-
-// Sets params in the struct based on the file section
-func (c *CLIConfig) Prepare(name string) error {
-  var err error
-  c.ProfileName = name
-  c.profileCfg, err = configFromName(c.ProfileName)
-  if err != nil {
-    return err
-  }
-  c.SourceProfile = c.profileCfg.Key("source_profile").Value()
-  if c.SourceProfile == "" {
-    c.SourceProfile = c.ProfileName
-  }
-  c.profileCred, err = credsFromName(c.SourceProfile)
-  return err
-}
-
-func (c *CLIConfig) getSessionName(rawName string) (string, error) {
-  if rawName == "" {
-    name := "packer-"
-    host, err := os.Hostname()
-    if err != nil {
-      return name, err
-    }
-    return fmt.Sprintf("%s%s", name, host), nil
-  } else {
-    return rawName, nil
-  }
-}
-
-func configFromName(name string) (*ini.Section, error) {
-  filePath := os.Getenv("AWS_CONFIG_FILE")
-  if filePath == "" {
-    home, err := homedir.Dir()
-    if err != nil {
-      return nil, err
-    }
-    filePath = path.Join(home, ".aws", "config")
-  }
-  file, err := readFile(filePath)
-  if err != nil {
-    return nil, err
-  }
-  profileName := fmt.Sprintf("profile %s", name)
-  cfg, err := file.GetSection(profileName)
-  if err != nil {
-    return nil, err
-  }
-  return cfg, nil
-}
-
-func credsFromName(name string) (*ini.Section, error) {
-  filePath := os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
-  if filePath == "" {
-    home, err := homedir.Dir()
-    if err != nil {
-      return nil, err
-    }
-    filePath = path.Join(home, ".aws", "credentials")
-  }
-  file, err := readFile(filePath)
-  if err != nil {
-    return nil, err
-  }
-  cfg, err := file.GetSection(name)
-  if err != nil {
-    return nil, err
-  }
-  return cfg, nil
-}
-
-func readFile(path string) (*ini.File, error) {
-  cfg, err := ini.Load(path)
-  if err != nil {
-    return nil, err
-  }
-  return cfg, nil
-}
@@ -1,118 +0,0 @@
-package common
-
-import (
-  "io/ioutil"
-  "os"
-  "path"
-  "strconv"
-  "testing"
-  "time"
-
-  "github.com/aws/aws-sdk-go/aws"
-)
-
-func init() {
-  os.Setenv("AWS_ACCESS_KEY_ID", "")
-  os.Setenv("AWS_ACCESS_KEY", "")
-  os.Setenv("AWS_SECRET_ACCESS_KEY", "")
-  os.Setenv("AWS_SECRET_KEY", "")
-  os.Setenv("AWS_CONFIG_FILE", "")
-  os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "")
-}
-
-func testCLIConfig() *CLIConfig {
-  return &CLIConfig{}
-}
-
-func TestCLIConfigNewFromProfile(t *testing.T) {
-  tmpDir := mockConfig(t)
-
-  c, err := NewFromProfile("testing2")
-  if err != nil {
-    t.Error(err)
-  }
-  if c.AssumeRoleInput.RoleArn != nil {
-    t.Errorf("RoleArn should be nil. Instead %p", c.AssumeRoleInput.RoleArn)
-  }
-  if c.AssumeRoleInput.ExternalId != nil {
-    t.Errorf("ExternalId should be nil. Instead %p", c.AssumeRoleInput.ExternalId)
-  }
-
-  mockConfigClose(t, tmpDir)
-}
-
-func TestAssumeRole(t *testing.T) {
-  tmpDir := mockConfig(t)
-
-  c, err := NewFromProfile("testing1")
-  if err != nil {
-    t.Error(err)
-  }
-  // Role
-  e := "arn:aws:iam::123456789011:role/rolename"
-  a := *c.AssumeRoleInput.RoleArn
-  if e != a {
-    t.Errorf("RoleArn value should be %s. Instead %s", e, a)
-  }
-  // Session
-  a = *c.AssumeRoleInput.RoleSessionName
-  e = "testsession"
-  if e != a {
-    t.Errorf("RoleSessionName value should be %s. Instead %s", e, a)
-  }
-
-  config := aws.NewConfig()
-  _, err = c.CredentialsFromProfile(config)
-  if err == nil {
-    t.Error("Should have errored")
-  }
-  mockConfigClose(t, tmpDir)
-}
-
-func mockConfig(t *testing.T) string {
-  time := time.Now().UnixNano()
-  dir, err := ioutil.TempDir("", strconv.FormatInt(time, 10))
-  if err != nil {
-    t.Error(err)
-  }
-
-  cfg := []byte(`[profile testing1]
-region=us-west-2
-source_profile=testingcredentials
-role_arn = arn:aws:iam::123456789011:role/rolename
-role_session_name = testsession
-
-[profile testing2]
-region=us-west-2
-`)
-  cfgFile := path.Join(dir, "config")
-  err = ioutil.WriteFile(cfgFile, cfg, 0644)
-  if err != nil {
-    t.Error(err)
-  }
-  os.Setenv("AWS_CONFIG_FILE", cfgFile)
-
-  crd := []byte(`[testingcredentials]
-aws_access_key_id = foo
-aws_secret_access_key = bar
-
-[testing2]
-aws_access_key_id = baz
-aws_secret_access_key = qux
-`)
-  crdFile := path.Join(dir, "credentials")
-  err = ioutil.WriteFile(crdFile, crd, 0644)
-  if err != nil {
-    t.Error(err)
-  }
-  os.Setenv("AWS_SHARED_CREDENTIALS_FILE", crdFile)
-
-  return dir
-}
-
-func mockConfigClose(t *testing.T, dir string) {
-  err := os.RemoveAll(dir)
-  if err != nil {
-    t.Error(err)
-  }
-}
@@ -12,6 +12,7 @@ func listEC2Regions() []string {
   "eu-central-1",
   "eu-west-1",
   "eu-west-2",
+  "eu-west-3",
   "sa-east-1",
   "us-east-1",
   "us-east-2",
@@ -3,13 +3,14 @@ package common
 import (
   "errors"
   "fmt"
+  "net"
   "os"
   "regexp"
   "time"
 
-  "github.com/mitchellh/packer/common/uuid"
-  "github.com/mitchellh/packer/helper/communicator"
-  "github.com/mitchellh/packer/template/interpolate"
+  "github.com/hashicorp/packer/common/uuid"
+  "github.com/hashicorp/packer/helper/communicator"
+  "github.com/hashicorp/packer/template/interpolate"
 )
 
 var reShutdownBehavior = regexp.MustCompile("^(stop|terminate)$")
@@ -40,6 +41,7 @@ type RunConfig struct {
   DisableStopInstance bool `mapstructure:"disable_stop_instance"`
   SecurityGroupId string `mapstructure:"security_group_id"`
   SecurityGroupIds []string `mapstructure:"security_group_ids"`
+  TemporarySGSourceCidr string `mapstructure:"temporary_security_group_source_cidr"`
   SubnetId string `mapstructure:"subnet_id"`
   TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name"`
   UserData string `mapstructure:"user_data"`
@@ -51,7 +53,7 @@ type RunConfig struct {
   // Communicator settings
   Comm communicator.Config `mapstructure:",squash"`
   SSHKeyPairName string `mapstructure:"ssh_keypair_name"`
-  SSHPrivateIp bool `mapstructure:"ssh_private_ip"`
+  SSHInterface string `mapstructure:"ssh_interface"`
 }
 
 func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
@@ -75,11 +77,21 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
 
   // Validation
   errs := c.Comm.Prepare(ctx)
 
+  // Valadating ssh_interface
+  if c.SSHInterface != "public_ip" &&
+    c.SSHInterface != "private_ip" &&
+    c.SSHInterface != "public_dns" &&
+    c.SSHInterface != "private_dns" &&
+    c.SSHInterface != "" {
+    errs = append(errs, errors.New(fmt.Sprintf("Unknown interface type: %s", c.SSHInterface)))
+  }
+
   if c.SSHKeyPairName != "" {
     if c.Comm.Type == "winrm" && c.Comm.WinRMPassword == "" && c.Comm.SSHPrivateKey == "" {
-      errs = append(errs, errors.New("A private_key_file must be provided to retrieve the winrm password when using ssh_keypair_name."))
+      errs = append(errs, errors.New("ssh_private_key_file must be provided to retrieve the winrm password when using ssh_keypair_name."))
     } else if c.Comm.SSHPrivateKey == "" && !c.Comm.SSHAgentAuth {
-      errs = append(errs, errors.New("A private_key_file must be provided or ssh_agent_auth enabled when ssh_keypair_name is specified."))
+      errs = append(errs, errors.New("ssh_private_key_file must be provided or ssh_agent_auth enabled when ssh_keypair_name is specified."))
     }
   }
 
@@ -115,6 +127,14 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
     }
   }
 
+  if c.TemporarySGSourceCidr == "" {
+    c.TemporarySGSourceCidr = "0.0.0.0/0"
+  } else {
+    if _, _, err := net.ParseCIDR(c.TemporarySGSourceCidr); err != nil {
+      errs = append(errs, fmt.Errorf("Error parsing temporary_security_group_source_cidr: %s", err.Error()))
+    }
+  }
+
   if c.InstanceInitiatedShutdownBehavior == "" {
     c.InstanceInitiatedShutdownBehavior = "stop"
   } else if !reShutdownBehavior.MatchString(c.InstanceInitiatedShutdownBehavior) {
@@ -123,3 +143,7 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
 
   return errs
 }
 
+func (c *RunConfig) IsSpotInstance() bool {
+  return c.SpotPrice != "" && c.SpotPrice != "0"
+}
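The run_config change above adds `temporary_security_group_source_cidr`, defaulting it to 0.0.0.0/0 and rejecting values that `net.ParseCIDR` cannot parse. Below is a standalone sketch of that default-or-validate step; the package layout and function name are illustrative, not part of the change.

```go
package main

import (
	"fmt"
	"net"
)

// normalizeSourceCidr applies the same default and validation the diff adds
// for temporary_security_group_source_cidr.
func normalizeSourceCidr(cidr string) (string, error) {
	if cidr == "" {
		return "0.0.0.0/0", nil
	}
	if _, _, err := net.ParseCIDR(cidr); err != nil {
		return "", fmt.Errorf("error parsing temporary_security_group_source_cidr: %s", err)
	}
	return cidr, nil
}

func main() {
	fmt.Println(normalizeSourceCidr(""))            // 0.0.0.0/0 <nil>
	fmt.Println(normalizeSourceCidr("10.0.0.0/24")) // 10.0.0.0/24 <nil>
	fmt.Println(normalizeSourceCidr("not-a-cidr"))  // error
}
```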
@@ -6,7 +6,7 @@ import (
   "regexp"
   "testing"
 
-  "github.com/mitchellh/packer/helper/communicator"
+  "github.com/hashicorp/packer/helper/communicator"
 )
 
 func init() {
@@ -8,8 +8,8 @@ import (
   "time"
 
   "github.com/aws/aws-sdk-go/service/ec2"
-  "github.com/mitchellh/multistep"
-  packerssh "github.com/mitchellh/packer/communicator/ssh"
+  packerssh "github.com/hashicorp/packer/communicator/ssh"
+  "github.com/hashicorp/packer/helper/multistep"
   "golang.org/x/crypto/ssh"
   "golang.org/x/crypto/ssh/agent"
 )
@@ -25,21 +25,40 @@ var (
 
 // SSHHost returns a function that can be given to the SSH communicator
 // for determining the SSH address based on the instance DNS name.
-func SSHHost(e ec2Describer, private bool) func(multistep.StateBag) (string, error) {
+func SSHHost(e ec2Describer, sshInterface string) func(multistep.StateBag) (string, error) {
   return func(state multistep.StateBag) (string, error) {
     const tries = 2
     // <= with current structure to check result of describing `tries` times
     for j := 0; j <= tries; j++ {
       var host string
       i := state.Get("instance").(*ec2.Instance)
-      if i.VpcId != nil && *i.VpcId != "" {
-        if i.PublicIpAddress != nil && *i.PublicIpAddress != "" && !private {
+      if sshInterface != "" {
+        switch sshInterface {
+        case "public_ip":
+          if i.PublicIpAddress != nil {
+            host = *i.PublicIpAddress
+          }
+        case "private_ip":
+          if i.PrivateIpAddress != nil {
+            host = *i.PrivateIpAddress
+          }
+        case "public_dns":
+          if i.PublicDnsName != nil {
+            host = *i.PublicDnsName
+          }
+        case "private_dns":
+          if i.PrivateDnsName != nil {
+            host = *i.PrivateDnsName
+          }
+        default:
+          panic(fmt.Sprintf("Unknown interface type: %s", sshInterface))
+        }
+      } else if i.VpcId != nil && *i.VpcId != "" {
+        if i.PublicIpAddress != nil && *i.PublicIpAddress != "" {
          host = *i.PublicIpAddress
         } else if i.PrivateIpAddress != nil && *i.PrivateIpAddress != "" {
           host = *i.PrivateIpAddress
         }
-      } else if private && i.PrivateIpAddress != nil && *i.PrivateIpAddress != "" {
-        host = *i.PrivateIpAddress
       } else if i.PublicDnsName != nil && *i.PublicDnsName != "" {
         host = *i.PublicDnsName
       }
@@ -63,7 +82,7 @@ func SSHHost(e ec2Describer, sshInterface string) func(multistep.StateBag) (string, error) {
       time.Sleep(sshHostSleepDuration)
     }
 
-    return "", errors.New("couldn't determine IP address for instance")
+    return "", errors.New("couldn't determine address for instance")
   }
 }
 
@@ -88,6 +107,7 @@ func SSHConfig(useAgent bool, username, password string) func(multistep.StateBag
       Auth: []ssh.AuthMethod{
         ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers),
       },
+      HostKeyCallback: ssh.InsecureIgnoreHostKey(),
     }, nil
   }
 
@@ -103,11 +123,13 @@ func SSHConfig(useAgent bool, username, password string) func(multistep.StateBag
       Auth: []ssh.AuthMethod{
         ssh.PublicKeys(signer),
       },
+      HostKeyCallback: ssh.InsecureIgnoreHostKey(),
     }, nil
 
   } else {
     return &ssh.ClientConfig{
       User: username,
+      HostKeyCallback: ssh.InsecureIgnoreHostKey(),
       Auth: []ssh.AuthMethod{
         ssh.Password(password),
         ssh.KeyboardInteractive(
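`SSHHost` now selects the connect address by `ssh_interface` name rather than a private-IP boolean. The reduced sketch below shows that selection against an `*ec2.Instance`; the helper name and the instance literal are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// hostFor picks the same instance field per interface name as the switch added above.
func hostFor(i *ec2.Instance, sshInterface string) string {
	switch sshInterface {
	case "public_ip":
		return aws.StringValue(i.PublicIpAddress)
	case "private_ip":
		return aws.StringValue(i.PrivateIpAddress)
	case "public_dns":
		return aws.StringValue(i.PublicDnsName)
	case "private_dns":
		return aws.StringValue(i.PrivateDnsName)
	}
	return ""
}

func main() {
	i := &ec2.Instance{
		PublicIpAddress:  aws.String("192.0.2.10"),
		PrivateIpAddress: aws.String("10.0.0.5"),
		PublicDnsName:    aws.String("ec2-192-0-2-10.compute-1.amazonaws.com"),
		PrivateDnsName:   aws.String("ip-10-0-0-5.ec2.internal"),
	}
	fmt.Println(hostFor(i, "private_dns"))
}
```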
@@ -5,12 +5,13 @@ import (
 
   "github.com/aws/aws-sdk-go/aws"
   "github.com/aws/aws-sdk-go/service/ec2"
-  "github.com/mitchellh/multistep"
+  "github.com/hashicorp/packer/helper/multistep"
 )
 
 const (
   privateIP = "10.0.0.1"
   publicIP = "192.168.1.1"
+  privateDNS = "private.dns.test"
   publicDNS = "public.dns.test"
 )
 
@@ -22,42 +23,52 @@ func TestSSHHost(t *testing.T) {
   var cases = []struct {
     allowTries int
     vpcId string
-    private bool
+    sshInterface string
 
     ok bool
     wantHost string
   }{
-    {1, "", false, true, publicDNS},
-    {1, "", true, true, privateIP},
-    {1, "vpc-id", false, true, publicIP},
-    {1, "vpc-id", true, true, privateIP},
-    {2, "", false, true, publicDNS},
-    {2, "", true, true, privateIP},
-    {2, "vpc-id", false, true, publicIP},
-    {2, "vpc-id", true, true, privateIP},
-    {3, "", false, false, ""},
-    {3, "", true, false, ""},
-    {3, "vpc-id", false, false, ""},
-    {3, "vpc-id", true, false, ""},
+    {1, "", "", true, publicDNS},
+    {1, "", "private_ip", true, privateIP},
+    {1, "vpc-id", "", true, publicIP},
+    {1, "vpc-id", "private_ip", true, privateIP},
+    {1, "vpc-id", "private_dns", true, privateDNS},
+    {1, "vpc-id", "public_dns", true, publicDNS},
+    {1, "vpc-id", "public_ip", true, publicIP},
+    {2, "", "", true, publicDNS},
+    {2, "", "private_ip", true, privateIP},
+    {2, "vpc-id", "", true, publicIP},
+    {2, "vpc-id", "private_ip", true, privateIP},
+    {2, "vpc-id", "private_dns", true, privateDNS},
+    {2, "vpc-id", "public_dns", true, publicDNS},
+    {2, "vpc-id", "public_ip", true, publicIP},
+    {3, "", "", false, ""},
+    {3, "", "private_ip", false, ""},
+    {3, "vpc-id", "", false, ""},
+    {3, "vpc-id", "private_ip", false, ""},
+    {3, "vpc-id", "private_dns", false, ""},
+    {3, "vpc-id", "public_dns", false, ""},
+    {3, "vpc-id", "public_ip", false, ""},
   }
 
   for _, c := range cases {
-    testSSHHost(t, c.allowTries, c.vpcId, c.private, c.ok, c.wantHost)
+    testSSHHost(t, c.allowTries, c.vpcId, c.sshInterface, c.ok, c.wantHost)
   }
 }
 
-func testSSHHost(t *testing.T, allowTries int, vpcId string, private, ok bool, wantHost string) {
-  t.Logf("allowTries=%d vpcId=%s private=%t ok=%t wantHost=%q", allowTries, vpcId, private, ok, wantHost)
+func testSSHHost(t *testing.T, allowTries int, vpcId string, sshInterface string, ok bool, wantHost string) {
+  t.Logf("allowTries=%d vpcId=%s sshInterface=%s ok=%t wantHost=%q", allowTries, vpcId, sshInterface, ok, wantHost)
 
   e := &fakeEC2Describer{
     allowTries: allowTries,
     vpcId: vpcId,
     privateIP: privateIP,
     publicIP: publicIP,
+    privateDNS: privateDNS,
     publicDNS: publicDNS,
   }
 
-  f := SSHHost(e, private)
+  f := SSHHost(e, sshInterface)
   st := &multistep.BasicStateBag{}
   st.Put("instance", &ec2.Instance{
     InstanceId: aws.String("instance-id"),
@@ -86,7 +97,7 @@ type fakeEC2Describer struct {
   tries int
 
   vpcId string
-  privateIP, publicIP, publicDNS string
+  privateIP, publicIP, privateDNS, publicDNS string
 }
 
 func (d *fakeEC2Describer) DescribeInstances(in *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) {
@@ -104,6 +115,7 @@ func (d *fakeEC2Describer) DescribeInstances(in *ec2.DescribeInstancesInput) (*e
     instance.PublicIpAddress = aws.String(d.publicIP)
     instance.PrivateIpAddress = aws.String(d.privateIP)
     instance.PublicDnsName = aws.String(d.publicDNS)
+    instance.PrivateDnsName = aws.String(d.privateDNS)
   }
 
   out := &ec2.DescribeInstancesOutput{
@@ -12,7 +12,7 @@ import (
 
   "github.com/aws/aws-sdk-go/aws/awserr"
   "github.com/aws/aws-sdk-go/service/ec2"
-  "github.com/mitchellh/multistep"
+  "github.com/hashicorp/packer/helper/multistep"
 )
 
 // StateRefreshFunc is a function type used for StateChangeConf that is
@@ -1,24 +1,26 @@
 package common

 import (
+	"context"
 	"fmt"
 	"sync"

 	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/ec2"

-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )

 type StepAMIRegionCopy struct {
 	AccessConfig *AccessConfig
 	Regions      []string
+	RegionKeyIds map[string]string
+	EncryptBootVolume bool
 	Name         string
 }

-func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepAMIRegionCopy) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	ui := state.Get("ui").(packer.Ui)
 	amis := state.Get("amis").(map[string]string)
@@ -33,6 +35,7 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction {

 	var lock sync.Mutex
 	var wg sync.WaitGroup
+	var regKeyID string
 	errs := new(packer.MultiError)
 	for _, region := range s.Regions {
 		if region == *ec2conn.Config.Region {
@@ -44,10 +47,13 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction {
 		wg.Add(1)
 		ui.Message(fmt.Sprintf("Copying to: %s", region))

+		if s.EncryptBootVolume {
+			regKeyID = s.RegionKeyIds[region]
+		}

 		go func(region string) {
 			defer wg.Done()
-			id, snapshotIds, err := amiRegionCopy(state, s.AccessConfig, s.Name, ami, region, *ec2conn.Config.Region)
+			id, snapshotIds, err := amiRegionCopy(state, s.AccessConfig, s.Name, ami, region, *ec2conn.Config.Region, regKeyID)

 			lock.Lock()
 			defer lock.Unlock()
 			amis[region] = id
@@ -80,26 +86,29 @@ func (s *StepAMIRegionCopy) Cleanup(state multistep.StateBag) {
 // amiRegionCopy does a copy for the given AMI to the target region and
 // returns the resulting ID and snapshot IDs, or error.
 func amiRegionCopy(state multistep.StateBag, config *AccessConfig, name string, imageId string,
-	target string, source string) (string, []string, error) {
+	target string, source string, keyID string) (string, []string, error) {
 	snapshotIds := []string{}
+	isEncrypted := false

 	// Connect to the region where the AMI will be copied to
-	awsConfig, err := config.Config()
+	session, err := config.Session()
 	if err != nil {
 		return "", snapshotIds, err
 	}
-	awsConfig.Region = aws.String(target)
-	session, err := session.NewSession(awsConfig)
-	if err != nil {
-		return "", snapshotIds, err
+	// if we've provided a map of key ids to regions, use those keys.
+	if len(keyID) > 0 {
+		isEncrypted = true
 	}

-	regionconn := ec2.New(session)
+	regionconn := ec2.New(session.Copy(&aws.Config{
+		Region: aws.String(target)},
+	))

 	resp, err := regionconn.CopyImage(&ec2.CopyImageInput{
 		SourceRegion:  &source,
 		SourceImageId: &imageId,
 		Name:          &name,
+		Encrypted:     aws.Bool(isEncrypted),
+		KmsKeyId:      aws.String(keyID),
 	})

 	if err != nil {

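Note: the pattern adopted above keeps one shared AWS session and derives per-region EC2 clients from it instead of building a new session per region. A minimal sketch of that idea, assuming the aws-sdk-go v1 API used in this diff (the helper name is illustrative and not part of the change):

    // regionClient returns an EC2 client bound to the given region while
    // reusing the credentials and settings of the base session.
    func regionClient(base *session.Session, region string) *ec2.EC2 {
    	return ec2.New(base.Copy(&aws.Config{Region: aws.String(region)}))
    }
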
@@ -1,26 +1,28 @@
 package common

 import (
+	"context"
 	"fmt"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	retry "github.com/mitchellh/packer/common"
-	"github.com/mitchellh/packer/packer"
-	"github.com/mitchellh/packer/template/interpolate"
+	retry "github.com/hashicorp/packer/common"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
+	"github.com/hashicorp/packer/template/interpolate"
 )

 type StepCreateTags struct {
-	Tags         map[string]string
-	SnapshotTags map[string]string
+	Tags         TagMap
+	SnapshotTags TagMap
 	Ctx          interpolate.Context
 }

-func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepCreateTags) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
+	session := state.Get("awsSession").(*session.Session)
 	ui := state.Get("ui").(packer.Ui)
 	amis := state.Get("amis").(map[string]string)

@@ -31,7 +33,7 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction {
 		sourceAMI = ""
 	}

-	if len(s.Tags) == 0 && len(s.SnapshotTags) == 0 {
+	if !s.Tags.IsSet() && !s.SnapshotTags.IsSet() {
 		return multistep.ActionContinue
 	}

@@ -39,23 +41,13 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction {
 	for region, ami := range amis {
 		ui.Say(fmt.Sprintf("Adding tags to AMI (%s)...", ami))

-		// Declare list of resources to tag
-		awsConfig := aws.Config{
-			Credentials: ec2conn.Config.Credentials,
+		regionConn := ec2.New(session, &aws.Config{
 			Region:      aws.String(region),
-		}
-		session, err := session.NewSession(&awsConfig)
-		if err != nil {
-			err := fmt.Errorf("Error creating AWS session: %s", err)
-			state.Put("error", err)
-			ui.Error(err.Error())
-			return multistep.ActionHalt
-		}
-		regionconn := ec2.New(session)
+		})

 		// Retrieve image list for given AMI
 		resourceIds := []*string{&ami}
-		imageResp, err := regionconn.DescribeImages(&ec2.DescribeImagesInput{
+		imageResp, err := regionConn.DescribeImages(&ec2.DescribeImagesInput{
 			ImageIds: resourceIds,
 		})

@@ -87,27 +79,27 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction {

 		// Convert tags to ec2.Tag format
 		ui.Say("Creating AMI tags")
-		amiTags, err := ConvertToEC2Tags(s.Tags, *ec2conn.Config.Region, sourceAMI, s.Ctx)
+		amiTags, err := s.Tags.EC2Tags(s.Ctx, *ec2conn.Config.Region, sourceAMI)
 		if err != nil {
 			state.Put("error", err)
 			ui.Error(err.Error())
 			return multistep.ActionHalt
 		}
-		ReportTags(ui, amiTags)
+		amiTags.Report(ui)

 		ui.Say("Creating snapshot tags")
-		snapshotTags, err := ConvertToEC2Tags(s.SnapshotTags, *ec2conn.Config.Region, sourceAMI, s.Ctx)
+		snapshotTags, err := s.SnapshotTags.EC2Tags(s.Ctx, *ec2conn.Config.Region, sourceAMI)
 		if err != nil {
 			state.Put("error", err)
 			ui.Error(err.Error())
 			return multistep.ActionHalt
 		}
-		ReportTags(ui, snapshotTags)
+		snapshotTags.Report(ui)

 		// Retry creating tags for about 2.5 minutes
-		err = retry.Retry(0.2, 30, 11, func() (bool, error) {
+		err = retry.Retry(0.2, 30, 11, func(_ uint) (bool, error) {
 			// Tag images and snapshots
-			_, err := regionconn.CreateTags(&ec2.CreateTagsInput{
+			_, err := regionConn.CreateTags(&ec2.CreateTagsInput{
 				Resources: resourceIds,
 				Tags:      amiTags,
 			})

@@ -120,7 +112,7 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction {

 			// Override tags on snapshots
 			if len(snapshotTags) > 0 {
-				_, err = regionconn.CreateTags(&ec2.CreateTagsInput{
+				_, err = regionConn.CreateTags(&ec2.CreateTagsInput{
 					Resources: snapshotIds,
 					Tags:      snapshotTags,
 				})

@@ -150,32 +142,3 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction {
 func (s *StepCreateTags) Cleanup(state multistep.StateBag) {
 	// No cleanup...
 }
-
-func ReportTags(ui packer.Ui, tags []*ec2.Tag) {
-	for _, tag := range tags {
-		ui.Message(fmt.Sprintf("Adding tag: \"%s\": \"%s\"",
-			aws.StringValue(tag.Key), aws.StringValue(tag.Value)))
-	}
-}
-
-func ConvertToEC2Tags(tags map[string]string, region, sourceAmiId string, ctx interpolate.Context) ([]*ec2.Tag, error) {
-	var ec2Tags []*ec2.Tag
-	for key, value := range tags {
-
-		ctx.Data = &BuildInfoTemplate{
-			SourceAMI:   sourceAmiId,
-			BuildRegion: region,
-		}
-		interpolatedValue, err := interpolate.Render(value, &ctx)
-		if err != nil {
-			return ec2Tags, fmt.Errorf("Error processing tag: %s:%s - %s", key, value, err)
-		}
-
-		ec2Tags = append(ec2Tags, &ec2.Tag{
-			Key:   aws.String(key),
-			Value: aws.String(interpolatedValue),
-		})
-	}
-
-	return ec2Tags, nil
-}

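The TagMap and tag-slice helpers called above (IsSet, EC2Tags, Report) are not defined anywhere in this diff. As a rough, hypothetical sketch of the shape implied by the call sites, reusing the BuildInfoTemplate and interpolate types already referenced in this package:

    // Hypothetical sketch only; not the actual helper introduced by the change.
    type TagMap map[string]string
    type EC2Tags []*ec2.Tag

    func (t TagMap) IsSet() bool { return len(t) > 0 }

    // Report prints each tag, mirroring the removed ReportTags helper.
    func (t EC2Tags) Report(ui packer.Ui) {
    	for _, tag := range t {
    		ui.Message(fmt.Sprintf("Adding tag: \"%s\": \"%s\"",
    			aws.StringValue(tag.Key), aws.StringValue(tag.Value)))
    	}
    }

    // EC2Tags interpolates each value and converts the map to ec2.Tag values,
    // much like the removed ConvertToEC2Tags function did.
    func (t TagMap) EC2Tags(ctx interpolate.Context, region, sourceAMI string) (EC2Tags, error) {
    	var tags EC2Tags
    	for key, value := range t {
    		ctx.Data = &BuildInfoTemplate{SourceAMI: sourceAMI, BuildRegion: region}
    		v, err := interpolate.Render(value, &ctx)
    		if err != nil {
    			return nil, fmt.Errorf("Error processing tag: %s:%s - %s", key, value, err)
    		}
    		tags = append(tags, &ec2.Tag{Key: aws.String(key), Value: aws.String(v)})
    	}
    	return tags, nil
    }
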
@@ -1,27 +1,46 @@
 package common

 import (
+	"context"
 	"fmt"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )

 type StepDeregisterAMI struct {
+	AccessConfig        *AccessConfig
 	ForceDeregister     bool
 	ForceDeleteSnapshot bool
 	AMIName             string
+	Regions             []string
 }

-func (s *StepDeregisterAMI) Run(state multistep.StateBag) multistep.StepAction {
-	ec2conn := state.Get("ec2").(*ec2.EC2)
-	ui := state.Get("ui").(packer.Ui)
+func (s *StepDeregisterAMI) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {

 	// Check for force deregister
-	if s.ForceDeregister {
-		resp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
+	if !s.ForceDeregister {
+		return multistep.ActionContinue
+	}

+	ui := state.Get("ui").(packer.Ui)
+	ec2conn := state.Get("ec2").(*ec2.EC2)
+	// Add the session region to list of regions will will deregister AMIs in
+	regions := append(s.Regions, *ec2conn.Config.Region)

+	for _, region := range regions {
+		// get new connection for each region in which we need to deregister vms
+		session, err := s.AccessConfig.Session()
+		if err != nil {
+			return multistep.ActionHalt
+		}

+		regionconn := ec2.New(session.Copy(&aws.Config{
+			Region: aws.String(region)},
+		))

+		resp, err := regionconn.DescribeImages(&ec2.DescribeImagesInput{
 			Filters: []*ec2.Filter{{
 				Name:   aws.String("name"),
 				Values: []*string{aws.String(s.AMIName)},
@@ -36,7 +55,7 @@ func (s *StepDeregisterAMI) Run(state multistep.StateBag) multistep.StepAction {

 		// Deregister image(s) by name
 		for _, i := range resp.Images {
-			_, err := ec2conn.DeregisterImage(&ec2.DeregisterImageInput{
+			_, err := regionconn.DeregisterImage(&ec2.DeregisterImageInput{
 				ImageId: i.ImageId,
 			})

@@ -51,8 +70,8 @@ func (s *StepDeregisterAMI) Run(state multistep.StateBag) multistep.StepAction {
 		// Delete snapshot(s) by image
 		if s.ForceDeleteSnapshot {
 			for _, b := range i.BlockDeviceMappings {
-				if b.Ebs != nil {
-					_, err := ec2conn.DeleteSnapshot(&ec2.DeleteSnapshotInput{
+				if b.Ebs != nil && aws.StringValue(b.Ebs.SnapshotId) != "" {
+					_, err := regionconn.DeleteSnapshot(&ec2.DeleteSnapshotInput{
 						SnapshotId: b.Ebs.SnapshotId,
 					})

@@ -1,13 +1,14 @@
 package common

 import (
+	"context"
 	"fmt"
 	"log"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )

 type StepCreateEncryptedAMICopy struct {
@@ -15,9 +16,10 @@ type StepCreateEncryptedAMICopy struct {
 	KeyID             string
 	EncryptBootVolume bool
 	Name              string
+	AMIMappings       []BlockDevice
 }

-func (s *StepCreateEncryptedAMICopy) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepCreateEncryptedAMICopy) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	ui := state.Get("ui").(packer.Ui)
 	kmsKeyId := s.KeyID
@@ -36,14 +38,14 @@ func (s *StepCreateEncryptedAMICopy) Run(state multistep.StateBag) multistep.Ste
 	var region, id string
 	if amis != nil {
 		for region, id = range amis {
-			break // Only get the first
+			break // There is only ever one region:ami pair in this map
 		}
 	}

 	ui.Say(fmt.Sprintf("Copying AMI: %s(%s)", region, id))

 	if kmsKeyId != "" {
-		ui.Say(fmt.Sprintf("Encypting with KMS Key ID: %s", kmsKeyId))
+		ui.Say(fmt.Sprintf("Encrypting with KMS Key ID: %s", kmsKeyId))
 	}

 	copyOpts := &ec2.CopyImageInput{
@@ -116,9 +118,18 @@ func (s *StepCreateEncryptedAMICopy) Run(state multistep.StateBag) multistep.Ste
 	ui.Say("Deleting unencrypted snapshots")
 	snapshots := state.Get("snapshots").(map[string][]string)

+OuterLoop:
 	for _, blockDevice := range unencImage.BlockDeviceMappings {
 		if blockDevice.Ebs != nil && blockDevice.Ebs.SnapshotId != nil {
-			ui.Message(fmt.Sprintf("Snapshot ID: %s", *blockDevice.Ebs.SnapshotId))
+			// If this packer run didn't create it, then don't delete it
+			for _, origDevice := range s.AMIMappings {
+				if origDevice.SnapshotId == *blockDevice.Ebs.SnapshotId {
+					ui.Message(fmt.Sprintf("Keeping Snapshot ID: %s", *blockDevice.Ebs.SnapshotId))
+					continue OuterLoop
+				}
+			}

+			ui.Message(fmt.Sprintf("Deleting Snapshot ID: %s", *blockDevice.Ebs.SnapshotId))
 			deleteSnapOpts := &ec2.DeleteSnapshotInput{
 				SnapshotId: aws.String(*blockDevice.Ebs.SnapshotId),
 			}
@@ -161,7 +172,7 @@ func (s *StepCreateEncryptedAMICopy) Cleanup(state multistep.StateBag) {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	ui := state.Get("ui").(packer.Ui)

-	ui.Say("Deregistering the AMI because cancelation or error...")
+	ui.Say("Deregistering the AMI because cancellation or error...")
 	deregisterOpts := &ec2.DeregisterImageInput{ImageId: s.image.ImageId}
 	if _, err := ec2conn.DeregisterImage(deregisterOpts); err != nil {
 		ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", err))

@@ -1,6 +1,7 @@
 package common

 import (
+	"context"
 	"crypto/rsa"
 	"crypto/x509"
 	"encoding/base64"
@@ -11,9 +12,9 @@ import (
 	"time"

 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/helper/communicator"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/helper/communicator"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )

 // StepGetPassword reads the password from a Windows server and sets it
@@ -24,7 +25,7 @@ type StepGetPassword struct {
 	Timeout time.Duration
 }

-func (s *StepGetPassword) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepGetPassword) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ui := state.Get("ui").(packer.Ui)

 	// Skip if we're not using winrm

@@ -1,14 +1,15 @@
 package common

 import (
+	"context"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"runtime"

 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )

 type StepKeyPair struct {
@@ -19,10 +20,10 @@ type StepKeyPair struct {
 	KeyPairName    string
 	PrivateKeyFile string

-	keyName string
+	doCleanup bool
 }

-func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepKeyPair) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ui := state.Get("ui").(packer.Ui)

 	if s.PrivateKeyFile != "" {
@@ -67,11 +68,10 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
 		return multistep.ActionHalt
 	}

-	// Set the keyname so we know to delete it later
-	s.keyName = s.TemporaryKeyPairName
+	s.doCleanup = true

 	// Set some state data for use in future steps
-	state.Put("keyPair", s.keyName)
+	state.Put("keyPair", s.TemporaryKeyPairName)
 	state.Put("privateKey", *keyResp.KeyMaterial)

 	// If we're in debug mode, output the private key to the working
@@ -104,10 +104,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
 }

 func (s *StepKeyPair) Cleanup(state multistep.StateBag) {
-	// If no key name is set, then we never created it, so just return
-	// If we used an SSH private key file, do not go about deleting
-	// keypairs
-	if s.PrivateKeyFile != "" || (s.KeyPairName == "" && s.keyName == "") {
+	if !s.doCleanup {
 		return
 	}

@@ -116,10 +113,10 @@ func (s *StepKeyPair) Cleanup(state multistep.StateBag) {

 	// Remove the keypair
 	ui.Say("Deleting temporary keypair...")
-	_, err := ec2conn.DeleteKeyPair(&ec2.DeleteKeyPairInput{KeyName: &s.keyName})
+	_, err := ec2conn.DeleteKeyPair(&ec2.DeleteKeyPairInput{KeyName: &s.TemporaryKeyPairName})
 	if err != nil {
 		ui.Error(fmt.Sprintf(
-			"Error cleaning up keypair. Please delete the key manually: %s", s.keyName))
+			"Error cleaning up keypair. Please delete the key manually: %s", s.TemporaryKeyPairName))
 	}

 	// Also remove the physical key if we're debugging.

@@ -1,14 +1,15 @@
 package common

 import (
+	"context"
 	"fmt"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
-	"github.com/mitchellh/packer/template/interpolate"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
+	"github.com/hashicorp/packer/template/interpolate"
 )

 type StepModifyAMIAttributes struct {
@@ -21,8 +22,9 @@ type StepModifyAMIAttributes struct {
 	Ctx interpolate.Context
 }

-func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepModifyAMIAttributes) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
+	session := state.Get("awsSession").(*session.Session)
 	ui := state.Get("ui").(packer.Ui)
 	amis := state.Get("amis").(map[string]string)

@@ -152,22 +154,13 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc
 	// Modifying image attributes
 	for region, ami := range amis {
 		ui.Say(fmt.Sprintf("Modifying attributes on AMI (%s)...", ami))
-		awsConfig := aws.Config{
-			Credentials: ec2conn.Config.Credentials,
+		regionConn := ec2.New(session, &aws.Config{
 			Region:      aws.String(region),
-		}
-		session, err := session.NewSession(&awsConfig)
-		if err != nil {
-			err := fmt.Errorf("Error creating AWS session: %s", err)
-			state.Put("error", err)
-			ui.Error(err.Error())
-			return multistep.ActionHalt
-		}
-		regionconn := ec2.New(session)
+		})
 		for name, input := range options {
 			ui.Message(fmt.Sprintf("Modifying: %s", name))
 			input.ImageId = &ami
-			_, err := regionconn.ModifyImageAttribute(input)
+			_, err := regionConn.ModifyImageAttribute(input)
 			if err != nil {
 				err := fmt.Errorf("Error modify AMI attributes: %s", err)
 				state.Put("error", err)
@@ -181,16 +174,13 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc
 	for region, region_snapshots := range snapshots {
 		for _, snapshot := range region_snapshots {
 			ui.Say(fmt.Sprintf("Modifying attributes on snapshot (%s)...", snapshot))
-			awsConfig := aws.Config{
-				Credentials: ec2conn.Config.Credentials,
+			regionConn := ec2.New(session, &aws.Config{
 				Region:      aws.String(region),
-			}
-			session := session.New(&awsConfig)
-			regionconn := ec2.New(session)
+			})
 			for name, input := range snapshotOptions {
 				ui.Message(fmt.Sprintf("Modifying: %s", name))
 				input.SnapshotId = &snapshot
-				_, err := regionconn.ModifySnapshotAttribute(input)
+				_, err := regionConn.ModifySnapshotAttribute(input)
 				if err != nil {
 					err := fmt.Errorf("Error modify snapshot attributes: %s", err)
 					state.Put("error", err)

@@ -1,26 +1,28 @@
 package common

 import (
+	"context"
 	"fmt"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )

 type StepModifyEBSBackedInstance struct {
-	EnableEnhancedNetworking bool
+	EnableAMIENASupport      bool
+	EnableAMISriovNetSupport bool
 }

-func (s *StepModifyEBSBackedInstance) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepModifyEBSBackedInstance) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	instance := state.Get("instance").(*ec2.Instance)
 	ui := state.Get("ui").(packer.Ui)

-	if s.EnableEnhancedNetworking {
 	// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
 	// As of February 2017, this applies to C3, C4, D2, I2, R3, and M4 (excluding m4.16xlarge)
+	if s.EnableAMISriovNetSupport {
 		ui.Say("Enabling Enhanced Networking (SR-IOV)...")
 		simple := "simple"
 		_, err := ec2conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
@@ -33,11 +35,13 @@ func (s *StepModifyEBSBackedInstance) Run(state multistep.StateBag) multistep.St
 			ui.Error(err.Error())
 			return multistep.ActionHalt
 		}
+	}

 	// Set EnaSupport to true.
 	// As of February 2017, this applies to C5, I3, P2, R4, X1, and m4.16xlarge
+	if s.EnableAMIENASupport {
 		ui.Say("Enabling Enhanced Networking (ENA)...")
-		_, err = ec2conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
+		_, err := ec2conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
 			InstanceId: instance.InstanceId,
 			EnaSupport: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
 		})

@@ -1,12 +1,13 @@
 package common

 import (
+	"context"
 	"fmt"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )

 // StepPreValidate provides an opportunity to pre-validate any configuration for
@@ -17,7 +18,7 @@ type StepPreValidate struct {
 	ForceDeregister bool
 }

-func (s *StepPreValidate) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepPreValidate) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ui := state.Get("ui").(packer.Ui)
 	if s.ForceDeregister {
 		ui.Say("Force Deregister flag found, skipping prevalidating AMI Name")
@@ -26,7 +27,7 @@ func (s *StepPreValidate) Run(state multistep.StateBag) multistep.StepAction {

 	ec2conn := state.Get("ec2").(*ec2.EC2)

-	ui.Say("Prevalidating AMI Name...")
+	ui.Say(fmt.Sprintf("Prevalidating AMI Name: %s", s.DestAmiName))
 	resp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
 		Filters: []*ec2.Filter{{
 			Name: aws.String("name"),

@@ -1,45 +1,45 @@
 package common

 import (
+	"context"
 	"encoding/base64"
 	"fmt"
 	"io/ioutil"
 	"log"
-	"strconv"
-	"time"

 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/ec2"

-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
-	"github.com/mitchellh/packer/template/interpolate"
+	retry "github.com/hashicorp/packer/common"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
+	"github.com/hashicorp/packer/template/interpolate"
 )

 type StepRunSourceInstance struct {
 	AssociatePublicIpAddress          bool
 	AvailabilityZone                  string
 	BlockDevices                      BlockDevices
+	Ctx                               interpolate.Context
 	Debug                             bool
 	EbsOptimized                      bool
 	ExpectedRootDevice                string
 	IamInstanceProfile                string
 	InstanceInitiatedShutdownBehavior string
 	InstanceType                      string
+	IsRestricted                      bool
 	SourceAMI                         string
-	SpotPrice                         string
-	SpotPriceProduct                  string
 	SubnetId                          string
-	Tags                              map[string]string
+	Tags                              TagMap
 	UserData                          string
 	UserDataFile                      string
-	Ctx                               interpolate.Context
+	VolumeTags                        TagMap

 	instanceId  string
-	spotRequest *ec2.SpotInstanceRequest
 }

-func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepRunSourceInstance) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	var keyName string
 	if name, ok := state.GetOk("keyPair"); ok {
@@ -81,60 +81,29 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
 		return multistep.ActionHalt
 	}

-	spotPrice := s.SpotPrice
-	availabilityZone := s.AvailabilityZone
-	if spotPrice == "auto" {
-		ui.Message(fmt.Sprintf(
-			"Finding spot price for %s %s...",
-			s.SpotPriceProduct, s.InstanceType))
-
-		// Detect the spot price
-		startTime := time.Now().Add(-1 * time.Hour)
-		resp, err := ec2conn.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistoryInput{
-			InstanceTypes:       []*string{&s.InstanceType},
-			ProductDescriptions: []*string{&s.SpotPriceProduct},
-			AvailabilityZone:    &s.AvailabilityZone,
-			StartTime:           &startTime,
-		})
-		if err != nil {
-			err := fmt.Errorf("Error finding spot price: %s", err)
-			state.Put("error", err)
-			ui.Error(err.Error())
-			return multistep.ActionHalt
-		}
-
-		var price float64
-		for _, history := range resp.SpotPriceHistory {
-			log.Printf("[INFO] Candidate spot price: %s", *history.SpotPrice)
-			current, err := strconv.ParseFloat(*history.SpotPrice, 64)
-			if err != nil {
-				log.Printf("[ERR] Error parsing spot price: %s", err)
-				continue
-			}
-			if price == 0 || current < price {
-				price = current
-				if s.AvailabilityZone == "" {
-					availabilityZone = *history.AvailabilityZone
-				}
-			}
-		}
-		if price == 0 {
-			err := fmt.Errorf("No candidate spot prices found!")
-			state.Put("error", err)
-			ui.Error(err.Error())
-			return multistep.ActionHalt
-		} else {
-			// Add 0.5 cents to minimum spot bid to ensure capacity will be available
-			// Avoids price-too-low error in active markets which can fluctuate
-			price = price + 0.005
-		}
-
-		spotPrice = strconv.FormatFloat(price, 'f', -1, 64)
-	}
-
 	var instanceId string

-	if spotPrice == "" || spotPrice == "0" {
+	ui.Say("Adding tags to source instance")
+	if _, exists := s.Tags["Name"]; !exists {
+		s.Tags["Name"] = "Packer Builder"
+	}
+
+	ec2Tags, err := s.Tags.EC2Tags(s.Ctx, *ec2conn.Config.Region, s.SourceAMI)
+	if err != nil {
+		err := fmt.Errorf("Error tagging source instance: %s", err)
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+
+	volTags, err := s.VolumeTags.EC2Tags(s.Ctx, *ec2conn.Config.Region, s.SourceAMI)
+	if err != nil {
+		err := fmt.Errorf("Error tagging volumes: %s", err)
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+
 	runOpts := &ec2.RunInstancesInput{
 		ImageId:      &s.SourceAMI,
 		InstanceType: &s.InstanceType,
@@ -147,6 +116,34 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
 		EbsOptimized: &s.EbsOptimized,
 	}

+	// Collect tags for tagging on resource creation
+	var tagSpecs []*ec2.TagSpecification
+
+	if len(ec2Tags) > 0 {
+		runTags := &ec2.TagSpecification{
+			ResourceType: aws.String("instance"),
+			Tags:         ec2Tags,
+		}
+
+		tagSpecs = append(tagSpecs, runTags)
+	}
+
+	if len(volTags) > 0 {
+		runVolTags := &ec2.TagSpecification{
+			ResourceType: aws.String("volume"),
+			Tags:         volTags,
+		}
+
+		tagSpecs = append(tagSpecs, runVolTags)
+	}
+
+	// If our region supports it, set tag specifications
+	if len(tagSpecs) > 0 && !s.IsRestricted {
+		runOpts.SetTagSpecifications(tagSpecs)
+		ec2Tags.Report(ui)
+		volTags.Report(ui)
+	}
+
 	if keyName != "" {
 		runOpts.KeyName = &keyName
 	}
@@ -178,129 +175,32 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
 		return multistep.ActionHalt
 	}
 	instanceId = *runResp.Instances[0].InstanceId
-	} else {
-		ui.Message(fmt.Sprintf(
-			"Requesting spot instance '%s' for: %s",
-			s.InstanceType, spotPrice))
-
-		runOpts := &ec2.RequestSpotLaunchSpecification{
-			ImageId:            &s.SourceAMI,
-			InstanceType:       &s.InstanceType,
-			UserData:           &userData,
-			IamInstanceProfile: &ec2.IamInstanceProfileSpecification{Name: &s.IamInstanceProfile},
-			Placement: &ec2.SpotPlacement{
-				AvailabilityZone: &availabilityZone,
-			},
-			BlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(),
-			EbsOptimized:        &s.EbsOptimized,
-		}
-
-		if s.SubnetId != "" && s.AssociatePublicIpAddress {
-			runOpts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{
-				{
-					DeviceIndex:              aws.Int64(0),
-					AssociatePublicIpAddress: &s.AssociatePublicIpAddress,
-					SubnetId:                 &s.SubnetId,
-					Groups:                   securityGroupIds,
-					DeleteOnTermination:      aws.Bool(true),
-				},
-			}
-		} else {
-			runOpts.SubnetId = &s.SubnetId
-			runOpts.SecurityGroupIds = securityGroupIds
-		}
-
-		if keyName != "" {
-			runOpts.KeyName = &keyName
-		}
-
-		runSpotResp, err := ec2conn.RequestSpotInstances(&ec2.RequestSpotInstancesInput{
-			SpotPrice:           &spotPrice,
-			LaunchSpecification: runOpts,
-		})
-		if err != nil {
-			err := fmt.Errorf("Error launching source spot instance: %s", err)
-			state.Put("error", err)
-			ui.Error(err.Error())
-			return multistep.ActionHalt
-		}
-
-		s.spotRequest = runSpotResp.SpotInstanceRequests[0]
-
-		spotRequestId := s.spotRequest.SpotInstanceRequestId
-		ui.Message(fmt.Sprintf("Waiting for spot request (%s) to become active...", *spotRequestId))
-		stateChange := StateChangeConf{
-			Pending:   []string{"open"},
-			Target:    "active",
-			Refresh:   SpotRequestStateRefreshFunc(ec2conn, *spotRequestId),
-			StepState: state,
-		}
-		_, err = WaitForState(&stateChange)
-		if err != nil {
-			err := fmt.Errorf("Error waiting for spot request (%s) to become ready: %s", *spotRequestId, err)
-			state.Put("error", err)
-			ui.Error(err.Error())
-			return multistep.ActionHalt
-		}
-
-		spotResp, err := ec2conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{
-			SpotInstanceRequestIds: []*string{spotRequestId},
-		})
-		if err != nil {
-			err := fmt.Errorf("Error finding spot request (%s): %s", *spotRequestId, err)
-			state.Put("error", err)
-			ui.Error(err.Error())
-			return multistep.ActionHalt
-		}
-		instanceId = *spotResp.SpotInstanceRequests[0].InstanceId
-	}

 	// Set the instance ID so that the cleanup works properly
 	s.instanceId = instanceId

 	ui.Message(fmt.Sprintf("Instance ID: %s", instanceId))
 	ui.Say(fmt.Sprintf("Waiting for instance (%v) to become ready...", instanceId))
-	stateChange := StateChangeConf{
-		Pending:   []string{"pending"},
-		Target:    "running",
-		Refresh:   InstanceStateRefreshFunc(ec2conn, instanceId),
-		StepState: state,
+	describeInstance := &ec2.DescribeInstancesInput{
+		InstanceIds: []*string{aws.String(instanceId)},
 	}
-	latestInstance, err := WaitForState(&stateChange)
-	if err != nil {
+	if err := ec2conn.WaitUntilInstanceRunning(describeInstance); err != nil {
 		err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", instanceId, err)
 		state.Put("error", err)
 		ui.Error(err.Error())
 		return multistep.ActionHalt
 	}

-	instance := latestInstance.(*ec2.Instance)
-
-	ui.Say("Adding tags to source instance")
-	if _, exists := s.Tags["Name"]; !exists {
-		s.Tags["Name"] = "Packer Builder"
-	}
-
-	ec2Tags, err := ConvertToEC2Tags(s.Tags, *ec2conn.Config.Region, s.SourceAMI, s.Ctx)
-	if err != nil {
-		err := fmt.Errorf("Error tagging source instance: %s", err)
-		state.Put("error", err)
-		ui.Error(err.Error())
-		return multistep.ActionHalt
-	}
-
-	ReportTags(ui, ec2Tags)
-
-	_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
-		Tags:      ec2Tags,
-		Resources: []*string{instance.InstanceId},
-	})
-	if err != nil {
-		err := fmt.Errorf("Error tagging source instance: %s", err)
+	r, err := ec2conn.DescribeInstances(describeInstance)
+
+	if err != nil || len(r.Reservations) == 0 || len(r.Reservations[0].Instances) == 0 {
+		err := fmt.Errorf("Error finding source instance.")
 		state.Put("error", err)
 		ui.Error(err.Error())
 		return multistep.ActionHalt
 	}
+	instance := r.Reservations[0].Instances[0]

 	if s.Debug {
 		if instance.PublicDnsName != nil && *instance.PublicDnsName != "" {
@@ -318,6 +218,70 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi

 	state.Put("instance", instance)

+	// If we're in a region that doesn't support tagging on instance creation,
+	// do that now.
+
+	if s.IsRestricted {
+		ec2Tags.Report(ui)
+		// Retry creating tags for about 2.5 minutes
+		err = retry.Retry(0.2, 30, 11, func(_ uint) (bool, error) {
+			_, err := ec2conn.CreateTags(&ec2.CreateTagsInput{
+				Tags:      ec2Tags,
+				Resources: []*string{instance.InstanceId},
+			})
+			if err == nil {
+				return true, nil
+			}
+			if awsErr, ok := err.(awserr.Error); ok {
+				if awsErr.Code() == "InvalidInstanceID.NotFound" {
+					return false, nil
+				}
+			}
+			return true, err
+		})
+
+		if err != nil {
+			err := fmt.Errorf("Error tagging source instance: %s", err)
+			state.Put("error", err)
+			ui.Error(err.Error())
+			return multistep.ActionHalt
+		}
+
+		// Now tag volumes
+
+		volumeIds := make([]*string, 0)
+		for _, v := range instance.BlockDeviceMappings {
+			if ebs := v.Ebs; ebs != nil {
+				volumeIds = append(volumeIds, ebs.VolumeId)
+			}
+		}
+
+		if len(volumeIds) > 0 && s.VolumeTags.IsSet() {
+			ui.Say("Adding tags to source EBS Volumes")
+
+			volumeTags, err := s.VolumeTags.EC2Tags(s.Ctx, *ec2conn.Config.Region, s.SourceAMI)
+			if err != nil {
+				err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err)
+				state.Put("error", err)
+				ui.Error(err.Error())
+				return multistep.ActionHalt
+			}
+			volumeTags.Report(ui)
+
+			_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
+				Resources: volumeIds,
+				Tags:      volumeTags,
+			})
+
+			if err != nil {
+				err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err)
+				state.Put("error", err)
+				ui.Error(err.Error())
+				return multistep.ActionHalt
+			}
+		}
+	}

 	return multistep.ActionContinue
 }

@@ -326,29 +290,6 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	ui := state.Get("ui").(packer.Ui)

-	// Cancel the spot request if it exists
-	if s.spotRequest != nil {
-		ui.Say("Cancelling the spot request...")
-		input := &ec2.CancelSpotInstanceRequestsInput{
-			SpotInstanceRequestIds: []*string{s.spotRequest.SpotInstanceRequestId},
-		}
-		if _, err := ec2conn.CancelSpotInstanceRequests(input); err != nil {
-			ui.Error(fmt.Sprintf("Error cancelling the spot request, may still be around: %s", err))
-			return
-		}
-		stateChange := StateChangeConf{
-			Pending: []string{"active", "open"},
-			Refresh: SpotRequestStateRefreshFunc(ec2conn, *s.spotRequest.SpotInstanceRequestId),
-			Target:  "cancelled",
-		}
-
-		_, err := WaitForState(&stateChange)
-		if err != nil {
-			ui.Error(err.Error())
-		}
-
-	}
-
 	// Terminate the source instance if it exists
 	if s.instanceId != "" {
 		ui.Say("Terminating the source AWS instance...")

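Design note on the change above: where the region supports it, tags are now attached at launch time through RunInstances tag specifications, and only restricted regions fall back to CreateTags with a retry loop. A minimal sketch of the launch-time tagging pattern, reusing the aws-sdk-go v1 types seen in this diff (the helper is illustrative, not part of the change):

    // buildTagSpecs wraps the same tags for both the instance and its volumes
    // so they are applied when RunInstances creates the resources.
    func buildTagSpecs(tags []*ec2.Tag) []*ec2.TagSpecification {
    	return []*ec2.TagSpecification{
    		{ResourceType: aws.String("instance"), Tags: tags},
    		{ResourceType: aws.String("volume"), Tags: tags},
    	}
    }
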
@ -0,0 +1,378 @@
|
||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
|
|
||||||
|
retry "github.com/hashicorp/packer/common"
|
||||||
|
"github.com/hashicorp/packer/helper/multistep"
|
||||||
|
"github.com/hashicorp/packer/packer"
|
||||||
|
"github.com/hashicorp/packer/template/interpolate"
|
||||||
|
)
|
||||||
|
|
||||||
|
type StepRunSpotInstance struct {
|
||||||
|
AssociatePublicIpAddress bool
|
||||||
|
AvailabilityZone string
|
||||||
|
BlockDevices BlockDevices
|
||||||
|
Debug bool
|
||||||
|
EbsOptimized bool
|
||||||
|
ExpectedRootDevice string
|
||||||
|
IamInstanceProfile string
|
||||||
|
InstanceInitiatedShutdownBehavior string
|
||||||
|
InstanceType string
|
||||||
|
SourceAMI string
|
||||||
|
SpotPrice string
|
||||||
|
SpotPriceProduct string
|
||||||
|
SubnetId string
|
||||||
|
Tags TagMap
|
||||||
|
VolumeTags TagMap
|
||||||
|
UserData string
|
||||||
|
UserDataFile string
|
||||||
|
Ctx interpolate.Context
|
||||||
|
|
||||||
|
instanceId string
|
||||||
|
spotRequest *ec2.SpotInstanceRequest
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StepRunSpotInstance) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||||
|
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||||
|
var keyName string
|
||||||
|
if name, ok := state.GetOk("keyPair"); ok {
|
||||||
|
keyName = name.(string)
|
||||||
|
}
|
||||||
|
securityGroupIds := aws.StringSlice(state.Get("securityGroupIds").([]string))
|
||||||
|
ui := state.Get("ui").(packer.Ui)
|
||||||
|
|
||||||
|
userData := s.UserData
|
||||||
|
if s.UserDataFile != "" {
|
||||||
|
contents, err := ioutil.ReadFile(s.UserDataFile)
|
||||||
|
if err != nil {
|
||||||
|
state.Put("error", fmt.Errorf("Problem reading user data file: %s", err))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
userData = string(contents)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test if it is encoded already, and if not, encode it
|
||||||
|
if _, err := base64.StdEncoding.DecodeString(userData); err != nil {
|
||||||
|
log.Printf("[DEBUG] base64 encoding user data...")
|
||||||
|
userData = base64.StdEncoding.EncodeToString([]byte(userData))
|
||||||
|
}
|
||||||
|
|
||||||
|
ui.Say("Launching a source AWS instance...")
|
||||||
|
image, ok := state.Get("source_image").(*ec2.Image)
|
||||||
|
if !ok {
|
||||||
|
state.Put("error", fmt.Errorf("source_image type assertion failed"))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
s.SourceAMI = *image.ImageId
|
||||||
|
|
||||||
|
if s.ExpectedRootDevice != "" && *image.RootDeviceType != s.ExpectedRootDevice {
|
||||||
|
state.Put("error", fmt.Errorf(
|
||||||
|
"The provided source AMI has an invalid root device type.\n"+
|
||||||
|
"Expected '%s', got '%s'.",
|
||||||
|
s.ExpectedRootDevice, *image.RootDeviceType))
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
spotPrice := s.SpotPrice
|
||||||
|
availabilityZone := s.AvailabilityZone
|
||||||
|
if spotPrice == "auto" {
|
||||||
|
ui.Message(fmt.Sprintf(
|
||||||
|
"Finding spot price for %s %s...",
|
||||||
|
s.SpotPriceProduct, s.InstanceType))
|
||||||
|
|
||||||
|
// Detect the spot price
|
||||||
|
startTime := time.Now().Add(-1 * time.Hour)
|
||||||
|
resp, err := ec2conn.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistoryInput{
|
||||||
|
InstanceTypes: []*string{&s.InstanceType},
|
||||||
|
ProductDescriptions: []*string{&s.SpotPriceProduct},
|
||||||
|
AvailabilityZone: &s.AvailabilityZone,
|
||||||
|
StartTime: &startTime,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf("Error finding spot price: %s", err)
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
}
|
||||||
|
|
||||||
|
var price float64
|
||||||
|
for _, history := range resp.SpotPriceHistory {
|
||||||
|
log.Printf("[INFO] Candidate spot price: %s", *history.SpotPrice)
|
||||||
|
current, err := strconv.ParseFloat(*history.SpotPrice, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("[ERR] Error parsing spot price: %s", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if price == 0 || current < price {
|
||||||
|
price = current
|
||||||
|
if s.AvailabilityZone == "" {
|
||||||
|
availabilityZone = *history.AvailabilityZone
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if price == 0 {
|
||||||
|
err := fmt.Errorf("No candidate spot prices found!")
|
||||||
|
state.Put("error", err)
|
||||||
|
ui.Error(err.Error())
|
||||||
|
return multistep.ActionHalt
|
||||||
|
} else {
|
||||||
|
// Add 0.5 cents to minimum spot bid to ensure capacity will be available
|
||||||
|
// Avoids price-too-low error in active markets which can fluctuate
|
||||||
|
price = price + 0.005
|
||||||
|
}
|
||||||
|
|
||||||
|
spotPrice = strconv.FormatFloat(price, 'f', -1, 64)
|
||||||
|
}
|
+
+	var instanceId string
+
+	ui.Say("Adding tags to source instance")
+	if _, exists := s.Tags["Name"]; !exists {
+		s.Tags["Name"] = "Packer Builder"
+	}
+
+	ec2Tags, err := s.Tags.EC2Tags(s.Ctx, *ec2conn.Config.Region, s.SourceAMI)
+	if err != nil {
+		err := fmt.Errorf("Error tagging source instance: %s", err)
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+	ec2Tags.Report(ui)
+
+	ui.Message(fmt.Sprintf(
+		"Requesting spot instance '%s' for: %s",
+		s.InstanceType, spotPrice))
+
+	runOpts := &ec2.RequestSpotLaunchSpecification{
+		ImageId:            &s.SourceAMI,
+		InstanceType:       &s.InstanceType,
+		UserData:           &userData,
+		IamInstanceProfile: &ec2.IamInstanceProfileSpecification{Name: &s.IamInstanceProfile},
+		Placement: &ec2.SpotPlacement{
+			AvailabilityZone: &availabilityZone,
+		},
+		BlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(),
+		EbsOptimized:        &s.EbsOptimized,
+	}
+
+	if s.SubnetId != "" && s.AssociatePublicIpAddress {
+		runOpts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{
+			{
+				DeviceIndex:              aws.Int64(0),
+				AssociatePublicIpAddress: &s.AssociatePublicIpAddress,
+				SubnetId:                 &s.SubnetId,
+				Groups:                   securityGroupIds,
+				DeleteOnTermination:      aws.Bool(true),
+			},
+		}
+	} else {
+		runOpts.SubnetId = &s.SubnetId
+		runOpts.SecurityGroupIds = securityGroupIds
+	}
+
+	if keyName != "" {
+		runOpts.KeyName = &keyName
+	}
+
+	runSpotResp, err := ec2conn.RequestSpotInstances(&ec2.RequestSpotInstancesInput{
+		SpotPrice:           &spotPrice,
+		LaunchSpecification: runOpts,
+	})
+	if err != nil {
+		err := fmt.Errorf("Error launching source spot instance: %s", err)
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+
+	s.spotRequest = runSpotResp.SpotInstanceRequests[0]
+
+	spotRequestId := s.spotRequest.SpotInstanceRequestId
+	ui.Message(fmt.Sprintf("Waiting for spot request (%s) to become active...", *spotRequestId))
+	stateChange := StateChangeConf{
+		Pending:   []string{"open"},
+		Target:    "active",
+		Refresh:   SpotRequestStateRefreshFunc(ec2conn, *spotRequestId),
+		StepState: state,
+	}
+	_, err = WaitForState(&stateChange)
+	if err != nil {
+		err := fmt.Errorf("Error waiting for spot request (%s) to become ready: %s", *spotRequestId, err)
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+
+	spotResp, err := ec2conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{
+		SpotInstanceRequestIds: []*string{spotRequestId},
+	})
+	if err != nil {
+		err := fmt.Errorf("Error finding spot request (%s): %s", *spotRequestId, err)
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+	instanceId = *spotResp.SpotInstanceRequests[0].InstanceId
+
+	// Set the instance ID so that the cleanup works properly
+	s.instanceId = instanceId
+
+	ui.Message(fmt.Sprintf("Instance ID: %s", instanceId))
+	ui.Say(fmt.Sprintf("Waiting for instance (%v) to become ready...", instanceId))
+	describeInstance := &ec2.DescribeInstancesInput{
+		InstanceIds: []*string{aws.String(instanceId)},
+	}
+	if err := ec2conn.WaitUntilInstanceRunning(describeInstance); err != nil {
+		err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", instanceId, err)
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+
+	r, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{
+		InstanceIds: []*string{aws.String(instanceId)},
+	})
+	if err != nil || len(r.Reservations) == 0 || len(r.Reservations[0].Instances) == 0 {
+		err := fmt.Errorf("Error finding source instance.")
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+	instance := r.Reservations[0].Instances[0]
+
+	// Retry creating tags for about 2.5 minutes
+	err = retry.Retry(0.2, 30, 11, func(_ uint) (bool, error) {
+		_, err := ec2conn.CreateTags(&ec2.CreateTagsInput{
+			Tags:      ec2Tags,
+			Resources: []*string{instance.InstanceId},
+		})
+		if err == nil {
+			return true, nil
+		}
+		if awsErr, ok := err.(awserr.Error); ok {
+			if awsErr.Code() == "InvalidInstanceID.NotFound" {
+				return false, nil
+			}
+		}
+		return true, err
+	})
+
+	if err != nil {
+		err := fmt.Errorf("Error tagging source instance: %s", err)
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
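CreateTags right after launch can race EC2's eventual consistency, which is why the block above keeps retrying while the API still reports the new instance ID as unknown. A small self-contained sketch of that retry predicate — the helper and sample error are invented for illustration, but the error code and the awserr type assertion are taken from the code above:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// isRetryableTagError reports whether a CreateTags failure is the
// eventual-consistency case retried above: the instance ID has not
// propagated yet, so EC2 answers InvalidInstanceID.NotFound.
func isRetryableTagError(err error) bool {
	if awsErr, ok := err.(awserr.Error); ok {
		return awsErr.Code() == "InvalidInstanceID.NotFound"
	}
	return false
}

func main() {
	err := awserr.New("InvalidInstanceID.NotFound", "instance not found", nil)
	fmt.Println(isRetryableTagError(err)) // true
}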
+
+	volumeIds := make([]*string, 0)
+	for _, v := range instance.BlockDeviceMappings {
+		if ebs := v.Ebs; ebs != nil {
+			volumeIds = append(volumeIds, ebs.VolumeId)
+		}
+	}
+
+	if len(volumeIds) > 0 && s.VolumeTags.IsSet() {
+		ui.Say("Adding tags to source EBS Volumes")
+
+		volumeTags, err := s.VolumeTags.EC2Tags(s.Ctx, *ec2conn.Config.Region, s.SourceAMI)
+		if err != nil {
+			err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err)
+			state.Put("error", err)
+			ui.Error(err.Error())
+			return multistep.ActionHalt
+		}
+		volumeTags.Report(ui)
+
+		_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
+			Resources: volumeIds,
+			Tags:      volumeTags,
+		})
+
+		if err != nil {
+			err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err)
+			state.Put("error", err)
+			ui.Error(err.Error())
+			return multistep.ActionHalt
+		}
+
+	}
+
+	if s.Debug {
+		if instance.PublicDnsName != nil && *instance.PublicDnsName != "" {
+			ui.Message(fmt.Sprintf("Public DNS: %s", *instance.PublicDnsName))
+		}
+
+		if instance.PublicIpAddress != nil && *instance.PublicIpAddress != "" {
+			ui.Message(fmt.Sprintf("Public IP: %s", *instance.PublicIpAddress))
+		}
+
+		if instance.PrivateIpAddress != nil && *instance.PrivateIpAddress != "" {
+			ui.Message(fmt.Sprintf("Private IP: %s", *instance.PrivateIpAddress))
+		}
+	}
+
+	state.Put("instance", instance)
+
+	return multistep.ActionContinue
+}
+
+func (s *StepRunSpotInstance) Cleanup(state multistep.StateBag) {
+
+	ec2conn := state.Get("ec2").(*ec2.EC2)
+	ui := state.Get("ui").(packer.Ui)
+
+	// Cancel the spot request if it exists
+	if s.spotRequest != nil {
+		ui.Say("Cancelling the spot request...")
+		input := &ec2.CancelSpotInstanceRequestsInput{
+			SpotInstanceRequestIds: []*string{s.spotRequest.SpotInstanceRequestId},
+		}
+		if _, err := ec2conn.CancelSpotInstanceRequests(input); err != nil {
+			ui.Error(fmt.Sprintf("Error cancelling the spot request, may still be around: %s", err))
+			return
+		}
+		stateChange := StateChangeConf{
+			Pending: []string{"active", "open"},
+			Refresh: SpotRequestStateRefreshFunc(ec2conn, *s.spotRequest.SpotInstanceRequestId),
+			Target:  "cancelled",
+		}
+
+		_, err := WaitForState(&stateChange)
+		if err != nil {
+			ui.Error(err.Error())
+		}
+
+	}
+
+	// Terminate the source instance if it exists
+	if s.instanceId != "" {
+		ui.Say("Terminating the source AWS instance...")
+		if _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIds: []*string{&s.instanceId}}); err != nil {
+			ui.Error(fmt.Sprintf("Error terminating instance, may still be around: %s", err))
+			return
+		}
+		stateChange := StateChangeConf{
+			Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"},
+			Refresh: InstanceStateRefreshFunc(ec2conn, s.instanceId),
+			Target:  "terminated",
+		}
+
+		_, err := WaitForState(&stateChange)
+		if err != nil {
+			ui.Error(err.Error())
+		}
+	}
+}
@@ -1,28 +1,30 @@
 package common
 
 import (
+	"context"
 	"fmt"
 	"log"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/private/waiter"
+	"github.com/aws/aws-sdk-go/aws/request"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/common/uuid"
-	"github.com/mitchellh/packer/helper/communicator"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/common/uuid"
+	"github.com/hashicorp/packer/helper/communicator"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 type StepSecurityGroup struct {
 	CommConfig            *communicator.Config
 	SecurityGroupIds      []string
 	VpcId                 string
+	TemporarySGSourceCidr string
 
 	createdGroupId string
 }
 
-func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepSecurityGroup) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	ui := state.Get("ui").(packer.Ui)
 
@@ -45,18 +47,23 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction {
 
 	port := s.CommConfig.Port()
 	if port == 0 {
+		if s.CommConfig.Type != "none" {
			panic("port must be set to a non-zero value.")
		}
+	}
 
 	// Create the group
-	ui.Say("Creating temporary security group for this instance...")
-	groupName := fmt.Sprintf("packer %s", uuid.TimeOrderedUUID())
-	log.Printf("Temporary group name: %s", groupName)
+	groupName := fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
+	ui.Say(fmt.Sprintf("Creating temporary security group for this instance: %s", groupName))
 	group := &ec2.CreateSecurityGroupInput{
 		GroupName:   &groupName,
 		Description: aws.String("Temporary group for Packer"),
-		VpcId:       &s.VpcId,
 	}
 
+	if s.VpcId != "" {
+		group.VpcId = &s.VpcId
+	}
 
 	groupResp, err := ec2conn.CreateSecurityGroup(group)
 	if err != nil {
 		ui.Error(err.Error())
@@ -73,15 +80,15 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction {
 		IpProtocol: aws.String("tcp"),
 		FromPort:   aws.Int64(int64(port)),
 		ToPort:     aws.Int64(int64(port)),
-		CidrIp:     aws.String("0.0.0.0/0"),
+		CidrIp:     aws.String(s.TemporarySGSourceCidr),
 	}
 
 	// We loop and retry this a few times because sometimes the security
 	// group isn't available immediately because AWS resources are eventually
 	// consistent.
 	ui.Say(fmt.Sprintf(
-		"Authorizing access to port %d the temporary security group...",
-		port))
+		"Authorizing access to port %d from %s in the temporary security group...",
+		port, s.TemporarySGSourceCidr))
 	for i := 0; i < 5; i++ {
 		_, err = ec2conn.AuthorizeSecurityGroupIngress(req)
 		if err == nil {
@@ -148,36 +155,43 @@ func (s *StepSecurityGroup) Cleanup(state multistep.StateBag) {
 }
 
 func waitUntilSecurityGroupExists(c *ec2.EC2, input *ec2.DescribeSecurityGroupsInput) error {
-	waiterCfg := waiter.Config{
-		Operation:   "DescribeSecurityGroups",
-		Delay:       15,
+	ctx := aws.BackgroundContext()
+	w := request.Waiter{
+		Name:        "DescribeSecurityGroups",
 		MaxAttempts: 40,
-		Acceptors: []waiter.WaitAcceptor{
+		Delay:       request.ConstantWaiterDelay(5 * time.Second),
+		Acceptors: []request.WaiterAcceptor{
 			{
-				State:    "success",
-				Matcher:  "path",
+				State:    request.SuccessWaiterState,
+				Matcher:  request.PathWaiterMatch,
 				Argument: "length(SecurityGroups[]) > `0`",
 				Expected: true,
 			},
 			{
-				State:    "retry",
-				Matcher:  "error",
+				State:    request.RetryWaiterState,
+				Matcher:  request.ErrorWaiterMatch,
 				Argument: "",
 				Expected: "InvalidGroup.NotFound",
 			},
 			{
-				State:    "retry",
-				Matcher:  "error",
+				State:    request.RetryWaiterState,
+				Matcher:  request.ErrorWaiterMatch,
 				Argument: "",
 				Expected: "InvalidSecurityGroupID.NotFound",
 			},
 		},
+		Logger: c.Config.Logger,
+		NewRequest: func(opts []request.Option) (*request.Request, error) {
+			var inCpy *ec2.DescribeSecurityGroupsInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
			}
+			req, _ := c.DescribeSecurityGroupsRequest(inCpy)
-	w := waiter.Waiter{
-		Client: c,
-		Input:  input,
-		Config: waiterCfg,
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
 	}
-	return w.Wait()
+	return w.WaitWithContext(ctx)
 }
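The waiter above moves from the SDK's old private waiter package to the public request.Waiter API while keeping the same behaviour: poll DescribeSecurityGroups until the group shows up, retrying the two NotFound error codes. A hypothetical call site for it, assuming this sketch sits in the same package as the helper and that the caller already holds the ID returned by CreateSecurityGroup:

// ensureGroupVisible is illustrative only, not part of the diff: block until a
// just-created group is actually visible to DescribeSecurityGroups.
func ensureGroupVisible(ec2conn *ec2.EC2, groupId string) error {
	input := &ec2.DescribeSecurityGroupsInput{
		GroupIds: []*string{aws.String(groupId)},
	}
	if err := waitUntilSecurityGroupExists(ec2conn, input); err != nil {
		return fmt.Errorf("Timed out waiting for security group %s: %s", groupId, err)
	}
	return nil
}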
@@ -1,14 +1,15 @@
 package common
 
 import (
+	"context"
 	"fmt"
 	"log"
 	"sort"
 	"time"
 
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 // StepSourceAMIInfo extracts critical information from the source AMI
@@ -18,7 +19,8 @@ import (
 //   source_image *ec2.Image - the source AMI info
 type StepSourceAMIInfo struct {
 	SourceAmi                string
-	EnhancedNetworking       bool
+	EnableAMISriovNetSupport bool
+	EnableAMIENASupport      bool
 	AmiFilters               AmiFilterOptions
 }
 
@@ -51,7 +53,7 @@ func mostRecentAmi(images []*ec2.Image) *ec2.Image {
 	return sortedImages[len(sortedImages)-1]
 }
 
-func (s *StepSourceAMIInfo) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepSourceAMIInfo) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	ui := state.Get("ui").(packer.Ui)
 
@@ -103,7 +105,7 @@ func (s *StepSourceAMIInfo) Run(state multistep.StateBag) multistep.StepAction {
 
 	// Enhanced Networking can only be enabled on HVM AMIs.
 	// See http://goo.gl/icuXh5
-	if s.EnhancedNetworking && *image.VirtualizationType != "hvm" {
+	if (s.EnableAMIENASupport || s.EnableAMISriovNetSupport) && *image.VirtualizationType != "hvm" {
 		err := fmt.Errorf("Cannot enable enhanced networking, source AMI '%s' is not HVM", s.SourceAmi)
 		state.Put("error", err)
 		ui.Error(err.Error())
@@ -1,25 +1,28 @@
 package common
 
 import (
+	"context"
 	"fmt"
 
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/common"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
 )
 
 type StepStopEBSBackedInstance struct {
-	SpotPrice           string
+	Skip                bool
 	DisableStopInstance bool
 }
 
-func (s *StepStopEBSBackedInstance) Run(state multistep.StateBag) multistep.StepAction {
+func (s *StepStopEBSBackedInstance) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
 	ec2conn := state.Get("ec2").(*ec2.EC2)
 	instance := state.Get("instance").(*ec2.Instance)
 	ui := state.Get("ui").(packer.Ui)
 
 	// Skip when it is a spot instance
-	if s.SpotPrice != "" && s.SpotPrice != "0" {
+	if s.Skip {
 		return multistep.ActionContinue
 	}
 
@@ -28,28 +31,57 @@ func (s *StepStopEBSBackedInstance) Run(state multistep.StateBag) multistep.Step
 	if !s.DisableStopInstance {
 		// Stop the instance so we can create an AMI from it
 		ui.Say("Stopping the source instance...")
 
+		// Amazon EC2 API follows an eventual consistency model.
+
+		// This means that if you run a command to modify or describe a resource
+		// that you just created, its ID might not have propagated throughout
+		// the system, and you will get an error responding that the resource
+		// does not exist.
+
+		// Work around this by retrying a few times, up to about 5 minutes.
+		err := common.Retry(10, 60, 6, func(i uint) (bool, error) {
+			ui.Message(fmt.Sprintf("Stopping instance, attempt %d", i+1))
+
			_, err = ec2conn.StopInstances(&ec2.StopInstancesInput{
				InstanceIds: []*string{instance.InstanceId},
			})
+
+			if err == nil {
+				// success
+				return true, nil
+			}
+
+			if awsErr, ok := err.(awserr.Error); ok {
+				if awsErr.Code() == "InvalidInstanceID.NotFound" {
+					ui.Message(fmt.Sprintf(
+						"Error stopping instance; will retry ..."+
+							"Error: %s", err))
+					// retry
+					return false, nil
+				}
+			}
+			// errored, but not in expected way. Don't want to retry
+			return true, err
+		})
 
 		if err != nil {
 			err := fmt.Errorf("Error stopping instance: %s", err)
 			state.Put("error", err)
 			ui.Error(err.Error())
 			return multistep.ActionHalt
 		}
 
 	} else {
 		ui.Say("Automatic instance stop disabled. Please stop instance manually.")
 	}
 
-	// Wait for the instance to actual stop
+	// Wait for the instance to actually stop
 	ui.Say("Waiting for the instance to stop...")
-	stateChange := StateChangeConf{
-		Pending:   []string{"running", "stopping"},
-		Target:    "stopped",
-		Refresh:   InstanceStateRefreshFunc(ec2conn, *instance.InstanceId),
-		StepState: state,
-	}
-	_, err = WaitForState(&stateChange)
+	err = ec2conn.WaitUntilInstanceStopped(&ec2.DescribeInstancesInput{
+		InstanceIds: []*string{instance.InstanceId},
+	})
 	if err != nil {
 		err := fmt.Errorf("Error waiting for instance to stop: %s", err)
 		state.Put("error", err)
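The StopInstances call is now wrapped in packer's common.Retry because, per the comment above, a freshly launched instance ID may not have propagated when this step runs; the comment in the hunk says the chosen arguments retry for up to about five minutes. A small sketch of the callback contract as it appears above — return (true, nil) when done, (false, nil) to ask for another attempt, (true, err) to give up — using a stubbed flaky operation; the numeric arguments are copied from the hunk and everything else is illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/packer/common"
)

func main() {
	attempts := 0
	err := common.Retry(10, 60, 6, func(i uint) (bool, error) {
		attempts++
		fmt.Printf("attempt %d\n", i+1)
		if attempts < 3 {
			// Pretend the resource has not propagated yet and ask for a retry.
			return false, nil
		}
		// Done; returning (true, err) instead would abort with that error.
		return true, nil
	})
	fmt.Println("finished after", attempts, "attempts, err =", err)
}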
@@ -1,65 +0,0 @@
-package common
-
-import (
-	"fmt"
-
-	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	"github.com/mitchellh/packer/packer"
-	"github.com/mitchellh/packer/template/interpolate"
-)
-
-type StepTagEBSVolumes struct {
-	VolumeRunTags map[string]string
-	Ctx           interpolate.Context
-}
-
-func (s *StepTagEBSVolumes) Run(state multistep.StateBag) multistep.StepAction {
-	ec2conn := state.Get("ec2").(*ec2.EC2)
-	instance := state.Get("instance").(*ec2.Instance)
-	sourceAMI := state.Get("source_image").(*ec2.Image)
-	ui := state.Get("ui").(packer.Ui)
-
-	if len(s.VolumeRunTags) == 0 {
-		return multistep.ActionContinue
-	}
-
-	volumeIds := make([]*string, 0)
-	for _, v := range instance.BlockDeviceMappings {
-		if ebs := v.Ebs; ebs != nil {
-			volumeIds = append(volumeIds, ebs.VolumeId)
-		}
-	}
-
-	if len(volumeIds) == 0 {
-		return multistep.ActionContinue
-	}
-
-	ui.Say("Adding tags to source EBS Volumes")
-	tags, err := ConvertToEC2Tags(s.VolumeRunTags, *ec2conn.Config.Region, *sourceAMI.ImageId, s.Ctx)
-	if err != nil {
-		err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err)
-		state.Put("error", err)
-		ui.Error(err.Error())
-		return multistep.ActionHalt
-	}
-
-	ReportTags(ui, tags)
-
-	_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
-		Resources: volumeIds,
-		Tags:      tags,
-	})
-	if err != nil {
-		err := fmt.Errorf("Error tagging source EBS Volumes on %s: %s", *instance.InstanceId, err)
-		state.Put("error", err)
-		ui.Error(err.Error())
-		return multistep.ActionHalt
-	}
-
-	return multistep.ActionContinue
-}
-
-func (s *StepTagEBSVolumes) Cleanup(state multistep.StateBag) {
-	// No cleanup...
-}
@@ -0,0 +1,47 @@
+package common
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/packer/packer"
+	"github.com/hashicorp/packer/template/interpolate"
+)
+
+type TagMap map[string]string
+type EC2Tags []*ec2.Tag
+
+func (t EC2Tags) Report(ui packer.Ui) {
+	for _, tag := range t {
+		ui.Message(fmt.Sprintf("Adding tag: \"%s\": \"%s\"",
+			aws.StringValue(tag.Key), aws.StringValue(tag.Value)))
+	}
+}
+
+func (t TagMap) IsSet() bool {
+	return len(t) > 0
+}
+
+func (t TagMap) EC2Tags(ctx interpolate.Context, region, sourceAMIID string) (EC2Tags, error) {
+	var ec2Tags []*ec2.Tag
+	ctx.Data = &BuildInfoTemplate{
+		SourceAMI:   sourceAMIID,
+		BuildRegion: region,
+	}
+	for key, value := range t {
+		interpolatedKey, err := interpolate.Render(key, &ctx)
+		if err != nil {
+			return nil, fmt.Errorf("Error processing tag: %s:%s - %s", key, value, err)
+		}
+		interpolatedValue, err := interpolate.Render(value, &ctx)
+		if err != nil {
+			return nil, fmt.Errorf("Error processing tag: %s:%s - %s", key, value, err)
+		}
+		ec2Tags = append(ec2Tags, &ec2.Tag{
+			Key:   aws.String(interpolatedKey),
+			Value: aws.String(interpolatedValue),
+		})
+	}
+	return ec2Tags, nil
+}
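The new tags.go gathers the tag plumbing in one place: a TagMap renders its keys and values through the template engine (so values like {{ .SourceAMI }} and {{ .BuildRegion }} resolve per build), and the resulting EC2Tags can be reported to the UI and handed straight to CreateTags. An illustrative helper showing that flow, assuming it sits in the same package — it is not part of the change, it just restates how StepRunSpotInstance uses these types above:

// tagResources is a hypothetical wrapper around the TagMap -> EC2Tags ->
// CreateTags flow used by the run steps.
func tagResources(ec2conn *ec2.EC2, ui packer.Ui, ictx interpolate.Context,
	region, sourceAMI string, resourceIds []*string, tags TagMap) error {

	if !tags.IsSet() || len(resourceIds) == 0 {
		return nil
	}
	ec2Tags, err := tags.EC2Tags(ictx, region, sourceAMI)
	if err != nil {
		return err
	}
	ec2Tags.Report(ui)
	_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
		Resources: resourceIds,
		Tags:      ec2Tags,
	})
	return err
}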
@@ -9,15 +9,14 @@ import (
 	"fmt"
 	"log"
 
-	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/multistep"
-	awscommon "github.com/mitchellh/packer/builder/amazon/common"
-	"github.com/mitchellh/packer/common"
-	"github.com/mitchellh/packer/helper/communicator"
-	"github.com/mitchellh/packer/helper/config"
-	"github.com/mitchellh/packer/packer"
-	"github.com/mitchellh/packer/template/interpolate"
+	awscommon "github.com/hashicorp/packer/builder/amazon/common"
+	"github.com/hashicorp/packer/common"
+	"github.com/hashicorp/packer/helper/communicator"
+	"github.com/hashicorp/packer/helper/config"
+	"github.com/hashicorp/packer/helper/multistep"
+	"github.com/hashicorp/packer/packer"
+	"github.com/hashicorp/packer/template/interpolate"
 )
 
 // The unique ID for this builder
@@ -29,7 +28,7 @@ type Config struct {
 	awscommon.AMIConfig    `mapstructure:",squash"`
 	awscommon.BlockDevices `mapstructure:",squash"`
 	awscommon.RunConfig    `mapstructure:",squash"`
-	VolumeRunTags map[string]string `mapstructure:"run_volume_tags"`
+	VolumeRunTags awscommon.TagMap `mapstructure:"run_volume_tags"`
 
 	ctx interpolate.Context
 }
@@ -65,48 +64,104 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
 	// Accumulate any errors
 	var errs *packer.MultiError
 	errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)
+	errs = packer.MultiErrorAppend(errs,
+		b.config.AMIConfig.Prepare(&b.config.AccessConfig, &b.config.ctx)...)
 	errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(&b.config.ctx)...)
-	errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(&b.config.ctx)...)
 	errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)
 
+	if b.config.IsSpotInstance() && (b.config.AMIENASupport || b.config.AMISriovNetSupport) {
+		errs = packer.MultiErrorAppend(errs,
+			fmt.Errorf("Spot instances do not support modification, which is required "+
+				"when either `ena_support` or `sriov_support` are set. Please ensure "+
+				"you use an AMI that already has either SR-IOV or ENA enabled."))
+	}
+
 	if errs != nil && len(errs.Errors) > 0 {
 		return nil, errs
 	}
 
-	log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey))
+	log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey, b.config.Token))
 	return nil, nil
 }
 
 func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
-	config, err := b.config.Config()
-	if err != nil {
-		return nil, err
-	}
-
-	session, err := session.NewSession(config)
+	session, err := b.config.Session()
 	if err != nil {
 		return nil, err
 	}
 	ec2conn := ec2.New(session)
 
-	// If the subnet is specified but not the AZ, try to determine the AZ automatically
-	if b.config.SubnetId != "" && b.config.AvailabilityZone == "" {
-		log.Printf("[INFO] Finding AZ for the given subnet '%s'", b.config.SubnetId)
+	// If the subnet is specified but not the VpcId or AZ, try to determine them automatically
+	if b.config.SubnetId != "" && (b.config.AvailabilityZone == "" || b.config.VpcId == "") {
+		log.Printf("[INFO] Finding AZ and VpcId for the given subnet '%s'", b.config.SubnetId)
 		resp, err := ec2conn.DescribeSubnets(&ec2.DescribeSubnetsInput{SubnetIds: []*string{&b.config.SubnetId}})
 		if err != nil {
 			return nil, err
 		}
+		if b.config.AvailabilityZone == "" {
			b.config.AvailabilityZone = *resp.Subnets[0].AvailabilityZone
-		log.Printf("[INFO] AZ found: '%s'", b.config.AvailabilityZone)
+			log.Printf("[INFO] AvailabilityZone found: '%s'", b.config.AvailabilityZone)
+		}
+		if b.config.VpcId == "" {
+			b.config.VpcId = *resp.Subnets[0].VpcId
+			log.Printf("[INFO] VpcId found: '%s'", b.config.VpcId)
+		}
 	}
 
 	// Setup the state bag and initial state for the steps
 	state := new(multistep.BasicStateBag)
 	state.Put("config", b.config)
 	state.Put("ec2", ec2conn)
+	state.Put("awsSession", session)
 	state.Put("hook", hook)
 	state.Put("ui", ui)
 
+	var instanceStep multistep.Step
+
+	if b.config.IsSpotInstance() {
+		instanceStep = &awscommon.StepRunSpotInstance{
+			AssociatePublicIpAddress:          b.config.AssociatePublicIpAddress,
+			AvailabilityZone:                  b.config.AvailabilityZone,
+			BlockDevices:                      b.config.BlockDevices,
+			Ctx:                               b.config.ctx,
+			Debug:                             b.config.PackerDebug,
+			EbsOptimized:                      b.config.EbsOptimized,
+			ExpectedRootDevice:                "ebs",
+			IamInstanceProfile:                b.config.IamInstanceProfile,
+			InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
+			InstanceType:                      b.config.InstanceType,
+			SourceAMI:                         b.config.SourceAmi,
+			SpotPrice:                         b.config.SpotPrice,
+			SpotPriceProduct:                  b.config.SpotPriceAutoProduct,
+			SubnetId:                          b.config.SubnetId,
+			Tags:                              b.config.RunTags,
+			UserData:                          b.config.UserData,
+			UserDataFile:                      b.config.UserDataFile,
+			VolumeTags:                        b.config.VolumeRunTags,
+		}
+	} else {
+		instanceStep = &awscommon.StepRunSourceInstance{
+			AssociatePublicIpAddress:          b.config.AssociatePublicIpAddress,
+			AvailabilityZone:                  b.config.AvailabilityZone,
+			BlockDevices:                      b.config.BlockDevices,
+			Ctx:                               b.config.ctx,
+			Debug:                             b.config.PackerDebug,
+			EbsOptimized:                      b.config.EbsOptimized,
+			ExpectedRootDevice:                "ebs",
+			IamInstanceProfile:                b.config.IamInstanceProfile,
+			InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
+			InstanceType:                      b.config.InstanceType,
+			IsRestricted:                      b.config.IsChinaCloud() || b.config.IsGovCloud(),
+			SourceAMI:                         b.config.SourceAmi,
+			SubnetId:                          b.config.SubnetId,
+			Tags:                              b.config.RunTags,
+			UserData:                          b.config.UserData,
+			UserDataFile:                      b.config.UserDataFile,
+			VolumeTags:                        b.config.VolumeRunTags,
+		}
+	}
+
 	// Build the steps
 	steps := []multistep.Step{
 		&awscommon.StepPreValidate{
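Both Prepare and Run branch on b.config.IsSpotInstance(), and the step list below receives whichever instance step was chosen. The method itself is outside this excerpt; judging from the check that StepStopEBSBackedInstance used to perform on SpotPrice, it presumably reduces to something like the sketch below — treat this as an assumption about awscommon.RunConfig, not code from the change:

// Assumed shape of the helper the builder relies on; the real method may differ.
func (c *RunConfig) IsSpotInstance() bool {
	// A spot request is only made when a spot price is actually configured.
	return c.SpotPrice != "" && c.SpotPrice != "0"
}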
@@ -115,7 +170,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		},
 		&awscommon.StepSourceAMIInfo{
 			SourceAmi:                b.config.SourceAmi,
-			EnhancedNetworking:       b.config.AMIEnhancedNetworking,
+			EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
+			EnableAMIENASupport:      b.config.AMIENASupport,
 			AmiFilters:               b.config.SourceAmiFilter,
 		},
 		&awscommon.StepKeyPair{
@@ -130,33 +186,12 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 			SecurityGroupIds:      b.config.SecurityGroupIds,
 			CommConfig:            &b.config.RunConfig.Comm,
 			VpcId:                 b.config.VpcId,
+			TemporarySGSourceCidr: b.config.TemporarySGSourceCidr,
 		},
 		&stepCleanupVolumes{
 			BlockDevices: b.config.BlockDevices,
 		},
-		&awscommon.StepRunSourceInstance{
-			Debug:                             b.config.PackerDebug,
-			ExpectedRootDevice:                "ebs",
-			SpotPrice:                         b.config.SpotPrice,
-			SpotPriceProduct:                  b.config.SpotPriceAutoProduct,
-			InstanceType:                      b.config.InstanceType,
-			UserData:                          b.config.UserData,
-			UserDataFile:                      b.config.UserDataFile,
-			SourceAMI:                         b.config.SourceAmi,
-			IamInstanceProfile:                b.config.IamInstanceProfile,
-			SubnetId:                          b.config.SubnetId,
-			AssociatePublicIpAddress:          b.config.AssociatePublicIpAddress,
-			EbsOptimized:                      b.config.EbsOptimized,
-			AvailabilityZone:                  b.config.AvailabilityZone,
-			BlockDevices:                      b.config.BlockDevices,
-			Tags:                              b.config.RunTags,
-			Ctx:                               b.config.ctx,
-			InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
-		},
-		&awscommon.StepTagEBSVolumes{
-			VolumeRunTags: b.config.VolumeRunTags,
-			Ctx:           b.config.ctx,
-		},
+		instanceStep,
 		&awscommon.StepGetPassword{
 			Debug: b.config.PackerDebug,
 			Comm:  &b.config.RunConfig.Comm,
@@ -166,7 +201,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 			Config: &b.config.RunConfig.Comm,
 			Host: awscommon.SSHHost(
 				ec2conn,
-				b.config.SSHPrivateIp),
+				b.config.SSHInterface),
 			SSHConfig: awscommon.SSHConfig(
 				b.config.RunConfig.Comm.SSHAgentAuth,
 				b.config.RunConfig.Comm.SSHUsername,
@@ -174,26 +209,32 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		},
 		&common.StepProvision{},
 		&awscommon.StepStopEBSBackedInstance{
-			SpotPrice:           b.config.SpotPrice,
+			Skip:                b.config.IsSpotInstance(),
 			DisableStopInstance: b.config.DisableStopInstance,
 		},
 		&awscommon.StepModifyEBSBackedInstance{
-			EnableEnhancedNetworking: b.config.AMIEnhancedNetworking,
+			EnableAMISriovNetSupport: b.config.AMISriovNetSupport,
+			EnableAMIENASupport:      b.config.AMIENASupport,
 		},
 		&awscommon.StepDeregisterAMI{
+			AccessConfig:        &b.config.AccessConfig,
 			ForceDeregister:     b.config.AMIForceDeregister,
 			ForceDeleteSnapshot: b.config.AMIForceDeleteSnapshot,
 			AMIName:             b.config.AMIName,
+			Regions:             b.config.AMIRegions,
 		},
 		&stepCreateAMI{},
 		&awscommon.StepCreateEncryptedAMICopy{
 			KeyID:             b.config.AMIKmsKeyId,
 			EncryptBootVolume: b.config.AMIEncryptBootVolume,
 			Name:              b.config.AMIName,
+			AMIMappings:       b.config.AMIBlockDevices.AMIMappings,
 		},
 		&awscommon.StepAMIRegionCopy{
 			AccessConfig:      &b.config.AccessConfig,
 			Regions:           b.config.AMIRegions,
+			RegionKeyIds:      b.config.AMIRegionKMSKeyIDs,
+			EncryptBootVolume: b.config.AMIEncryptBootVolume,
 			Name:              b.config.AMIName,
 		},
 		&awscommon.StepModifyAMIAttributes{
@@ -230,7 +271,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 	artifact := &awscommon.Artifact{
 		Amis:           state.Get("amis").(map[string]string),
 		BuilderIdValue: BuilderId,
-		Conn:           ec2conn,
+		Session:        session,
 	}
 
 	return artifact, nil
@@ -1,16 +1,18 @@
+/*
+Deregister the test image with
+aws ec2 deregister-image --image-id $(aws ec2 describe-images --output text --filters "Name=name,Values=packer-test-packer-test-dereg" --query 'Images[*].{ID:ImageId}')
+*/
 package ebs
 
 import (
 	"fmt"
-	"os"
 	"testing"
 
 	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/mitchellh/packer/builder/amazon/common"
-	builderT "github.com/mitchellh/packer/helper/builder/testing"
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/builder/amazon/common"
+	builderT "github.com/hashicorp/packer/helper/builder/testing"
+	"github.com/hashicorp/packer/packer"
 )
 
 func TestBuilderAcc_basic(t *testing.T) {
@@ -59,14 +61,14 @@ func TestBuilderAcc_forceDeleteSnapshot(t *testing.T) {
 
 	// Get image data by AMI name
 	ec2conn, _ := testEC2Conn()
-	imageResp, _ := ec2conn.DescribeImages(
-		&ec2.DescribeImagesInput{Filters: []*ec2.Filter{
+	describeInput := &ec2.DescribeImagesInput{Filters: []*ec2.Filter{
 		{
 			Name:   aws.String("name"),
 			Values: []*string{aws.String(amiName)},
 		},
-		}},
-	)
+	}}
+	ec2conn.WaitUntilImageExists(describeInput)
+	imageResp, _ := ec2conn.DescribeImages(describeInput)
 	image := imageResp.Images[0]
 
 	// Get snapshot ids for image
@@ -244,26 +246,15 @@ func checkBootEncrypted() builderT.TestCheckFunc {
 }
 
 func testAccPreCheck(t *testing.T) {
-	if v := os.Getenv("AWS_ACCESS_KEY_ID"); v == "" {
-		t.Fatal("AWS_ACCESS_KEY_ID must be set for acceptance tests")
-	}
-
-	if v := os.Getenv("AWS_SECRET_ACCESS_KEY"); v == "" {
-		t.Fatal("AWS_SECRET_ACCESS_KEY must be set for acceptance tests")
-	}
 }
 
 func testEC2Conn() (*ec2.EC2, error) {
 	access := &common.AccessConfig{RawRegion: "us-east-1"}
-	config, err := access.Config()
+	session, err := access.Session()
 	if err != nil {
 		return nil, err
 	}
 
-	session, err := session.NewSession(config)
-	if err != nil {
-		return nil, err
-	}
 	return ec2.New(session), nil
 }
 
@@ -303,7 +294,7 @@ const testBuilderAccForceDeregister = `
     "source_ami": "ami-76b2a71e",
     "ssh_username": "ubuntu",
    "force_deregister": "%s",
-    "ami_name": "packer-test-%s"
+    "ami_name": "%s"
  }]
}
`
@@ -318,7 +309,7 @@ const testBuilderAccForceDeleteSnapshot = `
    "ssh_username": "ubuntu",
    "force_deregister": "%s",
    "force_delete_snapshot": "%s",
-    "ami_name": "packer-test-%s"
+    "ami_name": "%s"
  }]
}
`
@@ -3,7 +3,7 @@ package ebs
 import (
 	"testing"
 
-	"github.com/mitchellh/packer/packer"
+	"github.com/hashicorp/packer/packer"
 )
 
 func testConfig() map[string]interface{} {
Some files were not shown because too many files have changed in this diff.