Merge branch 'master' into subnet_az_discovery

Commit: e4930e7b57
@ -7,3 +7,8 @@
.DS_Store
.vagrant
test/.env

website/.bundle
website/vendor

packer-test*.log
.travis.yml (15 changes)

@ -3,28 +3,17 @@ sudo: false
language: go

go:
  - 1.2
  - 1.3
  - 1.4
  - 1.5
  - tip

install: make updatedeps

script:
  - GOMAXPROCS=2 make test
  #- go test -race ./...
  - GOMAXPROCS=2 make ci

branches:
  only:
    - master

notifications:
  irc:
    channels:
      - "irc.freenode.org#packer-tool"
    skip_join: true
    use_notice: true

matrix:
  fast_finish: true
  allow_failures:
CHANGELOG.md (322 changes)

@ -1,23 +1,327 @@
|
|||
## 0.8.0 (unreleased)
|
||||
## 0.9.0 (Unreleased)
|
||||
|
||||
BACKWARDS INCOMPATIBILITIES:
|
||||
|
||||
* Packer now ships as a single binary, including plugins. If you install packer 0.9.0 over a previous packer installation, **you must delete all of the packer-* plugin files** or packer will load out-of-date plugins from disk.
|
||||
* Release binaries are now provided via <https://releases.hashicorp.com>.
|
||||
* Packer 0.9.0 is now built with Go 1.5. Future versions will drop support for building with Go 1.4.
|
||||
|
||||
FEATURES:
|
||||
|
||||
* **Artifice post-processor**: Override packer artifacts during post-
|
||||
processing. This allows you to extract artifacts from a packer builder
|
||||
and use them with other post-processors like compress, docker, and Atlas.
|
||||
* **New `vmware-esxi` feature**: Packer can now export images from vCloud or vSphere during the build. [GH-1921]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* builder/openstack: Add rackconnect_wait for Rackspace customers to wait for
|
||||
RackConnect data to appear
|
||||
* builder/openstack: Add ssh_interface option for rackconnect for users that
  have prohibitive firewalls
|
||||
* core: Packer plugins are now compiled into the main binary, reducing file size and build times, and making packer easier to install. The overall plugin architecture has not changed and third-party plugins can still be loaded from disk. Please make sure your plugins are up-to-date! [GH-2854]
|
||||
* core: Packer now indicates line numbers for template parse errors [GH-2742]
|
||||
* core: Scripts are executed via `/usr/bin/env bash` instead of `/bin/bash` for broader compatibility. [GH-2913]
|
||||
* core: `target_path` for builder downloads can now be specified. [GH-2600]
|
||||
* builder/amazon: Add support for `ebs_optimized` [GH-2806]
|
||||
* builder/amazon: You can now specify `0` for `spot_price` to switch to on demand instances [GH-2845]
|
||||
* builder/google: `account_file` can now be provided as a JSON string [GH-2811]
|
||||
* builder/google: added support for `preemptible` instances [GH-2982]
|
||||
* builder/parallels: Improve support for Parallels 11 [GH-2662]
|
||||
* builder/parallels: Parallels disks are now compacted by default [GH-2731]
|
||||
* builder/parallels: Packer will look for Parallels in `/Applications/Parallels Desktop.app` if it is not detected automatically [GH-2839]
|
||||
* builder/docker: Now works with remote hosts, such as boot2docker [GH-2846]
|
||||
* builder/qemu: qcow2 images are now compacted by default [GH-2748]
|
||||
* builder/qemu: qcow2 images can now be compressed [GH-2748]
|
||||
* builder/qemu: Now specifies `virtio-scsi` by default [GH-2422]
|
||||
* builder/qemu: Now checks for version-specific options [GH-2376]
|
||||
* builder/docker-import: Can now import Artifice artifacts [GH-2718]
|
||||
* provisioner/puppet: Now accepts the `extra_arguments` parameter [GH-2635]
|
||||
* post-processor/atlas: Added support for compile ID. [GH-2775]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* builder/amazon: Remove deprecated ec2-upload-bundle parameter [GH-1931]
|
||||
* builder/digitalocean: Ignore invalid fields from the ever-changing v2 API
|
||||
* builder/docker: Fixed hang on prompt while copying script
|
||||
* core: Random number generator is now seeded. [GH-2640]
|
||||
* core: Packer should now have a lot less race conditions [GH-2824]
|
||||
* builder/amazon: The `no_device` option for block device mappings is now handled correctly [GH-2398]
|
||||
* builder/amazon: AMI name validation now matches Amazon's spec [GH-2774]
|
||||
* builder/amazon: Use snapshot size when volume size is unspecified [GH-2480]
|
||||
* builder/parallels: Now supports interpolation in `prlctl_post` [GH-2828]
|
||||
* builder/vmware: `format` option is now read correctly [GH-2892]
|
||||
* provisioner/shell: No longer leaves temp scripts behind [GH-1536]
|
||||
* provisioner/winrm: Now waits for reboot to complete before continuing with provisioning [GH-2568]
|
||||
* post-processor/artifice: Fix truncation of files downloaded from Docker. [GH-2793]
|
||||
|
||||
|
||||
## 0.8.6 (Aug 22, 2015)
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* builder/docker: Now supports Download so it can be used with the file
|
||||
provisioner to download a file from a container. [GH-2585]
|
||||
* builder/docker: Now verifies that the artifact will be used before the build
  starts, unless the `discard` option is specified. This prevents failures
  after the build completes. [GH-2626]
|
||||
* post-processor/artifice: Now supports glob-like syntax for filenames [GH-2619]
|
||||
* post-processor/vagrant: Like the compress post-processor, vagrant now uses a
|
||||
parallel gzip algorithm to compress vagrant boxes. [GH-2590]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* core: When `iso_url` is a local file and the checksum is invalid, the local
|
||||
file will no longer be deleted. [GH-2603]
|
||||
* builder/parallels: Fix interpolation in `parallels_tools_guest_path` [GH-2543]
|
||||
|
||||
## 0.8.5 (Aug 10, 2015)
|
||||
|
||||
FEATURES:
|
||||
|
||||
* **[Beta]** Artifice post-processor: Override packer artifacts during post-
|
||||
processing. This allows you to extract artifacts from a packer builder
|
||||
and use them with other post-processors like compress, docker, and Atlas.
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* Many docs have been updated and corrected; big thanks to our contributors!
|
||||
* builder/openstack: Add debug logging for IP addresses used for SSH [GH-2513]
|
||||
* builder/openstack: Add option to use existing SSH keypair [GH-2512]
|
||||
* builder/openstack: Add support for Glance metadata [GH-2434]
|
||||
* builder/qemu and builder/vmware: Packer's VNC connection no longer asks for
|
||||
an exclusive connection [GH-2522]
|
||||
* provisioner/salt-masterless: Can now customize salt remote directories [GH-2519]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* builder/amazon: Improve instance cleanup by storing id sooner [GH-2404]
|
||||
* builder/amazon: Only fetch windows password when using WinRM communicator [GH-2538]
|
||||
* builder/openstack: Support IPv6 SSH address [GH-2450]
|
||||
* builder/openstack: Track new IP address discovered during RackConnect [GH-2514]
|
||||
* builder/qemu: Add 100ms delay between VNC key events. [GH-2415]
|
||||
* post-processor/atlas: atlas_url configuration option works now [GH-2478]
|
||||
* post-processor/compress: Now supports interpolation in output config [GH-2414]
|
||||
* provisioner/powershell: Elevated runs now receive environment variables [GH-2378]
|
||||
* provisioner/salt-masterless: Clarify error messages when we can't create or
|
||||
write to the temp directory [GH-2518]
|
||||
* provisioner/salt-masterless: Copy state even if /srv/salt exists already [GH-1699]
|
||||
* provisioner/salt-masterless: Make sure /etc/salt exists before writing to it [GH-2520]
|
||||
* provisioner/winrm: Connect to the correct port when using NAT with
|
||||
VirtualBox / VMware [GH-2399]
|
||||
|
||||
Note: 0.8.3 was pulled and 0.8.4 was skipped.
|
||||
|
||||
## 0.8.2 (July 17, 2015)
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* builder/docker: Add option to use a Pty [GH-2425]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* core: Fix crash when `min_packer_version` is specified in a template. [GH-2385]
|
||||
* builder/amazon: Fix EC2 devices being included in EBS mappings [GH-2459]
|
||||
* builder/googlecompute: Fix default name for GCE images [GH-2400]
|
||||
* builder/null: Fix error message with missing ssh_host [GH-2407]
|
||||
* builder/virtualbox: Use --portcount on VirtualBox 5.x [GH-2438]
|
||||
* provisioner/puppet: Packer now correctly handles a directory for manifest_file [GH-2463]
|
||||
* provisioner/winrm: Fix potential crash with WinRM [GH-2416]
|
||||
|
||||
## 0.8.1 (July 2, 2015)
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* builder/amazon: When debug mode is enabled, the Windows administrator
|
||||
password for Windows instances will be shown [GH-2351]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* core: `min_packer_version` field in configs work [GH-2356]
|
||||
* core: The `build_name` and `build_type` functions work in provisioners [GH-2367]
|
||||
* core: Handle timeout in SSH handshake [GH-2333]
|
||||
* command/build: Fix reading configuration from stdin [GH-2366]
|
||||
* builder/amazon: Fix issue with sharing AMIs when using `ami_users` [GH-2308]
|
||||
* builder/amazon: Fix issue when using multiple Security Groups [GH-2381]
|
||||
* builder/amazon: Fix for tag creation when creating new ec2 instance [GH-2317]
|
||||
* builder/amazon: Fix issue with creating AMIs with multiple device mappings [GH-2320]
|
||||
* builder/amazon: Fix failing AMI snapshot tagging when copying to other
|
||||
regions [GH-2316]
|
||||
* builder/amazon: Fix setting AMI launch permissions [GH-2348]
|
||||
* builder/amazon: Fix spot instance cleanup to remove the correct request [GH-2327]
|
||||
* builder/amazon: Fix `bundle_prefix` not interpolating `timestamp` [GH-2352]
|
||||
* builder/amazon-instance: Fix issue with creating AMIs without specifying a
|
||||
virtualization type [GH-2330]
|
||||
* builder/digitalocean: Fix builder using private IP instead of public IP [GH-2339]
|
||||
* builder/google: Set default communicator settings properly [GH-2353]
|
||||
* builder/vmware-iso: Setting `checksum_type` to `none` for ESX builds
|
||||
now works [GH-2323]
|
||||
* provisioner/chef: Use knife config file vs command-line params to
|
||||
clean up nodes so full set of features can be used [GH-2306]
|
||||
* post-processor/compress: Fixed crash in compress post-processor plugin [GH-2311]
|
||||
|
||||
## 0.8.0 (June 23, 2015)
|
||||
|
||||
BACKWARDS INCOMPATIBILITIES:
|
||||
|
||||
* core: SSH connection will no longer request a PTY by default. This
|
||||
can be enabled per builder.
|
||||
* builder/digitalocean: no longer supports the v1 API which has been
|
||||
deprecated for some time. Most configurations should continue to
|
||||
work as long as you use the `api_token` field for auth.
|
||||
* builder/digitalocean: `image`, `region`, and `size` are now required.
|
||||
* builder/openstack: auth parameters have been changed to better
|
||||
reflect OS terminology. Existing environment variables still work.
|
||||
|
||||
FEATURES:
|
||||
|
||||
* **WinRM:** You can now connect via WinRM with almost every builder.
|
||||
See the docs for more info. [GH-2239]
|
||||
* **Windows AWS Support:** Windows AMIs can now be built without any
|
||||
external plugins: Packer will start a Windows instance, get the
|
||||
admin password, and can use WinRM (above) to connect through. [GH-2240]
|
||||
* **Disable SSH:** Set `communicator` to "none" in any builder to disable SSH
|
||||
connections. Note that provisioners won't work if this is done. [GH-1591]
|
||||
* **SSH Agent Forwarding:** SSH Agent Forwarding will now be enabled
|
||||
to allow access to remote servers such as private git repos. [GH-1066]
|
||||
* **SSH Bastion Hosts:** You can now specify a bastion host for
|
||||
SSH access (works with all builders). [GH-387]
|
||||
* **OpenStack v3 Identity:** The OpenStack builder now supports the
|
||||
v3 identity API.
|
||||
* **Docker builder supports SSH**: The Docker builder now supports containers
|
||||
with SSH, just set `communicator` to "ssh" [GH-2244]
|
||||
* **File provisioner can download**: The file provisioner can now download
|
||||
files out of the build process. [GH-1909]
|
||||
* **New config function: `build_name`**: The name of the currently running
|
||||
build. [GH-2232]
|
||||
* **New config function: `build_type`**: The type of the currently running
|
||||
builder. This is useful for provisioners. [GH-2232]
|
||||
* **New config function: `template_dir`**: The directory to the template
|
||||
being built. This should be used for template-relative paths. [GH-54]
|
||||
* **New provisioner: shell-local**: Runs a local shell script. [GH-770]
|
||||
* **New provisioner: powershell**: Provision Windows machines
|
||||
with PowerShell scripts. [GH-2243]
|
||||
* **New provisioner: windows-shell**: Provision Windows machines with
|
||||
batch files. [GH-2243]
|
||||
* **New provisioner: windows-restart**: Restart a Windows machines and
|
||||
wait for it to come back online. [GH-2243]
|
||||
* **Compress post-processor supports multiple algorithms:** The compress
|
||||
post-processor now supports lz4 compression and compresses gzip in
|
||||
parallel for much faster throughput.
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* core: Interrupt handling for SIGTERM signal as well. [GH-1858]
|
||||
* core: HTTP downloads support resuming [GH-2106]
|
||||
* builder/*: Add `ssh_handshake_attempts` to configure the number of
|
||||
handshake attempts done before failure [GH-2237]
|
||||
* builder/amazon: Add `force_deregister` option for automatic AMI
|
||||
deregistration [GH-2221]
|
||||
* builder/amazon: Now applies tags to EBS snapshots [GH-2212]
|
||||
* builder/amazon: Clean up orphaned volumes from Source AMIs [GH-1783]
|
||||
* builder/amazon: Support custom keypairs [GH-1837]
|
||||
* builder/amazon-chroot: Can now resize the root volume of the resulting
|
||||
AMI with the `root_volume_size` option [GH-2289]
|
||||
* builder/amazon-chroot: Add `mount_options` configuration option for providing
|
||||
options to the `mount` command [GH-2296]
|
||||
* builder/digitalocean: Save SSH key to pwd if debug mode is on. [GH-1829]
|
||||
* builder/digitalocean: User data support [GH-2113]
|
||||
* builder/googlecompute: Option to use internal IP for connections. [GH-2152]
|
||||
* builder/parallels: Support Parallels Desktop 11 [GH-2199]
|
||||
* builder/openstack: Add `rackconnect_wait` for Rackspace customers to wait for
|
||||
RackConnect data to appear
|
||||
* builder/openstack: Add `ssh_interface` option for rackconnect for users that
  have prohibitive firewalls
|
||||
* builder/openstack: Flavor names can be used as well as refs
|
||||
* builder/openstack: Add `availability_zone` [GH-2016]
|
||||
* builder/openstack: Machine will be stopped prior to imaging if the
|
||||
cluster supports the `startstop` extension. [GH-2223]
|
||||
* builder/openstack: Support for user data [GH-2224]
|
||||
* builder/qemu: Default accelerator to "tcg" on Windows [GH-2291]
|
||||
* builder/virtualbox: Added option: `ssh_skip_nat_mapping` to skip the
|
||||
automatic port forward for SSH and to use the guest port directly. [GH-1078]
|
||||
* builder/virtualbox: Added SCSI support
|
||||
* postprocessor/vagrant-cloud: Fixed failing on response
|
||||
* builder/vmware: Support for additional disks [GH-1382]
|
||||
* builder/vmware: Can now customize the template used for adding disks [GH-2254]
|
||||
* command/fix: After fixing, the template is validated [GH-2228]
|
||||
* command/push: Add `-name` flag for specifying name from CLI [GH-2042]
|
||||
* command/push: Push configuration in templates supports variables [GH-1861]
|
||||
* post-processor/docker-save: Can be chained [GH-2179]
|
||||
* post-processor/docker-tag: Support `force` option [GH-2055]
|
||||
* post-processor/docker-tag: Can be chained [GH-2179]
|
||||
* post-processor/vsphere: Make more fields optional, support empty
|
||||
resource pools. [GH-1868]
|
||||
* provisioner/puppet-masterless: `working_directory` option [GH-1831]
|
||||
* provisioner/puppet-masterless: `packer_build_name` and
|
||||
`packer_build_type` are default facts. [GH-1878]
|
||||
* provisioner/puppet-server: `ignore_exit_codes` option added [GH-2280]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* core: Fix potential panic for post-processor plugin exits [GH-2098]
|
||||
* core: `PACKER_CONFIG` may point to a non-existent file [GH-2226]
|
||||
* builder/amazon: Allow spaces in AMI names when using `clean_ami_name` [GH-2182]
|
||||
* builder/amazon: Remove deprecated ec2-upload-bundle parameter [GH-1931]
|
||||
* builder/amazon: Use IAM Profile to upload bundle if provided [GH-1985]
|
||||
* builder/amazon: Use correct exit code after SSH authentication failed [GH-2004]
|
||||
* builder/amazon: Retry finding created instance for eventual
|
||||
consistency. [GH-2129]
|
||||
* builder/amazon: If no AZ is specified, use AZ chosen automatically by
|
||||
AWS for spot instance. [GH-2017]
|
||||
* builder/amazon: Private key file (only available in debug mode)
|
||||
is deleted on cleanup. [GH-1801]
|
||||
* builder/amazon: AMI copy won't copy to the source region [GH-2123]
|
||||
* builder/amazon: Validate AMI doesn't exist with name prior to build [GH-1774]
|
||||
* builder/amazon: Improved retry logic around waiting for instances. [GH-1764]
|
||||
* builder/amazon: Fix issues with creating Block Devices. [GH-2195]
|
||||
* builder/amazon/chroot: Retry waiting for disk attachments [GH-2046]
|
||||
* builder/amazon/chroot: Only unmount path if it is mounted [GH-2054]
|
||||
* builder/amazon/instance: Use `-i` in sudo commands so PATH is inherited. [GH-1930]
|
||||
* builder/amazon/instance: Use `--region` flag for bundle upload command. [GH-1931]
|
||||
* builder/digitalocean: Wait for droplet to unlock before changing state,
|
||||
should lower the "pending event" errors.
|
||||
* builder/digitalocean: Ignore invalid fields from the ever-changing v2 API
|
||||
* builder/digitalocean: Private images can be used as a source [GH-1792]
|
||||
* builder/docker: Fixed hang on prompt while copying script
|
||||
* builder/docker: Use `docker exec` for newer versions of Docker for
|
||||
running scripts [GH-1993]
|
||||
* builder/docker: Fix crash that could occur at certain timed ctrl-c [GH-1838]
|
||||
* builder/docker: validate that `export_path` is not a directory [GH-2105]
|
||||
* builder/google: `ssh_timeout` is respected [GH-1781]
|
||||
* builder/openstack: `ssh_interface` can be used to specify the interface
|
||||
to retrieve the SSH IP from. [GH-2220]
|
||||
* builder/qemu: Add `disk_discard` option [GH-2120]
|
||||
* builder/qemu: Use proper SSH port, not hardcoded to 22. [GH-2236]
|
||||
* builder/qemu: Find unused SSH port if SSH port is taken. [GH-2032]
|
||||
* builder/virtualbox: Bind HTTP server to IPv4, which is more compatible with
|
||||
OS installers. [GH-1709]
|
||||
* builder/virtualbox: Remove the floppy controller in addition to the
|
||||
floppy disk. [GH-1879]
|
||||
* builder/virtualbox: Fixed regression where downloading ISO without a
|
||||
".iso" extension didn't work. [GH-1839]
|
||||
* builder/virtualbox: Output dir is verified at runtime, not template
|
||||
validation time. [GH-2233]
|
||||
* builder/virtualbox: Find unused SSH port if SSH port is taken. [GH-2032]
|
||||
* builder/vmware: Add 100ms delay between keystrokes to avoid subtle
|
||||
timing issues in most cases. [GH-1663]
|
||||
* builder/vmware: Bind HTTP server to IPv4, which is more compatible with
|
||||
OS installers. [GH-1709]
|
||||
* builder/vmware: Case-insensitive match of MAC address to find IP [GH-1989]
|
||||
* builder/vmware: More robust IP parsing from ifconfig output [GH-1999]
|
||||
* builder/vmware: Nested output directories for ESXi work [GH-2174]
|
||||
* builder/vmware: Output dir is verified at runtime, not template
|
||||
validation time. [GH-2233]
|
||||
* command/fix: For the `virtualbox` to `virtualbox-iso` builder rename,
|
||||
provisioner overrides are now also fixed [GH-2231]
|
||||
* command/validate: don't crash for invalid builds [GH-2139]
|
||||
* post-processor/atlas: Find common archive prefix for Windows [GH-1874]
|
||||
* post-processor/atlas: Fix index out of range panic [GH-1959]
|
||||
* post-processor/vagrant-cloud: Fixed failing on response
|
||||
* post-processor/vagrant-cloud: Don't delete version on error [GH-2014]
|
||||
* post-processor/vagrant-cloud: Retry failed uploads a few times
|
||||
* provisioner/chef-client: Fix permissions issues on default dir [GH-2255]
|
||||
* provisioner/chef-client: Node cleanup works now. [GH-2257]
|
||||
* provisioner/puppet-masterless: Allow manifest_file to be a directory
|
||||
* provisioner/salt-masterless: Add `--retcode-passthrough` to salt-call
|
||||
* provisioner/shell: chmod executable script to 0755, not 0777 [GH-1708]
|
||||
* provisioner/shell: inline commands failing will fail the provisioner [GH-2069]
|
||||
* provisioner/shell: single quotes in env vars are escaped [GH-2229]
|
||||
* provisioner/shell: Temporary file is deleted after run [GH-2259]
|
||||
* provisioner/shell: Randomize default script name to avoid strange
|
||||
race issues from Windows. [GH-2270]
|
||||
|
||||
## 0.7.5 (December 9, 2014)
|
||||
|
||||
|
|
|
@ -53,8 +53,8 @@ it raises the chances we can quickly merge or address your contributions.
If you have never worked with Go before, you will have to complete the
following steps in order to be able to compile and test Packer.

1. Install Go. Make sure the Go version is at least Go 1.2. Packer will not work with anything less than
   Go 1.2. On a Mac, you can `brew install go` to install Go 1.2.
1. Install Go. Make sure the Go version is at least Go 1.4. Packer will not work with anything less than
   Go 1.4. On a Mac, you can `brew install go` to install Go 1.4.

2. Set and export the `GOPATH` environment variable and update your `PATH`.
   For example, you can add to your `.bash_profile`.
|
Makefile (91 changes)

@ -1,33 +1,84 @@
|
|||
TEST?=./...
|
||||
VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods \
|
||||
-nilfunc -printf -rangeloops -shift -structtags -unsafeptr
|
||||
# Get the current full sha from git
|
||||
GITSHA:=$(shell git rev-parse HEAD)
|
||||
# Get the current local branch name from git (if we can, this may be blank)
|
||||
GITBRANCH:=$(shell git symbolic-ref --short HEAD 2>/dev/null)
|
||||
|
||||
default: test
|
||||
default: test dev
|
||||
|
||||
bin:
|
||||
ci: deps test
|
||||
|
||||
release: updatedeps test releasebin
|
||||
|
||||
bin: deps
|
||||
@echo "WARN: 'make bin' is for debug / test builds only. Use 'make release' for release builds."
|
||||
@sh -c "$(CURDIR)/scripts/build.sh"
|
||||
|
||||
dev:
|
||||
@TF_DEV=1 sh -c "$(CURDIR)/scripts/build.sh"
|
||||
releasebin: deps
|
||||
@grep 'const VersionPrerelease = ""' version.go > /dev/null ; if [ $$? -ne 0 ]; then \
|
||||
echo "ERROR: You must remove prerelease tags from version.go prior to release."; \
|
||||
exit 1; \
|
||||
fi
|
||||
@sh -c "$(CURDIR)/scripts/build.sh"
|
||||
|
||||
test:
|
||||
go test $(TEST) $(TESTARGS) -timeout=10s
|
||||
@$(MAKE) vet
|
||||
deps:
|
||||
go get -v -d ./...
|
||||
|
||||
testrace:
|
||||
go test -race $(TEST) $(TESTARGS)
|
||||
dev: deps
|
||||
@grep 'const VersionPrerelease = ""' version.go > /dev/null ; if [ $$? -eq 0 ]; then \
|
||||
echo "ERROR: You must add prerelease tags to version.go prior to making a dev build."; \
|
||||
exit 1; \
|
||||
fi
|
||||
@PACKER_DEV=1 sh -c "$(CURDIR)/scripts/build.sh"
|
||||
|
||||
updatedeps:
|
||||
go get -d -v -p 2 ./...
|
||||
# generate runs `go generate` to build the dynamically generated
|
||||
# source files.
|
||||
generate: deps
|
||||
go generate ./...
|
||||
|
||||
vet:
|
||||
@go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \
|
||||
test: deps
|
||||
go test $(TEST) $(TESTARGS) -timeout=15s | tee packer-test.log
|
||||
@go vet 2>/dev/null ; if [ $$? -eq 3 ]; then \
|
||||
go get golang.org/x/tools/cmd/vet; \
|
||||
fi
|
||||
@go tool vet $(VETARGS) . ; if [ $$? -eq 1 ]; then \
|
||||
echo ""; \
|
||||
echo "Vet found suspicious constructs. Please check the reported constructs"; \
|
||||
echo "and fix them if necessary before submitting the code for reviewal."; \
|
||||
@go vet $(TEST) ; if [ $$? -eq 1 ]; then \
|
||||
echo "ERROR: Vet found problems in the code."; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
.PHONY: bin default test updatedeps vet
|
||||
# testacc runs acceptance tests
|
||||
testacc: deps generate
|
||||
@echo "WARN: Acceptance tests will take a long time to run and may cost money. Ctrl-C if you want to cancel."
|
||||
PACKER_ACC=1 go test -v $(TEST) $(TESTARGS) -timeout=45m | tee packer-test-acc.log
|
||||
|
||||
testrace: deps
|
||||
go test -race $(TEST) $(TESTARGS) -timeout=15s | tee packer-test-race.log
|
||||
|
||||
# `go get -u` causes git to revert packer to the master branch. This causes all
# kinds of headaches. We record the git sha when make starts and try to correct it
# if we detect drift. DO NOT use `git checkout -f` for this because it will wipe
# out your changes without asking.
|
||||
updatedeps:
|
||||
@echo "INFO: Currently on $(GITBRANCH) ($(GITSHA))"
|
||||
@git diff-index --quiet HEAD ; if [ $$? -ne 0 ]; then \
|
||||
echo "ERROR: Your git working tree has uncommitted changes. updatedeps will fail. Please stash or commit your changes first."; \
|
||||
exit 1; \
|
||||
fi
|
||||
go get -u github.com/mitchellh/gox
|
||||
go get -u golang.org/x/tools/cmd/stringer
|
||||
go list ./... \
|
||||
| xargs go list -f '{{join .Deps "\n"}}' \
|
||||
| grep -v github.com/mitchellh/packer \
|
||||
| grep -v '/internal/' \
|
||||
| sort -u \
|
||||
| xargs go get -f -u -v -d ; if [ $$? -ne 0 ]; then \
|
||||
echo "ERROR: go get failed. Your git branch may have changed; you were on $(GITBRANCH) ($(GITSHA))."; \
|
||||
fi
|
||||
@if [ "$(GITBRANCH)" != "" ]; then git checkout -q $(GITBRANCH); else git checkout -q $(GITSHA); fi
|
||||
@if [ `git rev-parse HEAD` != "$(GITSHA)" ]; then \
|
||||
echo "ERROR: git checkout has drifted and we weren't able to correct it. Was $(GITBRANCH) ($(GITSHA))"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "INFO: Currently on $(GITBRANCH) ($(GITSHA))"
|
||||
|
||||
.PHONY: bin checkversion ci default deps generate releasebin test testacc testrace updatedeps
|
||||
|
|
README.md (47 changes)

@ -1,6 +1,7 @@
|
|||
# Packer
|
||||
|
||||
[![Build Status](https://travis-ci.org/mitchellh/packer.svg?branch=master)](https://travis-ci.org/mitchellh/packer)
|
||||
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/mitchellh/packer?branch=master&svg=true)](https://ci.appveyor.com/project/hashicorp/packer)
|
||||
|
||||
* Website: http://www.packer.io
|
||||
* IRC: `#packer-tool` on Freenode
|
||||
|
@ -41,15 +42,19 @@ for your operating system or [compile Packer yourself](#developing-packer).
After Packer is installed, create your first template, which tells Packer
what platforms to build images for and how you want to build them. In our
case, we'll create a simple AMI that has Redis pre-installed. Save this
file as `quick-start.json`. Be sure to replace any credentials with your
own.
file as `quick-start.json`. Export your AWS credentials as the
`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.

```json
{
  "variables": {
    "access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
    "secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}"
  },
  "builders": [{
    "type": "amazon-ebs",
    "access_key": "YOUR KEY HERE",
    "secret_key": "YOUR SECRET KEY HERE",
    "access_key": "{{user `access_key`}}",
    "secret_key": "{{user `secret_key`}}",
    "region": "us-east-1",
    "source_ami": "ami-de0d9eb7",
    "instance_type": "t1.micro",
@ -81,7 +86,7 @@ http://www.packer.io/docs
## Developing Packer

If you wish to work on Packer itself or any of its built-in providers,
you'll first need [Go](http://www.golang.org) installed (version 1.2+ is
you'll first need [Go](http://www.golang.org) installed (version 1.4+ is
_required_). Make sure Go is properly installed, including setting up
a [GOPATH](http://golang.org/doc/code.html#GOPATH).
|
||||
|
||||
|
@ -121,3 +126,35 @@ package by specifying the `TEST` variable. For example below, only
|
|||
|
||||
$ make test TEST=./packer
|
||||
...
|
||||
|
||||
### Acceptance Tests
|
||||
|
||||
Packer has comprehensive [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing)
|
||||
covering the builders of Packer.
|
||||
|
||||
If you're working on a feature of a builder or a new builder and want to
verify it is functioning (and also hasn't broken anything else), we recommend
running the acceptance tests.
|
||||
|
||||
**Warning:** The acceptance tests create/destroy/modify *real resources*, which
|
||||
may incur real costs in some cases. In the presence of a bug, it is technically
|
||||
possible that broken builders could leave dangling data behind. Therefore,
|
||||
please run the acceptance tests at your own risk. At the very least,
|
||||
we recommend running them in their own private account for whatever builder
|
||||
you're testing.
|
||||
|
||||
To run the acceptance tests, invoke `make testacc`:
|
||||
|
||||
```sh
|
||||
$ make testacc TEST=./builder/amazon/ebs
|
||||
...
|
||||
```
|
||||
|
||||
The `TEST` variable is required, and you should specify the folder where the
builder is. The `TESTARGS` variable is recommended to filter down to a specific
|
||||
resource to test, since testing all of them at once can sometimes take a very
|
||||
long time.
|
||||
|
||||
Acceptance tests typically require other environment variables to be set for
|
||||
things such as access keys. The test itself should error early and tell
|
||||
you what to set, so it is not documented here.
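For orientation only (this helper is not part of the diff above): a builder's acceptance test typically gates on the `PACKER_ACC` environment variable that `make testacc` sets, and fails early when required credentials are missing. A minimal sketch, using hypothetical names:

```go
package ebs

import (
	"os"
	"testing"
)

// testAccPreCheck is a hypothetical pre-check helper: it skips the test
// unless PACKER_ACC is set (as `make testacc` does) and fails fast when
// the credentials this builder needs are not exported.
func testAccPreCheck(t *testing.T) {
	if os.Getenv("PACKER_ACC") == "" {
		t.Skip("set PACKER_ACC=1 to run acceptance tests")
	}
	for _, v := range []string{"AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"} {
		if os.Getenv(v) == "" {
			t.Fatalf("%s must be set for acceptance tests", v)
		}
	}
}
```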
|
||||
|
|
|
@ -0,0 +1,39 @@
# appveyor.yml reference : http://www.appveyor.com/docs/appveyor-yml

version: "{build}"

skip_tags: true

branches:
  only:
    - master

os: Windows Server 2012 R2

environment:
  GOPATH: c:\gopath
  matrix:
    - GOARCH: 386
      GOVERSION: 1.4.2
    - GOARCH: amd64
      GOVERSION: 1.4.2

clone_folder: c:\gopath\src\github.com\mitchellh\packer

install:
  - set Path=c:\go\bin;%Path%
  - echo %Path%
  - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-%GOARCH%.msi
  - msiexec /i go%GOVERSION%.windows-%GOARCH%.msi /q
  - go version
  - go env
  - go get -d -v -t ./...

build_script:
  - go test -v ./...
  - go vet ./...
  - git rev-parse HEAD

test: off

deploy: off
|
@ -6,15 +6,17 @@ package chroot
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"runtime"
|
||||
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
awscommon "github.com/mitchellh/packer/builder/amazon/common"
|
||||
"github.com/mitchellh/packer/common"
|
||||
"github.com/mitchellh/packer/helper/config"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"github.com/mitchellh/packer/template/interpolate"
|
||||
)
|
||||
|
||||
// The unique ID for this builder
|
||||
|
@ -33,8 +35,10 @@ type Config struct {
|
|||
DevicePath string `mapstructure:"device_path"`
|
||||
MountPath string `mapstructure:"mount_path"`
|
||||
SourceAmi string `mapstructure:"source_ami"`
|
||||
RootVolumeSize int64 `mapstructure:"root_volume_size"`
|
||||
MountOptions []string `mapstructure:"mount_options"`
|
||||
|
||||
tpl *packer.ConfigTemplate
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
type wrappedCommandTemplate struct {
|
||||
|
@ -47,18 +51,21 @@ type Builder struct {
|
|||
}
|
||||
|
||||
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
|
||||
md, err := common.DecodeConfig(&b.config, raws...)
|
||||
b.config.ctx.Funcs = awscommon.TemplateFuncs
|
||||
err := config.Decode(&b.config, &config.DecodeOpts{
|
||||
Interpolate: true,
|
||||
InterpolateContext: &b.config.ctx,
|
||||
InterpolateFilter: &interpolate.RenderFilter{
|
||||
Exclude: []string{
|
||||
"command_wrapper",
|
||||
"mount_path",
|
||||
},
|
||||
},
|
||||
}, raws...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b.config.tpl, err = packer.NewConfigTemplate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.config.tpl.UserVars = b.config.PackerUserVars
|
||||
b.config.tpl.Funcs(awscommon.TemplateFuncs)
|
||||
|
||||
// Defaults
|
||||
if b.config.ChrootMounts == nil {
|
||||
b.config.ChrootMounts = make([][]string, 0)
|
||||
|
@ -91,55 +98,22 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
|
|||
}
|
||||
|
||||
// Accumulate any errors
|
||||
errs := common.CheckUnusedConfig(md)
|
||||
errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...)
|
||||
errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...)
|
||||
var errs *packer.MultiError
|
||||
errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)
|
||||
errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(&b.config.ctx)...)
|
||||
|
||||
for i, mounts := range b.config.ChrootMounts {
|
||||
for _, mounts := range b.config.ChrootMounts {
|
||||
if len(mounts) != 3 {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, errors.New("Each chroot_mounts entry should be three elements."))
|
||||
break
|
||||
}
|
||||
|
||||
for j, entry := range mounts {
|
||||
b.config.ChrootMounts[i][j], err = b.config.tpl.Process(entry, nil)
|
||||
if err != nil {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
fmt.Errorf("Error processing chroot_mounts[%d][%d]: %s",
|
||||
i, j, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i, file := range b.config.CopyFiles {
|
||||
var err error
|
||||
b.config.CopyFiles[i], err = b.config.tpl.Process(file, nil)
|
||||
if err != nil {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
fmt.Errorf("Error processing copy_files[%d]: %s",
|
||||
i, err))
|
||||
}
|
||||
}
|
||||
|
||||
if b.config.SourceAmi == "" {
|
||||
errs = packer.MultiErrorAppend(errs, errors.New("source_ami is required."))
|
||||
}
|
||||
|
||||
templates := map[string]*string{
|
||||
"device_path": &b.config.DevicePath,
|
||||
"source_ami": &b.config.SourceAmi,
|
||||
}
|
||||
|
||||
for n, ptr := range templates {
|
||||
var err error
|
||||
*ptr, err = b.config.tpl.Process(*ptr, nil)
|
||||
if err != nil {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, fmt.Errorf("Error processing %s: %s", n, err))
|
||||
}
|
||||
}
|
||||
|
||||
if errs != nil && len(errs.Errors) > 0 {
|
||||
return nil, errs
|
||||
}
|
||||
|
@ -153,17 +127,13 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
return nil, errors.New("The amazon-chroot builder only works on Linux environments.")
|
||||
}
|
||||
|
||||
region, err := b.config.Region()
|
||||
config, err := b.config.Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
auth, err := b.config.AccessConfig.Auth()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ec2conn := ec2.New(auth, region)
|
||||
session := session.New(config)
|
||||
ec2conn := ec2.New(session)
|
||||
|
||||
// If the subnet is specified but not the AZ, try to determine the AZ automatically
if b.config.SubnetId != "" && b.config.AvailabilityZone == "" {
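This hunk is the heart of the subnet_az_discovery change: when only `subnet_id` is configured, the builder resolves the subnet's availability zone before proceeding. The body of the lookup is cut off in this view; a minimal sketch of the idea with the aws-sdk-go EC2 client (illustrative, not necessarily the exact code in this commit):

```go
// Resolve the AZ of the configured subnet via DescribeSubnets.
subnetId := b.config.SubnetId
resp, err := ec2conn.DescribeSubnets(&ec2.DescribeSubnetsInput{
	SubnetIds: []*string{&subnetId},
})
if err == nil && len(resp.Subnets) == 1 {
	b.config.AvailabilityZone = *resp.Subnets[0].AvailabilityZone
}
```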
@ -177,10 +147,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
}
|
||||
|
||||
wrappedCommand := func(command string) (string, error) {
|
||||
return b.config.tpl.Process(
|
||||
b.config.CommandWrapper, &wrappedCommandTemplate{
|
||||
Command: command,
|
||||
})
|
||||
ctx := b.config.ctx
|
||||
ctx.Data = &wrappedCommandTemplate{Command: command}
|
||||
return interpolate.Render(b.config.CommandWrapper, &ctx)
|
||||
}
|
||||
|
||||
// Setup the state bag and initial state for the steps
|
||||
|
@ -193,6 +162,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
|
||||
// Build the steps
|
||||
steps := []multistep.Step{
|
||||
&awscommon.StepPreValidate{
|
||||
DestAmiName: b.config.AMIName,
|
||||
ForceDeregister: b.config.AMIForceDeregister,
|
||||
},
|
||||
&StepInstanceInfo{},
|
||||
&awscommon.StepSourceAMIInfo{
|
||||
SourceAmi: b.config.SourceAmi,
|
||||
|
@ -201,18 +174,30 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
&StepCheckRootDevice{},
|
||||
&StepFlock{},
|
||||
&StepPrepareDevice{},
|
||||
&StepCreateVolume{},
|
||||
&StepCreateVolume{
|
||||
RootVolumeSize: b.config.RootVolumeSize,
|
||||
},
|
||||
&StepAttachVolume{},
|
||||
&StepEarlyUnflock{},
|
||||
&StepMountDevice{},
|
||||
&StepMountDevice{
|
||||
MountOptions: b.config.MountOptions,
|
||||
},
|
||||
&StepMountExtra{},
|
||||
&StepCopyFiles{},
|
||||
&StepChrootProvision{},
|
||||
&StepEarlyCleanup{},
|
||||
&StepSnapshot{},
|
||||
&StepRegisterAMI{},
|
||||
&awscommon.StepDeregisterAMI{
|
||||
ForceDeregister: b.config.AMIForceDeregister,
|
||||
AMIName: b.config.AMIName,
|
||||
},
|
||||
&StepRegisterAMI{
|
||||
RootVolumeSize: b.config.RootVolumeSize,
|
||||
},
|
||||
&awscommon.StepAMIRegionCopy{
|
||||
AccessConfig: &b.config.AccessConfig,
|
||||
Regions: b.config.AMIRegions,
|
||||
Name: b.config.AMIName,
|
||||
},
|
||||
&awscommon.StepModifyAMIAttributes{
|
||||
Description: b.config.AMIDescription,
|
||||
|
|
|
@ -3,11 +3,13 @@ package chroot
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
awscommon "github.com/mitchellh/packer/builder/amazon/common"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// StepAttachVolume attaches the previously created volume to an
|
||||
|
@ -32,7 +34,11 @@ func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction {
|
|||
attachVolume := strings.Replace(device, "/xvd", "/sd", 1)
|
||||
|
||||
ui.Say(fmt.Sprintf("Attaching the root volume to %s", attachVolume))
|
||||
_, err := ec2conn.AttachVolume(volumeId, instance.InstanceId, attachVolume)
|
||||
_, err := ec2conn.AttachVolume(&ec2.AttachVolumeInput{
|
||||
InstanceId: instance.InstanceId,
|
||||
VolumeId: &volumeId,
|
||||
Device: &attachVolume,
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error attaching volume: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -50,17 +56,26 @@ func (s *StepAttachVolume) Run(state multistep.StateBag) multistep.StepAction {
|
|||
StepState: state,
|
||||
Target: "attached",
|
||||
Refresh: func() (interface{}, string, error) {
|
||||
resp, err := ec2conn.Volumes([]string{volumeId}, ec2.NewFilter())
|
||||
attempts := 0
|
||||
for attempts < 30 {
|
||||
resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeId}})
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
if len(resp.Volumes[0].Attachments) == 0 {
|
||||
return nil, "", errors.New("No attachments on volume.")
|
||||
if len(resp.Volumes[0].Attachments) > 0 {
|
||||
a := resp.Volumes[0].Attachments[0]
|
||||
return a, *a.State, nil
|
||||
}
|
||||
// When Attachment on volume is not present sleep for 2s and retry
|
||||
attempts += 1
|
||||
ui.Say(fmt.Sprintf(
|
||||
"Volume %s show no attachments. Attempt %d/30. Sleeping for 2s and will retry.",
|
||||
volumeId, attempts))
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
|
||||
a := resp.Volumes[0].Attachments[0]
|
||||
return a, a.Status, nil
|
||||
// Attachment on volume is not present after all attempts
|
||||
return nil, "", errors.New("No attachments on volume.")
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -92,7 +107,7 @@ func (s *StepAttachVolume) CleanupFunc(state multistep.StateBag) error {
|
|||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
ui.Say("Detaching EBS volume...")
|
||||
_, err := ec2conn.DetachVolume(s.volumeId)
|
||||
_, err := ec2conn.DetachVolume(&ec2.DetachVolumeInput{VolumeId: &s.volumeId})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error detaching EBS volume: %s", err)
|
||||
}
|
||||
|
@ -105,14 +120,14 @@ func (s *StepAttachVolume) CleanupFunc(state multistep.StateBag) error {
|
|||
StepState: state,
|
||||
Target: "detached",
|
||||
Refresh: func() (interface{}, string, error) {
|
||||
resp, err := ec2conn.Volumes([]string{s.volumeId}, ec2.NewFilter())
|
||||
resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&s.volumeId}})
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
v := resp.Volumes[0]
|
||||
if len(v.Attachments) > 0 {
|
||||
return v, v.Attachments[0].Status, nil
|
||||
return v, *v.Attachments[0].State, nil
|
||||
} else {
|
||||
return v, "detached", nil
|
||||
}
|
||||
|
|
|
@ -3,7 +3,7 @@ package chroot
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
@ -18,7 +18,7 @@ func (s *StepCheckRootDevice) Run(state multistep.StateBag) multistep.StepAction
|
|||
ui.Say("Checking the root device on source AMI...")
|
||||
|
||||
// It must be EBS-backed otherwise the build won't work
|
||||
if image.RootDeviceType != "ebs" {
|
||||
if *image.RootDeviceType != "ebs" {
|
||||
err := fmt.Errorf("The root device of the source AMI must be EBS-backed.")
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
|
|
|
@ -2,11 +2,13 @@ package chroot
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"log"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
awscommon "github.com/mitchellh/packer/builder/amazon/common"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"log"
|
||||
)
|
||||
|
||||
// StepCreateVolume creates a new volume from the snapshot of the root
|
||||
|
@ -16,6 +18,7 @@ import (
|
|||
// volume_id string - The ID of the created volume
|
||||
type StepCreateVolume struct {
|
||||
volumeId string
|
||||
RootVolumeSize int64
|
||||
}
|
||||
|
||||
func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction {
|
||||
|
@ -25,11 +28,11 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction {
|
|||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
// Determine the root device snapshot
|
||||
log.Printf("Searching for root device of the image (%s)", image.RootDeviceName)
|
||||
log.Printf("Searching for root device of the image (%s)", *image.RootDeviceName)
|
||||
var rootDevice *ec2.BlockDeviceMapping
|
||||
for _, device := range image.BlockDevices {
|
||||
if device.DeviceName == image.RootDeviceName {
|
||||
rootDevice = &device
|
||||
for _, device := range image.BlockDeviceMappings {
|
||||
if *device.DeviceName == *image.RootDeviceName {
|
||||
rootDevice = device
|
||||
break
|
||||
}
|
||||
}
|
||||
|
@ -42,14 +45,18 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction {
|
|||
}
|
||||
|
||||
ui.Say("Creating the root volume...")
|
||||
createVolume := &ec2.CreateVolume{
|
||||
AvailZone: instance.AvailZone,
|
||||
Size: rootDevice.VolumeSize,
|
||||
SnapshotId: rootDevice.SnapshotId,
|
||||
VolumeType: rootDevice.VolumeType,
|
||||
IOPS: rootDevice.IOPS,
|
||||
vs := *rootDevice.Ebs.VolumeSize
|
||||
if s.RootVolumeSize > *rootDevice.Ebs.VolumeSize {
|
||||
vs = s.RootVolumeSize
|
||||
}
|
||||
log.Printf("Create args: %#v", createVolume)
|
||||
createVolume := &ec2.CreateVolumeInput{
|
||||
AvailabilityZone: instance.Placement.AvailabilityZone,
|
||||
Size: aws.Int64(vs),
|
||||
SnapshotId: rootDevice.Ebs.SnapshotId,
|
||||
VolumeType: rootDevice.Ebs.VolumeType,
|
||||
Iops: rootDevice.Ebs.Iops,
|
||||
}
|
||||
log.Printf("Create args: %+v", createVolume)
|
||||
|
||||
createVolumeResp, err := ec2conn.CreateVolume(createVolume)
|
||||
if err != nil {
|
||||
|
@ -60,7 +67,7 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction {
|
|||
}
|
||||
|
||||
// Set the volume ID so we remember to delete it later
|
||||
s.volumeId = createVolumeResp.VolumeId
|
||||
s.volumeId = *createVolumeResp.VolumeId
|
||||
log.Printf("Volume ID: %s", s.volumeId)
|
||||
|
||||
// Wait for the volume to become ready
|
||||
|
@ -69,13 +76,13 @@ func (s *StepCreateVolume) Run(state multistep.StateBag) multistep.StepAction {
|
|||
StepState: state,
|
||||
Target: "available",
|
||||
Refresh: func() (interface{}, string, error) {
|
||||
resp, err := ec2conn.Volumes([]string{s.volumeId}, ec2.NewFilter())
|
||||
resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&s.volumeId}})
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
v := resp.Volumes[0]
|
||||
return v, v.Status, nil
|
||||
return v, *v.State, nil
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -100,7 +107,7 @@ func (s *StepCreateVolume) Cleanup(state multistep.StateBag) {
|
|||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
ui.Say("Deleting the created EBS volume...")
|
||||
_, err := ec2conn.DeleteVolume(s.volumeId)
|
||||
_, err := ec2conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeId: &s.volumeId})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("Error deleting EBS volume: %s", err))
|
||||
}
|
||||
|
|
|
@ -2,11 +2,12 @@ package chroot
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/aws"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"log"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/builder/amazon/common"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
// StepInstanceInfo verifies that this builder is running on an EC2 instance.
|
||||
|
@ -18,7 +19,7 @@ func (s *StepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction {
|
|||
|
||||
// Get our own instance ID
|
||||
ui.Say("Gathering information about this EC2 instance...")
|
||||
instanceIdBytes, err := aws.GetMetaData("instance-id")
|
||||
instanceIdBytes, err := common.GetInstanceMetaData("instance-id")
|
||||
if err != nil {
|
||||
log.Printf("Error: %s", err)
|
||||
err := fmt.Errorf(
|
||||
|
@ -33,7 +34,7 @@ func (s *StepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction {
|
|||
log.Printf("Instance ID: %s", instanceId)
|
||||
|
||||
// Query the entire instance metadata
|
||||
instancesResp, err := ec2conn.Instances([]string{instanceId}, ec2.NewFilter())
|
||||
instancesResp, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesInput{InstanceIds: []*string{&instanceId}})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error getting instance data: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -48,7 +49,7 @@ func (s *StepInstanceInfo) Run(state multistep.StateBag) multistep.StepAction {
|
|||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
instance := &instancesResp.Reservations[0].Instances[0]
|
||||
instance := instancesResp.Reservations[0].Instances[0]
|
||||
state.Put("instance", instance)
|
||||
|
||||
return multistep.ActionContinue
|
||||
|
|
|
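Earlier in this file, the goamz call `aws.GetMetaData("instance-id")` is replaced by `common.GetInstanceMetaData("instance-id")`. For readers unfamiliar with that helper, it essentially reads the EC2 instance metadata service; a hedged sketch of what it might look like (the real implementation in builder/amazon/common may differ):

```go
package common

import (
	"io/ioutil"
	"net/http"
)

// GetInstanceMetaData fetches a single key from the EC2 instance metadata
// service and returns the raw response body (illustrative sketch).
func GetInstanceMetaData(path string) ([]byte, error) {
	resp, err := http.Get("http://169.254.169.254/latest/meta-data/" + path)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}
```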
@ -3,12 +3,15 @@ package chroot
|
|||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"github.com/mitchellh/packer/template/interpolate"
|
||||
)
|
||||
|
||||
type mountPathData struct {
|
||||
|
@ -21,6 +24,8 @@ type mountPathData struct {
|
|||
// mount_path string - The location where the volume was mounted.
|
||||
// mount_device_cleanup CleanupFunc - To perform early cleanup
|
||||
type StepMountDevice struct {
|
||||
MountOptions []string
|
||||
|
||||
mountPath string
|
||||
}
|
||||
|
||||
|
@ -31,9 +36,9 @@ func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction {
|
|||
device := state.Get("device").(string)
|
||||
wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
|
||||
|
||||
mountPath, err := config.tpl.Process(config.MountPath, &mountPathData{
|
||||
Device: filepath.Base(device),
|
||||
})
|
||||
ctx := config.ctx
|
||||
ctx.Data = &mountPathData{Device: filepath.Base(device)}
|
||||
mountPath, err := interpolate.Render(config.MountPath, &ctx)
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error preparing mount directory: %s", err)
|
||||
|
@ -59,17 +64,24 @@ func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction {
|
|||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
log.Printf("Source image virtualization type is: %s", image.VirtualizationType)
|
||||
log.Printf("Source image virtualization type is: %s", *image.VirtualizationType)
|
||||
deviceMount := device
|
||||
if image.VirtualizationType == "hvm" {
|
||||
if *image.VirtualizationType == "hvm" {
|
||||
deviceMount = fmt.Sprintf("%s%d", device, 1)
|
||||
}
|
||||
state.Put("deviceMount", deviceMount)
|
||||
|
||||
ui.Say("Mounting the root device...")
|
||||
stderr := new(bytes.Buffer)
|
||||
|
||||
// build mount options from mount_options config, useful for nouuid options
// or other specific device type settings for mount
|
||||
opts := ""
|
||||
if len(s.MountOptions) > 0 {
|
||||
opts = "-o " + strings.Join(s.MountOptions, " -o ")
|
||||
}
|
||||
mountCommand, err := wrappedCommand(
|
||||
fmt.Sprintf("mount %s %s", deviceMount, mountPath))
|
||||
fmt.Sprintf("mount %s %s %s", opts, deviceMount, mountPath))
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating mount command: %s", err)
|
||||
state.Put("error", err)
|
||||
|
|
|
@ -6,6 +6,8 @@ import (
|
|||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// StepMountExtra mounts the attached device.
|
||||
|
@ -90,13 +92,37 @@ func (s *StepMountExtra) CleanupFunc(state multistep.StateBag) error {
|
|||
var path string
|
||||
lastIndex := len(s.mounts) - 1
|
||||
path, s.mounts = s.mounts[lastIndex], s.mounts[:lastIndex]
|
||||
|
||||
grepCommand, err := wrappedCommand(fmt.Sprintf("grep %s /proc/mounts", path))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating grep command: %s", err)
|
||||
}
|
||||
|
||||
// Before attempting to unmount,
|
||||
// check to see if path is already unmounted
|
||||
stderr := new(bytes.Buffer)
|
||||
cmd := ShellCommand(grepCommand)
|
||||
cmd.Stderr = stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
if exitError, ok := err.(*exec.ExitError); ok {
|
||||
if status, ok := exitError.Sys().(syscall.WaitStatus); ok {
|
||||
exitStatus := status.ExitStatus()
|
||||
if exitStatus == 1 {
|
||||
// path has already been unmounted
|
||||
// just skip this path
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unmountCommand, err := wrappedCommand(fmt.Sprintf("umount %s", path))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating unmount command: %s", err)
|
||||
}
|
||||
|
||||
stderr := new(bytes.Buffer)
|
||||
cmd := ShellCommand(unmountCommand)
|
||||
stderr = new(bytes.Buffer)
|
||||
cmd = ShellCommand(unmountCommand)
|
||||
cmd.Stderr = stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf(
|
||||
|
|
|
@ -3,14 +3,17 @@ package chroot
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
awscommon "github.com/mitchellh/packer/builder/amazon/common"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
// StepRegisterAMI creates the AMI.
|
||||
type StepRegisterAMI struct{}
|
||||
type StepRegisterAMI struct {
|
||||
RootVolumeSize int64
|
||||
}
|
||||
|
||||
func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
|
||||
config := state.Get("config").(*Config)
|
||||
|
@ -20,11 +23,25 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
|
|||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
ui.Say("Registering the AMI...")
|
||||
blockDevices := make([]ec2.BlockDeviceMapping, len(image.BlockDevices))
|
||||
for i, device := range image.BlockDevices {
|
||||
blockDevices := make([]*ec2.BlockDeviceMapping, len(image.BlockDeviceMappings))
|
||||
for i, device := range image.BlockDeviceMappings {
|
||||
newDevice := device
|
||||
if newDevice.DeviceName == image.RootDeviceName {
|
||||
newDevice.SnapshotId = snapshotId
|
||||
if *newDevice.DeviceName == *image.RootDeviceName {
|
||||
if newDevice.Ebs != nil {
|
||||
newDevice.Ebs.SnapshotId = aws.String(snapshotId)
|
||||
} else {
|
||||
newDevice.Ebs = &ec2.EbsBlockDevice{SnapshotId: aws.String(snapshotId)}
|
||||
}
|
||||
|
||||
if s.RootVolumeSize > *newDevice.Ebs.VolumeSize {
|
||||
newDevice.Ebs.VolumeSize = aws.Int64(s.RootVolumeSize)
|
||||
}
|
||||
}
|
||||
|
||||
// assume working from a snapshot, so we unset the Encrypted field if set,
|
||||
// otherwise AWS API will return InvalidParameter
|
||||
if newDevice.Ebs != nil && newDevice.Ebs.Encrypted != nil {
|
||||
newDevice.Ebs.Encrypted = nil
|
||||
}
|
||||
|
||||
blockDevices[i] = newDevice
|
||||
|
@ -34,7 +51,7 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
|
|||
|
||||
// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
|
||||
if config.AMIEnhancedNetworking {
|
||||
registerOpts.SriovNetSupport = "simple"
|
||||
registerOpts.SriovNetSupport = aws.String("simple")
|
||||
}
|
||||
|
||||
registerResp, err := ec2conn.RegisterImage(registerOpts)
|
||||
|
@ -45,16 +62,16 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
|
|||
}
|
||||
|
||||
// Set the AMI ID in the state
|
||||
ui.Say(fmt.Sprintf("AMI: %s", registerResp.ImageId))
|
||||
ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageId))
|
||||
amis := make(map[string]string)
|
||||
amis[ec2conn.Region.Name] = registerResp.ImageId
|
||||
amis[*ec2conn.Config.Region] = *registerResp.ImageId
|
||||
state.Put("amis", amis)
|
||||
|
||||
// Wait for the image to become ready
|
||||
stateChange := awscommon.StateChangeConf{
|
||||
Pending: []string{"pending"},
|
||||
Target: "available",
|
||||
Refresh: awscommon.AMIStateRefreshFunc(ec2conn, registerResp.ImageId),
|
||||
Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *registerResp.ImageId),
|
||||
StepState: state,
|
||||
}
|
||||
|
||||
|
@ -71,13 +88,17 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
|
|||
|
||||
func (s *StepRegisterAMI) Cleanup(state multistep.StateBag) {}
|
||||
|
||||
func buildRegisterOpts(config *Config, image *ec2.Image, blockDevices []ec2.BlockDeviceMapping) *ec2.RegisterImage {
|
||||
registerOpts := &ec2.RegisterImage{
|
||||
Name: config.AMIName,
|
||||
func buildRegisterOpts(config *Config, image *ec2.Image, blockDevices []*ec2.BlockDeviceMapping) *ec2.RegisterImageInput {
|
||||
registerOpts := &ec2.RegisterImageInput{
|
||||
Name: &config.AMIName,
|
||||
Architecture: image.Architecture,
|
||||
RootDeviceName: image.RootDeviceName,
|
||||
BlockDevices: blockDevices,
|
||||
VirtType: config.AMIVirtType,
|
||||
BlockDeviceMappings: blockDevices,
|
||||
VirtualizationType: image.VirtualizationType,
|
||||
}
|
||||
|
||||
if config.AMIVirtType != "" {
|
||||
registerOpts.VirtualizationType = aws.String(config.AMIVirtType)
|
||||
}
|
||||
|
||||
if config.AMIVirtType != "hvm" {
|
||||
|
|
|
@ -1,16 +1,18 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
)
|
||||
|
||||
func testImage() ec2.Image {
|
||||
return ec2.Image{
|
||||
Id: "ami-abcd1234",
|
||||
Name: "ami_test_name",
|
||||
Architecture: "x86_64",
|
||||
KernelId: "aki-abcd1234",
|
||||
ImageId: aws.String("ami-abcd1234"),
|
||||
Name: aws.String("ami_test_name"),
|
||||
Architecture: aws.String("x86_64"),
|
||||
KernelId: aws.String("aki-abcd1234"),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -22,23 +24,23 @@ func TestStepRegisterAmi_buildRegisterOpts_pv(t *testing.T) {
|
|||
|
||||
image := testImage()
|
||||
|
||||
blockDevices := []ec2.BlockDeviceMapping{}
|
||||
blockDevices := []*ec2.BlockDeviceMapping{}
|
||||
|
||||
opts := buildRegisterOpts(&config, &image, blockDevices)
|
||||
|
||||
expected := config.AMIVirtType
|
||||
if opts.VirtType != expected {
|
||||
t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, opts.VirtType)
|
||||
if *opts.VirtualizationType != expected {
|
||||
t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, *opts.VirtualizationType)
|
||||
}
|
||||
|
||||
expected = config.AMIName
|
||||
if opts.Name != expected {
|
||||
t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, opts.Name)
|
||||
if *opts.Name != expected {
|
||||
t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, *opts.Name)
|
||||
}
|
||||
|
||||
expected = image.KernelId
|
||||
if opts.KernelId != expected {
|
||||
t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, opts.KernelId)
|
||||
expected = *image.KernelId
|
||||
if *opts.KernelId != expected {
|
||||
t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, *opts.KernelId)
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -51,23 +53,21 @@ func TestStepRegisterAmi_buildRegisterOpts_hvm(t *testing.T) {
|
|||
|
||||
image := testImage()
|
||||
|
||||
blockDevices := []ec2.BlockDeviceMapping{}
|
||||
blockDevices := []*ec2.BlockDeviceMapping{}
|
||||
|
||||
opts := buildRegisterOpts(&config, &image, blockDevices)
|
||||
|
||||
expected := config.AMIVirtType
|
||||
if opts.VirtType != expected {
|
||||
t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, opts.VirtType)
|
||||
if *opts.VirtualizationType != expected {
|
||||
t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, *opts.VirtualizationType)
|
||||
}
|
||||
|
||||
expected = config.AMIName
|
||||
if opts.Name != expected {
|
||||
t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, opts.Name)
|
||||
if *opts.Name != expected {
|
||||
t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, *opts.Name)
|
||||
}
|
||||
|
||||
expected = ""
|
||||
if opts.KernelId != expected {
|
||||
t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, opts.KernelId)
|
||||
if opts.KernelId != nil {
|
||||
t.Fatalf("Unexpected KernelId value: expected nil got %s\n", *opts.KernelId)
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -5,7 +5,7 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
awscommon "github.com/mitchellh/packer/builder/amazon/common"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
|
@ -25,9 +25,12 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
|
|||
volumeId := state.Get("volume_id").(string)
|
||||
|
||||
ui.Say("Creating snapshot...")
|
||||
createSnapResp, err := ec2conn.CreateSnapshot(
|
||||
volumeId,
|
||||
fmt.Sprintf("Packer: %s", time.Now().String()))
|
||||
description := fmt.Sprintf("Packer: %s", time.Now().String())
|
||||
|
||||
createSnapResp, err := ec2conn.CreateSnapshot(&ec2.CreateSnapshotInput{
|
||||
VolumeId: &volumeId,
|
||||
Description: &description,
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating snapshot: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -36,7 +39,7 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
|
|||
}
|
||||
|
||||
// Set the snapshot ID so we can delete it later
|
||||
s.snapshotId = createSnapResp.Id
|
||||
s.snapshotId = *createSnapResp.SnapshotId
|
||||
ui.Message(fmt.Sprintf("Snapshot ID: %s", s.snapshotId))
|
||||
|
||||
// Wait for the snapshot to be ready
|
||||
|
@ -45,7 +48,7 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
|
|||
StepState: state,
|
||||
Target: "completed",
|
||||
Refresh: func() (interface{}, string, error) {
|
||||
resp, err := ec2conn.Snapshots([]string{s.snapshotId}, ec2.NewFilter())
|
||||
resp, err := ec2conn.DescribeSnapshots(&ec2.DescribeSnapshotsInput{SnapshotIds: []*string{&s.snapshotId}})
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
@ -55,7 +58,7 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
|
|||
}
|
||||
|
||||
s := resp.Snapshots[0]
|
||||
return s, s.Status, nil
|
||||
return s, *s.State, nil
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -83,7 +86,7 @@ func (s *StepSnapshot) Cleanup(state multistep.StateBag) {
|
|||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
ui.Say("Removing snapshot since we cancelled or halted...")
|
||||
_, err := ec2conn.DeleteSnapshots([]string{s.snapshotId})
|
||||
_, err := ec2conn.DeleteSnapshot(&ec2.DeleteSnapshotInput{SnapshotId: &s.snapshotId})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("Error: %s", err))
|
||||
}
|
||||
|
|
|
@ -2,10 +2,15 @@ package common
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/aws"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
|
||||
"github.com/mitchellh/packer/template/interpolate"
|
||||
)
|
||||
|
||||
// AccessConfig is for common configuration related to AWS access
|
||||
|
@ -16,66 +21,55 @@ type AccessConfig struct {
|
|||
Token string `mapstructure:"token"`
|
||||
}
|
||||
|
||||
// Auth returns a valid aws.Auth object for access to AWS services, or
|
||||
// an error if the authentication couldn't be resolved.
|
||||
func (c *AccessConfig) Auth() (aws.Auth, error) {
|
||||
auth, err := aws.GetAuth(c.AccessKey, c.SecretKey)
|
||||
if err == nil {
|
||||
// Store the accesskey and secret that we got...
|
||||
c.AccessKey = auth.AccessKey
|
||||
c.SecretKey = auth.SecretKey
|
||||
c.Token = auth.Token
|
||||
}
|
||||
if c.Token != "" {
|
||||
auth.Token = c.Token
|
||||
// Config returns a valid aws.Config object for access to AWS services, or
|
||||
// an error if the authentication and region couldn't be resolved
|
||||
func (c *AccessConfig) Config() (*aws.Config, error) {
|
||||
creds := credentials.NewChainCredentials([]credentials.Provider{
|
||||
&credentials.StaticProvider{Value: credentials.Value{
|
||||
AccessKeyID: c.AccessKey,
|
||||
SecretAccessKey: c.SecretKey,
|
||||
SessionToken: c.Token,
|
||||
}},
|
||||
&credentials.EnvProvider{},
|
||||
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
|
||||
&ec2rolecreds.EC2RoleProvider{},
|
||||
})
|
||||
|
||||
region, err := c.Region()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return auth, err
|
||||
return &aws.Config{
|
||||
Region: aws.String(region),
|
||||
Credentials: creds,
|
||||
MaxRetries: aws.Int(11),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Region returns the aws.Region object for access to AWS services, requesting
|
||||
// the region from the instance metadata if possible.
|
||||
func (c *AccessConfig) Region() (aws.Region, error) {
|
||||
func (c *AccessConfig) Region() (string, error) {
|
||||
if c.RawRegion != "" {
|
||||
return aws.Regions[c.RawRegion], nil
|
||||
if valid := ValidateRegion(c.RawRegion); valid == false {
|
||||
return "", fmt.Errorf("Not a valid region: %s", c.RawRegion)
|
||||
}
|
||||
return c.RawRegion, nil
|
||||
}
|
||||
|
||||
md, err := aws.GetMetaData("placement/availability-zone")
|
||||
md, err := GetInstanceMetaData("placement/availability-zone")
|
||||
if err != nil {
|
||||
return aws.Region{}, err
|
||||
return "", err
|
||||
}
|
||||
|
||||
region := strings.TrimRightFunc(string(md), unicode.IsLetter)
|
||||
return aws.Regions[region], nil
|
||||
return region, nil
|
||||
}
|
||||
|
||||
func (c *AccessConfig) Prepare(t *packer.ConfigTemplate) []error {
|
||||
if t == nil {
|
||||
var err error
|
||||
t, err = packer.NewConfigTemplate()
|
||||
if err != nil {
|
||||
return []error{err}
|
||||
}
|
||||
}
|
||||
|
||||
templates := map[string]*string{
|
||||
"access_key": &c.AccessKey,
|
||||
"secret_key": &c.SecretKey,
|
||||
"region": &c.RawRegion,
|
||||
}
|
||||
|
||||
errs := make([]error, 0)
|
||||
for n, ptr := range templates {
|
||||
var err error
|
||||
*ptr, err = t.Process(*ptr, nil)
|
||||
if err != nil {
|
||||
errs = append(
|
||||
errs, fmt.Errorf("Error processing %s: %s", n, err))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error {
|
||||
var errs []error
|
||||
if c.RawRegion != "" {
|
||||
if _, ok := aws.Regions[c.RawRegion]; !ok {
|
||||
if valid := ValidateRegion(c.RawRegion); valid == false {
|
||||
errs = append(errs, fmt.Errorf("Unknown region: %s", c.RawRegion))
|
||||
}
|
||||
}
|
||||
|
@ -86,3 +80,24 @@ func (c *AccessConfig) Prepare(t *packer.ConfigTemplate) []error {
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetInstanceMetaData(path string) (contents []byte, err error) {
|
||||
url := "http://169.254.169.254/latest/meta-data/" + path
|
||||
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
|
||||
return
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return []byte(body), err
|
||||
}
|
||||
|
|
|
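access_config.go now resolves credentials through a chain (static keys from the template, environment variables, the shared credentials file, then the EC2 instance role) and returns an *aws.Config instead of goamz's aws.Auth. A minimal sketch of how a builder might consume it, assuming it sits next to AccessConfig in package common; the helper name is hypothetical:

```go
package common

import (
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// newEC2Conn is a hypothetical helper, not part of this change, showing how
// a builder step could turn an AccessConfig into the EC2 client that later
// steps read from the state bag.
func newEC2Conn(c *AccessConfig) (*ec2.EC2, error) {
	awsConfig, err := c.Config() // credential chain + region resolution
	if err != nil {
		return nil, err
	}

	// session.New applies the credentials, region and retry settings from
	// the config; ec2.New builds the service client on top of it.
	return ec2.New(session.New(awsConfig)), nil
}
```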
@ -3,8 +3,7 @@ package common
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/mitchellh/goamz/aws"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"github.com/mitchellh/packer/template/interpolate"
|
||||
)
|
||||
|
||||
// AMIConfig is for common configuration related to creating AMIs.
|
||||
|
@ -18,51 +17,11 @@ type AMIConfig struct {
|
|||
AMIRegions []string `mapstructure:"ami_regions"`
|
||||
AMITags map[string]string `mapstructure:"tags"`
|
||||
AMIEnhancedNetworking bool `mapstructure:"enhanced_networking"`
|
||||
AMIForceDeregister bool `mapstructure:"force_deregister"`
|
||||
}
|
||||
|
||||
func (c *AMIConfig) Prepare(t *packer.ConfigTemplate) []error {
|
||||
if t == nil {
|
||||
var err error
|
||||
t, err = packer.NewConfigTemplate()
|
||||
if err != nil {
|
||||
return []error{err}
|
||||
}
|
||||
}
|
||||
|
||||
templates := map[string]*string{
|
||||
"ami_name": &c.AMIName,
|
||||
"ami_description": &c.AMIDescription,
|
||||
"ami_virtualization_type": &c.AMIVirtType,
|
||||
}
|
||||
|
||||
errs := make([]error, 0)
|
||||
for n, ptr := range templates {
|
||||
var err error
|
||||
*ptr, err = t.Process(*ptr, nil)
|
||||
if err != nil {
|
||||
errs = append(
|
||||
errs, fmt.Errorf("Error processing %s: %s", n, err))
|
||||
}
|
||||
}
|
||||
|
||||
sliceTemplates := map[string][]string{
|
||||
"ami_users": c.AMIUsers,
|
||||
"ami_groups": c.AMIGroups,
|
||||
"ami_product_codes": c.AMIProductCodes,
|
||||
"ami_regions": c.AMIRegions,
|
||||
}
|
||||
|
||||
for n, slice := range sliceTemplates {
|
||||
for i, elem := range slice {
|
||||
var err error
|
||||
slice[i], err = t.Process(elem, nil)
|
||||
if err != nil {
|
||||
errs = append(
|
||||
errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *AMIConfig) Prepare(ctx *interpolate.Context) []error {
|
||||
var errs []error
|
||||
if c.AMIName == "" {
|
||||
errs = append(errs, fmt.Errorf("ami_name must be specified"))
|
||||
}
|
||||
|
@ -81,7 +40,7 @@ func (c *AMIConfig) Prepare(t *packer.ConfigTemplate) []error {
|
|||
regionSet[region] = struct{}{}
|
||||
|
||||
// Verify the region is real
|
||||
if _, ok := aws.Regions[region]; !ok {
|
||||
if valid := ValidateRegion(region); valid == false {
|
||||
errs = append(errs, fmt.Errorf("Unknown region: %s", region))
|
||||
continue
|
||||
}
|
||||
|
@ -92,27 +51,6 @@ func (c *AMIConfig) Prepare(t *packer.ConfigTemplate) []error {
|
|||
c.AMIRegions = regions
|
||||
}
|
||||
|
||||
newTags := make(map[string]string)
|
||||
for k, v := range c.AMITags {
|
||||
k, err := t.Process(k, nil)
|
||||
if err != nil {
|
||||
errs = append(errs,
|
||||
fmt.Errorf("Error processing tag key %s: %s", k, err))
|
||||
continue
|
||||
}
|
||||
|
||||
v, err := t.Process(v, nil)
|
||||
if err != nil {
|
||||
errs = append(errs,
|
||||
fmt.Errorf("Error processing tag value '%s': %s", v, err))
|
||||
continue
|
||||
}
|
||||
|
||||
newTags[k] = v
|
||||
}
|
||||
|
||||
c.AMITags = newTags
|
||||
|
||||
if len(errs) > 0 {
|
||||
return errs
|
||||
}
|
||||
|
|
|
@ -6,8 +6,9 @@ import (
|
|||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/mitchellh/goamz/aws"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
|
@ -67,8 +68,18 @@ func (a *Artifact) Destroy() error {
|
|||
|
||||
for region, imageId := range a.Amis {
|
||||
log.Printf("Deregistering image ID (%s) from region (%s)", imageId, region)
|
||||
regionconn := ec2.New(a.Conn.Auth, aws.Regions[region])
|
||||
if _, err := regionconn.DeregisterImage(imageId); err != nil {
|
||||
|
||||
regionConfig := &aws.Config{
|
||||
Credentials: a.Conn.Config.Credentials,
|
||||
Region: aws.String(region),
|
||||
}
|
||||
sess := session.New(regionConfig)
|
||||
regionConn := ec2.New(sess)
|
||||
|
||||
input := &ec2.DeregisterImageInput{
|
||||
ImageId: &imageId,
|
||||
}
|
||||
if _, err := regionConn.DeregisterImage(input); err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
|
||||
|
@ -79,7 +90,7 @@ func (a *Artifact) Destroy() error {
|
|||
if len(errors) == 1 {
|
||||
return errors[0]
|
||||
} else {
|
||||
return &packer.MultiError{errors}
|
||||
return &packer.MultiError{Errors: errors}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
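Artifact.Destroy above rebuilds an EC2 client per region by pairing the existing credentials with a new region string. The same session.New pattern recurs in step_ami_region_copy.go, step_create_tags.go and step_modify_ami_attributes.go below; a standalone sketch with an illustrative region and environment credentials:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Credentials resolved once (here simply from the environment)...
	creds := credentials.NewEnvCredentials()

	// ...can be reused to build a client pinned to a different region,
	// which is how Destroy reaches copies of the AMI in other regions.
	regionConfig := &aws.Config{
		Credentials: creds,
		Region:      aws.String("eu-west-1"),
	}
	regionConn := ec2.New(session.New(regionConfig))
	_ = regionConn
}
```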
@ -1,10 +1,11 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/packer/template/interpolate"
|
||||
)
|
||||
|
||||
// BlockDevice
|
||||
|
@ -25,74 +26,61 @@ type BlockDevices struct {
|
|||
LaunchMappings []BlockDevice `mapstructure:"launch_block_device_mappings"`
|
||||
}
|
||||
|
||||
func buildBlockDevices(b []BlockDevice) []ec2.BlockDeviceMapping {
|
||||
var blockDevices []ec2.BlockDeviceMapping
|
||||
func buildBlockDevices(b []BlockDevice) []*ec2.BlockDeviceMapping {
|
||||
var blockDevices []*ec2.BlockDeviceMapping
|
||||
|
||||
for _, blockDevice := range b {
|
||||
blockDevices = append(blockDevices, ec2.BlockDeviceMapping{
|
||||
DeviceName: blockDevice.DeviceName,
|
||||
VirtualName: blockDevice.VirtualName,
|
||||
SnapshotId: blockDevice.SnapshotId,
|
||||
VolumeType: blockDevice.VolumeType,
|
||||
VolumeSize: blockDevice.VolumeSize,
|
||||
DeleteOnTermination: blockDevice.DeleteOnTermination,
|
||||
IOPS: blockDevice.IOPS,
|
||||
NoDevice: blockDevice.NoDevice,
|
||||
Encrypted: blockDevice.Encrypted,
|
||||
})
|
||||
mapping := &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String(blockDevice.DeviceName),
|
||||
}
|
||||
|
||||
if blockDevice.NoDevice {
|
||||
mapping.NoDevice = aws.String("")
|
||||
} else if blockDevice.VirtualName != "" {
|
||||
if strings.HasPrefix(blockDevice.VirtualName, "ephemeral") {
|
||||
mapping.VirtualName = aws.String(blockDevice.VirtualName)
|
||||
}
|
||||
} else {
|
||||
ebsBlockDevice := &ec2.EbsBlockDevice{
|
||||
DeleteOnTermination: aws.Bool(blockDevice.DeleteOnTermination),
|
||||
}
|
||||
|
||||
if blockDevice.VolumeType != "" {
|
||||
ebsBlockDevice.VolumeType = aws.String(blockDevice.VolumeType)
|
||||
}
|
||||
|
||||
if blockDevice.VolumeSize > 0 {
|
||||
ebsBlockDevice.VolumeSize = aws.Int64(blockDevice.VolumeSize)
|
||||
}
|
||||
|
||||
// IOPS is only valid for io1 type
|
||||
if blockDevice.VolumeType == "io1" {
|
||||
ebsBlockDevice.Iops = aws.Int64(blockDevice.IOPS)
|
||||
}
|
||||
|
||||
// You cannot specify Encrypted if you specify a Snapshot ID
|
||||
if blockDevice.SnapshotId != "" {
|
||||
ebsBlockDevice.SnapshotId = aws.String(blockDevice.SnapshotId)
|
||||
} else if blockDevice.Encrypted {
|
||||
ebsBlockDevice.Encrypted = aws.Bool(blockDevice.Encrypted)
|
||||
}
|
||||
|
||||
mapping.Ebs = ebsBlockDevice
|
||||
}
|
||||
|
||||
blockDevices = append(blockDevices, mapping)
|
||||
}
|
||||
return blockDevices
|
||||
}
|
||||
|
||||
func (b *BlockDevices) Prepare(t *packer.ConfigTemplate) []error {
|
||||
if t == nil {
|
||||
var err error
|
||||
t, err = packer.NewConfigTemplate()
|
||||
if err != nil {
|
||||
return []error{err}
|
||||
}
|
||||
}
|
||||
|
||||
lists := map[string][]BlockDevice{
|
||||
"ami_block_device_mappings": b.AMIMappings,
|
||||
"launch_block_device_mappings": b.LaunchMappings,
|
||||
}
|
||||
|
||||
var errs []error
|
||||
for outer, bds := range lists {
|
||||
for i := 0; i < len(bds); i++ {
|
||||
templates := map[string]*string{
|
||||
"device_name": &bds[i].DeviceName,
|
||||
"snapshot_id": &bds[i].SnapshotId,
|
||||
"virtual_name": &bds[i].VirtualName,
|
||||
"volume_type": &bds[i].VolumeType,
|
||||
}
|
||||
|
||||
errs := make([]error, 0)
|
||||
for n, ptr := range templates {
|
||||
var err error
|
||||
*ptr, err = t.Process(*ptr, nil)
|
||||
if err != nil {
|
||||
errs = append(
|
||||
errs, fmt.Errorf(
|
||||
"Error processing %s[%d].%s: %s",
|
||||
outer, i, n, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return errs
|
||||
}
|
||||
|
||||
func (b *BlockDevices) Prepare(ctx *interpolate.Context) []error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BlockDevices) BuildAMIDevices() []ec2.BlockDeviceMapping {
|
||||
func (b *BlockDevices) BuildAMIDevices() []*ec2.BlockDeviceMapping {
|
||||
return buildBlockDevices(b.AMIMappings)
|
||||
}
|
||||
|
||||
func (b *BlockDevices) BuildLaunchDevices() []ec2.BlockDeviceMapping {
|
||||
func (b *BlockDevices) BuildLaunchDevices() []*ec2.BlockDeviceMapping {
|
||||
return buildBlockDevices(b.LaunchMappings)
|
||||
}
|
||||
|
|
|
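buildBlockDevices now emits one of three shapes per mapping: a suppressed device (NoDevice), an ephemeral device (VirtualName), or an EBS device with its own EbsBlockDevice. A hedged sketch of how the exported helpers are consumed; the mapping values are invented:

```go
package common

import "github.com/aws/aws-sdk-go/service/ec2"

// exampleMappings is illustrative only; in a real build these values arrive
// via mapstructure from ami_block_device_mappings and
// launch_block_device_mappings in the template.
func exampleMappings() ([]*ec2.BlockDeviceMapping, []*ec2.BlockDeviceMapping) {
	b := BlockDevices{
		AMIMappings: []BlockDevice{
			{DeviceName: "/dev/sdb", VolumeType: "gp2", VolumeSize: 8, DeleteOnTermination: true},
		},
		LaunchMappings: []BlockDevice{
			{DeviceName: "/dev/sdc", VirtualName: "ephemeral0"},
		},
	}

	// One slice goes into RegisterImageInput.BlockDeviceMappings, the
	// other into the RunInstances request for the build instance.
	return b.BuildAMIDevices(), b.BuildLaunchDevices()
}
```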
@ -1,9 +1,11 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
)
|
||||
|
||||
func TestBlockDevice(t *testing.T) {
|
||||
|
@ -14,22 +16,109 @@ func TestBlockDevice(t *testing.T) {
|
|||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VirtualName: "ephemeral0",
|
||||
SnapshotId: "snap-1234",
|
||||
VolumeType: "standard",
|
||||
VolumeSize: 8,
|
||||
DeleteOnTermination: true,
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
SnapshotId: aws.String("snap-1234"),
|
||||
VolumeType: aws.String("standard"),
|
||||
VolumeSize: aws.Int64(8),
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeSize: 8,
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeSize: aws.Int64(8),
|
||||
DeleteOnTermination: aws.Bool(false),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "io1",
|
||||
VolumeSize: 8,
|
||||
DeleteOnTermination: true,
|
||||
IOPS: 1000,
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeType: aws.String("io1"),
|
||||
VolumeSize: aws.Int64(8),
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
Iops: aws.Int64(1000),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VirtualName: "ephemeral0",
|
||||
SnapshotId: "snap-1234",
|
||||
VolumeType: "standard",
|
||||
VolumeType: "gp2",
|
||||
VolumeSize: 8,
|
||||
DeleteOnTermination: true,
|
||||
IOPS: 1000,
|
||||
Encrypted: true,
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeType: aws.String("gp2"),
|
||||
VolumeSize: aws.Int64(8),
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
Encrypted: aws.Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VolumeType: "standard",
|
||||
DeleteOnTermination: true,
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
Ebs: &ec2.EbsBlockDevice{
|
||||
VolumeType: aws.String("standard"),
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
VirtualName: "ephemeral0",
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
VirtualName: aws.String("ephemeral0"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Config: &BlockDevice{
|
||||
DeviceName: "/dev/sdb",
|
||||
NoDevice: true,
|
||||
},
|
||||
|
||||
Result: &ec2.BlockDeviceMapping{
|
||||
DeviceName: aws.String("/dev/sdb"),
|
||||
NoDevice: aws.String(""),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -40,14 +129,17 @@ func TestBlockDevice(t *testing.T) {
|
|||
LaunchMappings: []BlockDevice{*tc.Config},
|
||||
}
|
||||
|
||||
expected := []ec2.BlockDeviceMapping{*tc.Result}
|
||||
|
||||
if !reflect.DeepEqual(expected, blockDevices.BuildAMIDevices()) {
|
||||
t.Fatalf("bad: %#v", expected)
|
||||
expected := []*ec2.BlockDeviceMapping{tc.Result}
|
||||
got := blockDevices.BuildAMIDevices()
|
||||
if !reflect.DeepEqual(expected, got) {
|
||||
t.Fatalf("Bad block device, \nexpected: %#v\n\ngot: %#v",
|
||||
expected, got)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expected, blockDevices.BuildLaunchDevices()) {
|
||||
t.Fatalf("bad: %#v", expected)
|
||||
t.Fatalf("Bad block device, \nexpected: %#v\n\ngot: %#v",
|
||||
expected,
|
||||
blockDevices.BuildLaunchDevices())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,16 @@
|
|||
package common
|
||||
|
||||
// ValidateRegion returns true if the supplied region is a valid AWS
|
||||
// region and false if it's not.
|
||||
func ValidateRegion(region string) bool {
|
||||
var regions = [11]string{"us-east-1", "us-west-2", "us-west-1", "eu-west-1",
|
||||
"eu-central-1", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1",
|
||||
"sa-east-1", "cn-north-1", "us-gov-west-1"}
|
||||
|
||||
for _, valid := range regions {
|
||||
if region == valid {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
|
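regions.go is a new file in this change and replaces lookups into goamz's aws.Regions map. A quick usage sketch (the rejected region name is made up):

```go
package common

import "fmt"

// exampleValidateRegion shows the two outcomes; "mars-north-1" is made up.
func exampleValidateRegion() error {
	if !ValidateRegion("us-east-1") {
		return fmt.Errorf("unexpected: us-east-1 should be valid")
	}
	if ValidateRegion("mars-north-1") {
		return fmt.Errorf("unexpected: made-up region accepted")
	}
	return nil
}
```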
@ -7,7 +7,8 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/mitchellh/packer/common/uuid"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
"github.com/mitchellh/packer/template/interpolate"
|
||||
)
|
||||
|
||||
// RunConfig contains configuration for running an instance from a source
|
||||
|
@ -15,82 +16,41 @@ import (
|
|||
type RunConfig struct {
|
||||
AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"`
|
||||
AvailabilityZone string `mapstructure:"availability_zone"`
|
||||
EbsOptimized bool `mapstructure:"ebs_optimized"`
|
||||
IamInstanceProfile string `mapstructure:"iam_instance_profile"`
|
||||
InstanceType string `mapstructure:"instance_type"`
|
||||
RunTags map[string]string `mapstructure:"run_tags"`
|
||||
SourceAmi string `mapstructure:"source_ami"`
|
||||
SpotPrice string `mapstructure:"spot_price"`
|
||||
SpotPriceAutoProduct string `mapstructure:"spot_price_auto_product"`
|
||||
RawSSHTimeout string `mapstructure:"ssh_timeout"`
|
||||
SSHUsername string `mapstructure:"ssh_username"`
|
||||
SSHPrivateKeyFile string `mapstructure:"ssh_private_key_file"`
|
||||
SSHPrivateIp bool `mapstructure:"ssh_private_ip"`
|
||||
SSHPort int `mapstructure:"ssh_port"`
|
||||
SecurityGroupId string `mapstructure:"security_group_id"`
|
||||
SecurityGroupIds []string `mapstructure:"security_group_ids"`
|
||||
SubnetId string `mapstructure:"subnet_id"`
|
||||
TemporaryKeyPairName string `mapstructure:"temporary_key_pair_name"`
|
||||
UserData string `mapstructure:"user_data"`
|
||||
UserDataFile string `mapstructure:"user_data_file"`
|
||||
WindowsPasswordTimeout time.Duration `mapstructure:"windows_password_timeout"`
|
||||
VpcId string `mapstructure:"vpc_id"`
|
||||
|
||||
// Unexported fields that are calculated from others
|
||||
sshTimeout time.Duration
|
||||
// Communicator settings
|
||||
Comm communicator.Config `mapstructure:",squash"`
|
||||
SSHKeyPairName string `mapstructure:"ssh_keypair_name"`
|
||||
SSHPrivateIp bool `mapstructure:"ssh_private_ip"`
|
||||
}
|
||||
|
||||
func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {
|
||||
if t == nil {
|
||||
var err error
|
||||
t, err = packer.NewConfigTemplate()
|
||||
if err != nil {
|
||||
return []error{err}
|
||||
}
|
||||
}
|
||||
|
||||
templates := map[string]*string{
|
||||
"iam_instance_profile": &c.IamInstanceProfile,
|
||||
"instance_type": &c.InstanceType,
|
||||
"spot_price": &c.SpotPrice,
|
||||
"spot_price_auto_product": &c.SpotPriceAutoProduct,
|
||||
"ssh_timeout": &c.RawSSHTimeout,
|
||||
"ssh_username": &c.SSHUsername,
|
||||
"ssh_private_key_file": &c.SSHPrivateKeyFile,
|
||||
"source_ami": &c.SourceAmi,
|
||||
"subnet_id": &c.SubnetId,
|
||||
"temporary_key_pair_name": &c.TemporaryKeyPairName,
|
||||
"vpc_id": &c.VpcId,
|
||||
"availability_zone": &c.AvailabilityZone,
|
||||
"user_data": &c.UserData,
|
||||
"user_data_file": &c.UserDataFile,
|
||||
"security_group_id": &c.SecurityGroupId,
|
||||
}
|
||||
|
||||
errs := make([]error, 0)
|
||||
for n, ptr := range templates {
|
||||
var err error
|
||||
*ptr, err = t.Process(*ptr, nil)
|
||||
if err != nil {
|
||||
errs = append(
|
||||
errs, fmt.Errorf("Error processing %s: %s", n, err))
|
||||
}
|
||||
}
|
||||
|
||||
// Defaults
|
||||
if c.SSHPort == 0 {
|
||||
c.SSHPort = 22
|
||||
}
|
||||
|
||||
if c.RawSSHTimeout == "" {
|
||||
c.RawSSHTimeout = "5m"
|
||||
}
|
||||
|
||||
if c.TemporaryKeyPairName == "" {
|
||||
func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
|
||||
// if we are not given an explicit keypairname, create a temporary one
|
||||
if c.SSHKeyPairName == "" {
|
||||
c.TemporaryKeyPairName = fmt.Sprintf(
|
||||
"packer %s", uuid.TimeOrderedUUID())
|
||||
}
|
||||
|
||||
if c.WindowsPasswordTimeout == 0 {
|
||||
c.WindowsPasswordTimeout = 10 * time.Minute
|
||||
}
|
||||
|
||||
// Validation
|
||||
var err error
|
||||
errs := c.Comm.Prepare(ctx)
|
||||
if c.SourceAmi == "" {
|
||||
errs = append(errs, errors.New("A source_ami must be specified"))
|
||||
}
|
||||
|
@ -106,10 +66,6 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {
|
|||
}
|
||||
}
|
||||
|
||||
if c.SSHUsername == "" {
|
||||
errs = append(errs, errors.New("An ssh_username must be specified"))
|
||||
}
|
||||
|
||||
if c.UserData != "" && c.UserDataFile != "" {
|
||||
errs = append(errs, fmt.Errorf("Only one of user_data or user_data_file can be specified."))
|
||||
} else if c.UserDataFile != "" {
|
||||
|
@ -127,50 +83,5 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {
|
|||
}
|
||||
}
|
||||
|
||||
sliceTemplates := map[string][]string{
|
||||
"security_group_ids": c.SecurityGroupIds,
|
||||
}
|
||||
|
||||
for n, slice := range sliceTemplates {
|
||||
for i, elem := range slice {
|
||||
var err error
|
||||
slice[i], err = t.Process(elem, nil)
|
||||
if err != nil {
|
||||
errs = append(
|
||||
errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newTags := make(map[string]string)
|
||||
for k, v := range c.RunTags {
|
||||
k, err := t.Process(k, nil)
|
||||
if err != nil {
|
||||
errs = append(errs,
|
||||
fmt.Errorf("Error processing tag key %s: %s", k, err))
|
||||
continue
|
||||
}
|
||||
|
||||
v, err := t.Process(v, nil)
|
||||
if err != nil {
|
||||
errs = append(errs,
|
||||
fmt.Errorf("Error processing tag value '%s': %s", v, err))
|
||||
continue
|
||||
}
|
||||
|
||||
newTags[k] = v
|
||||
}
|
||||
|
||||
c.RunTags = newTags
|
||||
|
||||
c.sshTimeout, err = time.ParseDuration(c.RawSSHTimeout)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err))
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func (c *RunConfig) SSHTimeout() time.Duration {
|
||||
return c.sshTimeout
|
||||
}
|
||||
|
|
|
@ -4,6 +4,8 @@ import (
|
|||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -19,7 +21,10 @@ func testConfig() *RunConfig {
|
|||
return &RunConfig{
|
||||
SourceAmi: "abcd",
|
||||
InstanceType: "m1.small",
|
||||
SSHUsername: "root",
|
||||
|
||||
Comm: communicator.Config{
|
||||
SSHUsername: "foo",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -62,41 +67,28 @@ func TestRunConfigPrepare_SpotAuto(t *testing.T) {
|
|||
|
||||
func TestRunConfigPrepare_SSHPort(t *testing.T) {
|
||||
c := testConfig()
|
||||
c.SSHPort = 0
|
||||
c.Comm.SSHPort = 0
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if c.SSHPort != 22 {
|
||||
t.Fatalf("invalid value: %d", c.SSHPort)
|
||||
if c.Comm.SSHPort != 22 {
|
||||
t.Fatalf("invalid value: %d", c.Comm.SSHPort)
|
||||
}
|
||||
|
||||
c.SSHPort = 44
|
||||
c.Comm.SSHPort = 44
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if c.SSHPort != 44 {
|
||||
t.Fatalf("invalid value: %d", c.SSHPort)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_SSHTimeout(t *testing.T) {
|
||||
c := testConfig()
|
||||
c.RawSSHTimeout = ""
|
||||
if err := c.Prepare(nil); len(err) != 0 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
c.RawSSHTimeout = "bad"
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("err: %s", err)
|
||||
if c.Comm.SSHPort != 44 {
|
||||
t.Fatalf("invalid value: %d", c.Comm.SSHPort)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigPrepare_SSHUsername(t *testing.T) {
|
||||
c := testConfig()
|
||||
c.SSHUsername = ""
|
||||
c.Comm.SSHUsername = ""
|
||||
if err := c.Prepare(nil); len(err) != 1 {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
|
|
@ -1,42 +1,45 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"code.google.com/p/go.crypto/ssh"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// SSHAddress returns a function that can be given to the SSH communicator
|
||||
// SSHHost returns a function that can be given to the SSH communicator
|
||||
// for determining the SSH address based on the instance DNS name.
|
||||
func SSHAddress(e *ec2.EC2, port int, private bool) func(multistep.StateBag) (string, error) {
|
||||
func SSHHost(e *ec2.EC2, private bool) func(multistep.StateBag) (string, error) {
|
||||
return func(state multistep.StateBag) (string, error) {
|
||||
for j := 0; j < 2; j++ {
|
||||
var host string
|
||||
i := state.Get("instance").(*ec2.Instance)
|
||||
if i.VpcId != "" {
|
||||
if i.PublicIpAddress != "" && !private {
|
||||
host = i.PublicIpAddress
|
||||
if i.VpcId != nil && *i.VpcId != "" {
|
||||
if i.PublicIpAddress != nil && *i.PublicIpAddress != "" && !private {
|
||||
host = *i.PublicIpAddress
|
||||
} else {
|
||||
host = i.PrivateIpAddress
|
||||
host = *i.PrivateIpAddress
|
||||
}
|
||||
} else if i.DNSName != "" {
|
||||
host = i.DNSName
|
||||
} else if i.PublicDnsName != nil && *i.PublicDnsName != "" {
|
||||
host = *i.PublicDnsName
|
||||
}
|
||||
|
||||
if host != "" {
|
||||
return fmt.Sprintf("%s:%d", host, port), nil
|
||||
return host, nil
|
||||
}
|
||||
|
||||
r, err := e.Instances([]string{i.InstanceId}, ec2.NewFilter())
|
||||
r, err := e.DescribeInstances(&ec2.DescribeInstancesInput{
|
||||
InstanceIds: []*string{i.InstanceId},
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(r.Reservations) == 0 || len(r.Reservations[0].Instances) == 0 {
|
||||
return "", fmt.Errorf("instance not found: %s", i.InstanceId)
|
||||
return "", fmt.Errorf("instance not found: %s", *i.InstanceId)
|
||||
}
|
||||
|
||||
state.Put("instance", &r.Reservations[0].Instances[0])
|
||||
|
|
|
@ -3,13 +3,15 @@ package common
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
)
|
||||
|
||||
// StateRefreshFunc is a function type used for StateChangeConf that is
|
||||
|
@ -36,9 +38,11 @@ type StateChangeConf struct {
|
|||
// an AMI for state changes.
|
||||
func AMIStateRefreshFunc(conn *ec2.EC2, imageId string) StateRefreshFunc {
|
||||
return func() (interface{}, string, error) {
|
||||
resp, err := conn.Images([]string{imageId}, ec2.NewFilter())
|
||||
resp, err := conn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
ImageIds: []*string{&imageId},
|
||||
})
|
||||
if err != nil {
|
||||
if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidAMIID.NotFound" {
|
||||
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" {
|
||||
// Set this to nil as if we didn't find anything.
|
||||
resp = nil
|
||||
} else if isTransientNetworkError(err) {
|
||||
|
@ -57,17 +61,19 @@ func AMIStateRefreshFunc(conn *ec2.EC2, imageId string) StateRefreshFunc {
|
|||
}
|
||||
|
||||
i := resp.Images[0]
|
||||
return i, i.State, nil
|
||||
return i, *i.State, nil
|
||||
}
|
||||
}
|
||||
|
||||
// InstanceStateRefreshFunc returns a StateRefreshFunc that is used to watch
|
||||
// an EC2 instance.
|
||||
func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc {
|
||||
func InstanceStateRefreshFunc(conn *ec2.EC2, instanceId string) StateRefreshFunc {
|
||||
return func() (interface{}, string, error) {
|
||||
resp, err := conn.Instances([]string{i.InstanceId}, ec2.NewFilter())
|
||||
resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{
|
||||
InstanceIds: []*string{&instanceId},
|
||||
})
|
||||
if err != nil {
|
||||
if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidInstanceID.NotFound" {
|
||||
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" {
|
||||
// Set this to nil as if we didn't find anything.
|
||||
resp = nil
|
||||
} else if isTransientNetworkError(err) {
|
||||
|
@ -85,8 +91,8 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc {
|
|||
return nil, "", nil
|
||||
}
|
||||
|
||||
i = &resp.Reservations[0].Instances[0]
|
||||
return i, i.State.Name, nil
|
||||
i := resp.Reservations[0].Instances[0]
|
||||
return i, *i.State.Name, nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -94,9 +100,12 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc {
|
|||
// a spot request for state changes.
|
||||
func SpotRequestStateRefreshFunc(conn *ec2.EC2, spotRequestId string) StateRefreshFunc {
|
||||
return func() (interface{}, string, error) {
|
||||
resp, err := conn.DescribeSpotRequests([]string{spotRequestId}, ec2.NewFilter())
|
||||
resp, err := conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{
|
||||
SpotInstanceRequestIds: []*string{&spotRequestId},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidSpotInstanceRequestID.NotFound" {
|
||||
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSpotInstanceRequestID.NotFound" {
|
||||
// Set this to nil as if we didn't find anything.
|
||||
resp = nil
|
||||
} else if isTransientNetworkError(err) {
|
||||
|
@ -108,14 +117,14 @@ func SpotRequestStateRefreshFunc(conn *ec2.EC2, spotRequestId string) StateRefre
|
|||
}
|
||||
}
|
||||
|
||||
if resp == nil || len(resp.SpotRequestResults) == 0 {
|
||||
if resp == nil || len(resp.SpotInstanceRequests) == 0 {
|
||||
// Sometimes AWS has consistency issues and doesn't see the
|
||||
// SpotRequest. Return an empty state.
|
||||
return nil, "", nil
|
||||
}
|
||||
|
||||
i := resp.SpotRequestResults[0]
|
||||
return i, i.State, nil
|
||||
i := resp.SpotInstanceRequests[0]
|
||||
return i, *i.State, nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -172,8 +181,6 @@ func WaitForState(conf *StateChangeConf) (i interface{}, err error) {
|
|||
|
||||
time.Sleep(time.Duration(sleepSeconds) * time.Second)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func isTransientNetworkError(err error) bool {
|
||||
|
|
|
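All three refresh functions above plug into the same StateChangeConf/WaitForState polling loop. A minimal sketch of the call pattern the builder steps in this diff use; the wrapper function itself is hypothetical:

```go
package common

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/mitchellh/multistep"
)

// waitForAMI is illustrative only; the real steps build the conf inline.
func waitForAMI(conn *ec2.EC2, imageId string, state multistep.StateBag) error {
	stateChange := StateChangeConf{
		Pending:   []string{"pending"},
		Target:    "available",
		Refresh:   AMIStateRefreshFunc(conn, imageId),
		StepState: state,
	}
	if _, err := WaitForState(&stateChange); err != nil {
		return fmt.Errorf("Error waiting for AMI (%s): %s", imageId, err)
	}
	return nil
}
```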
@ -2,22 +2,28 @@ package common
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/aws"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
|
||||
"sync"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type StepAMIRegionCopy struct {
|
||||
AccessConfig *AccessConfig
|
||||
Regions []string
|
||||
Name string
|
||||
}
|
||||
|
||||
func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
amis := state.Get("amis").(map[string]string)
|
||||
ami := amis[ec2conn.Region.Name]
|
||||
ami := amis[*ec2conn.Config.Region]
|
||||
|
||||
if len(s.Regions) == 0 {
|
||||
return multistep.ActionContinue
|
||||
|
@ -29,13 +35,18 @@ func (s *StepAMIRegionCopy) Run(state multistep.StateBag) multistep.StepAction {
|
|||
var wg sync.WaitGroup
|
||||
errs := new(packer.MultiError)
|
||||
for _, region := range s.Regions {
|
||||
if region == *ec2conn.Config.Region {
|
||||
ui.Message(fmt.Sprintf(
|
||||
"Avoiding copying AMI to duplicate region %s", region))
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
ui.Message(fmt.Sprintf("Copying to: %s", region))
|
||||
|
||||
go func(region string) {
|
||||
defer wg.Done()
|
||||
id, err := amiRegionCopy(state, ec2conn.Auth, ami,
|
||||
aws.Regions[region], ec2conn.Region)
|
||||
id, err := amiRegionCopy(state, s.AccessConfig, s.Name, ami, region, *ec2conn.Config.Region)
|
||||
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
@ -67,32 +78,41 @@ func (s *StepAMIRegionCopy) Cleanup(state multistep.StateBag) {
|
|||
|
||||
// amiRegionCopy does a copy for the given AMI to the target region and
|
||||
// returns the resulting ID or error.
|
||||
func amiRegionCopy(state multistep.StateBag, auth aws.Auth, imageId string,
|
||||
target aws.Region, source aws.Region) (string, error) {
|
||||
func amiRegionCopy(state multistep.StateBag, config *AccessConfig, name string, imageId string,
|
||||
target string, source string) (string, error) {
|
||||
|
||||
// Connect to the region where the AMI will be copied to
|
||||
regionconn := ec2.New(auth, target)
|
||||
resp, err := regionconn.CopyImage(&ec2.CopyImage{
|
||||
SourceRegion: source.Name,
|
||||
SourceImageId: imageId,
|
||||
awsConfig, err := config.Config()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
awsConfig.Region = aws.String(target)
|
||||
|
||||
sess := session.New(awsConfig)
|
||||
regionconn := ec2.New(sess)
|
||||
|
||||
resp, err := regionconn.CopyImage(&ec2.CopyImageInput{
|
||||
SourceRegion: &source,
|
||||
SourceImageId: &imageId,
|
||||
Name: &name,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error Copying AMI (%s) to region (%s): %s",
|
||||
imageId, target.Name, err)
|
||||
imageId, target, err)
|
||||
}
|
||||
|
||||
stateChange := StateChangeConf{
|
||||
Pending: []string{"pending"},
|
||||
Target: "available",
|
||||
Refresh: AMIStateRefreshFunc(regionconn, resp.ImageId),
|
||||
Refresh: AMIStateRefreshFunc(regionconn, *resp.ImageId),
|
||||
StepState: state,
|
||||
}
|
||||
|
||||
if _, err := WaitForState(&stateChange); err != nil {
|
||||
return "", fmt.Errorf("Error waiting for AMI (%s) in region (%s): %s",
|
||||
resp.ImageId, target.Name, err)
|
||||
*resp.ImageId, target, err)
|
||||
}
|
||||
|
||||
return resp.ImageId, nil
|
||||
return *resp.ImageId, nil
|
||||
}
|
||||
|
|
|
@ -2,8 +2,10 @@ package common
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/aws"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
@ -21,16 +23,61 @@ func (s *StepCreateTags) Run(state multistep.StateBag) multistep.StepAction {
|
|||
for region, ami := range amis {
|
||||
ui.Say(fmt.Sprintf("Adding tags to AMI (%s)...", ami))
|
||||
|
||||
var ec2Tags []ec2.Tag
|
||||
var ec2Tags []*ec2.Tag
|
||||
for key, value := range s.Tags {
|
||||
ui.Message(fmt.Sprintf("Adding tag: \"%s\": \"%s\"", key, value))
|
||||
ec2Tags = append(ec2Tags, ec2.Tag{key, value})
|
||||
ec2Tags = append(ec2Tags, &ec2.Tag{
|
||||
Key: aws.String(key),
|
||||
Value: aws.String(value),
|
||||
})
|
||||
}
|
||||
|
||||
regionconn := ec2.New(ec2conn.Auth, aws.Regions[region])
|
||||
_, err := regionconn.CreateTags([]string{ami}, ec2Tags)
|
||||
// Declare list of resources to tag
|
||||
resourceIds := []*string{&ami}
|
||||
awsConfig := aws.Config{
|
||||
Credentials: ec2conn.Config.Credentials,
|
||||
Region: aws.String(region),
|
||||
}
|
||||
session := session.New(&awsConfig)
|
||||
|
||||
regionconn := ec2.New(session)
|
||||
|
||||
// Retrieve image list for given AMI
|
||||
imageResp, err := regionconn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
ImageIds: resourceIds,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error adding tags to AMI (%s): %s", ami, err)
|
||||
err := fmt.Errorf("Error retrieving details for AMI (%s): %s", ami, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
if len(imageResp.Images) == 0 {
|
||||
err := fmt.Errorf("Error retrieving details for AMI (%s), no images found", ami)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
image := imageResp.Images[0]
|
||||
|
||||
// Add only those with a Snapshot ID, i.e. not Ephemeral
|
||||
for _, device := range image.BlockDeviceMappings {
|
||||
if device.Ebs != nil && device.Ebs.SnapshotId != nil {
|
||||
ui.Say(fmt.Sprintf("Tagging snapshot: %s", *device.Ebs.SnapshotId))
|
||||
resourceIds = append(resourceIds, device.Ebs.SnapshotId)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = regionconn.CreateTags(&ec2.CreateTagsInput{
|
||||
Resources: resourceIds,
|
||||
Tags: ec2Tags,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error adding tags to Resources (%#v): %s", resourceIds, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
|
|
|
@ -0,0 +1,56 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
type StepDeregisterAMI struct {
|
||||
ForceDeregister bool
|
||||
AMIName string
|
||||
}
|
||||
|
||||
func (s *StepDeregisterAMI) Run(state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
// check for force deregister
|
||||
if s.ForceDeregister {
|
||||
resp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
Filters: []*ec2.Filter{&ec2.Filter{
|
||||
Name: aws.String("name"),
|
||||
Values: []*string{aws.String(s.AMIName)},
|
||||
}}})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error querying AMI: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// deregister image(s) by that name
|
||||
for _, i := range resp.Images {
|
||||
_, err := ec2conn.DeregisterImage(&ec2.DeregisterImageInput{
|
||||
ImageId: i.ImageId,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error deregistering existing AMI: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
ui.Say(fmt.Sprintf("Deregistered AMI %s, id: %s", s.AMIName, *i.ImageId))
|
||||
}
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepDeregisterAMI) Cleanup(state multistep.StateBag) {
|
||||
}
|
|
@ -0,0 +1,167 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
// StepGetPassword reads the password from a Windows server and sets it
|
||||
// on the WinRM config.
|
||||
type StepGetPassword struct {
|
||||
Debug bool
|
||||
Comm *communicator.Config
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
func (s *StepGetPassword) Run(state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
// Skip if we're not using winrm
|
||||
if s.Comm.Type != "winrm" {
|
||||
log.Printf("[INFO] Not using winrm communicator, skipping get password...")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// If we already have a password, skip it
|
||||
if s.Comm.WinRMPassword != "" {
|
||||
ui.Say("Skipping waiting for password since WinRM password set...")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Get the password
|
||||
var password string
|
||||
var err error
|
||||
cancel := make(chan struct{})
|
||||
waitDone := make(chan bool, 1)
|
||||
go func() {
|
||||
ui.Say("Waiting for auto-generated password for instance...")
|
||||
ui.Message(
|
||||
"It is normal for this process to take up to 15 minutes,\n" +
|
||||
"but it usually takes around 5. Please wait.")
|
||||
password, err = s.waitForPassword(state, cancel)
|
||||
waitDone <- true
|
||||
}()
|
||||
|
||||
timeout := time.After(s.Timeout)
|
||||
WaitLoop:
|
||||
for {
|
||||
// Wait for either SSH to become available, a timeout to occur,
|
||||
// or an interrupt to come through.
|
||||
select {
|
||||
case <-waitDone:
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf("Error waiting for password: %s", err))
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
ui.Message(fmt.Sprintf(" \nPassword retrieved!"))
|
||||
s.Comm.WinRMPassword = password
|
||||
break WaitLoop
|
||||
case <-timeout:
|
||||
err := fmt.Errorf("Timeout waiting for password.")
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
close(cancel)
|
||||
return multistep.ActionHalt
|
||||
case <-time.After(1 * time.Second):
|
||||
if _, ok := state.GetOk(multistep.StateCancelled); ok {
|
||||
// The step sequence was cancelled, so cancel waiting for password
|
||||
// and just start the halting process.
|
||||
close(cancel)
|
||||
log.Println("[WARN] Interrupt detected, quitting waiting for password.")
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// In debug-mode, we output the password
|
||||
if s.Debug {
|
||||
ui.Message(fmt.Sprintf(
|
||||
"Password (since debug is enabled): %s", s.Comm.WinRMPassword))
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepGetPassword) Cleanup(multistep.StateBag) {}
|
||||
|
||||
func (s *StepGetPassword) waitForPassword(state multistep.StateBag, cancel <-chan struct{}) (string, error) {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
instance := state.Get("instance").(*ec2.Instance)
|
||||
privateKey := state.Get("privateKey").(string)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-cancel:
|
||||
log.Println("[INFO] Retrieve password wait cancelled. Exiting loop.")
|
||||
return "", errors.New("Retrieve password wait cancelled")
|
||||
case <-time.After(5 * time.Second):
|
||||
}
|
||||
|
||||
resp, err := ec2conn.GetPasswordData(&ec2.GetPasswordDataInput{
|
||||
InstanceId: instance.InstanceId,
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error retrieving auto-generated instance password: %s", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if resp.PasswordData != nil && *resp.PasswordData != "" {
|
||||
decryptedPassword, err := decryptPasswordDataWithPrivateKey(
|
||||
*resp.PasswordData, []byte(privateKey))
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error decrypting auto-generated instance password: %s", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
return decryptedPassword, nil
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Password is blank, will retry...")
|
||||
}
|
||||
}
|
||||
|
||||
func decryptPasswordDataWithPrivateKey(passwordData string, pemBytes []byte) (string, error) {
|
||||
encryptedPasswd, err := base64.StdEncoding.DecodeString(passwordData)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
block, _ := pem.Decode(pemBytes)
|
||||
var asn1Bytes []byte
|
||||
if _, ok := block.Headers["DEK-Info"]; ok {
|
||||
return "", errors.New("encrypted private key isn't yet supported")
|
||||
/*
|
||||
asn1Bytes, err = x509.DecryptPEMBlock(block, password)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
*/
|
||||
} else {
|
||||
asn1Bytes = block.Bytes
|
||||
}
|
||||
|
||||
key, err := x509.ParsePKCS1PrivateKey(asn1Bytes)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
out, err := rsa.DecryptPKCS1v15(nil, key, encryptedPasswd)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(out), nil
|
||||
}
|
|
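StepGetPassword races the password poll against a timeout and a user interrupt by running it in a goroutine with cancel/waitDone channels. The same structure, reduced to a self-contained sketch; the worker here just sleeps and the durations are arbitrary:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	cancel := make(chan struct{})
	waitDone := make(chan bool, 1)

	var result string
	var err error
	go func() {
		// Stand-in for waitForPassword: block until a value arrives or
		// the cancel channel is closed.
		select {
		case <-cancel:
			err = errors.New("wait cancelled")
		case <-time.After(2 * time.Second):
			result = "hunter2"
		}
		waitDone <- true
	}()

	select {
	case <-waitDone:
		fmt.Println("got:", result, err)
	case <-time.After(10 * time.Second):
		close(cancel) // tell the worker to stop, then give up
		fmt.Println("timed out")
	}
}
```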
@ -2,17 +2,19 @@ package common
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
type StepKeyPair struct {
|
||||
Debug bool
|
||||
DebugKeyPath string
|
||||
TemporaryKeyPairName string
|
||||
KeyPairName string
|
||||
PrivateKeyFile string
|
||||
|
||||
|
@ -21,15 +23,14 @@ type StepKeyPair struct {
|
|||
|
||||
func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
|
||||
if s.PrivateKeyFile != "" {
|
||||
s.keyName = ""
|
||||
|
||||
privateKeyBytes, err := ioutil.ReadFile(s.PrivateKeyFile)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error loading configured private key file: %s", err))
|
||||
state.Put("error", fmt.Errorf(
|
||||
"Error loading configured private key file: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
state.Put("keyPair", "")
|
||||
state.Put("keyPair", s.KeyPairName)
|
||||
state.Put("privateKey", string(privateKeyBytes))
|
||||
|
||||
return multistep.ActionContinue
|
||||
|
@ -38,19 +39,20 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
|
|||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.KeyPairName))
|
||||
keyResp, err := ec2conn.CreateKeyPair(s.KeyPairName)
|
||||
ui.Say(fmt.Sprintf("Creating temporary keypair: %s", s.TemporaryKeyPairName))
|
||||
keyResp, err := ec2conn.CreateKeyPair(&ec2.CreateKeyPairInput{
|
||||
KeyName: &s.TemporaryKeyPairName})
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Set the keyname so we know to delete it later
|
||||
s.keyName = s.KeyPairName
|
||||
s.keyName = s.TemporaryKeyPairName
|
||||
|
||||
// Set some state data for use in future steps
|
||||
state.Put("keyPair", s.keyName)
|
||||
state.Put("privateKey", keyResp.KeyMaterial)
|
||||
state.Put("privateKey", *keyResp.KeyMaterial)
|
||||
|
||||
// If we're in debug mode, output the private key to the working
|
||||
// directory.
|
||||
|
@ -64,7 +66,7 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
|
|||
defer f.Close()
|
||||
|
||||
// Write the key out
|
||||
if _, err := f.Write([]byte(keyResp.KeyMaterial)); err != nil {
|
||||
if _, err := f.Write([]byte(*keyResp.KeyMaterial)); err != nil {
|
||||
state.Put("error", fmt.Errorf("Error saving debug key: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
@ -83,17 +85,28 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
|
|||
|
||||
func (s *StepKeyPair) Cleanup(state multistep.StateBag) {
|
||||
// If no key name is set, then we never created it, so just return
|
||||
if s.keyName == "" {
|
||||
// If we used an SSH private key file, do not go about deleting
|
||||
// keypairs
|
||||
if s.PrivateKeyFile != "" {
|
||||
return
|
||||
}
|
||||
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
// Remove the keypair
|
||||
ui.Say("Deleting temporary keypair...")
|
||||
_, err := ec2conn.DeleteKeyPair(s.keyName)
|
||||
_, err := ec2conn.DeleteKeyPair(&ec2.DeleteKeyPairInput{KeyName: &s.keyName})
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error cleaning up keypair. Please delete the key manually: %s", s.keyName))
|
||||
}
|
||||
|
||||
// Also remove the physical key if we're debugging.
|
||||
if s.Debug {
|
||||
if err := os.Remove(s.DebugKeyPath); err != nil {
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error removing debug key '%s': %s", s.DebugKeyPath, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,8 +2,10 @@ package common
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/aws"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
@ -34,37 +36,69 @@ func (s *StepModifyAMIAttributes) Run(state multistep.StateBag) multistep.StepAc
|
|||
// Construct the modify image attribute requests we're going to make.
|
||||
// We need to make each separately since the EC2 API only allows changing
|
||||
// one type at a time currently.
|
||||
options := make(map[string]*ec2.ModifyImageAttribute)
|
||||
options := make(map[string]*ec2.ModifyImageAttributeInput)
|
||||
if s.Description != "" {
|
||||
options["description"] = &ec2.ModifyImageAttribute{
|
||||
Description: s.Description,
|
||||
options["description"] = &ec2.ModifyImageAttributeInput{
|
||||
Description: &ec2.AttributeValue{Value: &s.Description},
|
||||
}
|
||||
}
|
||||
|
||||
if len(s.Groups) > 0 {
|
||||
options["groups"] = &ec2.ModifyImageAttribute{
|
||||
AddGroups: s.Groups,
|
||||
groups := make([]*string, len(s.Groups))
|
||||
adds := make([]*ec2.LaunchPermission, len(s.Groups))
|
||||
addGroups := &ec2.ModifyImageAttributeInput{
|
||||
LaunchPermission: &ec2.LaunchPermissionModifications{},
|
||||
}
|
||||
|
||||
for i, g := range s.Groups {
|
||||
groups[i] = aws.String(g)
|
||||
adds[i] = &ec2.LaunchPermission{
|
||||
Group: aws.String(g),
|
||||
}
|
||||
}
|
||||
addGroups.UserGroups = groups
|
||||
addGroups.LaunchPermission.Add = adds
|
||||
|
||||
options["groups"] = addGroups
|
||||
}
|
||||
|
||||
if len(s.Users) > 0 {
|
||||
options["users"] = &ec2.ModifyImageAttribute{
|
||||
AddUsers: s.Users,
|
||||
users := make([]*string, len(s.Users))
|
||||
adds := make([]*ec2.LaunchPermission, len(s.Users))
|
||||
for i, u := range s.Users {
|
||||
users[i] = aws.String(u)
|
||||
adds[i] = &ec2.LaunchPermission{UserId: aws.String(u)}
|
||||
}
|
||||
options["users"] = &ec2.ModifyImageAttributeInput{
|
||||
UserIds: users,
|
||||
LaunchPermission: &ec2.LaunchPermissionModifications{
|
||||
Add: adds,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if len(s.ProductCodes) > 0 {
|
||||
options["product codes"] = &ec2.ModifyImageAttribute{
|
||||
ProductCodes: s.ProductCodes,
|
||||
codes := make([]*string, len(s.ProductCodes))
|
||||
for i, c := range s.ProductCodes {
|
||||
codes[i] = &c
|
||||
}
|
||||
options["product codes"] = &ec2.ModifyImageAttributeInput{
|
||||
ProductCodes: codes,
|
||||
}
|
||||
}
|
||||
|
||||
for region, ami := range amis {
|
||||
ui.Say(fmt.Sprintf("Modifying attributes on AMI (%s)...", ami))
|
||||
regionconn := ec2.New(ec2conn.Auth, aws.Regions[region])
|
||||
for name, opts := range options {
|
||||
awsConfig := aws.Config{
|
||||
Credentials: ec2conn.Config.Credentials,
|
||||
Region: aws.String(region),
|
||||
}
|
||||
session := session.New(&awsConfig)
|
||||
regionconn := ec2.New(session)
|
||||
for name, input := range options {
|
||||
ui.Message(fmt.Sprintf("Modifying: %s", name))
|
||||
_, err := regionconn.ModifyImageAttribute(ami, opts)
|
||||
input.ImageId = &ami
|
||||
_, err := regionconn.ModifyImageAttribute(input)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error modify AMI attributes: %s", err)
|
||||
state.Put("error", err)
@ -0,0 +1,53 @@
|
|||
package common

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/mitchellh/multistep"
	"github.com/mitchellh/packer/packer"
)

// StepPreValidate provides an opportunity to pre-validate any configuration for
// the build before actually doing any time consuming work
//
type StepPreValidate struct {
	DestAmiName     string
	ForceDeregister bool
}

func (s *StepPreValidate) Run(state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packer.Ui)
	if s.ForceDeregister {
		ui.Say("Force Deregister flag found, skipping prevalidating AMI Name")
		return multistep.ActionContinue
	}

	ec2conn := state.Get("ec2").(*ec2.EC2)

	ui.Say("Prevalidating AMI Name...")
	resp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
		Filters: []*ec2.Filter{&ec2.Filter{
			Name:   aws.String("name"),
			Values: []*string{aws.String(s.DestAmiName)},
		}}})

	if err != nil {
		err := fmt.Errorf("Error querying AMI: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	if len(resp.Images) > 0 {
		err := fmt.Errorf("Error: name conflicts with an existing AMI: %s", *resp.Images[0].ImageId)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}

func (s *StepPreValidate) Cleanup(multistep.StateBag) {}
@ -1,13 +1,16 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
@ -17,6 +20,7 @@ type StepRunSourceInstance struct {
|
|||
AvailabilityZone string
|
||||
BlockDevices BlockDevices
|
||||
Debug bool
|
||||
EbsOptimized bool
|
||||
ExpectedRootDevice string
|
||||
InstanceType string
|
||||
IamInstanceProfile string
|
||||
|
@ -28,16 +32,21 @@ type StepRunSourceInstance struct {
|
|||
UserData string
|
||||
UserDataFile string
|
||||
|
||||
instance *ec2.Instance
|
||||
spotRequest *ec2.SpotRequestResult
|
||||
instanceId string
|
||||
spotRequest *ec2.SpotInstanceRequest
|
||||
}
|
||||
|
||||
func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepAction {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
keyName := state.Get("keyPair").(string)
|
||||
securityGroupIds := state.Get("securityGroupIds").([]string)
|
||||
tempSecurityGroupIds := state.Get("securityGroupIds").([]string)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
securityGroupIds := make([]*string, len(tempSecurityGroupIds))
|
||||
for i, sg := range tempSecurityGroupIds {
|
||||
securityGroupIds[i] = aws.String(sg)
|
||||
}
|
||||
|
||||
userData := s.UserData
|
||||
if s.UserDataFile != "" {
|
||||
contents, err := ioutil.ReadFile(s.UserDataFile)
|
||||
|
@ -46,16 +55,20 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
|
|||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
userData = string(contents)
|
||||
// Test if it is encoded already, and if not, encode it
|
||||
if _, err := base64.StdEncoding.DecodeString(string(contents)); err != nil {
|
||||
log.Printf("[DEBUG] base64 encoding user data...")
|
||||
contents = []byte(base64.StdEncoding.EncodeToString(contents))
|
||||
}
|
||||
|
||||
securityGroups := make([]ec2.SecurityGroup, len(securityGroupIds))
|
||||
for n, securityGroupId := range securityGroupIds {
|
||||
securityGroups[n] = ec2.SecurityGroup{Id: securityGroupId}
|
||||
userData = string(contents)
|
||||
|
||||
}
|
||||
|
||||
ui.Say("Launching a source AWS instance...")
|
||||
imageResp, err := ec2conn.Images([]string{s.SourceAMI}, ec2.NewFilter())
|
||||
imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
ImageIds: []*string{&s.SourceAMI},
|
||||
})
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("There was a problem with the source AMI: %s", err))
|
||||
return multistep.ActionHalt
|
||||
|
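The hunk above makes user_data handling idempotent: file contents are base64-encoded only when they do not already decode as base64. A minimal standalone sketch of that check follows; the helper name is hypothetical and not part of this diff, and note that a raw payload which happens to be valid base64 would be passed through unchanged.

package main

import (
	"encoding/base64"
	"fmt"
)

// ensureBase64 returns s base64-encoded, unless s already decodes cleanly,
// in which case it is assumed to be encoded user data and returned as-is.
func ensureBase64(s string) string {
	if _, err := base64.StdEncoding.DecodeString(s); err == nil {
		return s
	}
	return base64.StdEncoding.EncodeToString([]byte(s))
}

func main() {
	fmt.Println(ensureBase64("#!/bin/bash\necho hello")) // raw script: gets encoded
	fmt.Println(ensureBase64("SGVsbG8gV29ybGQ="))        // already encoded: unchanged
}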
@ -66,15 +79,16 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
|
|||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
if s.ExpectedRootDevice != "" && imageResp.Images[0].RootDeviceType != s.ExpectedRootDevice {
|
||||
if s.ExpectedRootDevice != "" && *imageResp.Images[0].RootDeviceType != s.ExpectedRootDevice {
|
||||
state.Put("error", fmt.Errorf(
|
||||
"The provided source AMI has an invalid root device type.\n"+
|
||||
"Expected '%s', got '%s'.",
|
||||
s.ExpectedRootDevice, imageResp.Images[0].RootDeviceType))
|
||||
s.ExpectedRootDevice, *imageResp.Images[0].RootDeviceType))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
spotPrice := s.SpotPrice
|
||||
availabilityZone := s.AvailabilityZone
|
||||
if spotPrice == "auto" {
|
||||
ui.Message(fmt.Sprintf(
|
||||
"Finding spot price for %s %s...",
|
||||
|
@ -82,11 +96,11 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
|
|||
|
||||
// Detect the spot price
|
||||
startTime := time.Now().Add(-1 * time.Hour)
|
||||
resp, err := ec2conn.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistory{
|
||||
InstanceType: []string{s.InstanceType},
|
||||
ProductDescription: []string{s.SpotPriceProduct},
|
||||
AvailabilityZone: s.AvailabilityZone,
|
||||
StartTime: startTime,
|
||||
resp, err := ec2conn.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistoryInput{
|
||||
InstanceTypes: []*string{&s.InstanceType},
|
||||
ProductDescriptions: []*string{&s.SpotPriceProduct},
|
||||
AvailabilityZone: &s.AvailabilityZone,
|
||||
StartTime: &startTime,
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error finding spot price: %s", err)
|
||||
|
@ -96,15 +110,18 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
|
|||
}
|
||||
|
||||
var price float64
|
||||
for _, history := range resp.History {
|
||||
log.Printf("[INFO] Candidate spot price: %s", history.SpotPrice)
|
||||
current, err := strconv.ParseFloat(history.SpotPrice, 64)
|
||||
for _, history := range resp.SpotPriceHistory {
|
||||
log.Printf("[INFO] Candidate spot price: %s", *history.SpotPrice)
|
||||
current, err := strconv.ParseFloat(*history.SpotPrice, 64)
|
||||
if err != nil {
|
||||
log.Printf("[ERR] Error parsing spot price: %s", err)
|
||||
continue
|
||||
}
|
||||
if price == 0 || current < price {
|
||||
price = current
|
||||
if s.AvailabilityZone == "" {
|
||||
availabilityZone = *history.AvailabilityZone
|
||||
}
|
||||
}
|
||||
}
|
||||
if price == 0 {
|
||||
|
@ -119,21 +136,35 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
|
|||
|
||||
var instanceId string
|
||||
|
||||
if spotPrice == "" {
|
||||
runOpts := &ec2.RunInstances{
|
||||
KeyName: keyName,
|
||||
ImageId: s.SourceAMI,
|
||||
InstanceType: s.InstanceType,
|
||||
UserData: []byte(userData),
|
||||
MinCount: 0,
|
||||
MaxCount: 0,
|
||||
SecurityGroups: securityGroups,
|
||||
IamInstanceProfile: s.IamInstanceProfile,
|
||||
SubnetId: s.SubnetId,
|
||||
AssociatePublicIpAddress: s.AssociatePublicIpAddress,
|
||||
BlockDevices: s.BlockDevices.BuildLaunchDevices(),
|
||||
AvailZone: s.AvailabilityZone,
|
||||
if spotPrice == "" || spotPrice == "0" {
|
||||
runOpts := &ec2.RunInstancesInput{
|
||||
KeyName: &keyName,
|
||||
ImageId: &s.SourceAMI,
|
||||
InstanceType: &s.InstanceType,
|
||||
UserData: &userData,
|
||||
MaxCount: aws.Int64(1),
|
||||
MinCount: aws.Int64(1),
|
||||
IamInstanceProfile: &ec2.IamInstanceProfileSpecification{Name: &s.IamInstanceProfile},
|
||||
BlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(),
|
||||
Placement: &ec2.Placement{AvailabilityZone: &s.AvailabilityZone},
|
||||
EbsOptimized: &s.EbsOptimized,
|
||||
}
|
||||
|
||||
if s.SubnetId != "" && s.AssociatePublicIpAddress {
|
||||
runOpts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{
|
||||
&ec2.InstanceNetworkInterfaceSpecification{
|
||||
DeviceIndex: aws.Int64(0),
|
||||
AssociatePublicIpAddress: &s.AssociatePublicIpAddress,
|
||||
SubnetId: &s.SubnetId,
|
||||
Groups: securityGroupIds,
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
runOpts.SubnetId = &s.SubnetId
|
||||
runOpts.SecurityGroupIds = securityGroupIds
|
||||
}
|
||||
|
||||
runResp, err := ec2conn.RunInstances(runOpts)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error launching source instance: %s", err)
|
||||
|
@ -141,26 +172,35 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
|
|||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
instanceId = runResp.Instances[0].InstanceId
|
||||
instanceId = *runResp.Instances[0].InstanceId
|
||||
} else {
|
||||
ui.Message(fmt.Sprintf(
|
||||
"Requesting spot instance '%s' for: %s",
|
||||
s.InstanceType, spotPrice))
|
||||
|
||||
runOpts := &ec2.RequestSpotInstances{
|
||||
SpotPrice: spotPrice,
|
||||
KeyName: keyName,
|
||||
ImageId: s.SourceAMI,
|
||||
InstanceType: s.InstanceType,
|
||||
UserData: []byte(userData),
|
||||
SecurityGroups: securityGroups,
|
||||
IamInstanceProfile: s.IamInstanceProfile,
|
||||
SubnetId: s.SubnetId,
|
||||
AssociatePublicIpAddress: s.AssociatePublicIpAddress,
|
||||
BlockDevices: s.BlockDevices.BuildLaunchDevices(),
|
||||
AvailZone: s.AvailabilityZone,
|
||||
}
|
||||
runSpotResp, err := ec2conn.RequestSpotInstances(runOpts)
|
||||
runSpotResp, err := ec2conn.RequestSpotInstances(&ec2.RequestSpotInstancesInput{
|
||||
SpotPrice: &spotPrice,
|
||||
LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
|
||||
KeyName: &keyName,
|
||||
ImageId: &s.SourceAMI,
|
||||
InstanceType: &s.InstanceType,
|
||||
UserData: &userData,
|
||||
IamInstanceProfile: &ec2.IamInstanceProfileSpecification{Name: &s.IamInstanceProfile},
|
||||
NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
|
||||
&ec2.InstanceNetworkInterfaceSpecification{
|
||||
DeviceIndex: aws.Int64(0),
|
||||
AssociatePublicIpAddress: &s.AssociatePublicIpAddress,
|
||||
SubnetId: &s.SubnetId,
|
||||
Groups: securityGroupIds,
|
||||
DeleteOnTermination: aws.Bool(true),
|
||||
},
|
||||
},
|
||||
Placement: &ec2.SpotPlacement{
|
||||
AvailabilityZone: &availabilityZone,
|
||||
},
|
||||
BlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(),
|
||||
EbsOptimized: &s.EbsOptimized,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error launching source spot instance: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -168,87 +208,87 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
|
|||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
s.spotRequest = &runSpotResp.SpotRequestResults[0]
|
||||
s.spotRequest = runSpotResp.SpotInstanceRequests[0]
|
||||
|
||||
spotRequestId := s.spotRequest.SpotRequestId
|
||||
ui.Message(fmt.Sprintf("Waiting for spot request (%s) to become active...", spotRequestId))
|
||||
spotRequestId := s.spotRequest.SpotInstanceRequestId
|
||||
ui.Message(fmt.Sprintf("Waiting for spot request (%s) to become active...", *spotRequestId))
|
||||
stateChange := StateChangeConf{
|
||||
Pending: []string{"open"},
|
||||
Target: "active",
|
||||
Refresh: SpotRequestStateRefreshFunc(ec2conn, spotRequestId),
|
||||
Refresh: SpotRequestStateRefreshFunc(ec2conn, *spotRequestId),
|
||||
StepState: state,
|
||||
}
|
||||
_, err = WaitForState(&stateChange)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error waiting for spot request (%s) to become ready: %s", spotRequestId, err)
|
||||
err := fmt.Errorf("Error waiting for spot request (%s) to become ready: %s", *spotRequestId, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
spotResp, err := ec2conn.DescribeSpotRequests([]string{spotRequestId}, nil)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error finding spot request (%s): %s", spotRequestId, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
instanceId = spotResp.SpotRequestResults[0].InstanceId
|
||||
}
|
||||
|
||||
instanceResp, err := ec2conn.Instances([]string{instanceId}, nil)
|
||||
spotResp, err := ec2conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{
|
||||
SpotInstanceRequestIds: []*string{spotRequestId},
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error finding source instance (%s): %s", instanceId, err)
|
||||
err := fmt.Errorf("Error finding spot request (%s): %s", *spotRequestId, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
s.instance = &instanceResp.Reservations[0].Instances[0]
|
||||
ui.Message(fmt.Sprintf("Instance ID: %s", s.instance.InstanceId))
|
||||
instanceId = *spotResp.SpotInstanceRequests[0].InstanceId
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Waiting for instance (%s) to become ready...", s.instance.InstanceId))
|
||||
// Set the instance ID so that the cleanup works properly
|
||||
s.instanceId = instanceId
|
||||
|
||||
ui.Message(fmt.Sprintf("Instance ID: %s", instanceId))
|
||||
ui.Say(fmt.Sprintf("Waiting for instance (%v) to become ready...", instanceId))
|
||||
stateChange := StateChangeConf{
|
||||
Pending: []string{"pending"},
|
||||
Target: "running",
|
||||
Refresh: InstanceStateRefreshFunc(ec2conn, s.instance),
|
||||
Refresh: InstanceStateRefreshFunc(ec2conn, instanceId),
|
||||
StepState: state,
|
||||
}
|
||||
latestInstance, err := WaitForState(&stateChange)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", s.instance.InstanceId, err)
|
||||
err := fmt.Errorf("Error waiting for instance (%s) to become ready: %s", instanceId, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
s.instance = latestInstance.(*ec2.Instance)
|
||||
instance := latestInstance.(*ec2.Instance)
|
||||
|
||||
ec2Tags := make([]ec2.Tag, 1, len(s.Tags)+1)
|
||||
ec2Tags[0] = ec2.Tag{"Name", "Packer Builder"}
|
||||
ec2Tags := make([]*ec2.Tag, 1, len(s.Tags)+1)
|
||||
ec2Tags[0] = &ec2.Tag{Key: aws.String("Name"), Value: aws.String("Packer Builder")}
|
||||
for k, v := range s.Tags {
|
||||
ec2Tags = append(ec2Tags, ec2.Tag{k, v})
|
||||
ec2Tags = append(ec2Tags, &ec2.Tag{Key: aws.String(k), Value: aws.String(v)})
|
||||
}
|
||||
|
||||
_, err = ec2conn.CreateTags([]string{s.instance.InstanceId}, ec2Tags)
|
||||
_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
|
||||
Tags: ec2Tags,
|
||||
Resources: []*string{instance.InstanceId},
|
||||
})
|
||||
if err != nil {
|
||||
ui.Message(
|
||||
fmt.Sprintf("Failed to tag a Name on the builder instance: %s", err))
|
||||
}
|
||||
|
||||
if s.Debug {
|
||||
if s.instance.DNSName != "" {
|
||||
ui.Message(fmt.Sprintf("Public DNS: %s", s.instance.DNSName))
|
||||
if instance.PublicDnsName != nil && *instance.PublicDnsName != "" {
|
||||
ui.Message(fmt.Sprintf("Public DNS: %s", *instance.PublicDnsName))
|
||||
}
|
||||
|
||||
if s.instance.PublicIpAddress != "" {
|
||||
ui.Message(fmt.Sprintf("Public IP: %s", s.instance.PublicIpAddress))
|
||||
if instance.PublicIpAddress != nil && *instance.PublicIpAddress != "" {
|
||||
ui.Message(fmt.Sprintf("Public IP: %s", *instance.PublicIpAddress))
|
||||
}
|
||||
|
||||
if s.instance.PrivateIpAddress != "" {
|
||||
ui.Message(fmt.Sprintf("Private IP: %s", s.instance.PrivateIpAddress))
|
||||
if instance.PrivateIpAddress != nil && *instance.PrivateIpAddress != "" {
|
||||
ui.Message(fmt.Sprintf("Private IP: %s", *instance.PrivateIpAddress))
|
||||
}
|
||||
}
|
||||
|
||||
state.Put("instance", s.instance)
|
||||
state.Put("instance", instance)
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
@ -261,13 +301,16 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) {
|
|||
// Cancel the spot request if it exists
|
||||
if s.spotRequest != nil {
|
||||
ui.Say("Cancelling the spot request...")
|
||||
if _, err := ec2conn.CancelSpotRequests([]string{s.spotRequest.SpotRequestId}); err != nil {
|
||||
input := &ec2.CancelSpotInstanceRequestsInput{
|
||||
SpotInstanceRequestIds: []*string{s.spotRequest.SpotInstanceRequestId},
|
||||
}
|
||||
if _, err := ec2conn.CancelSpotInstanceRequests(input); err != nil {
|
||||
ui.Error(fmt.Sprintf("Error cancelling the spot request, may still be around: %s", err))
|
||||
return
|
||||
}
|
||||
stateChange := StateChangeConf{
|
||||
Pending: []string{"active", "open"},
|
||||
Refresh: SpotRequestStateRefreshFunc(ec2conn, s.spotRequest.SpotRequestId),
|
||||
Refresh: SpotRequestStateRefreshFunc(ec2conn, *s.spotRequest.SpotInstanceRequestId),
|
||||
Target: "cancelled",
|
||||
}
|
||||
|
||||
|
@ -276,16 +319,15 @@ func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) {
|
|||
}
|
||||
|
||||
// Terminate the source instance if it exists
|
||||
if s.instance != nil {
|
||||
|
||||
if s.instanceId != "" {
|
||||
ui.Say("Terminating the source AWS instance...")
|
||||
if _, err := ec2conn.TerminateInstances([]string{s.instance.InstanceId}); err != nil {
|
||||
if _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIds: []*string{&s.instanceId}}); err != nil {
|
||||
ui.Error(fmt.Sprintf("Error terminating instance, may still be around: %s", err))
|
||||
return
|
||||
}
|
||||
stateChange := StateChangeConf{
|
||||
Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"},
|
||||
Refresh: InstanceStateRefreshFunc(ec2conn, s.instance),
|
||||
Refresh: InstanceStateRefreshFunc(ec2conn, s.instanceId),
|
||||
Target: "terminated",
|
||||
}
|
|
@ -2,17 +2,20 @@ package common
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/common/uuid"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/common/uuid"
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
type StepSecurityGroup struct {
|
||||
CommConfig *communicator.Config
|
||||
SecurityGroupIds []string
|
||||
SSHPort int
|
||||
VpcId string
|
||||
|
||||
createdGroupId string
|
||||
|
@ -28,44 +31,47 @@ func (s *StepSecurityGroup) Run(state multistep.StateBag) multistep.StepAction {
|
|||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
if s.SSHPort == 0 {
|
||||
panic("SSHPort must be set to a non-zero value.")
|
||||
port := s.CommConfig.Port()
|
||||
if port == 0 {
|
||||
panic("port must be set to a non-zero value.")
|
||||
}
|
||||
|
||||
// Create the group
|
||||
ui.Say("Creating temporary security group for this instance...")
|
||||
groupName := fmt.Sprintf("packer %s", uuid.TimeOrderedUUID())
|
||||
log.Printf("Temporary group name: %s", groupName)
|
||||
group := ec2.SecurityGroup{
|
||||
Name: groupName,
|
||||
Description: "Temporary group for Packer",
|
||||
VpcId: s.VpcId,
|
||||
group := &ec2.CreateSecurityGroupInput{
|
||||
GroupName: &groupName,
|
||||
Description: aws.String("Temporary group for Packer"),
|
||||
VpcId: &s.VpcId,
|
||||
}
|
||||
groupResp, err := ec2conn.CreateSecurityGroup(group)
|
||||
if err != nil {
|
||||
ui.Error(err.Error())
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Set the group ID so we can delete it later
|
||||
s.createdGroupId = groupResp.Id
|
||||
s.createdGroupId = *groupResp.GroupId
|
||||
|
||||
// Authorize the SSH access
|
||||
perms := []ec2.IPPerm{
|
||||
ec2.IPPerm{
|
||||
Protocol: "tcp",
|
||||
FromPort: s.SSHPort,
|
||||
ToPort: s.SSHPort,
|
||||
SourceIPs: []string{"0.0.0.0/0"},
|
||||
},
|
||||
// Authorize the SSH access for the security group
|
||||
req := &ec2.AuthorizeSecurityGroupIngressInput{
|
||||
GroupId: groupResp.GroupId,
|
||||
IpProtocol: aws.String("tcp"),
|
||||
FromPort: aws.Int64(int64(port)),
|
||||
ToPort: aws.Int64(int64(port)),
|
||||
CidrIp: aws.String("0.0.0.0/0"),
|
||||
}
|
||||
|
||||
// We loop and retry this a few times because sometimes the security
|
||||
// group isn't available immediately because AWS resources are eventually
|
||||
// consistent.
|
||||
ui.Say("Authorizing SSH access on the temporary security group...")
|
||||
ui.Say(fmt.Sprintf(
|
||||
"Authorizing access to port %d the temporary security group...",
|
||||
port))
|
||||
for i := 0; i < 5; i++ {
|
||||
_, err = ec2conn.AuthorizeSecurityGroup(groupResp.SecurityGroup, perms)
|
||||
_, err = ec2conn.AuthorizeSecurityGroupIngress(req)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
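The retry loop above exists because a freshly created security group may not yet be visible to AuthorizeSecurityGroupIngress; AWS reads are only eventually consistent. A minimal sketch of that bounded-retry pattern in isolation (the helper name, attempt count, and delay are illustrative assumptions, not values taken from this diff):

package main

import (
	"fmt"
	"time"
)

// retry runs fn up to attempts times, sleeping between tries, and returns the
// last error seen. Useful when a just-created AWS resource is not yet visible
// to subsequent API calls.
func retry(attempts int, delay time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return err
}

func main() {
	err := retry(5, time.Second, func() error {
		// e.g. _, err := ec2conn.AuthorizeSecurityGroupIngress(req)
		return fmt.Errorf("security group not yet visible")
	})
	fmt.Println("gave up:", err)
}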
@ -99,7 +105,7 @@ func (s *StepSecurityGroup) Cleanup(state multistep.StateBag) {
|
|||
|
||||
var err error
|
||||
for i := 0; i < 5; i++ {
|
||||
_, err = ec2conn.DeleteSecurityGroup(ec2.SecurityGroup{Id: s.createdGroupId})
|
||||
_, err = ec2conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{GroupId: &s.createdGroupId})
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
|
|
@ -3,7 +3,7 @@ package common
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
@ -23,7 +23,7 @@ func (s *StepSourceAMIInfo) Run(state multistep.StateBag) multistep.StepAction {
|
|||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
ui.Say("Inspecting the source AMI...")
|
||||
imageResp, err := ec2conn.Images([]string{s.SourceAmi}, ec2.NewFilter())
|
||||
imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{&s.SourceAmi}})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error querying AMI: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -38,11 +38,11 @@ func (s *StepSourceAMIInfo) Run(state multistep.StateBag) multistep.StepAction {
|
|||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
image := &imageResp.Images[0]
|
||||
image := imageResp.Images[0]
|
||||
|
||||
// Enhanced Networking (SriovNetSupport) can only be enabled on HVM AMIs.
|
||||
// See http://goo.gl/icuXh5
|
||||
if s.EnhancedNetworking && image.VirtualizationType != "hvm" {
|
||||
if s.EnhancedNetworking && *image.VirtualizationType != "hvm" {
|
||||
err := fmt.Errorf("Cannot enable enhanced networking, source AMI '%s' is not HVM", s.SourceAmi)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
|
|
|
@ -19,8 +19,10 @@ func isalphanumeric(b byte) bool {
|
|||
}
|
||||
|
||||
// Clean up AMI name by replacing invalid characters with "-"
|
||||
// For allowed characters see docs for Name parameter
|
||||
// at http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateImage.html
|
||||
func templateCleanAMIName(s string) string {
|
||||
allowed := []byte{'(', ')', ',', '/', '-', '_'}
|
||||
allowed := []byte{'(', ')', '[', ']', ' ', '.', '/', '-', '\'', '@', '_'}
|
||||
b := []byte(s)
|
||||
newb := make([]byte, len(b))
|
||||
for i, c := range b {
|
||||
|
|
|
@ -5,8 +5,8 @@ import (
|
|||
)
|
||||
|
||||
func TestAMITemplatePrepare_clean(t *testing.T) {
|
||||
origName := "AMZamz09(),/-_:&^$%"
|
||||
expected := "AMZamz09(),/-_-----"
|
||||
origName := "AMZamz09()./-_:&^ $%[]#'@"
|
||||
expected := "AMZamz09()./-_--- --[]-'@"
|
||||
|
||||
name := templateCleanAMIName(origName)
|
||||
|
||||
|
|
|
@ -9,50 +9,51 @@ import (
|
|||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
awscommon "github.com/mitchellh/packer/builder/amazon/common"
|
||||
"github.com/mitchellh/packer/common"
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
"github.com/mitchellh/packer/helper/config"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"github.com/mitchellh/packer/template/interpolate"
|
||||
)
|
||||
|
||||
// The unique ID for this builder
|
||||
const BuilderId = "mitchellh.amazonebs"
|
||||
|
||||
type config struct {
|
||||
type Config struct {
|
||||
common.PackerConfig `mapstructure:",squash"`
|
||||
awscommon.AccessConfig `mapstructure:",squash"`
|
||||
awscommon.AMIConfig `mapstructure:",squash"`
|
||||
awscommon.BlockDevices `mapstructure:",squash"`
|
||||
awscommon.RunConfig `mapstructure:",squash"`
|
||||
|
||||
tpl *packer.ConfigTemplate
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
type Builder struct {
|
||||
config config
|
||||
config Config
|
||||
runner multistep.Runner
|
||||
}
|
||||
|
||||
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
|
||||
md, err := common.DecodeConfig(&b.config, raws...)
|
||||
b.config.ctx.Funcs = awscommon.TemplateFuncs
|
||||
err := config.Decode(&b.config, &config.DecodeOpts{
|
||||
Interpolate: true,
|
||||
InterpolateContext: &b.config.ctx,
|
||||
}, raws...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b.config.tpl, err = packer.NewConfigTemplate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.config.tpl.UserVars = b.config.PackerUserVars
|
||||
b.config.tpl.Funcs(awscommon.TemplateFuncs)
|
||||
|
||||
// Accumulate any errors
|
||||
errs := common.CheckUnusedConfig(md)
|
||||
errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...)
|
||||
errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(b.config.tpl)...)
|
||||
errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...)
|
||||
errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...)
|
||||
var errs *packer.MultiError
|
||||
errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)
|
||||
errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(&b.config.ctx)...)
|
||||
errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(&b.config.ctx)...)
|
||||
errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)
|
||||
|
||||
if errs != nil && len(errs.Errors) > 0 {
|
||||
return nil, errs
|
||||
|
@ -63,17 +64,13 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
|
|||
}
|
||||
|
||||
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
|
||||
region, err := b.config.Region()
|
||||
config, err := b.config.Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
auth, err := b.config.AccessConfig.Auth()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ec2conn := ec2.New(auth, region)
|
||||
session := session.New(config)
|
||||
ec2conn := ec2.New(session)
|
||||
|
||||
// If the subnet is specified but not the AZ, try to determine the AZ automatically
|
||||
if b.config.SubnetId != "" && b.config.AvailabilityZone == "" {
|
||||
|
@ -95,6 +92,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
|
||||
// Build the steps
|
||||
steps := []multistep.Step{
|
||||
&awscommon.StepPreValidate{
|
||||
DestAmiName: b.config.AMIName,
|
||||
ForceDeregister: b.config.AMIForceDeregister,
|
||||
},
|
||||
&awscommon.StepSourceAMIInfo{
|
||||
SourceAmi: b.config.SourceAmi,
|
||||
EnhancedNetworking: b.config.AMIEnhancedNetworking,
|
||||
|
@ -102,14 +103,18 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
&awscommon.StepKeyPair{
|
||||
Debug: b.config.PackerDebug,
|
||||
DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName),
|
||||
KeyPairName: b.config.TemporaryKeyPairName,
|
||||
PrivateKeyFile: b.config.SSHPrivateKeyFile,
|
||||
KeyPairName: b.config.SSHKeyPairName,
|
||||
TemporaryKeyPairName: b.config.TemporaryKeyPairName,
|
||||
PrivateKeyFile: b.config.RunConfig.Comm.SSHPrivateKey,
|
||||
},
|
||||
&awscommon.StepSecurityGroup{
|
||||
SecurityGroupIds: b.config.SecurityGroupIds,
|
||||
SSHPort: b.config.SSHPort,
|
||||
CommConfig: &b.config.RunConfig.Comm,
|
||||
VpcId: b.config.VpcId,
|
||||
},
|
||||
&stepCleanupVolumes{
|
||||
BlockDevices: b.config.BlockDevices,
|
||||
},
|
||||
&awscommon.StepRunSourceInstance{
|
||||
Debug: b.config.PackerDebug,
|
||||
ExpectedRootDevice: "ebs",
|
||||
|
@ -122,23 +127,37 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
IamInstanceProfile: b.config.IamInstanceProfile,
|
||||
SubnetId: b.config.SubnetId,
|
||||
AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
|
||||
EbsOptimized: b.config.EbsOptimized,
|
||||
AvailabilityZone: b.config.AvailabilityZone,
|
||||
BlockDevices: b.config.BlockDevices,
|
||||
Tags: b.config.RunTags,
|
||||
},
|
||||
&common.StepConnectSSH{
|
||||
SSHAddress: awscommon.SSHAddress(
|
||||
ec2conn, b.config.SSHPort, b.config.SSHPrivateIp),
|
||||
SSHConfig: awscommon.SSHConfig(b.config.SSHUsername),
|
||||
SSHWaitTimeout: b.config.SSHTimeout(),
|
||||
&awscommon.StepGetPassword{
|
||||
Debug: b.config.PackerDebug,
|
||||
Comm: &b.config.RunConfig.Comm,
|
||||
Timeout: b.config.WindowsPasswordTimeout,
|
||||
},
|
||||
&communicator.StepConnect{
|
||||
Config: &b.config.RunConfig.Comm,
|
||||
Host: awscommon.SSHHost(
|
||||
ec2conn,
|
||||
b.config.SSHPrivateIp),
|
||||
SSHConfig: awscommon.SSHConfig(
|
||||
b.config.RunConfig.Comm.SSHUsername),
|
||||
},
|
||||
&common.StepProvision{},
|
||||
&stepStopInstance{SpotPrice: b.config.SpotPrice},
|
||||
// TODO(mitchellh): verify works with spots
|
||||
&stepModifyInstance{},
|
||||
&awscommon.StepDeregisterAMI{
|
||||
ForceDeregister: b.config.AMIForceDeregister,
|
||||
AMIName: b.config.AMIName,
|
||||
},
|
||||
&stepCreateAMI{},
|
||||
&awscommon.StepAMIRegionCopy{
|
||||
AccessConfig: &b.config.AccessConfig,
|
||||
Regions: b.config.AMIRegions,
|
||||
Name: b.config.AMIName,
|
||||
},
|
||||
&awscommon.StepModifyAMIAttributes{
|
||||
Description: b.config.AMIDescription,
|
@ -0,0 +1,227 @@
|
|||
package ebs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/packer/builder/amazon/common"
|
||||
builderT "github.com/mitchellh/packer/helper/builder/testing"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
func TestBuilderAcc_basic(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderAccBasic,
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderAcc_regionCopy(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderAccRegionCopy,
|
||||
Check: checkRegionCopy([]string{"us-east-1", "us-west-2"}),
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderAcc_forceDeregister(t *testing.T) {
|
||||
// Build the same AMI name twice, with force_deregister on the second run
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: buildForceDeregisterConfig("false", "dereg"),
|
||||
SkipArtifactTeardown: true,
|
||||
})
|
||||
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: buildForceDeregisterConfig("true", "dereg"),
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderAcc_amiSharing(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderAccSharing,
|
||||
Check: checkAMISharing(2, "932021504756", "all"),
|
||||
})
|
||||
}
|
||||
|
||||
func checkAMISharing(count int, uid, group string) builderT.TestCheckFunc {
|
||||
return func(artifacts []packer.Artifact) error {
|
||||
if len(artifacts) > 1 {
|
||||
return fmt.Errorf("more than 1 artifact")
|
||||
}
|
||||
|
||||
// Get the actual *Artifact pointer so we can access the AMIs directly
|
||||
artifactRaw := artifacts[0]
|
||||
artifact, ok := artifactRaw.(*common.Artifact)
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown artifact: %#v", artifactRaw)
|
||||
}
|
||||
|
||||
// describe the image, get block devices with a snapshot
|
||||
ec2conn, _ := testEC2Conn()
|
||||
imageResp, err := ec2conn.DescribeImageAttribute(&ec2.DescribeImageAttributeInput{
|
||||
Attribute: aws.String("launchPermission"),
|
||||
ImageId: aws.String(artifact.Amis["us-east-1"]),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error retrieving Image Attributes for AMI Artifact (%#v) in AMI Sharing Test: %s", artifact, err)
|
||||
}
|
||||
|
||||
// Launch Permissions are in addition to the userid that created the AMI, so
// sharing with one ami_user and one ami_group yields the 2 Launch Permissions expected here
|
||||
if len(imageResp.LaunchPermissions) != count {
|
||||
return fmt.Errorf("Error in Image Attributes, expected (%d) Launch Permissions, got (%d)", count, len(imageResp.LaunchPermissions))
|
||||
}
|
||||
|
||||
userFound := false
|
||||
for _, lp := range imageResp.LaunchPermissions {
|
||||
if lp.UserId != nil && uid == *lp.UserId {
|
||||
userFound = true
|
||||
}
|
||||
}
|
||||
|
||||
if !userFound {
|
||||
return fmt.Errorf("Error in Image Attributes, expected User ID (%s) to have Launch Permissions, but was not found", uid)
|
||||
}
|
||||
|
||||
groupFound := false
|
||||
for _, lp := range imageResp.LaunchPermissions {
|
||||
if lp.Group != nil && group == *lp.Group {
|
||||
groupFound = true
|
||||
}
|
||||
}
|
||||
|
||||
if !groupFound {
|
||||
return fmt.Errorf("Error in Image Attributes, expected Group ID (%s) to have Launch Permissions, but was not found", group)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkRegionCopy(regions []string) builderT.TestCheckFunc {
|
||||
return func(artifacts []packer.Artifact) error {
|
||||
if len(artifacts) > 1 {
|
||||
return fmt.Errorf("more than 1 artifact")
|
||||
}
|
||||
|
||||
// Get the actual *Artifact pointer so we can access the AMIs directly
|
||||
artifactRaw := artifacts[0]
|
||||
artifact, ok := artifactRaw.(*common.Artifact)
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown artifact: %#v", artifactRaw)
|
||||
}
|
||||
|
||||
// Verify that we copied to only the regions given
|
||||
regionSet := make(map[string]struct{})
|
||||
for _, r := range regions {
|
||||
regionSet[r] = struct{}{}
|
||||
}
|
||||
for r, _ := range artifact.Amis {
|
||||
if _, ok := regionSet[r]; !ok {
|
||||
return fmt.Errorf("unknown region: %s", r)
|
||||
}
|
||||
|
||||
delete(regionSet, r)
|
||||
}
|
||||
if len(regionSet) > 0 {
|
||||
return fmt.Errorf("didn't copy to: %#v", regionSet)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccPreCheck(t *testing.T) {
|
||||
if v := os.Getenv("AWS_ACCESS_KEY_ID"); v == "" {
|
||||
t.Fatal("AWS_ACCESS_KEY_ID must be set for acceptance tests")
|
||||
}
|
||||
|
||||
if v := os.Getenv("AWS_SECRET_ACCESS_KEY"); v == "" {
|
||||
t.Fatal("AWS_SECRET_ACCESS_KEY must be set for acceptance tests")
|
||||
}
|
||||
}
|
||||
|
||||
func testEC2Conn() (*ec2.EC2, error) {
|
||||
access := &common.AccessConfig{RawRegion: "us-east-1"}
|
||||
config, err := access.Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
session := session.New(config)
|
||||
return ec2.New(session), nil
|
||||
}
|
||||
|
||||
const testBuilderAccBasic = `
|
||||
{
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"instance_type": "m3.medium",
|
||||
"source_ami": "ami-76b2a71e",
|
||||
"ssh_username": "ubuntu",
|
||||
"ami_name": "packer-test {{timestamp}}"
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
const testBuilderAccRegionCopy = `
|
||||
{
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"instance_type": "m3.medium",
|
||||
"source_ami": "ami-76b2a71e",
|
||||
"ssh_username": "ubuntu",
|
||||
"ami_name": "packer-test {{timestamp}}",
|
||||
"ami_regions": ["us-east-1", "us-west-2"]
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
const testBuilderAccForceDeregister = `
|
||||
{
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"instance_type": "m3.medium",
|
||||
"source_ami": "ami-76b2a71e",
|
||||
"ssh_username": "ubuntu",
|
||||
"force_deregister": "%s",
|
||||
"ami_name": "packer-test-%s"
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
// share with catsby
|
||||
const testBuilderAccSharing = `
|
||||
{
|
||||
"builders": [{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"instance_type": "m3.medium",
|
||||
"source_ami": "ami-76b2a71e",
|
||||
"ssh_username": "ubuntu",
|
||||
"ami_name": "packer-test {{timestamp}}",
|
||||
"ami_users":["932021504756"],
|
||||
"ami_groups":["all"]
|
||||
}]
|
||||
}
|
||||
`
|
||||
|
||||
func buildForceDeregisterConfig(flag, name string) string {
	return fmt.Sprintf(testBuilderAccForceDeregister, flag, name)
|
||||
}
|
|
@ -1,8 +1,9 @@
|
|||
package ebs
|
||||
|
||||
import (
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"testing"
|
||||
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
func testConfig() map[string]interface{} {
|
@ -0,0 +1,118 @@
|
|||
package ebs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/builder/amazon/common"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
// stepCleanupVolumes cleans up any orphaned volumes that were not designated to
|
||||
// remain after termination of the instance. These volumes are typically ones
|
||||
// that are marked as "delete on terminate:false" in the source_ami of a build.
|
||||
type stepCleanupVolumes struct {
|
||||
BlockDevices common.BlockDevices
|
||||
}
|
||||
|
||||
func (s *stepCleanupVolumes) Run(state multistep.StateBag) multistep.StepAction {
|
||||
// stepCleanupVolumes is for Cleanup only
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *stepCleanupVolumes) Cleanup(state multistep.StateBag) {
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
instanceRaw := state.Get("instance")
|
||||
var instance *ec2.Instance
|
||||
if instanceRaw != nil {
|
||||
instance = instanceRaw.(*ec2.Instance)
|
||||
}
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
amisRaw := state.Get("amis")
|
||||
if amisRaw == nil {
|
||||
ui.Say("No AMIs to cleanup")
|
||||
return
|
||||
}
|
||||
|
||||
if instance == nil {
|
||||
ui.Say("No volumes to clean up, skipping")
|
||||
return
|
||||
}
|
||||
|
||||
ui.Say("Cleaning up any extra volumes...")
|
||||
|
||||
// We don't actually care about the value here, but we need Set behavior
|
||||
save := make(map[string]struct{})
|
||||
for _, b := range s.BlockDevices.AMIMappings {
|
||||
if !b.DeleteOnTermination {
|
||||
save[b.DeviceName] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for _, b := range s.BlockDevices.LaunchMappings {
|
||||
if !b.DeleteOnTermination {
|
||||
save[b.DeviceName] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Collect Volume information from the cached Instance as a map of volume-id
|
||||
// to device name, to compare with save list above
|
||||
var vl []*string
|
||||
volList := make(map[string]string)
|
||||
for _, bdm := range instance.BlockDeviceMappings {
|
||||
if bdm.Ebs != nil {
|
||||
vl = append(vl, bdm.Ebs.VolumeId)
|
||||
volList[*bdm.Ebs.VolumeId] = *bdm.DeviceName
|
||||
}
|
||||
}
|
||||
|
||||
// Using the volume list from the cached Instance, check with AWS for up to
|
||||
// date information on them
|
||||
resp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesInput{
|
||||
Filters: []*ec2.Filter{
|
||||
&ec2.Filter{
|
||||
Name: aws.String("volume-id"),
|
||||
Values: vl,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
ui.Say(fmt.Sprintf("Error describing volumes: %s", err))
|
||||
return
|
||||
}
|
||||
|
||||
// If any of the returned volumes are in a "deleting" stage or otherwise not
|
||||
// available, remove them from the list of volumes
|
||||
for _, v := range resp.Volumes {
|
||||
if v.State != nil && *v.State != "available" {
|
||||
delete(volList, *v.VolumeId)
|
||||
}
|
||||
}
|
||||
|
||||
if len(resp.Volumes) == 0 {
|
||||
ui.Say("No volumes to clean up, skipping")
|
||||
return
|
||||
}
|
||||
|
||||
// Filter out any devices marked for saving
|
||||
for saveName, _ := range save {
|
||||
for volKey, volName := range volList {
|
||||
if volName == saveName {
|
||||
delete(volList, volKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy remaining volumes
|
||||
for k, _ := range volList {
|
||||
ui.Say(fmt.Sprintf("Destroying volume (%s)...", k))
|
||||
_, err := ec2conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeId: aws.String(k)})
|
||||
if err != nil {
|
||||
ui.Say(fmt.Sprintf("Error deleting volume: %s", k))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
|
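stepCleanupVolumes above builds its save list as a map[string]struct{} because, as its comment notes, only set membership matters, not the values. A small illustration of that Go idiom (the device names here are hypothetical):

package main

import "fmt"

func main() {
	// struct{} values occupy no space; only key membership matters.
	save := make(map[string]struct{})
	save["/dev/sdb"] = struct{}{} // hypothetical device marked delete_on_termination: false

	for _, dev := range []string{"/dev/sda1", "/dev/sdb"} {
		if _, ok := save[dev]; ok {
			fmt.Printf("%s: keep\n", dev)
		} else {
			fmt.Printf("%s: eligible for cleanup\n", dev)
		}
	}
}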
@ -2,7 +2,8 @@ package ebs
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
awscommon "github.com/mitchellh/packer/builder/amazon/common"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
|
@ -13,17 +14,17 @@ type stepCreateAMI struct {
|
|||
}
|
||||
|
||||
func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction {
|
||||
config := state.Get("config").(config)
|
||||
config := state.Get("config").(Config)
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
instance := state.Get("instance").(*ec2.Instance)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
// Create the image
|
||||
ui.Say(fmt.Sprintf("Creating the AMI: %s", config.AMIName))
|
||||
createOpts := &ec2.CreateImage{
|
||||
createOpts := &ec2.CreateImageInput{
|
||||
InstanceId: instance.InstanceId,
|
||||
Name: config.AMIName,
|
||||
BlockDevices: config.BlockDevices.BuildAMIDevices(),
|
||||
Name: &config.AMIName,
|
||||
BlockDeviceMappings: config.BlockDevices.BuildAMIDevices(),
|
||||
}
|
||||
|
||||
createResp, err := ec2conn.CreateImage(createOpts)
|
||||
|
@ -35,16 +36,16 @@ func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction {
|
|||
}
|
||||
|
||||
// Set the AMI ID in the state
|
||||
ui.Message(fmt.Sprintf("AMI: %s", createResp.ImageId))
|
||||
ui.Message(fmt.Sprintf("AMI: %s", *createResp.ImageId))
|
||||
amis := make(map[string]string)
|
||||
amis[ec2conn.Region.Name] = createResp.ImageId
|
||||
amis[*ec2conn.Config.Region] = *createResp.ImageId
|
||||
state.Put("amis", amis)
|
||||
|
||||
// Wait for the image to become ready
|
||||
stateChange := awscommon.StateChangeConf{
|
||||
Pending: []string{"pending"},
|
||||
Target: "available",
|
||||
Refresh: awscommon.AMIStateRefreshFunc(ec2conn, createResp.ImageId),
|
||||
Refresh: awscommon.AMIStateRefreshFunc(ec2conn, *createResp.ImageId),
|
||||
StepState: state,
|
||||
}
|
||||
|
||||
|
@ -56,14 +57,14 @@ func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction {
|
|||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
imagesResp, err := ec2conn.Images([]string{createResp.ImageId}, nil)
|
||||
imagesResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{createResp.ImageId}})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error searching for AMI: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
s.image = &imagesResp.Images[0]
|
||||
s.image = imagesResp.Images[0]
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
@ -83,11 +84,9 @@ func (s *stepCreateAMI) Cleanup(state multistep.StateBag) {
|
|||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
ui.Say("Deregistering the AMI because cancelation or error...")
|
||||
if resp, err := ec2conn.DeregisterImage(s.image.Id); err != nil {
|
||||
deregisterOpts := &ec2.DeregisterImageInput{ImageId: s.image.ImageId}
|
||||
if _, err := ec2conn.DeregisterImage(deregisterOpts); err != nil {
|
||||
ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", err))
|
||||
return
|
||||
} else if resp.Return == false {
|
||||
ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %t", resp.Return))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,7 +3,7 @@ package ebs
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
@ -11,7 +11,7 @@ import (
|
|||
type stepModifyInstance struct{}
|
||||
|
||||
func (s *stepModifyInstance) Run(state multistep.StateBag) multistep.StepAction {
|
||||
config := state.Get("config").(config)
|
||||
config := state.Get("config").(Config)
|
||||
ec2conn := state.Get("ec2").(*ec2.EC2)
|
||||
instance := state.Get("instance").(*ec2.Instance)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
|
@ -19,12 +19,13 @@ func (s *stepModifyInstance) Run(state multistep.StateBag) multistep.StepAction
|
|||
// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
|
||||
if config.AMIEnhancedNetworking {
|
||||
ui.Say("Enabling Enhanced Networking...")
|
||||
_, err := ec2conn.ModifyInstance(
|
||||
instance.InstanceId,
|
||||
&ec2.ModifyInstance{SriovNetSupport: true},
|
||||
)
|
||||
simple := "simple"
|
||||
_, err := ec2conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
|
||||
InstanceId: instance.InstanceId,
|
||||
SriovNetSupport: &ec2.AttributeValue{Value: &simple},
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error enabling Enhanced Networking on %s: %s", instance.InstanceId, err)
|
||||
err := fmt.Errorf("Error enabling Enhanced Networking on %s: %s", *instance.InstanceId, err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
|
|
|
@ -2,7 +2,8 @@ package ebs
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
awscommon "github.com/mitchellh/packer/builder/amazon/common"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
|
@ -18,13 +19,15 @@ func (s *stepStopInstance) Run(state multistep.StateBag) multistep.StepAction {
|
|||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
// Skip when it is a spot instance
|
||||
if s.SpotPrice != "" {
|
||||
if s.SpotPrice != "" && s.SpotPrice != "0" {
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Stop the instance so we can create an AMI from it
|
||||
ui.Say("Stopping the source instance...")
|
||||
_, err := ec2conn.StopInstances(instance.InstanceId)
|
||||
_, err := ec2conn.StopInstances(&ec2.StopInstancesInput{
|
||||
InstanceIds: []*string{instance.InstanceId},
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error stopping instance: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -37,7 +40,7 @@ func (s *stepStopInstance) Run(state multistep.StateBag) multistep.StepAction {
|
|||
stateChange := awscommon.StateChangeConf{
|
||||
Pending: []string{"running", "stopping"},
|
||||
Target: "stopped",
|
||||
Refresh: awscommon.InstanceStateRefreshFunc(ec2conn, instance),
|
||||
Refresh: awscommon.InstanceStateRefreshFunc(ec2conn, *instance.InstanceId),
|
||||
StepState: state,
|
||||
}
|
||||
_, err = awscommon.WaitForState(&stateChange)
@ -0,0 +1,114 @@
|
|||
package ebs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/packer/builder/amazon/common"
|
||||
builderT "github.com/mitchellh/packer/helper/builder/testing"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
func TestBuilderTagsAcc_basic(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Builder: &Builder{},
|
||||
Template: testBuilderTagsAccBasic,
|
||||
Check: checkTags(),
|
||||
})
|
||||
}
|
||||
|
||||
func checkTags() builderT.TestCheckFunc {
|
||||
return func(artifacts []packer.Artifact) error {
|
||||
if len(artifacts) > 1 {
|
||||
return fmt.Errorf("more than 1 artifact")
|
||||
}
|
||||
|
||||
tags := make(map[string]string)
|
||||
tags["OS_Version"] = "Ubuntu"
|
||||
tags["Release"] = "Latest"
|
||||
|
||||
// Get the actual *Artifact pointer so we can access the AMIs directly
|
||||
artifactRaw := artifacts[0]
|
||||
artifact, ok := artifactRaw.(*common.Artifact)
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown artifact: %#v", artifactRaw)
|
||||
}
|
||||
|
||||
// describe the image, get block devices with a snapshot
|
||||
ec2conn, _ := testEC2Conn()
|
||||
imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
|
||||
ImageIds: []*string{aws.String(artifact.Amis["us-east-1"])},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error retrieving details for AMI Artifcat (%#v) in Tags Test: %s", artifact, err)
|
||||
}
|
||||
|
||||
if len(imageResp.Images) == 0 {
|
||||
return fmt.Errorf("No images found for AMI Artifcat (%#v) in Tags Test: %s", artifact, err)
|
||||
}
|
||||
|
||||
image := imageResp.Images[0]
|
||||
|
||||
// Check only those with a Snapshot ID, i.e. not Ephemeral
|
||||
var snapshots []*string
|
||||
for _, device := range image.BlockDeviceMappings {
|
||||
if device.Ebs != nil && device.Ebs.SnapshotId != nil {
|
||||
snapshots = append(snapshots, device.Ebs.SnapshotId)
|
||||
}
|
||||
}
|
||||
|
||||
// grab matching snapshot info
|
||||
resp, err := ec2conn.DescribeSnapshots(&ec2.DescribeSnapshotsInput{
|
||||
SnapshotIds: snapshots,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error retreiving Snapshots for AMI Artifcat (%#v) in Tags Test: %s", artifact, err)
|
||||
}
|
||||
|
||||
if len(resp.Snapshots) == 0 {
|
||||
return fmt.Errorf("No Snapshots found for AMI Artifcat (%#v) in Tags Test", artifact)
|
||||
}
|
||||
|
||||
// grab the snapshots, check the tags
|
||||
for _, s := range resp.Snapshots {
|
||||
expected := len(tags)
|
||||
for _, t := range s.Tags {
|
||||
for key, value := range tags {
|
||||
if key == *t.Key && value == *t.Value {
|
||||
expected--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if expected > 0 {
|
||||
return fmt.Errorf("Not all tags found")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
const testBuilderTagsAccBasic = `
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type": "test",
|
||||
"region": "us-east-1",
|
||||
"source_ami": "ami-9eaa1cf6",
|
||||
"instance_type": "t2.micro",
|
||||
"ssh_username": "ubuntu",
|
||||
"ami_name": "packer-tags-testing-{{timestamp}}",
|
||||
"tags": {
|
||||
"OS_Version": "Ubuntu",
|
||||
"Release": "Latest"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
|
@ -9,11 +9,15 @@ import (
|
|||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/mitchellh/goamz/ec2"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/mitchellh/multistep"
|
||||
awscommon "github.com/mitchellh/packer/builder/amazon/common"
|
||||
"github.com/mitchellh/packer/common"
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
"github.com/mitchellh/packer/helper/config"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"github.com/mitchellh/packer/template/interpolate"
|
||||
)
|
||||
|
||||
// The unique ID for this builder
|
||||
|
@ -38,7 +42,7 @@ type Config struct {
|
|||
X509KeyPath string `mapstructure:"x509_key_path"`
|
||||
X509UploadPath string `mapstructure:"x509_upload_path"`
|
||||
|
||||
tpl *packer.ConfigTemplate
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
type Builder struct {
|
||||
|
@ -47,40 +51,55 @@ type Builder struct {
|
|||
}
|
||||
|
||||
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
|
||||
md, err := common.DecodeConfig(&b.config, raws...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
configs := make([]interface{}, len(raws)+1)
|
||||
configs[0] = map[string]interface{}{
|
||||
"bundle_prefix": "image-{{timestamp}}",
|
||||
}
|
||||
copy(configs[1:], raws)
|
||||
|
||||
b.config.tpl, err = packer.NewConfigTemplate()
|
||||
b.config.ctx.Funcs = awscommon.TemplateFuncs
|
||||
err := config.Decode(&b.config, &config.DecodeOpts{
|
||||
Interpolate: true,
|
||||
InterpolateContext: &b.config.ctx,
|
||||
InterpolateFilter: &interpolate.RenderFilter{
|
||||
Exclude: []string{
|
||||
"bundle_upload_command",
|
||||
"bundle_vol_command",
|
||||
},
|
||||
},
|
||||
}, configs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.config.tpl.UserVars = b.config.PackerUserVars
|
||||
b.config.tpl.Funcs(awscommon.TemplateFuncs)
|
||||
|
||||
if b.config.BundleDestination == "" {
|
||||
b.config.BundleDestination = "/tmp"
|
||||
}
|
||||
|
||||
if b.config.BundlePrefix == "" {
|
||||
b.config.BundlePrefix = "image-{{timestamp}}"
|
||||
}
|
||||
|
||||
if b.config.BundleUploadCommand == "" {
|
||||
b.config.BundleUploadCommand = "sudo -n ec2-upload-bundle " +
|
||||
if b.config.IamInstanceProfile != "" {
|
||||
b.config.BundleUploadCommand = "sudo -i -n ec2-upload-bundle " +
|
||||
"-b {{.BucketName}} " +
|
||||
"-m {{.ManifestPath}} " +
|
||||
"-d {{.BundleDirectory}} " +
|
||||
"--batch " +
|
||||
"--region {{.Region}} " +
|
||||
"--retry"
|
||||
} else {
|
||||
b.config.BundleUploadCommand = "sudo -i -n ec2-upload-bundle " +
|
||||
"-b {{.BucketName}} " +
|
||||
"-m {{.ManifestPath}} " +
|
||||
"-a {{.AccessKey}} " +
|
||||
"-s {{.SecretKey}} " +
|
||||
"-d {{.BundleDirectory}} " +
|
||||
"--batch " +
|
||||
"--location {{.Region}} " +
|
||||
"--region {{.Region}} " +
|
||||
"--retry"
|
||||
}
|
||||
}
|
||||
|
||||
if b.config.BundleVolCommand == "" {
|
||||
b.config.BundleVolCommand = "sudo -n ec2-bundle-vol " +
|
||||
b.config.BundleVolCommand = "sudo -i -n ec2-bundle-vol " +
|
||||
"-k {{.KeyPath}} " +
|
||||
"-u {{.AccountId}} " +
|
||||
"-c {{.CertPath}} " +
|
||||
|
@ -97,43 +116,11 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
    }

    // Accumulate any errors
    errs := common.CheckUnusedConfig(md)
    errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...)
    errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(b.config.tpl)...)
    errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...)
    errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...)

    validates := map[string]*string{
        "bundle_upload_command": &b.config.BundleUploadCommand,
        "bundle_vol_command":    &b.config.BundleVolCommand,
    }

    for n, ptr := range validates {
        if err := b.config.tpl.Validate(*ptr); err != nil {
            errs = packer.MultiErrorAppend(
                errs, fmt.Errorf("Error parsing %s: %s", n, err))
        }
    }

    templates := map[string]*string{
        "account_id":         &b.config.AccountId,
        "ami_name":           &b.config.AMIName,
        "bundle_destination": &b.config.BundleDestination,
        "bundle_prefix":      &b.config.BundlePrefix,
        "s3_bucket":          &b.config.S3Bucket,
        "x509_cert_path":     &b.config.X509CertPath,
        "x509_key_path":      &b.config.X509KeyPath,
        "x509_upload_path":   &b.config.X509UploadPath,
    }

    for n, ptr := range templates {
        var err error
        *ptr, err = b.config.tpl.Process(*ptr, nil)
        if err != nil {
            errs = packer.MultiErrorAppend(
                errs, fmt.Errorf("Error processing %s: %s", n, err))
        }
    }
    var errs *packer.MultiError
    errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)
    errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(&b.config.ctx)...)
    errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(&b.config.ctx)...)
    errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)

    if b.config.AccountId == "" {
        errs = packer.MultiErrorAppend(errs, errors.New("account_id is required"))
@ -168,17 +155,13 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
}

func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
    region, err := b.config.Region()
    config, err := b.config.Config()
    if err != nil {
        return nil, err
    }

    auth, err := b.config.AccessConfig.Auth()
    if err != nil {
        return nil, err
    }

    ec2conn := ec2.New(auth, region)
    session := session.New(config)
    ec2conn := ec2.New(session)

    // If the subnet is specified but not the AZ, try to determine the AZ automatically
    if b.config.SubnetId != "" && b.config.AvailabilityZone == "" {
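The hunk above drops goamz's auth/region pair in favor of an aws-sdk-go session that every later step shares. A minimal, self-contained sketch of that client-construction pattern under the same SDK; the region string, the placeholder subnet ID, and the DescribeSubnets call are illustrative assumptions, not code from this change, though a lookup of this shape is one plausible way to resolve an availability zone from a subnet:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    // Assumption: credentials come from the default chain (env vars,
    // shared credentials file, or an instance profile).
    sess := session.New(aws.NewConfig().WithRegion("us-east-1"))
    ec2conn := ec2.New(sess)

    // Illustrative subnet-to-AZ lookup; the subnet ID is a placeholder.
    out, err := ec2conn.DescribeSubnets(&ec2.DescribeSubnetsInput{
        SubnetIds: []*string{aws.String("subnet-0123456789abcdef0")},
    })
    if err != nil {
        log.Fatal(err)
    }
    if len(out.Subnets) > 0 {
        fmt.Println(*out.Subnets[0].AvailabilityZone)
    }
}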
@ -200,6 +183,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {

    // Build the steps
    steps := []multistep.Step{
        &awscommon.StepPreValidate{
            DestAmiName:     b.config.AMIName,
            ForceDeregister: b.config.AMIForceDeregister,
        },
        &awscommon.StepSourceAMIInfo{
            SourceAmi:          b.config.SourceAmi,
            EnhancedNetworking: b.config.AMIEnhancedNetworking,
@ -207,12 +194,13 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
        &awscommon.StepKeyPair{
            Debug:        b.config.PackerDebug,
            DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName),
            KeyPairName:  b.config.TemporaryKeyPairName,
            PrivateKeyFile: b.config.SSHPrivateKeyFile,
            KeyPairName:          b.config.SSHKeyPairName,
            PrivateKeyFile:       b.config.RunConfig.Comm.SSHPrivateKey,
            TemporaryKeyPairName: b.config.TemporaryKeyPairName,
        },
        &awscommon.StepSecurityGroup{
            CommConfig:       &b.config.RunConfig.Comm,
            SecurityGroupIds: b.config.SecurityGroupIds,
            SSHPort:          b.config.SSHPort,
            VpcId:            b.config.VpcId,
        },
        &awscommon.StepRunSourceInstance{
@ -226,15 +214,23 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
            SourceAMI:                b.config.SourceAmi,
            SubnetId:                 b.config.SubnetId,
            AssociatePublicIpAddress: b.config.AssociatePublicIpAddress,
            EbsOptimized:             b.config.EbsOptimized,
            AvailabilityZone:         b.config.AvailabilityZone,
            BlockDevices:             b.config.BlockDevices,
            Tags:                     b.config.RunTags,
        },
        &common.StepConnectSSH{
            SSHAddress: awscommon.SSHAddress(
                ec2conn, b.config.SSHPort, b.config.SSHPrivateIp),
            SSHConfig:      awscommon.SSHConfig(b.config.SSHUsername),
            SSHWaitTimeout: b.config.SSHTimeout(),
        &awscommon.StepGetPassword{
            Debug:   b.config.PackerDebug,
            Comm:    &b.config.RunConfig.Comm,
            Timeout: b.config.WindowsPasswordTimeout,
        },
        &communicator.StepConnect{
            Config: &b.config.RunConfig.Comm,
            Host: awscommon.SSHHost(
                ec2conn,
                b.config.SSHPrivateIp),
            SSHConfig: awscommon.SSHConfig(
                b.config.RunConfig.Comm.SSHUsername),
        },
        &common.StepProvision{},
        &StepUploadX509Cert{},
@ -244,9 +240,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
        &StepUploadBundle{
            Debug: b.config.PackerDebug,
        },
        &awscommon.StepDeregisterAMI{
            ForceDeregister: b.config.AMIForceDeregister,
            AMIName:         b.config.AMIName,
        },
        &StepRegisterAMI{},
        &awscommon.StepAMIRegionCopy{
            AccessConfig: &b.config.AccessConfig,
            Regions:      b.config.AMIRegions,
            Name:         b.config.AMIName,
        },
        &awscommon.StepModifyAMIAttributes{
            Description: b.config.AMIDescription,
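The Prepare hunks above replace the old packer.ConfigTemplate flow with helper/config decoding plus a template/interpolate context. A minimal sketch of that decode-with-interpolation pattern; the demoConfig struct, its fields, and the raw map are illustrative stand-ins, not part of this change:

package main

import (
    "fmt"
    "log"

    "github.com/mitchellh/packer/helper/config"
    "github.com/mitchellh/packer/template/interpolate"
)

type demoConfig struct {
    BundlePrefix string `mapstructure:"bundle_prefix"`
    VolCommand   string `mapstructure:"bundle_vol_command"`

    ctx interpolate.Context
}

func main() {
    var c demoConfig

    raw := map[string]interface{}{
        "bundle_prefix":      "image-{{timestamp}}",
        "bundle_vol_command": "sudo -i -n ec2-bundle-vol -p {{.Prefix}}",
    }

    // Interpolate everything now except the command, which still needs
    // per-run data and is rendered later by the step (see the render
    // hunks further down).
    err := config.Decode(&c, &config.DecodeOpts{
        Interpolate:        true,
        InterpolateContext: &c.ctx,
        InterpolateFilter: &interpolate.RenderFilter{
            Exclude: []string{"bundle_vol_command"},
        },
    }, raw)
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println(c.BundlePrefix) // timestamp already substituted
    fmt.Println(c.VolCommand)   // left untouched until the step runs
}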
@ -130,7 +130,6 @@ func TestBuilderPrepare_BundlePrefix(t *testing.T) {
    b := &Builder{}
    config := testConfig()

    config["bundle_prefix"] = ""
    warnings, err := b.Prepare(config)
    if len(warnings) > 0 {
        t.Fatalf("bad: %#v", warnings)
@ -3,9 +3,10 @@ package instance
import (
    "fmt"

    "github.com/mitchellh/goamz/ec2"
    "github.com/aws/aws-sdk-go/service/ec2"
    "github.com/mitchellh/multistep"
    "github.com/mitchellh/packer/packer"
    "github.com/mitchellh/packer/template/interpolate"
)

type bundleCmdData struct {
@ -32,15 +33,16 @@ func (s *StepBundleVolume) Run(state multistep.StateBag) multistep.StepAction {

    // Bundle the volume
    var err error
    config.BundleVolCommand, err = config.tpl.Process(config.BundleVolCommand, bundleCmdData{
    config.ctx.Data = bundleCmdData{
        AccountId:    config.AccountId,
        Architecture: instance.Architecture,
        Architecture: *instance.Architecture,
        CertPath:     x509RemoteCertPath,
        Destination:  config.BundleDestination,
        KeyPath:      x509RemoteKeyPath,
        Prefix:       config.BundlePrefix,
        PrivatePath:  config.X509UploadPath,
    })
    }
    config.BundleVolCommand, err = interpolate.Render(config.BundleVolCommand, &config.ctx)
    if err != nil {
        err := fmt.Errorf("Error processing bundle volume command: %s", err)
        state.Put("error", err)
@ -65,7 +67,9 @@ func (s *StepBundleVolume) Run(state multistep.StateBag) multistep.StepAction {
    if cmd.ExitStatus != 0 {
        state.Put("error", fmt.Errorf(
            "Volume bundling failed. Please see the output above for more\n"+
                "details on what went wrong."))
                "details on what went wrong.\n\n"+
                "One common cause for this error is ec2-bundle-vol not being\n"+
                "available on the target instance."))
        ui.Error(state.Get("error").(error).Error())
        return multistep.ActionHalt
    }
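With ConfigTemplate gone, the step fills config.ctx.Data and renders the command through interpolate.Render. A small self-contained sketch of that render step, assuming a stand-in data struct and command string in place of the real bundleCmdData and BundleVolCommand:

package main

import (
    "fmt"
    "log"

    "github.com/mitchellh/packer/template/interpolate"
)

// Stand-in for bundleCmdData; only two fields for brevity.
type cmdData struct {
    Destination string
    Prefix      string
}

func main() {
    ctx := interpolate.Context{}
    ctx.Data = cmdData{Destination: "/tmp", Prefix: "image-123"}

    cmd := "sudo -i -n ec2-bundle-vol -d {{.Destination}} -p {{.Prefix}}"

    rendered, err := interpolate.Render(cmd, &ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(rendered)
    // sudo -i -n ec2-bundle-vol -d /tmp -p image-123
}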
@ -3,7 +3,8 @@ package instance
import (
    "fmt"

    "github.com/mitchellh/goamz/ec2"
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/ec2"
    "github.com/mitchellh/multistep"
    awscommon "github.com/mitchellh/packer/builder/amazon/common"
    "github.com/mitchellh/packer/packer"
@ -18,16 +19,19 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
    ui := state.Get("ui").(packer.Ui)

    ui.Say("Registering the AMI...")
    registerOpts := &ec2.RegisterImage{
        ImageLocation: manifestPath,
        Name:          config.AMIName,
        BlockDevices:  config.BlockDevices.BuildAMIDevices(),
        VirtType:      config.AMIVirtType,
    registerOpts := &ec2.RegisterImageInput{
        ImageLocation:       &manifestPath,
        Name:                aws.String(config.AMIName),
        BlockDeviceMappings: config.BlockDevices.BuildAMIDevices(),
    }

    if config.AMIVirtType != "" {
        registerOpts.VirtualizationType = aws.String(config.AMIVirtType)
    }

    // Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
    if config.AMIEnhancedNetworking {
        registerOpts.SriovNetSupport = "simple"
        registerOpts.SriovNetSupport = aws.String("simple")
    }

    registerResp, err := ec2conn.RegisterImage(registerOpts)
@ -38,16 +42,16 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
    }

    // Set the AMI ID in the state
    ui.Say(fmt.Sprintf("AMI: %s", registerResp.ImageId))
    ui.Say(fmt.Sprintf("AMI: %s", *registerResp.ImageId))
    amis := make(map[string]string)
    amis[ec2conn.Region.Name] = registerResp.ImageId
    amis[*ec2conn.Config.Region] = *registerResp.ImageId
    state.Put("amis", amis)

    // Wait for the image to become ready
    stateChange := awscommon.StateChangeConf{
        Pending:   []string{"pending"},
        Target:    "available",
        Refresh:   awscommon.AMIStateRefreshFunc(ec2conn, registerResp.ImageId),
        Refresh:   awscommon.AMIStateRefreshFunc(ec2conn, *registerResp.ImageId),
        StepState: state,
    }
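The register step now talks to the aws-sdk-go API, where scalar fields are pointers and optional fields are set only when present, via aws.String. A short sketch of that idiom; the manifest path, AMI name, and flag values here are placeholders, not values from this change:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    manifestPath := "my-bucket/image.manifest.xml" // placeholder
    amiVirtType := "hvm"                           // placeholder; may be empty
    enhancedNetworking := true                     // placeholder

    opts := &ec2.RegisterImageInput{
        ImageLocation: &manifestPath,
        Name:          aws.String("example-ami"),
    }

    // Optional fields stay nil unless explicitly requested.
    if amiVirtType != "" {
        opts.VirtualizationType = aws.String(amiVirtType)
    }
    if enhancedNetworking {
        opts.SriovNetSupport = aws.String("simple")
    }

    fmt.Println(opts) // this value is what RegisterImage would receive
}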
@ -5,6 +5,7 @@ import (

    "github.com/mitchellh/multistep"
    "github.com/mitchellh/packer/packer"
    "github.com/mitchellh/packer/template/interpolate"
)

type uploadCmdData struct {
@ -35,14 +36,15 @@ func (s *StepUploadBundle) Run(state multistep.StateBag) multistep.StepAction {
        return multistep.ActionHalt
    }

    config.BundleUploadCommand, err = config.tpl.Process(config.BundleUploadCommand, uploadCmdData{
    config.ctx.Data = uploadCmdData{
        AccessKey:       config.AccessKey,
        BucketName:      config.S3Bucket,
        BundleDirectory: config.BundleDestination,
        ManifestPath:    manifestPath,
        Region:          region.Name,
        Region:          region,
        SecretKey:       config.SecretKey,
    })
    }
    config.BundleUploadCommand, err = interpolate.Render(config.BundleUploadCommand, &config.ctx)
    if err != nil {
        err := fmt.Errorf("Error processing bundle upload command: %s", err)
        state.Put("error", err)
@ -1,76 +0,0 @@
|
|||
// All of the methods used to communicate with the digital_ocean API
|
||||
// are here. Their API is on a path to V2, so just plain JSON is used
|
||||
// in place of a proper client library for now.
|
||||
|
||||
package digitalocean
|
||||
|
||||
type Region struct {
|
||||
Slug string `json:"slug"`
|
||||
Name string `json:"name"`
|
||||
|
||||
// v1 only
|
||||
Id uint `json:"id,omitempty"`
|
||||
|
||||
// v2 only
|
||||
Sizes []string `json:"sizes,omitempty"`
|
||||
Available bool `json:"available,omitempty"`
|
||||
Features []string `json:"features,omitempty"`
|
||||
}
|
||||
|
||||
type RegionsResp struct {
|
||||
Regions []Region
|
||||
}
|
||||
|
||||
type Size struct {
|
||||
Slug string `json:"slug"`
|
||||
|
||||
// v1 only
|
||||
Id uint `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// v2 only
|
||||
Memory uint `json:"memory,omitempty"`
|
||||
VCPUS uint `json:"vcpus,omitempty"`
|
||||
Disk uint `json:"disk,omitempty"`
|
||||
Transfer float64 `json:"transfer,omitempty"`
|
||||
PriceMonthly float64 `json:"price_monthly,omitempty"`
|
||||
PriceHourly float64 `json:"price_hourly,omitempty"`
|
||||
}
|
||||
|
||||
type SizesResp struct {
|
||||
Sizes []Size
|
||||
}
|
||||
|
||||
type Image struct {
|
||||
Id uint `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Slug string `json:"slug"`
|
||||
Distribution string `json:"distribution"`
|
||||
|
||||
// v2 only
|
||||
Public bool `json:"public,omitempty"`
|
||||
ActionIds []string `json:"action_ids,omitempty"`
|
||||
CreatedAt string `json:"created_at,omitempty"`
|
||||
}
|
||||
|
||||
type ImagesResp struct {
|
||||
Images []Image
|
||||
}
|
||||
|
||||
type DigitalOceanClient interface {
|
||||
CreateKey(string, string) (uint, error)
|
||||
DestroyKey(uint) error
|
||||
CreateDroplet(string, string, string, string, uint, bool) (uint, error)
|
||||
DestroyDroplet(uint) error
|
||||
PowerOffDroplet(uint) error
|
||||
ShutdownDroplet(uint) error
|
||||
CreateSnapshot(uint, string) error
|
||||
Images() ([]Image, error)
|
||||
DestroyImage(uint) error
|
||||
DropletStatus(uint) (string, string, error)
|
||||
Image(string) (Image, error)
|
||||
Regions() ([]Region, error)
|
||||
Region(string) (Region, error)
|
||||
Sizes() ([]Size, error)
|
||||
Size(string) (Size, error)
|
||||
}
|
|
@ -1,382 +0,0 @@
|
|||
// All of the methods used to communicate with the digital_ocean API
|
||||
// are here. Their API is on a path to V2, so just plain JSON is used
|
||||
// in place of a proper client library for now.
|
||||
|
||||
package digitalocean
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
type DigitalOceanClientV1 struct {
|
||||
// The http client for communicating
|
||||
client *http.Client
|
||||
|
||||
// Credentials
|
||||
ClientID string
|
||||
APIKey string
|
||||
// The base URL of the API
|
||||
APIURL string
|
||||
}
|
||||
|
||||
// Creates a new client for communicating with DO
|
||||
func DigitalOceanClientNewV1(client string, key string, url string) *DigitalOceanClientV1 {
|
||||
c := &DigitalOceanClientV1{
|
||||
client: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
},
|
||||
},
|
||||
APIURL: url,
|
||||
ClientID: client,
|
||||
APIKey: key,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// Creates an SSH Key and returns it's id
|
||||
func (d DigitalOceanClientV1) CreateKey(name string, pub string) (uint, error) {
|
||||
params := url.Values{}
|
||||
params.Set("name", name)
|
||||
params.Set("ssh_pub_key", pub)
|
||||
|
||||
body, err := NewRequestV1(d, "ssh_keys/new", params)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Read the SSH key's ID we just created
|
||||
key := body["ssh_key"].(map[string]interface{})
|
||||
keyId := key["id"].(float64)
|
||||
return uint(keyId), nil
|
||||
}
|
||||
|
||||
// Destroys an SSH key
|
||||
func (d DigitalOceanClientV1) DestroyKey(id uint) error {
|
||||
path := fmt.Sprintf("ssh_keys/%v/destroy", id)
|
||||
_, err := NewRequestV1(d, path, url.Values{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Creates a droplet and returns it's id
|
||||
func (d DigitalOceanClientV1) CreateDroplet(name string, size string, image string, region string, keyId uint, privateNetworking bool) (uint, error) {
|
||||
params := url.Values{}
|
||||
params.Set("name", name)
|
||||
|
||||
found_size, err := d.Size(size)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Invalid size or lookup failure: '%s': %s", size, err)
|
||||
}
|
||||
|
||||
found_image, err := d.Image(image)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Invalid image or lookup failure: '%s': %s", image, err)
|
||||
}
|
||||
|
||||
found_region, err := d.Region(region)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Invalid region or lookup failure: '%s': %s", region, err)
|
||||
}
|
||||
|
||||
params.Set("size_slug", found_size.Slug)
|
||||
params.Set("image_slug", found_image.Slug)
|
||||
params.Set("region_slug", found_region.Slug)
|
||||
params.Set("ssh_key_ids", fmt.Sprintf("%v", keyId))
|
||||
params.Set("private_networking", fmt.Sprintf("%v", privateNetworking))
|
||||
|
||||
body, err := NewRequestV1(d, "droplets/new", params)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Read the Droplets ID
|
||||
droplet := body["droplet"].(map[string]interface{})
|
||||
dropletId := droplet["id"].(float64)
|
||||
return uint(dropletId), err
|
||||
}
|
||||
|
||||
// Destroys a droplet
|
||||
func (d DigitalOceanClientV1) DestroyDroplet(id uint) error {
|
||||
path := fmt.Sprintf("droplets/%v/destroy", id)
|
||||
_, err := NewRequestV1(d, path, url.Values{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Powers off a droplet
|
||||
func (d DigitalOceanClientV1) PowerOffDroplet(id uint) error {
|
||||
path := fmt.Sprintf("droplets/%v/power_off", id)
|
||||
_, err := NewRequestV1(d, path, url.Values{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Shutsdown a droplet. This is a "soft" shutdown.
|
||||
func (d DigitalOceanClientV1) ShutdownDroplet(id uint) error {
|
||||
path := fmt.Sprintf("droplets/%v/shutdown", id)
|
||||
_, err := NewRequestV1(d, path, url.Values{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Creates a snaphot of a droplet by it's ID
|
||||
func (d DigitalOceanClientV1) CreateSnapshot(id uint, name string) error {
|
||||
path := fmt.Sprintf("droplets/%v/snapshot", id)
|
||||
|
||||
params := url.Values{}
|
||||
params.Set("name", name)
|
||||
|
||||
_, err := NewRequestV1(d, path, params)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Returns all available images.
|
||||
func (d DigitalOceanClientV1) Images() ([]Image, error) {
|
||||
resp, err := NewRequestV1(d, "images", url.Values{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result ImagesResp
|
||||
if err := mapstructure.Decode(resp, &result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result.Images, nil
|
||||
}
|
||||
|
||||
// Destroys an image by its ID.
|
||||
func (d DigitalOceanClientV1) DestroyImage(id uint) error {
|
||||
path := fmt.Sprintf("images/%d/destroy", id)
|
||||
_, err := NewRequestV1(d, path, url.Values{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Returns DO's string representation of status "off" "new" "active" etc.
|
||||
func (d DigitalOceanClientV1) DropletStatus(id uint) (string, string, error) {
|
||||
path := fmt.Sprintf("droplets/%v", id)
|
||||
|
||||
body, err := NewRequestV1(d, path, url.Values{})
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
var ip string
|
||||
|
||||
// Read the droplet's "status"
|
||||
droplet := body["droplet"].(map[string]interface{})
|
||||
status := droplet["status"].(string)
|
||||
|
||||
if droplet["ip_address"] != nil {
|
||||
ip = droplet["ip_address"].(string)
|
||||
}
|
||||
|
||||
return ip, status, err
|
||||
}
|
||||
|
||||
// Sends an api request and returns a generic map[string]interface of
|
||||
// the response.
|
||||
func NewRequestV1(d DigitalOceanClientV1, path string, params url.Values) (map[string]interface{}, error) {
|
||||
client := d.client
|
||||
|
||||
// Add the authentication parameters
|
||||
params.Set("client_id", d.ClientID)
|
||||
params.Set("api_key", d.APIKey)
|
||||
|
||||
url := fmt.Sprintf("%s/%s?%s", d.APIURL, path, params.Encode())
|
||||
|
||||
// Do some basic scrubbing so sensitive information doesn't appear in logs
|
||||
scrubbedUrl := strings.Replace(url, d.ClientID, "CLIENT_ID", -1)
|
||||
scrubbedUrl = strings.Replace(scrubbedUrl, d.APIKey, "API_KEY", -1)
|
||||
log.Printf("sending new request to digitalocean: %s", scrubbedUrl)
|
||||
|
||||
var lastErr error
|
||||
for attempts := 1; attempts < 10; attempts++ {
|
||||
resp, err := client.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Printf("response from digitalocean: %s", body)
|
||||
|
||||
var decodedResponse map[string]interface{}
|
||||
err = json.Unmarshal(body, &decodedResponse)
|
||||
if err != nil {
|
||||
err = errors.New(fmt.Sprintf("Failed to decode JSON response (HTTP %v) from DigitalOcean: %s",
|
||||
resp.StatusCode, body))
|
||||
return decodedResponse, err
|
||||
}
|
||||
|
||||
// Check for errors sent by digitalocean
|
||||
status := decodedResponse["status"].(string)
|
||||
if status == "OK" {
|
||||
return decodedResponse, nil
|
||||
}
|
||||
|
||||
if status == "ERROR" {
|
||||
statusRaw, ok := decodedResponse["error_message"]
|
||||
if ok {
|
||||
status = statusRaw.(string)
|
||||
} else {
|
||||
status = fmt.Sprintf(
|
||||
"Unknown error. Full response body: %s", body)
|
||||
}
|
||||
}
|
||||
|
||||
lastErr = errors.New(fmt.Sprintf("Received error from DigitalOcean (%d): %s",
|
||||
resp.StatusCode, status))
|
||||
log.Println(lastErr)
|
||||
if strings.Contains(status, "a pending event") {
|
||||
// Retry, DigitalOcean sends these dumb "pending event"
|
||||
// errors all the time.
|
||||
time.Sleep(5 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
// Some other kind of error. Just return.
|
||||
return decodedResponse, lastErr
|
||||
}
|
||||
|
||||
return nil, lastErr
|
||||
}
|
||||
|
||||
func (d DigitalOceanClientV1) Image(slug_or_name_or_id string) (Image, error) {
|
||||
images, err := d.Images()
|
||||
if err != nil {
|
||||
return Image{}, err
|
||||
}
|
||||
|
||||
for _, image := range images {
|
||||
if strings.EqualFold(image.Slug, slug_or_name_or_id) {
|
||||
return image, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, image := range images {
|
||||
if strings.EqualFold(image.Name, slug_or_name_or_id) {
|
||||
return image, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, image := range images {
|
||||
id, err := strconv.Atoi(slug_or_name_or_id)
|
||||
if err == nil {
|
||||
if image.Id == uint(id) {
|
||||
return image, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New(fmt.Sprintf("Unknown image '%v'", slug_or_name_or_id))
|
||||
|
||||
return Image{}, err
|
||||
}
|
||||
|
||||
// Returns all available regions.
|
||||
func (d DigitalOceanClientV1) Regions() ([]Region, error) {
|
||||
resp, err := NewRequestV1(d, "regions", url.Values{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result RegionsResp
|
||||
if err := mapstructure.Decode(resp, &result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result.Regions, nil
|
||||
}
|
||||
|
||||
func (d DigitalOceanClientV1) Region(slug_or_name_or_id string) (Region, error) {
|
||||
regions, err := d.Regions()
|
||||
if err != nil {
|
||||
return Region{}, err
|
||||
}
|
||||
|
||||
for _, region := range regions {
|
||||
if strings.EqualFold(region.Slug, slug_or_name_or_id) {
|
||||
return region, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, region := range regions {
|
||||
if strings.EqualFold(region.Name, slug_or_name_or_id) {
|
||||
return region, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, region := range regions {
|
||||
id, err := strconv.Atoi(slug_or_name_or_id)
|
||||
if err == nil {
|
||||
if region.Id == uint(id) {
|
||||
return region, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New(fmt.Sprintf("Unknown region '%v'", slug_or_name_or_id))
|
||||
|
||||
return Region{}, err
|
||||
}
|
||||
|
||||
// Returns all available sizes.
|
||||
func (d DigitalOceanClientV1) Sizes() ([]Size, error) {
|
||||
resp, err := NewRequestV1(d, "sizes", url.Values{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result SizesResp
|
||||
if err := mapstructure.Decode(resp, &result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result.Sizes, nil
|
||||
}
|
||||
|
||||
func (d DigitalOceanClientV1) Size(slug_or_name_or_id string) (Size, error) {
|
||||
sizes, err := d.Sizes()
|
||||
if err != nil {
|
||||
return Size{}, err
|
||||
}
|
||||
|
||||
for _, size := range sizes {
|
||||
if strings.EqualFold(size.Slug, slug_or_name_or_id) {
|
||||
return size, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, size := range sizes {
|
||||
if strings.EqualFold(size.Name, slug_or_name_or_id) {
|
||||
return size, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, size := range sizes {
|
||||
id, err := strconv.Atoi(slug_or_name_or_id)
|
||||
if err == nil {
|
||||
if size.Id == uint(id) {
|
||||
return size, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New(fmt.Sprintf("Unknown size '%v'", slug_or_name_or_id))
|
||||
|
||||
return Size{}, err
|
||||
}
|
|
@ -1,457 +0,0 @@
|
|||
// are here. Their API is on a path to V2, so just plain JSON is used
|
||||
// in place of a proper client library for now.
|
||||
|
||||
package digitalocean
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type DigitalOceanClientV2 struct {
|
||||
// The http client for communicating
|
||||
client *http.Client
|
||||
|
||||
// Credentials
|
||||
APIToken string
|
||||
|
||||
// The base URL of the API
|
||||
APIURL string
|
||||
}
|
||||
|
||||
// Creates a new client for communicating with DO
|
||||
func DigitalOceanClientNewV2(token string, url string) *DigitalOceanClientV2 {
|
||||
c := &DigitalOceanClientV2{
|
||||
client: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
},
|
||||
},
|
||||
APIURL: url,
|
||||
APIToken: token,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// Creates an SSH Key and returns it's id
|
||||
func (d DigitalOceanClientV2) CreateKey(name string, pub string) (uint, error) {
|
||||
type KeyReq struct {
|
||||
Name string `json:"name"`
|
||||
PublicKey string `json:"public_key"`
|
||||
}
|
||||
type KeyRes struct {
|
||||
SSHKey struct {
|
||||
Id uint
|
||||
Name string
|
||||
Fingerprint string
|
||||
PublicKey string `json:"public_key"`
|
||||
} `json:"ssh_key"`
|
||||
}
|
||||
req := &KeyReq{Name: name, PublicKey: pub}
|
||||
res := KeyRes{}
|
||||
err := NewRequestV2(d, "v2/account/keys", "POST", req, &res)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return res.SSHKey.Id, err
|
||||
}
|
||||
|
||||
// Destroys an SSH key
|
||||
func (d DigitalOceanClientV2) DestroyKey(id uint) error {
|
||||
path := fmt.Sprintf("v2/account/keys/%v", id)
|
||||
return NewRequestV2(d, path, "DELETE", nil, nil)
|
||||
}
|
||||
|
||||
// Creates a droplet and returns it's id
|
||||
func (d DigitalOceanClientV2) CreateDroplet(name string, size string, image string, region string, keyId uint, privateNetworking bool) (uint, error) {
|
||||
type DropletReq struct {
|
||||
Name string `json:"name"`
|
||||
Region string `json:"region"`
|
||||
Size string `json:"size"`
|
||||
Image string `json:"image"`
|
||||
SSHKeys []string `json:"ssh_keys,omitempty"`
|
||||
Backups bool `json:"backups,omitempty"`
|
||||
IPv6 bool `json:"ipv6,omitempty"`
|
||||
PrivateNetworking bool `json:"private_networking,omitempty"`
|
||||
}
|
||||
type DropletRes struct {
|
||||
Droplet struct {
|
||||
Id uint
|
||||
Name string
|
||||
Memory uint
|
||||
VCPUS uint `json:"vcpus"`
|
||||
Disk uint
|
||||
Region Region
|
||||
Image Image
|
||||
Size Size
|
||||
Locked bool
|
||||
CreateAt string `json:"created_at"`
|
||||
Status string
|
||||
Networks struct {
|
||||
V4 []struct {
|
||||
IPAddr string `json:"ip_address"`
|
||||
Netmask string
|
||||
Gateway string
|
||||
Type string
|
||||
} `json:"v4,omitempty"`
|
||||
V6 []struct {
|
||||
IPAddr string `json:"ip_address"`
|
||||
CIDR uint `json:"cidr"`
|
||||
Gateway string
|
||||
Type string
|
||||
} `json:"v6,omitempty"`
|
||||
}
|
||||
Kernel struct {
|
||||
Id uint
|
||||
Name string
|
||||
Version string
|
||||
}
|
||||
BackupIds []uint
|
||||
SnapshotIds []uint
|
||||
ActionIds []uint
|
||||
Features []string `json:"features,omitempty"`
|
||||
}
|
||||
}
|
||||
req := &DropletReq{Name: name}
|
||||
res := DropletRes{}
|
||||
|
||||
found_size, err := d.Size(size)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Invalid size or lookup failure: '%s': %s", size, err)
|
||||
}
|
||||
|
||||
found_image, err := d.Image(image)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Invalid image or lookup failure: '%s': %s", image, err)
|
||||
}
|
||||
|
||||
found_region, err := d.Region(region)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Invalid region or lookup failure: '%s': %s", region, err)
|
||||
}
|
||||
|
||||
req.Size = found_size.Slug
|
||||
req.Image = found_image.Slug
|
||||
req.Region = found_region.Slug
|
||||
req.SSHKeys = []string{fmt.Sprintf("%v", keyId)}
|
||||
req.PrivateNetworking = privateNetworking
|
||||
|
||||
err = NewRequestV2(d, "v2/droplets", "POST", req, &res)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return res.Droplet.Id, err
|
||||
}
|
||||
|
||||
// Destroys a droplet
|
||||
func (d DigitalOceanClientV2) DestroyDroplet(id uint) error {
|
||||
path := fmt.Sprintf("v2/droplets/%v", id)
|
||||
return NewRequestV2(d, path, "DELETE", nil, nil)
|
||||
}
|
||||
|
||||
// Powers off a droplet
|
||||
func (d DigitalOceanClientV2) PowerOffDroplet(id uint) error {
|
||||
type ActionReq struct {
|
||||
Type string `json:"type"`
|
||||
}
|
||||
type ActionRes struct {
|
||||
}
|
||||
req := &ActionReq{Type: "power_off"}
|
||||
path := fmt.Sprintf("v2/droplets/%v/actions", id)
|
||||
return NewRequestV2(d, path, "POST", req, nil)
|
||||
}
|
||||
|
||||
// Shutsdown a droplet. This is a "soft" shutdown.
|
||||
func (d DigitalOceanClientV2) ShutdownDroplet(id uint) error {
|
||||
type ActionReq struct {
|
||||
Type string `json:"type"`
|
||||
}
|
||||
type ActionRes struct {
|
||||
}
|
||||
req := &ActionReq{Type: "shutdown"}
|
||||
|
||||
path := fmt.Sprintf("v2/droplets/%v/actions", id)
|
||||
return NewRequestV2(d, path, "POST", req, nil)
|
||||
}
|
||||
|
||||
// Creates a snaphot of a droplet by it's ID
|
||||
func (d DigitalOceanClientV2) CreateSnapshot(id uint, name string) error {
|
||||
type ActionReq struct {
|
||||
Type string `json:"type"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
type ActionRes struct {
|
||||
}
|
||||
req := &ActionReq{Type: "snapshot", Name: name}
|
||||
path := fmt.Sprintf("v2/droplets/%v/actions", id)
|
||||
return NewRequestV2(d, path, "POST", req, nil)
|
||||
}
|
||||
|
||||
// Returns all available images.
|
||||
func (d DigitalOceanClientV2) Images() ([]Image, error) {
|
||||
res := ImagesResp{}
|
||||
|
||||
err := NewRequestV2(d, "v2/images?per_page=200", "GET", nil, &res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res.Images, nil
|
||||
}
|
||||
|
||||
// Destroys an image by its ID.
|
||||
func (d DigitalOceanClientV2) DestroyImage(id uint) error {
|
||||
path := fmt.Sprintf("v2/images/%d", id)
|
||||
return NewRequestV2(d, path, "DELETE", nil, nil)
|
||||
}
|
||||
|
||||
// Returns DO's string representation of status "off" "new" "active" etc.
|
||||
func (d DigitalOceanClientV2) DropletStatus(id uint) (string, string, error) {
|
||||
path := fmt.Sprintf("v2/droplets/%v", id)
|
||||
type DropletRes struct {
|
||||
Droplet struct {
|
||||
Id uint
|
||||
Name string
|
||||
Memory uint
|
||||
VCPUS uint `json:"vcpus"`
|
||||
Disk uint
|
||||
Region Region
|
||||
Image Image
|
||||
Size Size
|
||||
Locked bool
|
||||
CreateAt string `json:"created_at"`
|
||||
Status string
|
||||
Networks struct {
|
||||
V4 []struct {
|
||||
IPAddr string `json:"ip_address"`
|
||||
Netmask string
|
||||
Gateway string
|
||||
Type string
|
||||
} `json:"v4,omitempty"`
|
||||
V6 []struct {
|
||||
IPAddr string `json:"ip_address"`
|
||||
CIDR uint `json:"cidr"`
|
||||
Gateway string
|
||||
Type string
|
||||
} `json:"v6,omitempty"`
|
||||
}
|
||||
Kernel struct {
|
||||
Id uint
|
||||
Name string
|
||||
Version string
|
||||
}
|
||||
BackupIds []uint
|
||||
SnapshotIds []uint
|
||||
ActionIds []uint
|
||||
Features []string `json:"features,omitempty"`
|
||||
}
|
||||
}
|
||||
res := DropletRes{}
|
||||
err := NewRequestV2(d, path, "GET", nil, &res)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
var ip string
|
||||
|
||||
for _, n := range res.Droplet.Networks.V4 {
|
||||
if n.Type == "public" {
|
||||
ip = n.IPAddr
|
||||
}
|
||||
}
|
||||
|
||||
return ip, res.Droplet.Status, err
|
||||
}
|
||||
|
||||
// Sends an api request and returns a generic map[string]interface of
|
||||
// the response.
|
||||
func NewRequestV2(d DigitalOceanClientV2, path string, method string, req interface{}, res interface{}) error {
|
||||
var err error
|
||||
var request *http.Request
|
||||
|
||||
client := d.client
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
// Add the authentication parameters
|
||||
url := fmt.Sprintf("%s/%s", d.APIURL, path)
|
||||
if req != nil {
|
||||
enc := json.NewEncoder(buf)
|
||||
enc.Encode(req)
|
||||
defer buf.Reset()
|
||||
request, err = http.NewRequest(method, url, buf)
|
||||
request.Header.Add("Content-Type", "application/json")
|
||||
} else {
|
||||
request, err = http.NewRequest(method, url, nil)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add the authentication parameters
|
||||
request.Header.Add("Authorization", "Bearer "+d.APIToken)
|
||||
if buf != nil {
|
||||
log.Printf("sending new request to digitalocean: %s buffer: %s", url, buf)
|
||||
} else {
|
||||
log.Printf("sending new request to digitalocean: %s", url)
|
||||
}
|
||||
resp, err := client.Do(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if method == "DELETE" && resp.StatusCode == 204 {
|
||||
if resp.Body != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if resp.Body == nil {
|
||||
return errors.New("Request returned empty body")
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("response from digitalocean: %s", body)
|
||||
|
||||
err = json.Unmarshal(body, &res)
|
||||
if err != nil {
|
||||
return errors.New(fmt.Sprintf("Failed to decode JSON response %s (HTTP %v) from DigitalOcean: %s", err.Error(),
|
||||
resp.StatusCode, body))
|
||||
}
|
||||
switch resp.StatusCode {
|
||||
case 403, 401, 429, 422, 404, 503, 500:
|
||||
return errors.New(fmt.Sprintf("digitalocean request error: %+v", res))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d DigitalOceanClientV2) Image(slug_or_name_or_id string) (Image, error) {
|
||||
images, err := d.Images()
|
||||
if err != nil {
|
||||
return Image{}, err
|
||||
}
|
||||
|
||||
for _, image := range images {
|
||||
if strings.EqualFold(image.Slug, slug_or_name_or_id) {
|
||||
return image, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, image := range images {
|
||||
if strings.EqualFold(image.Name, slug_or_name_or_id) {
|
||||
return image, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, image := range images {
|
||||
id, err := strconv.Atoi(slug_or_name_or_id)
|
||||
if err == nil {
|
||||
if image.Id == uint(id) {
|
||||
return image, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New(fmt.Sprintf("Unknown image '%v'", slug_or_name_or_id))
|
||||
|
||||
return Image{}, err
|
||||
}
|
||||
|
||||
// Returns all available regions.
|
||||
func (d DigitalOceanClientV2) Regions() ([]Region, error) {
|
||||
res := RegionsResp{}
|
||||
err := NewRequestV2(d, "v2/regions?per_page=200", "GET", nil, &res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res.Regions, nil
|
||||
}
|
||||
|
||||
func (d DigitalOceanClientV2) Region(slug_or_name_or_id string) (Region, error) {
|
||||
regions, err := d.Regions()
|
||||
if err != nil {
|
||||
return Region{}, err
|
||||
}
|
||||
|
||||
for _, region := range regions {
|
||||
if strings.EqualFold(region.Slug, slug_or_name_or_id) {
|
||||
return region, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, region := range regions {
|
||||
if strings.EqualFold(region.Name, slug_or_name_or_id) {
|
||||
return region, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, region := range regions {
|
||||
id, err := strconv.Atoi(slug_or_name_or_id)
|
||||
if err == nil {
|
||||
if region.Id == uint(id) {
|
||||
return region, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New(fmt.Sprintf("Unknown region '%v'", slug_or_name_or_id))
|
||||
|
||||
return Region{}, err
|
||||
}
|
||||
|
||||
// Returns all available sizes.
|
||||
func (d DigitalOceanClientV2) Sizes() ([]Size, error) {
|
||||
res := SizesResp{}
|
||||
err := NewRequestV2(d, "v2/sizes?per_page=200", "GET", nil, &res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res.Sizes, nil
|
||||
}
|
||||
|
||||
func (d DigitalOceanClientV2) Size(slug_or_name_or_id string) (Size, error) {
|
||||
sizes, err := d.Sizes()
|
||||
if err != nil {
|
||||
return Size{}, err
|
||||
}
|
||||
|
||||
for _, size := range sizes {
|
||||
if strings.EqualFold(size.Slug, slug_or_name_or_id) {
|
||||
return size, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, size := range sizes {
|
||||
if strings.EqualFold(size.Name, slug_or_name_or_id) {
|
||||
return size, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, size := range sizes {
|
||||
id, err := strconv.Atoi(slug_or_name_or_id)
|
||||
if err == nil {
|
||||
if size.Id == uint(id) {
|
||||
return size, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New(fmt.Sprintf("Unknown size '%v'", slug_or_name_or_id))
|
||||
|
||||
return Size{}, err
|
||||
}
|
|
@ -4,6 +4,8 @@ import (
    "fmt"
    "log"
    "strconv"

    "github.com/digitalocean/godo"
)

type Artifact struct {
@ -11,13 +13,13 @@ type Artifact struct {
    snapshotName string

    // The ID of the image
    snapshotId uint
    snapshotId int

    // The name of the region
    regionName string

    // The client for making API calls
    client DigitalOceanClient
    client *godo.Client
}

func (*Artifact) BuilderId() string {
@ -30,11 +32,11 @@ func (*Artifact) Files() []string {
}

func (a *Artifact) Id() string {
    return strconv.FormatUint(uint64(a.snapshotId), 10)
    return fmt.Sprintf("%s:%s", a.regionName, strconv.FormatUint(uint64(a.snapshotId), 10))
}

func (a *Artifact) String() string {
    return fmt.Sprintf("A snapshot was created: '%v' in region '%v'", a.snapshotName, a.regionName)
    return fmt.Sprintf("A snapshot was created: '%v' (ID: %v) in region '%v'", a.snapshotName, a.snapshotId, a.regionName)
}

func (a *Artifact) State(name string) interface{} {
@ -43,5 +45,6 @@ func (a *Artifact) State(name string) interface{} {

func (a *Artifact) Destroy() error {
    log.Printf("Destroying image: %d (%s)", a.snapshotId, a.snapshotName)
    return a.client.DestroyImage(a.snapshotId)
    _, err := a.client.Images.Delete(a.snapshotId)
    return err
}
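The artifact now carries a *godo.Client and deletes snapshots through client.Images.Delete. A minimal sketch of building such a client with an OAuth2 token source; oauth2.StaticTokenSource stands in for the builder's own token source, and both the access token and the image ID are placeholders:

package main

import (
    "log"

    "github.com/digitalocean/godo"
    "golang.org/x/oauth2"
)

func main() {
    // Assumption: in the real builder the token comes from configuration or
    // the DIGITALOCEAN_API_TOKEN environment variable.
    ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "do-api-token"})
    client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, ts))

    // Delete a snapshot image by its numeric ID (placeholder value).
    if _, err := client.Images.Delete(42); err != nil {
        log.Fatal(err)
    }
}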
@ -16,7 +16,7 @@ func TestArtifact_Impl(t *testing.T) {

func TestArtifactId(t *testing.T) {
    a := &Artifact{"packer-foobar", 42, "San Francisco", nil}
    expected := "42"
    expected := "San Francisco:42"

    if a.Id() != expected {
        t.Fatalf("artifact ID should match: %v", expected)
@ -25,7 +25,7 @@ func TestArtifactId(t *testing.T) {

func TestArtifactString(t *testing.T) {
    a := &Artifact{"packer-foobar", 42, "San Francisco", nil}
    expected := "A snapshot was created: 'packer-foobar' in region 'San Francisco'"
    expected := "A snapshot was created: 'packer-foobar' (ID: 42) in region 'San Francisco'"

    if a.String() != expected {
        t.Fatalf("artifact string should match: %v", expected)
@ -4,236 +4,39 @@
|
|||
package digitalocean
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/digitalocean/godo"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/common"
|
||||
"github.com/mitchellh/packer/common/uuid"
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// see https://api.digitalocean.com/images/?client_id=[client_id]&api_key=[api_key]
|
||||
// name="Ubuntu 12.04.4 x64", id=6374128,
|
||||
const DefaultImage = "ubuntu-12-04-x64"
|
||||
|
||||
// see https://api.digitalocean.com/regions/?client_id=[client_id]&api_key=[api_key]
|
||||
// name="New York 3", id=8
|
||||
const DefaultRegion = "nyc3"
|
||||
|
||||
// see https://api.digitalocean.com/sizes/?client_id=[client_id]&api_key=[api_key]
|
||||
// name="512MB", id=66 (the smallest droplet size)
|
||||
const DefaultSize = "512mb"
|
||||
|
||||
// The unique id for the builder
|
||||
const BuilderId = "pearkes.digitalocean"
|
||||
|
||||
// Configuration tells the builder the credentials
|
||||
// to use while communicating with DO and describes the image
|
||||
// you are creating
|
||||
type config struct {
|
||||
common.PackerConfig `mapstructure:",squash"`
|
||||
|
||||
ClientID string `mapstructure:"client_id"`
|
||||
APIKey string `mapstructure:"api_key"`
|
||||
APIURL string `mapstructure:"api_url"`
|
||||
APIToken string `mapstructure:"api_token"`
|
||||
RegionID uint `mapstructure:"region_id"`
|
||||
SizeID uint `mapstructure:"size_id"`
|
||||
ImageID uint `mapstructure:"image_id"`
|
||||
|
||||
Region string `mapstructure:"region"`
|
||||
Size string `mapstructure:"size"`
|
||||
Image string `mapstructure:"image"`
|
||||
|
||||
PrivateNetworking bool `mapstructure:"private_networking"`
|
||||
SnapshotName string `mapstructure:"snapshot_name"`
|
||||
DropletName string `mapstructure:"droplet_name"`
|
||||
SSHUsername string `mapstructure:"ssh_username"`
|
||||
SSHPort uint `mapstructure:"ssh_port"`
|
||||
|
||||
RawSSHTimeout string `mapstructure:"ssh_timeout"`
|
||||
RawStateTimeout string `mapstructure:"state_timeout"`
|
||||
|
||||
// These are unexported since they're set by other fields
|
||||
// being set.
|
||||
sshTimeout time.Duration
|
||||
stateTimeout time.Duration
|
||||
|
||||
tpl *packer.ConfigTemplate
|
||||
}
|
||||
|
||||
type Builder struct {
|
||||
config config
|
||||
config Config
|
||||
runner multistep.Runner
|
||||
}
|
||||
|
||||
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
|
||||
md, err := common.DecodeConfig(&b.config, raws...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
c, warnings, errs := NewConfig(raws...)
|
||||
if errs != nil {
|
||||
return warnings, errs
|
||||
}
|
||||
b.config = *c
|
||||
|
||||
b.config.tpl, err = packer.NewConfigTemplate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.config.tpl.UserVars = b.config.PackerUserVars
|
||||
|
||||
// Accumulate any errors
|
||||
errs := common.CheckUnusedConfig(md)
|
||||
|
||||
// Optional configuration with defaults
|
||||
if b.config.APIKey == "" {
|
||||
// Default to environment variable for api_key, if it exists
|
||||
b.config.APIKey = os.Getenv("DIGITALOCEAN_API_KEY")
|
||||
}
|
||||
|
||||
if b.config.ClientID == "" {
|
||||
// Default to environment variable for client_id, if it exists
|
||||
b.config.ClientID = os.Getenv("DIGITALOCEAN_CLIENT_ID")
|
||||
}
|
||||
|
||||
if b.config.APIURL == "" {
|
||||
// Default to environment variable for api_url, if it exists
|
||||
b.config.APIURL = os.Getenv("DIGITALOCEAN_API_URL")
|
||||
}
|
||||
|
||||
if b.config.APIToken == "" {
|
||||
// Default to environment variable for api_token, if it exists
|
||||
b.config.APIToken = os.Getenv("DIGITALOCEAN_API_TOKEN")
|
||||
}
|
||||
|
||||
if b.config.Region == "" {
|
||||
if b.config.RegionID != 0 {
|
||||
b.config.Region = fmt.Sprintf("%v", b.config.RegionID)
|
||||
} else {
|
||||
b.config.Region = DefaultRegion
|
||||
}
|
||||
}
|
||||
|
||||
if b.config.Size == "" {
|
||||
if b.config.SizeID != 0 {
|
||||
b.config.Size = fmt.Sprintf("%v", b.config.SizeID)
|
||||
} else {
|
||||
b.config.Size = DefaultSize
|
||||
}
|
||||
}
|
||||
|
||||
if b.config.Image == "" {
|
||||
if b.config.ImageID != 0 {
|
||||
b.config.Image = fmt.Sprintf("%v", b.config.ImageID)
|
||||
} else {
|
||||
b.config.Image = DefaultImage
|
||||
}
|
||||
}
|
||||
|
||||
if b.config.SnapshotName == "" {
|
||||
// Default to packer-{{ unix timestamp (utc) }}
|
||||
b.config.SnapshotName = "packer-{{timestamp}}"
|
||||
}
|
||||
|
||||
if b.config.DropletName == "" {
|
||||
// Default to packer-[time-ordered-uuid]
|
||||
b.config.DropletName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
|
||||
}
|
||||
|
||||
if b.config.SSHUsername == "" {
|
||||
// Default to "root". You can override this if your
|
||||
// SourceImage has a different user account then the DO default
|
||||
b.config.SSHUsername = "root"
|
||||
}
|
||||
|
||||
if b.config.SSHPort == 0 {
|
||||
// Default to port 22 per DO default
|
||||
b.config.SSHPort = 22
|
||||
}
|
||||
|
||||
if b.config.RawSSHTimeout == "" {
|
||||
// Default to 1 minute timeouts
|
||||
b.config.RawSSHTimeout = "1m"
|
||||
}
|
||||
|
||||
if b.config.RawStateTimeout == "" {
|
||||
// Default to 6 minute timeouts waiting for
|
||||
// desired state. i.e waiting for droplet to become active
|
||||
b.config.RawStateTimeout = "6m"
|
||||
}
|
||||
|
||||
templates := map[string]*string{
|
||||
"region": &b.config.Region,
|
||||
"size": &b.config.Size,
|
||||
"image": &b.config.Image,
|
||||
"client_id": &b.config.ClientID,
|
||||
"api_key": &b.config.APIKey,
|
||||
"api_url": &b.config.APIURL,
|
||||
"api_token": &b.config.APIToken,
|
||||
"snapshot_name": &b.config.SnapshotName,
|
||||
"droplet_name": &b.config.DropletName,
|
||||
"ssh_username": &b.config.SSHUsername,
|
||||
"ssh_timeout": &b.config.RawSSHTimeout,
|
||||
"state_timeout": &b.config.RawStateTimeout,
|
||||
}
|
||||
|
||||
for n, ptr := range templates {
|
||||
var err error
|
||||
*ptr, err = b.config.tpl.Process(*ptr, nil)
|
||||
if err != nil {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, fmt.Errorf("Error processing %s: %s", n, err))
|
||||
}
|
||||
}
|
||||
|
||||
if b.config.APIToken == "" {
|
||||
// Required configurations that will display errors if not set
|
||||
if b.config.ClientID == "" {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, errors.New("a client_id for v1 auth or api_token for v2 auth must be specified"))
|
||||
}
|
||||
|
||||
if b.config.APIKey == "" {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, errors.New("a api_key for v1 auth or api_token for v2 auth must be specified"))
|
||||
}
|
||||
}
|
||||
|
||||
if b.config.APIURL == "" {
|
||||
b.config.APIURL = "https://api.digitalocean.com"
|
||||
}
|
||||
|
||||
sshTimeout, err := time.ParseDuration(b.config.RawSSHTimeout)
|
||||
if err != nil {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err))
|
||||
}
|
||||
b.config.sshTimeout = sshTimeout
|
||||
|
||||
stateTimeout, err := time.ParseDuration(b.config.RawStateTimeout)
|
||||
if err != nil {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, fmt.Errorf("Failed parsing state_timeout: %s", err))
|
||||
}
|
||||
b.config.stateTimeout = stateTimeout
|
||||
|
||||
if errs != nil && len(errs.Errors) > 0 {
|
||||
return nil, errs
|
||||
}
|
||||
|
||||
common.ScrubConfig(b.config, b.config.ClientID, b.config.APIKey)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
|
||||
var client DigitalOceanClient
|
||||
// Initialize the DO API client
|
||||
if b.config.APIToken == "" {
|
||||
client = DigitalOceanClientNewV1(b.config.ClientID, b.config.APIKey, b.config.APIURL)
|
||||
} else {
|
||||
client = DigitalOceanClientNewV2(b.config.APIToken, b.config.APIURL)
|
||||
}
|
||||
client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, &apiTokenSource{
|
||||
AccessToken: b.config.APIToken,
|
||||
}))
|
||||
|
||||
// Set up the state
|
||||
state := new(multistep.BasicStateBag)
|
||||
|
@ -244,13 +47,16 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
|
||||
// Build the steps
|
||||
steps := []multistep.Step{
|
||||
new(stepCreateSSHKey),
|
||||
&stepCreateSSHKey{
|
||||
Debug: b.config.PackerDebug,
|
||||
DebugKeyPath: fmt.Sprintf("do_%s.pem", b.config.PackerBuildName),
|
||||
},
|
||||
new(stepCreateDroplet),
|
||||
new(stepDropletInfo),
|
||||
&common.StepConnectSSH{
|
||||
SSHAddress: sshAddress,
|
||||
&communicator.StepConnect{
|
||||
Config: &b.config.Comm,
|
||||
Host: commHost,
|
||||
SSHConfig: sshConfig,
|
||||
SSHWaitTimeout: 5 * time.Minute,
|
||||
},
|
||||
new(common.StepProvision),
|
||||
new(stepShutdown),
|
||||
|
@ -280,26 +86,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
sregion := state.Get("region")
|
||||
|
||||
var region string
|
||||
|
||||
if sregion != nil {
|
||||
region = sregion.(string)
|
||||
} else {
|
||||
region = fmt.Sprintf("%v", state.Get("region_id").(uint))
|
||||
}
|
||||
|
||||
found_region, err := client.Region(region)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
artifact := &Artifact{
|
||||
snapshotName: state.Get("snapshot_name").(string),
|
||||
snapshotId: state.Get("snapshot_image_id").(uint),
|
||||
regionName: found_region.Name,
|
||||
snapshotId: state.Get("snapshot_image_id").(int),
|
||||
regionName: state.Get("region").(string),
|
||||
client: client,
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,33 @@
package digitalocean

import (
    "os"
    "testing"

    builderT "github.com/mitchellh/packer/helper/builder/testing"
)

func TestBuilderAcc_basic(t *testing.T) {
    builderT.Test(t, builderT.TestCase{
        PreCheck: func() { testAccPreCheck(t) },
        Builder:  &Builder{},
        Template: testBuilderAccBasic,
    })
}

func testAccPreCheck(t *testing.T) {
    if v := os.Getenv("DIGITALOCEAN_API_TOKEN"); v == "" {
        t.Fatal("DIGITALOCEAN_API_TOKEN must be set for acceptance tests")
    }
}

const testBuilderAccBasic = `
{
    "builders": [{
        "type": "test",
        "region": "nyc2",
        "size": "512mb",
        "image": "ubuntu-12-04-x64"
    }]
}
`
@ -1,22 +1,19 @@
|
|||
package digitalocean
|
||||
|
||||
import (
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
"time"
|
||||
|
||||
func init() {
|
||||
// Clear out the credential env vars
|
||||
os.Setenv("DIGITALOCEAN_API_KEY", "")
|
||||
os.Setenv("DIGITALOCEAN_CLIENT_ID", "")
|
||||
}
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
func testConfig() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"client_id": "foo",
|
||||
"api_key": "bar",
|
||||
"api_token": "bar",
|
||||
"region": "nyc2",
|
||||
"size": "512mb",
|
||||
"image": "foo",
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -43,90 +40,6 @@ func TestBuilder_Prepare_BadType(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestBuilderPrepare_APIKey(t *testing.T) {
|
||||
var b Builder
|
||||
config := testConfig()
|
||||
|
||||
// Test good
|
||||
config["api_key"] = "foo"
|
||||
warnings, err := b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
if b.config.APIKey != "foo" {
|
||||
t.Errorf("access key invalid: %s", b.config.APIKey)
|
||||
}
|
||||
|
||||
// Test bad
|
||||
delete(config, "api_key")
|
||||
b = Builder{}
|
||||
warnings, err = b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
|
||||
// Test env variable
|
||||
delete(config, "api_key")
|
||||
os.Setenv("DIGITALOCEAN_API_KEY", "foo")
|
||||
defer os.Setenv("DIGITALOCEAN_API_KEY", "")
|
||||
warnings, err = b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilderPrepare_ClientID(t *testing.T) {
|
||||
var b Builder
|
||||
config := testConfig()
|
||||
|
||||
// Test good
|
||||
config["client_id"] = "foo"
|
||||
warnings, err := b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
if b.config.ClientID != "foo" {
|
||||
t.Errorf("invalid: %s", b.config.ClientID)
|
||||
}
|
||||
|
||||
// Test bad
|
||||
delete(config, "client_id")
|
||||
b = Builder{}
|
||||
warnings, err = b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
|
||||
// Test env variable
|
||||
delete(config, "client_id")
|
||||
os.Setenv("DIGITALOCEAN_CLIENT_ID", "foo")
|
||||
defer os.Setenv("DIGITALOCEAN_CLIENT_ID", "")
|
||||
warnings, err = b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilderPrepare_InvalidKey(t *testing.T) {
|
||||
var b Builder
|
||||
config := testConfig()
|
||||
|
@ -147,22 +60,18 @@ func TestBuilderPrepare_Region(t *testing.T) {
|
|||
config := testConfig()
|
||||
|
||||
// Test default
|
||||
delete(config, "region")
|
||||
warnings, err := b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
if b.config.Region != DefaultRegion {
|
||||
t.Errorf("found %s, expected %s", b.config.Region, DefaultRegion)
|
||||
if err == nil {
|
||||
t.Fatalf("should error")
|
||||
}
|
||||
|
||||
expected := "sfo1"
|
||||
|
||||
// Test set
|
||||
config["region_id"] = 0
|
||||
config["region"] = expected
|
||||
b = Builder{}
|
||||
warnings, err = b.Prepare(config)
|
||||
|
@ -183,22 +92,18 @@ func TestBuilderPrepare_Size(t *testing.T) {
|
|||
config := testConfig()
|
||||
|
||||
// Test default
|
||||
delete(config, "size")
|
||||
warnings, err := b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
if b.config.Size != DefaultSize {
|
||||
t.Errorf("found %s, expected %s", b.config.Size, DefaultSize)
|
||||
if err == nil {
|
||||
t.Fatalf("should error")
|
||||
}
|
||||
|
||||
expected := "1024mb"
|
||||
|
||||
// Test set
|
||||
config["size_id"] = 0
|
||||
config["size"] = expected
|
||||
b = Builder{}
|
||||
warnings, err = b.Prepare(config)
|
||||
|
@ -219,22 +124,18 @@ func TestBuilderPrepare_Image(t *testing.T) {
|
|||
config := testConfig()
|
||||
|
||||
// Test default
|
||||
delete(config, "image")
|
||||
warnings, err := b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
if b.config.Image != DefaultImage {
|
||||
t.Errorf("found %s, expected %s", b.config.Image, DefaultImage)
|
||||
if err == nil {
|
||||
t.Fatal("should error")
|
||||
}
|
||||
|
||||
expected := "ubuntu-14-04-x64"
|
||||
|
||||
// Test set
|
||||
config["image_id"] = 0
|
||||
config["image"] = expected
|
||||
b = Builder{}
|
||||
warnings, err = b.Prepare(config)
|
||||
|
@ -263,8 +164,8 @@ func TestBuilderPrepare_SSHUsername(t *testing.T) {
|
|||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
if b.config.SSHUsername != "root" {
|
||||
t.Errorf("invalid: %s", b.config.SSHUsername)
|
||||
if b.config.Comm.SSHUsername != "root" {
|
||||
t.Errorf("invalid: %s", b.config.Comm.SSHUsername)
|
||||
}
|
||||
|
||||
// Test set
|
||||
|
@ -278,52 +179,11 @@ func TestBuilderPrepare_SSHUsername(t *testing.T) {
|
|||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
if b.config.SSHUsername != "foo" {
|
||||
t.Errorf("invalid: %s", b.config.SSHUsername)
|
||||
if b.config.Comm.SSHUsername != "foo" {
|
||||
t.Errorf("invalid: %s", b.config.Comm.SSHUsername)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilderPrepare_SSHTimeout(t *testing.T) {
|
||||
var b Builder
|
||||
config := testConfig()
|
||||
|
||||
// Test default
|
||||
warnings, err := b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
if b.config.RawSSHTimeout != "1m" {
|
||||
t.Errorf("invalid: %s", b.config.RawSSHTimeout)
|
||||
}
|
||||
|
||||
// Test set
|
||||
config["ssh_timeout"] = "30s"
|
||||
b = Builder{}
|
||||
warnings, err = b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
// Test bad
|
||||
config["ssh_timeout"] = "tubes"
|
||||
b = Builder{}
|
||||
warnings, err = b.Prepare(config)
|
||||
if len(warnings) > 0 {
|
||||
t.Fatalf("bad: %#v", warnings)
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatal("should have error")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestBuilderPrepare_StateTimeout(t *testing.T) {
|
||||
var b Builder
|
||||
config := testConfig()
|
||||
|
@ -337,8 +197,8 @@ func TestBuilderPrepare_StateTimeout(t *testing.T) {
|
|||
t.Fatalf("should not have error: %s", err)
|
||||
}
|
||||
|
||||
if b.config.RawStateTimeout != "6m" {
|
||||
t.Errorf("invalid: %s", b.config.RawStateTimeout)
|
||||
if b.config.StateTimeout != 6*time.Minute {
|
||||
t.Errorf("invalid: %s", b.config.StateTimeout)
|
||||
}
|
||||
|
||||
// Test set
|
||||
|
|
|
@ -0,0 +1,119 @@
|
|||
package digitalocean
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"github.com/mitchellh/packer/common"
|
||||
"github.com/mitchellh/packer/common/uuid"
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
"github.com/mitchellh/packer/helper/config"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"github.com/mitchellh/packer/template/interpolate"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
common.PackerConfig `mapstructure:",squash"`
|
||||
Comm communicator.Config `mapstructure:",squash"`
|
||||
|
||||
APIToken string `mapstructure:"api_token"`
|
||||
|
||||
Region string `mapstructure:"region"`
|
||||
Size string `mapstructure:"size"`
|
||||
Image string `mapstructure:"image"`
|
||||
|
||||
PrivateNetworking bool `mapstructure:"private_networking"`
|
||||
SnapshotName string `mapstructure:"snapshot_name"`
|
||||
StateTimeout time.Duration `mapstructure:"state_timeout"`
|
||||
DropletName string `mapstructure:"droplet_name"`
|
||||
UserData string `mapstructure:"user_data"`
|
||||
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
func NewConfig(raws ...interface{}) (*Config, []string, error) {
|
||||
c := new(Config)
|
||||
|
||||
var md mapstructure.Metadata
|
||||
err := config.Decode(c, &config.DecodeOpts{
|
||||
Metadata: &md,
|
||||
Interpolate: true,
|
||||
InterpolateContext: &c.ctx,
|
||||
InterpolateFilter: &interpolate.RenderFilter{
|
||||
Exclude: []string{
|
||||
"run_command",
|
||||
},
|
||||
},
|
||||
}, raws...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Defaults
|
||||
if c.APIToken == "" {
|
||||
// Default to environment variable for api_token, if it exists
|
||||
c.APIToken = os.Getenv("DIGITALOCEAN_API_TOKEN")
|
||||
}
|
||||
|
||||
if c.SnapshotName == "" {
|
||||
def, err := interpolate.Render("packer-{{timestamp}}", nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Default to packer-{{ unix timestamp (utc) }}
|
||||
c.SnapshotName = def
|
||||
}
|
||||
|
||||
if c.DropletName == "" {
|
||||
// Default to packer-[time-ordered-uuid]
|
||||
c.DropletName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
|
||||
}
|
||||
|
||||
if c.Comm.SSHUsername == "" {
|
||||
// Default to "root". You can override this if your
|
||||
// SourceImage has a different user account than the DO default
|
||||
c.Comm.SSHUsername = "root"
|
||||
}
|
||||
|
||||
if c.StateTimeout == 0 {
|
||||
// Default to 6 minute timeouts waiting for
|
||||
// desired state. i.e waiting for droplet to become active
|
||||
c.StateTimeout = 6 * time.Minute
|
||||
}
|
||||
|
||||
var errs *packer.MultiError
|
||||
if es := c.Comm.Prepare(&c.ctx); len(es) > 0 {
|
||||
errs = packer.MultiErrorAppend(errs, es...)
|
||||
}
|
||||
if c.APIToken == "" {
|
||||
// Required configurations that will display errors if not set
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, errors.New("api_token for auth must be specified"))
|
||||
}
|
||||
|
||||
if c.Region == "" {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, errors.New("region is required"))
|
||||
}
|
||||
|
||||
if c.Size == "" {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, errors.New("size is required"))
|
||||
}
|
||||
|
||||
if c.Image == "" {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, errors.New("image is required"))
|
||||
}
|
||||
|
||||
if errs != nil && len(errs.Errors) > 0 {
|
||||
return nil, nil, errs
|
||||
}
|
||||
|
||||
common.ScrubConfig(c, c.APIToken)
|
||||
return c, nil, nil
|
||||
}
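// exampleNewConfig is an illustrative sketch, not part of this commit: it
// exercises NewConfig with a minimal raw settings map of the shape the
// template decoder hands to builders. The token and slug values below are
// placeholders. With these inputs the defaults above fill in
// Comm.SSHUsername ("root") and StateTimeout (6 minutes).
func exampleNewConfig() (*Config, error) {
	raw := map[string]interface{}{
		"api_token": "example-token",
		"region":    "sfo1",
		"size":      "512mb",
		"image":     "ubuntu-14-04-x64",
	}
	c, _, err := NewConfig(raw)
	return c, err
}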
|
|
@ -1,19 +1,19 @@
|
|||
package digitalocean
|
||||
|
||||
import (
|
||||
"code.google.com/p/go.crypto/ssh"
|
||||
"fmt"
|
||||
"golang.org/x/crypto/ssh"
|
||||
|
||||
"github.com/mitchellh/multistep"
|
||||
)
|
||||
|
||||
func sshAddress(state multistep.StateBag) (string, error) {
|
||||
config := state.Get("config").(config)
|
||||
func commHost(state multistep.StateBag) (string, error) {
|
||||
ipAddress := state.Get("droplet_ip").(string)
|
||||
return fmt.Sprintf("%s:%d", ipAddress, config.SSHPort), nil
|
||||
return ipAddress, nil
|
||||
}
|
||||
|
||||
func sshConfig(state multistep.StateBag) (*ssh.ClientConfig, error) {
|
||||
config := state.Get("config").(config)
|
||||
config := state.Get("config").(Config)
|
||||
privateKey := state.Get("privateKey").(string)
|
||||
|
||||
signer, err := ssh.ParsePrivateKey([]byte(privateKey))
|
||||
|
@ -22,7 +22,7 @@ func sshConfig(state multistep.StateBag) (*ssh.ClientConfig, error) {
|
|||
}
|
||||
|
||||
return &ssh.ClientConfig{
|
||||
User: config.SSHUsername,
|
||||
User: config.Comm.SSHUsername,
|
||||
Auth: []ssh.AuthMethod{
|
||||
ssh.PublicKeys(signer),
|
||||
},
|
||||
|
|
|
@ -3,25 +3,36 @@ package digitalocean
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/digitalocean/godo"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
type stepCreateDroplet struct {
|
||||
dropletId uint
|
||||
dropletId int
|
||||
}
|
||||
|
||||
func (s *stepCreateDroplet) Run(state multistep.StateBag) multistep.StepAction {
|
||||
client := state.Get("client").(DigitalOceanClient)
|
||||
client := state.Get("client").(*godo.Client)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
c := state.Get("config").(config)
|
||||
sshKeyId := state.Get("ssh_key_id").(uint)
|
||||
|
||||
ui.Say("Creating droplet...")
|
||||
c := state.Get("config").(Config)
|
||||
sshKeyId := state.Get("ssh_key_id").(int)
|
||||
|
||||
// Create the droplet based on configuration
|
||||
dropletId, err := client.CreateDroplet(c.DropletName, c.Size, c.Image, c.Region, sshKeyId, c.PrivateNetworking)
|
||||
|
||||
ui.Say("Creating droplet...")
|
||||
droplet, _, err := client.Droplets.Create(&godo.DropletCreateRequest{
|
||||
Name: c.DropletName,
|
||||
Region: c.Region,
|
||||
Size: c.Size,
|
||||
Image: godo.DropletCreateImage{
|
||||
Slug: c.Image,
|
||||
},
|
||||
SSHKeys: []godo.DropletCreateSSHKey{
|
||||
godo.DropletCreateSSHKey{ID: int(sshKeyId)},
|
||||
},
|
||||
PrivateNetworking: c.PrivateNetworking,
|
||||
UserData: c.UserData,
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating droplet: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -30,10 +41,10 @@ func (s *stepCreateDroplet) Run(state multistep.StateBag) multistep.StepAction {
|
|||
}
|
||||
|
||||
// We use this in cleanup
|
||||
s.dropletId = dropletId
|
||||
s.dropletId = droplet.ID
|
||||
|
||||
// Store the droplet id for later
|
||||
state.Put("droplet_id", dropletId)
|
||||
state.Put("droplet_id", droplet.ID)
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
@ -44,19 +55,14 @@ func (s *stepCreateDroplet) Cleanup(state multistep.StateBag) {
|
|||
return
|
||||
}
|
||||
|
||||
client := state.Get("client").(DigitalOceanClient)
|
||||
client := state.Get("client").(*godo.Client)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
c := state.Get("config").(config)
|
||||
|
||||
// Destroy the droplet we just created
|
||||
ui.Say("Destroying droplet...")
|
||||
|
||||
err := client.DestroyDroplet(s.dropletId)
|
||||
_, err := client.Droplets.Delete(s.dropletId)
|
||||
if err != nil {
|
||||
curlstr := fmt.Sprintf("curl '%v/droplets/%v/destroy?client_id=%v&api_key=%v'",
|
||||
c.APIURL, s.dropletId, c.ClientID, c.APIKey)
|
||||
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error destroying droplet. Please destroy it manually: %v", curlstr))
|
||||
"Error destroying droplet. Please destroy it manually: %s", err))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,19 +7,25 @@ import (
|
|||
"encoding/pem"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"code.google.com/p/gosshold/ssh"
|
||||
"github.com/digitalocean/godo"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/common/uuid"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
type stepCreateSSHKey struct {
|
||||
keyId uint
|
||||
Debug bool
|
||||
DebugKeyPath string
|
||||
|
||||
keyId int
|
||||
}
|
||||
|
||||
func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction {
|
||||
client := state.Get("client").(DigitalOceanClient)
|
||||
client := state.Get("client").(*godo.Client)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
|
||||
ui.Say("Creating temporary ssh key for droplet...")
|
||||
|
@ -46,7 +52,10 @@ func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction {
|
|||
name := fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
|
||||
|
||||
// Create the key!
|
||||
keyId, err := client.CreateKey(name, pub_sshformat)
|
||||
key, _, err := client.Keys.Create(&godo.KeyCreateRequest{
|
||||
Name: name,
|
||||
PublicKey: pub_sshformat,
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating temporary SSH key: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -55,12 +64,37 @@ func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction {
|
|||
}
|
||||
|
||||
// We use this to check cleanup
|
||||
s.keyId = keyId
|
||||
s.keyId = key.ID
|
||||
|
||||
log.Printf("temporary ssh key name: %s", name)
|
||||
|
||||
// Remember some state for the future
|
||||
state.Put("ssh_key_id", keyId)
|
||||
state.Put("ssh_key_id", key.ID)
|
||||
|
||||
// If we're in debug mode, output the private key to the working directory.
|
||||
if s.Debug {
|
||||
ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.DebugKeyPath))
|
||||
f, err := os.Create(s.DebugKeyPath)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error saving debug key: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// Write the key out
|
||||
if _, err := f.Write(pem.EncodeToMemory(&priv_blk)); err != nil {
|
||||
state.Put("error", fmt.Errorf("Error saving debug key: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Chmod it so that it is SSH ready
|
||||
if runtime.GOOS != "windows" {
|
||||
if err := f.Chmod(0600); err != nil {
|
||||
state.Put("error", fmt.Errorf("Error setting permissions of debug key: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
@ -71,18 +105,14 @@ func (s *stepCreateSSHKey) Cleanup(state multistep.StateBag) {
|
|||
return
|
||||
}
|
||||
|
||||
client := state.Get("client").(DigitalOceanClient)
|
||||
client := state.Get("client").(*godo.Client)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
c := state.Get("config").(config)
|
||||
|
||||
ui.Say("Deleting temporary ssh key...")
|
||||
err := client.DestroyKey(s.keyId)
|
||||
|
||||
curlstr := fmt.Sprintf("curl -H 'Authorization: Bearer #TOKEN#' -X DELETE '%v/v2/account/keys/%v'", c.APIURL, s.keyId)
|
||||
|
||||
_, err := client.Keys.DeleteByID(s.keyId)
|
||||
if err != nil {
|
||||
log.Printf("Error cleaning up ssh key: %v", err.Error())
|
||||
log.Printf("Error cleaning up ssh key: %s", err)
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error cleaning up ssh key. Please delete the key manually: %v", curlstr))
|
||||
"Error cleaning up ssh key. Please delete the key manually: %s", err))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package digitalocean
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/digitalocean/godo"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
@ -10,14 +11,14 @@ import (
|
|||
type stepDropletInfo struct{}
|
||||
|
||||
func (s *stepDropletInfo) Run(state multistep.StateBag) multistep.StepAction {
|
||||
client := state.Get("client").(DigitalOceanClient)
|
||||
client := state.Get("client").(*godo.Client)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
c := state.Get("config").(config)
|
||||
dropletId := state.Get("droplet_id").(uint)
|
||||
c := state.Get("config").(Config)
|
||||
dropletID := state.Get("droplet_id").(int)
|
||||
|
||||
ui.Say("Waiting for droplet to become active...")
|
||||
|
||||
err := waitForDropletState("active", dropletId, client, c.stateTimeout)
|
||||
err := waitForDropletState("active", dropletID, client, c.StateTimeout)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error waiting for droplet to become active: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -26,15 +27,39 @@ func (s *stepDropletInfo) Run(state multistep.StateBag) multistep.StepAction {
|
|||
}
|
||||
|
||||
// Set the IP on the state for later
|
||||
ip, _, err := client.DropletStatus(dropletId)
|
||||
droplet, _, err := client.Droplets.Get(dropletID)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error retrieving droplet ID: %s", err)
|
||||
err := fmt.Errorf("Error retrieving droplet: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
state.Put("droplet_ip", ip)
|
||||
// Verify we have an IPv4 address
|
||||
invalid := droplet.Networks == nil ||
|
||||
len(droplet.Networks.V4) == 0
|
||||
if invalid {
|
||||
err := fmt.Errorf("IPv4 address not found for droplet")
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Find a public IPv4 network
|
||||
foundNetwork := false
|
||||
for _, network := range droplet.Networks.V4 {
|
||||
if network.Type == "public" {
|
||||
state.Put("droplet_ip", network.IPAddress)
|
||||
foundNetwork = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundNetwork {
|
||||
err := fmt.Errorf("Count not find a public IPv4 address for this droplet")
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
|
|
@ -3,7 +3,9 @@ package digitalocean
|
|||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/digitalocean/godo"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
@ -11,12 +13,12 @@ import (
|
|||
type stepPowerOff struct{}
|
||||
|
||||
func (s *stepPowerOff) Run(state multistep.StateBag) multistep.StepAction {
|
||||
client := state.Get("client").(DigitalOceanClient)
|
||||
c := state.Get("config").(config)
|
||||
client := state.Get("client").(*godo.Client)
|
||||
c := state.Get("config").(Config)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
dropletId := state.Get("droplet_id").(uint)
|
||||
dropletId := state.Get("droplet_id").(int)
|
||||
|
||||
_, status, err := client.DropletStatus(dropletId)
|
||||
droplet, _, err := client.Droplets.Get(dropletId)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error checking droplet state: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -24,14 +26,14 @@ func (s *stepPowerOff) Run(state multistep.StateBag) multistep.StepAction {
|
|||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
if status == "off" {
|
||||
if droplet.Status == "off" {
|
||||
// Droplet is already off, don't do anything
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Pull the plug on the Droplet
|
||||
ui.Say("Forcefully shutting down Droplet...")
|
||||
err = client.PowerOffDroplet(dropletId)
|
||||
_, _, err = client.DropletActions.PowerOff(dropletId)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error powering off droplet: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -40,13 +42,22 @@ func (s *stepPowerOff) Run(state multistep.StateBag) multistep.StepAction {
|
|||
}
|
||||
|
||||
log.Println("Waiting for poweroff event to complete...")
|
||||
err = waitForDropletState("off", dropletId, client, c.stateTimeout)
|
||||
err = waitForDropletState("off", dropletId, client, c.StateTimeout)
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Wait for the droplet to become unlocked for future steps
|
||||
if err := waitForDropletUnlocked(client, dropletId, 2*time.Minute); err != nil {
|
||||
// If we get an error the first time, actually report it
|
||||
err := fmt.Errorf("Error powering off droplet: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/digitalocean/godo"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
@ -12,16 +13,16 @@ import (
|
|||
type stepShutdown struct{}
|
||||
|
||||
func (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction {
|
||||
client := state.Get("client").(DigitalOceanClient)
|
||||
client := state.Get("client").(*godo.Client)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
dropletId := state.Get("droplet_id").(uint)
|
||||
dropletId := state.Get("droplet_id").(int)
|
||||
|
||||
// Gracefully power off the droplet. We have to retry this a number
|
||||
// of times because sometimes it says it completed when it actually
|
||||
// did absolutely nothing (*ALAKAZAM!* magic!). We give up after
|
||||
// a pretty arbitrary amount of time.
|
||||
ui.Say("Gracefully shutting down droplet...")
|
||||
err := client.ShutdownDroplet(dropletId)
|
||||
_, _, err := client.DropletActions.Shutdown(dropletId)
|
||||
if err != nil {
|
||||
// If we get an error the first time, actually report it
|
||||
err := fmt.Errorf("Error shutting down droplet: %s", err)
|
||||
|
@ -48,7 +49,7 @@ func (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction {
|
|||
|
||||
for attempts := 2; attempts > 0; attempts++ {
|
||||
log.Printf("ShutdownDroplet attempt #%d...", attempts)
|
||||
err := client.ShutdownDroplet(dropletId)
|
||||
_, _, err := client.DropletActions.Shutdown(dropletId)
|
||||
if err != nil {
|
||||
log.Printf("Shutdown retry error: %s", err)
|
||||
}
|
||||
|
@ -64,7 +65,19 @@ func (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction {
|
|||
|
||||
err = waitForDropletState("off", dropletId, client, 2*time.Minute)
|
||||
if err != nil {
|
||||
log.Printf("Error waiting for graceful off: %s", err)
|
||||
// If we get an error the first time, actually report it
|
||||
err := fmt.Errorf("Error shutting down droplet: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
if err := waitForDropletUnlocked(client, dropletId, 2*time.Minute); err != nil {
|
||||
// If we get an error the first time, actually report it
|
||||
err := fmt.Errorf("Error shutting down droplet: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
|
|
|
@ -4,7 +4,9 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/digitalocean/godo"
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
@ -12,13 +14,13 @@ import (
|
|||
type stepSnapshot struct{}
|
||||
|
||||
func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
|
||||
client := state.Get("client").(DigitalOceanClient)
|
||||
client := state.Get("client").(*godo.Client)
|
||||
ui := state.Get("ui").(packer.Ui)
|
||||
c := state.Get("config").(config)
|
||||
dropletId := state.Get("droplet_id").(uint)
|
||||
c := state.Get("config").(Config)
|
||||
dropletId := state.Get("droplet_id").(int)
|
||||
|
||||
ui.Say(fmt.Sprintf("Creating snapshot: %v", c.SnapshotName))
|
||||
err := client.CreateSnapshot(dropletId, c.SnapshotName)
|
||||
_, _, err := client.DropletActions.Snapshot(dropletId, c.SnapshotName)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating snapshot: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -26,8 +28,20 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
|
|||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Wait for the droplet to become unlocked first. For snapshots
|
||||
// this can end up taking quite a long time, so we hardcode this to
|
||||
// 10 minutes.
|
||||
if err := waitForDropletUnlocked(client, dropletId, 10*time.Minute); err != nil {
|
||||
// If we get an error the first time, actually report it
|
||||
err := fmt.Errorf("Error shutting down droplet: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// With the pending state over, verify that we're in the active state
|
||||
ui.Say("Waiting for snapshot to complete...")
|
||||
err = waitForDropletState("active", dropletId, client, c.stateTimeout)
|
||||
err = waitForDropletState("active", dropletId, client, c.StateTimeout)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error waiting for snapshot to complete: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -36,7 +50,7 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
|
|||
}
|
||||
|
||||
log.Printf("Looking up snapshot ID for snapshot: %s", c.SnapshotName)
|
||||
images, err := client.Images()
|
||||
images, _, err := client.Images.ListUser(&godo.ListOptions{PerPage: 200})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error looking up snapshot ID: %s", err)
|
||||
state.Put("error", err)
|
||||
|
@ -44,10 +58,10 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
|
|||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
var imageId uint
|
||||
var imageId int
|
||||
for _, image := range images {
|
||||
if image.Name == c.SnapshotName {
|
||||
imageId = image.Id
|
||||
imageId = image.ID
|
||||
break
|
||||
}
|
||||
}
|
||||
|
@ -60,7 +74,6 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
|
|||
}
|
||||
|
||||
log.Printf("Snapshot image ID: %d", imageId)
|
||||
|
||||
state.Put("snapshot_image_id", imageId)
|
||||
state.Put("snapshot_name", c.SnapshotName)
|
||||
state.Put("region", c.Region)
|
||||
|
|
|
@ -0,0 +1,15 @@
|
|||
package digitalocean
|
||||
|
||||
import (
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
type apiTokenSource struct {
|
||||
AccessToken string
|
||||
}
|
||||
|
||||
func (t *apiTokenSource) Token() (*oauth2.Token, error) {
|
||||
return &oauth2.Token{
|
||||
AccessToken: t.AccessToken,
|
||||
}, nil
|
||||
}
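// newGodoClient is a hedged sketch of how apiTokenSource is expected to be
// used (an assumption; the wiring is not part of this diff): the api_token is
// wrapped in the token source, oauth2 builds an authenticated *http.Client
// from it, and that client backs the *godo.Client the build steps pull from
// the state bag. Assumes the github.com/digitalocean/godo import used
// elsewhere in this package.
func newGodoClient(apiToken string) *godo.Client {
	ts := &apiTokenSource{AccessToken: apiToken}
	return godo.NewClient(oauth2.NewClient(oauth2.NoContext, ts))
}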
|
|
@ -4,11 +4,64 @@ import (
|
|||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/digitalocean/godo"
|
||||
)
|
||||
|
||||
// waitForDropletUnlocked waits for the Droplet to be unlocked to
|
||||
// avoid "pending" errors when making state changes.
|
||||
func waitForDropletUnlocked(
|
||||
client *godo.Client, dropletId int, timeout time.Duration) error {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
result := make(chan error, 1)
|
||||
go func() {
|
||||
attempts := 0
|
||||
for {
|
||||
attempts += 1
|
||||
|
||||
log.Printf("[DEBUG] Checking droplet lock state... (attempt: %d)", attempts)
|
||||
droplet, _, err := client.Droplets.Get(dropletId)
|
||||
if err != nil {
|
||||
result <- err
|
||||
return
|
||||
}
|
||||
|
||||
if !droplet.Locked {
|
||||
result <- nil
|
||||
return
|
||||
}
|
||||
|
||||
// Wait 3 seconds in between
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// Verify we shouldn't exit
|
||||
select {
|
||||
case <-done:
|
||||
// We finished, so just exit the goroutine
|
||||
return
|
||||
default:
|
||||
// Keep going
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
log.Printf("[DEBUG] Waiting for up to %d seconds for droplet to unlock", timeout/time.Second)
|
||||
select {
|
||||
case err := <-result:
|
||||
return err
|
||||
case <-time.After(timeout):
|
||||
return fmt.Errorf(
|
||||
"Timeout while waiting to for droplet to unlock")
|
||||
}
|
||||
}
|
||||
|
||||
// waitForDropletState blocks until the droplet is in
|
||||
// a state we expect, while eventually timing out.
|
||||
func waitForDropletState(desiredState string, dropletId uint, client DigitalOceanClient, timeout time.Duration) error {
|
||||
func waitForDropletState(
|
||||
desiredState string, dropletId int,
|
||||
client *godo.Client, timeout time.Duration) error {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
|
@ -19,13 +72,13 @@ func waitForDropletState(desiredState string, dropletId uint, client DigitalOcea
|
|||
attempts += 1
|
||||
|
||||
log.Printf("Checking droplet status... (attempt: %d)", attempts)
|
||||
_, status, err := client.DropletStatus(dropletId)
|
||||
droplet, _, err := client.Droplets.Get(dropletId)
|
||||
if err != nil {
|
||||
result <- err
|
||||
return
|
||||
}
|
||||
|
||||
if status == desiredState {
|
||||
if droplet.Status == desiredState {
|
||||
result <- nil
|
||||
return
|
||||
}
|
||||
|
|
|
@ -1,14 +1,18 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/common"
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"log"
|
||||
)
|
||||
|
||||
const BuilderId = "packer.docker"
|
||||
const BuilderIdImport = "packer.post-processor.docker-import"
|
||||
const (
|
||||
BuilderId = "packer.docker"
|
||||
BuilderIdImport = "packer.post-processor.docker-import"
|
||||
)
|
||||
|
||||
type Builder struct {
|
||||
config *Config
|
||||
|
@ -26,22 +30,42 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
|
|||
}
|
||||
|
||||
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
|
||||
driver := &DockerDriver{Tpl: b.config.tpl, Ui: ui}
|
||||
driver := &DockerDriver{Ctx: &b.config.ctx, Ui: ui}
|
||||
if err := driver.Verify(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
version, err := driver.Version()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Printf("[DEBUG] Docker version: %s", version.String())
|
||||
|
||||
steps := []multistep.Step{
|
||||
&StepTempDir{},
|
||||
&StepPull{},
|
||||
&StepRun{},
|
||||
&StepProvision{},
|
||||
&communicator.StepConnect{
|
||||
Config: &b.config.Comm,
|
||||
Host: commHost,
|
||||
SSHConfig: sshConfig(&b.config.Comm),
|
||||
CustomConnect: map[string]multistep.Step{
|
||||
"docker": &StepConnectDocker{},
|
||||
},
|
||||
},
|
||||
&common.StepProvision{},
|
||||
}
|
||||
|
||||
if b.config.Commit {
|
||||
if b.config.Discard {
|
||||
log.Print("[DEBUG] Container will be discarded")
|
||||
} else if b.config.Commit {
|
||||
log.Print("[DEBUG] Container will be committed")
|
||||
steps = append(steps, new(StepCommit))
|
||||
} else {
|
||||
} else if b.config.ExportPath != "" {
|
||||
log.Printf("[DEBUG] Container will be exported to %s", b.config.ExportPath)
|
||||
steps = append(steps, new(StepExport))
|
||||
} else {
|
||||
return nil, errArtifactNotUsed
|
||||
}
|
||||
|
||||
// Setup the state bag and initial state for the steps
|
||||
|
@ -70,8 +94,13 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
return nil, rawErr.(error)
|
||||
}
|
||||
|
||||
var artifact packer.Artifact
|
||||
// If it was cancelled, then just return
|
||||
if _, ok := state.GetOk(multistep.StateCancelled); ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// No errors, must've worked
|
||||
var artifact packer.Artifact
|
||||
if b.config.Commit {
|
||||
artifact = &ImportArtifact{
|
||||
IdValue: state.Get("image_id").(string),
|
||||
|
@ -81,6 +110,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
} else {
|
||||
artifact = &ExportArtifact{path: b.config.ExportPath}
|
||||
}
|
||||
|
||||
return artifact, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,52 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/communicator/ssh"
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
func commHost(state multistep.StateBag) (string, error) {
|
||||
containerId := state.Get("container_id").(string)
|
||||
driver := state.Get("driver").(Driver)
|
||||
return driver.IPAddress(containerId)
|
||||
}
|
||||
|
||||
func sshConfig(comm *communicator.Config) func(state multistep.StateBag) (*gossh.ClientConfig, error) {
|
||||
return func(state multistep.StateBag) (*gossh.ClientConfig, error) {
|
||||
if comm.SSHPrivateKey != "" {
|
||||
// key based auth
|
||||
bytes, err := ioutil.ReadFile(comm.SSHPrivateKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error setting up SSH config: %s", err)
|
||||
}
|
||||
privateKey := string(bytes)
|
||||
|
||||
signer, err := gossh.ParsePrivateKey([]byte(privateKey))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error setting up SSH config: %s", err)
|
||||
}
|
||||
|
||||
return &gossh.ClientConfig{
|
||||
User: comm.SSHUsername,
|
||||
Auth: []gossh.AuthMethod{
|
||||
gossh.PublicKeys(signer),
|
||||
},
|
||||
}, nil
|
||||
} else {
|
||||
// password based auth
|
||||
return &gossh.ClientConfig{
|
||||
User: comm.SSHUsername,
|
||||
Auth: []gossh.AuthMethod{
|
||||
gossh.Password(comm.SSHPassword),
|
||||
gossh.KeyboardInteractive(
|
||||
ssh.PasswordKeyboardInteractive(comm.SSHPassword)),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,6 +1,7 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -15,6 +16,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/ActiveState/tail"
|
||||
"github.com/hashicorp/go-version"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
|
@ -22,7 +24,8 @@ type Communicator struct {
|
|||
ContainerId string
|
||||
HostDir string
|
||||
ContainerDir string
|
||||
|
||||
Version *version.Version
|
||||
Config *Config
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
|
@ -41,7 +44,17 @@ func (c *Communicator) Start(remote *packer.RemoteCmd) error {
|
|||
// This file will store the exit code of the command once it is complete.
|
||||
exitCodePath := outputFile.Name() + "-exit"
|
||||
|
||||
cmd := exec.Command("docker", "attach", c.ContainerId)
|
||||
var cmd *exec.Cmd
|
||||
if c.canExec() {
|
||||
if c.Config.Pty {
|
||||
cmd = exec.Command("docker", "exec", "-i", "-t", c.ContainerId, "/bin/sh")
|
||||
} else {
|
||||
cmd = exec.Command("docker", "exec", "-i", c.ContainerId, "/bin/sh")
|
||||
}
|
||||
} else {
|
||||
cmd = exec.Command("docker", "attach", c.ContainerId)
|
||||
}
|
||||
|
||||
stdin_w, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
// We have to do some cleanup since run was never called
|
||||
|
@ -117,7 +130,7 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error
|
|||
return os.MkdirAll(hostpath, info.Mode())
|
||||
}
|
||||
|
||||
if info.Mode() & os.ModeSymlink == os.ModeSymlink {
|
||||
if info.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
dest, err := os.Readlink(path)
|
||||
|
||||
if err != nil {
|
||||
|
@ -182,8 +195,51 @@ func (c *Communicator) UploadDir(dst string, src string, exclude []string) error
|
|||
return nil
|
||||
}
|
||||
|
||||
// Download pulls a file out of a container using `docker cp`. We have a source
|
||||
// path and want to write to an io.Writer, not a file. We use - to make docker
|
||||
// cp write to stdout, and then copy the stream to our destination io.Writer.
|
||||
func (c *Communicator) Download(src string, dst io.Writer) error {
|
||||
panic("not implemented")
|
||||
log.Printf("Downloading file from container: %s:%s", c.ContainerId, src)
|
||||
localCmd := exec.Command("docker", "cp", fmt.Sprintf("%s:%s", c.ContainerId, src), "-")
|
||||
|
||||
pipe, err := localCmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to open pipe: %s", err)
|
||||
}
|
||||
|
||||
if err = localCmd.Start(); err != nil {
|
||||
return fmt.Errorf("Failed to start download: %s", err)
|
||||
}
|
||||
|
||||
// When you use - to send docker cp to stdout it is streamed as a tar; this
|
||||
// enables it to work with directories. We don't actually support
|
||||
// directories in Download() but we still need to handle the tar format.
|
||||
archive := tar.NewReader(pipe)
|
||||
_, err = archive.Next()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to read header from tar stream: %s", err)
|
||||
}
|
||||
|
||||
numBytes, err := io.Copy(dst, archive)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to pipe download: %s", err)
|
||||
}
|
||||
log.Printf("Copied %d bytes for %s", numBytes, src)
|
||||
|
||||
if err = localCmd.Wait(); err != nil {
|
||||
return fmt.Errorf("Failed to download '%s' from container: %s", src, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// canExec tells us whether `docker exec` is supported
|
||||
func (c *Communicator) canExec() bool {
|
||||
execConstraint, err := version.NewConstraint(">= 1.4.0")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return execConstraint.Check(c.Version)
|
||||
}
|
||||
|
||||
// Runs the given command and blocks until completion
|
||||
|
|
|
@ -1,10 +1,269 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"crypto/sha256"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"github.com/mitchellh/packer/provisioner/file"
|
||||
"github.com/mitchellh/packer/provisioner/shell"
|
||||
"github.com/mitchellh/packer/template"
|
||||
)
|
||||
|
||||
func TestCommunicator_impl(t *testing.T) {
|
||||
var _ packer.Communicator = new(Communicator)
|
||||
}
|
||||
|
||||
// TestUploadDownload verifies that basic upload / download functionality works
|
||||
func TestUploadDownload(t *testing.T) {
|
||||
ui := packer.TestUi(t)
|
||||
cache := &packer.FileCache{CacheDir: os.TempDir()}
|
||||
|
||||
tpl, err := template.Parse(strings.NewReader(dockerBuilderConfig))
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to parse config: %s", err)
|
||||
}
|
||||
|
||||
if os.Getenv("PACKER_ACC") == "" {
|
||||
t.Skip("This test is only run with PACKER_ACC=1")
|
||||
}
|
||||
cmd := exec.Command("docker", "-v")
|
||||
cmd.Run()
|
||||
if !cmd.ProcessState.Success() {
|
||||
t.Error("docker command not found; please make sure docker is installed")
|
||||
}
|
||||
|
||||
// Setup the builder
|
||||
builder := &Builder{}
|
||||
warnings, err := builder.Prepare(tpl.Builders["docker"].Config)
|
||||
if err != nil {
|
||||
t.Fatalf("Error preparing configuration %s", err)
|
||||
}
|
||||
if len(warnings) > 0 {
|
||||
t.Fatal("Encountered configuration warnings; aborting")
|
||||
}
|
||||
|
||||
// Setup the provisioners
|
||||
upload := &file.Provisioner{}
|
||||
err = upload.Prepare(tpl.Provisioners[0].Config)
|
||||
if err != nil {
|
||||
t.Fatalf("Error preparing upload: %s", err)
|
||||
}
|
||||
download := &file.Provisioner{}
|
||||
err = download.Prepare(tpl.Provisioners[1].Config)
|
||||
if err != nil {
|
||||
t.Fatalf("Error preparing download: %s", err)
|
||||
}
|
||||
// Preemptive cleanup. Honestly I don't know why you would want to get rid
|
||||
// of my strawberry cake. It's so tasty! Do you not like cake? Are you a
|
||||
// cake-hater? Or are you keeping all the cake all for yourself? So selfish!
|
||||
defer os.Remove("my-strawberry-cake")
|
||||
|
||||
// Add hooks so the provisioners run during the build
|
||||
hooks := map[string][]packer.Hook{}
|
||||
hooks[packer.HookProvision] = []packer.Hook{
|
||||
&packer.ProvisionHook{
|
||||
Provisioners: []packer.Provisioner{
|
||||
upload,
|
||||
download,
|
||||
},
|
||||
},
|
||||
}
|
||||
hook := &packer.DispatchHook{Mapping: hooks}
|
||||
|
||||
// Run things
|
||||
artifact, err := builder.Run(ui, hook, cache)
|
||||
if err != nil {
|
||||
t.Fatalf("Error running build %s", err)
|
||||
}
|
||||
// Preemptive cleanup
|
||||
defer artifact.Destroy()
|
||||
|
||||
// Verify that the thing we downloaded is the same thing we sent up.
|
||||
// Complain loudly if it isn't.
|
||||
inputFile, err := ioutil.ReadFile("test-fixtures/onecakes/strawberry")
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to read input file: %s", err)
|
||||
}
|
||||
outputFile, err := ioutil.ReadFile("my-strawberry-cake")
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to read output file: %s", err)
|
||||
}
|
||||
if sha256.Sum256(inputFile) != sha256.Sum256(outputFile) {
|
||||
t.Fatalf("Input and output files do not match\n"+
|
||||
"Input:\n%s\nOutput:\n%s\n", inputFile, outputFile)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLargeDownload verifies that files are the appropriate size after being
|
||||
// downloaded. This is to identify and fix the race condition in #2793. You may
|
||||
// need to use github.com/cbednarski/rerun to verify since this problem occurs
|
||||
// only intermittently.
|
||||
func TestLargeDownload(t *testing.T) {
|
||||
ui := packer.TestUi(t)
|
||||
cache := &packer.FileCache{CacheDir: os.TempDir()}
|
||||
|
||||
tpl, err := template.Parse(strings.NewReader(dockerLargeBuilderConfig))
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to parse config: %s", err)
|
||||
}
|
||||
|
||||
if os.Getenv("PACKER_ACC") == "" {
|
||||
t.Skip("This test is only run with PACKER_ACC=1")
|
||||
}
|
||||
cmd := exec.Command("docker", "-v")
|
||||
cmd.Run()
|
||||
if !cmd.ProcessState.Success() {
|
||||
t.Error("docker command not found; please make sure docker is installed")
|
||||
}
|
||||
|
||||
// Setup the builder
|
||||
builder := &Builder{}
|
||||
warnings, err := builder.Prepare(tpl.Builders["docker"].Config)
|
||||
if err != nil {
|
||||
t.Fatalf("Error preparing configuration %s", err)
|
||||
}
|
||||
if len(warnings) > 0 {
|
||||
t.Fatal("Encountered configuration warnings; aborting")
|
||||
}
|
||||
|
||||
// Setup the provisioners
|
||||
shell := &shell.Provisioner{}
|
||||
err = shell.Prepare(tpl.Provisioners[0].Config)
|
||||
if err != nil {
|
||||
t.Fatalf("Error preparing shell provisioner: %s", err)
|
||||
}
|
||||
downloadCupcake := &file.Provisioner{}
|
||||
err = downloadCupcake.Prepare(tpl.Provisioners[1].Config)
|
||||
if err != nil {
|
||||
t.Fatalf("Error preparing downloadCupcake: %s", err)
|
||||
}
|
||||
downloadBigcake := &file.Provisioner{}
|
||||
err = downloadBigcake.Prepare(tpl.Provisioners[2].Config)
|
||||
if err != nil {
|
||||
t.Fatalf("Error preparing downloadBigcake: %s", err)
|
||||
}
|
||||
|
||||
// Preemptive cleanup.
|
||||
defer os.Remove("cupcake")
|
||||
defer os.Remove("bigcake")
|
||||
|
||||
// Add hooks so the provisioners run during the build
|
||||
hooks := map[string][]packer.Hook{}
|
||||
hooks[packer.HookProvision] = []packer.Hook{
|
||||
&packer.ProvisionHook{
|
||||
Provisioners: []packer.Provisioner{
|
||||
shell,
|
||||
downloadCupcake,
|
||||
downloadBigcake,
|
||||
},
|
||||
},
|
||||
}
|
||||
hook := &packer.DispatchHook{Mapping: hooks}
|
||||
|
||||
// Run things
|
||||
artifact, err := builder.Run(ui, hook, cache)
|
||||
if err != nil {
|
||||
t.Fatalf("Error running build %s", err)
|
||||
}
|
||||
// Preemptive cleanup
|
||||
defer artifact.Destroy()
|
||||
|
||||
// Verify that the things we downloaded are the right size. Complain loudly
|
||||
// if they are not.
|
||||
//
|
||||
// cupcake should be 2097152 bytes
|
||||
// bigcake should be 104857600 bytes
|
||||
cupcake, err := os.Stat("cupcake")
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to stat cupcake file: %s", err)
|
||||
}
|
||||
cupcakeExpected := int64(2097152)
|
||||
if cupcake.Size() != cupcakeExpected {
|
||||
t.Errorf("Expected cupcake to be %d bytes; found %d", cupcakeExpected, cupcake.Size())
|
||||
}
|
||||
|
||||
bigcake, err := os.Stat("bigcake")
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to stat bigcake file: %s", err)
|
||||
}
|
||||
bigcakeExpected := int64(104857600)
|
||||
if bigcake.Size() != bigcakeExpected {
|
||||
t.Errorf("Expected bigcake to be %d bytes; found %d", bigcakeExpected, bigcake.Size())
|
||||
}
|
||||
|
||||
// TODO if we can, calculate a sha inside the container and compare to the
|
||||
// one we get after we pull it down. We will probably have to parse the log
|
||||
// or ui output to do this because we use /dev/urandom to create the file.
|
||||
|
||||
// if sha256.Sum256(inputFile) != sha256.Sum256(outputFile) {
|
||||
// t.Fatalf("Input and output files do not match\n"+
|
||||
// "Input:\n%s\nOutput:\n%s\n", inputFile, outputFile)
|
||||
// }
|
||||
|
||||
}
|
||||
|
||||
const dockerBuilderConfig = `
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type": "docker",
|
||||
"image": "ubuntu",
|
||||
"discard": true,
|
||||
"run_command": ["-d", "-i", "-t", "{{.Image}}", "/bin/sh"]
|
||||
}
|
||||
],
|
||||
"provisioners": [
|
||||
{
|
||||
"type": "file",
|
||||
"source": "test-fixtures/onecakes/strawberry",
|
||||
"destination": "/strawberry-cake"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"source": "/strawberry-cake",
|
||||
"destination": "my-strawberry-cake",
|
||||
"direction": "download"
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
||||
const dockerLargeBuilderConfig = `
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type": "docker",
|
||||
"image": "ubuntu",
|
||||
"discard": true
|
||||
}
|
||||
],
|
||||
"provisioners": [
|
||||
{
|
||||
"type": "shell",
|
||||
"inline": [
|
||||
"dd if=/dev/urandom of=/tmp/cupcake bs=1M count=2",
|
||||
"dd if=/dev/urandom of=/tmp/bigcake bs=1M count=100",
|
||||
"sync",
|
||||
"md5sum /tmp/cupcake /tmp/bigcake"
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"source": "/tmp/cupcake",
|
||||
"destination": "cupcake",
|
||||
"direction": "download"
|
||||
},
|
||||
{
|
||||
"type": "file",
|
||||
"source": "/tmp/bigcake",
|
||||
"destination": "bigcake",
|
||||
"direction": "download"
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
|
|
@ -2,50 +2,68 @@ package docker
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"github.com/mitchellh/packer/common"
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
"github.com/mitchellh/packer/helper/config"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"github.com/mitchellh/packer/template/interpolate"
|
||||
)
|
||||
|
||||
var (
|
||||
errArtifactNotUsed = fmt.Errorf("No instructions given for handling the artifact; expected commit, discard, or export_path")
|
||||
errArtifactUseConflict = fmt.Errorf("Cannot specify more than one of commit, discard, and export_path")
|
||||
errExportPathNotFile = fmt.Errorf("export_path must be a file, not a directory")
|
||||
errImageNotSpecified = fmt.Errorf("Image must be specified")
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
common.PackerConfig `mapstructure:",squash"`
|
||||
Comm communicator.Config `mapstructure:",squash"`
|
||||
|
||||
Commit bool
|
||||
Discard bool
|
||||
ExportPath string `mapstructure:"export_path"`
|
||||
Image string
|
||||
Pty bool
|
||||
Pull bool
|
||||
RunCommand []string `mapstructure:"run_command"`
|
||||
Volumes map[string]string
|
||||
|
||||
// This is used to login to dockerhub to pull a private base container. For
|
||||
// pushing to dockerhub, see the docker post-processors
|
||||
Login bool
|
||||
LoginEmail string `mapstructure:"login_email"`
|
||||
LoginUsername string `mapstructure:"login_username"`
|
||||
LoginPassword string `mapstructure:"login_password"`
|
||||
LoginServer string `mapstructure:"login_server"`
|
||||
LoginUsername string `mapstructure:"login_username"`
|
||||
|
||||
tpl *packer.ConfigTemplate
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
func NewConfig(raws ...interface{}) (*Config, []string, error) {
|
||||
c := new(Config)
|
||||
md, err := common.DecodeConfig(c, raws...)
|
||||
|
||||
var md mapstructure.Metadata
|
||||
err := config.Decode(c, &config.DecodeOpts{
|
||||
Metadata: &md,
|
||||
Interpolate: true,
|
||||
InterpolateContext: &c.ctx,
|
||||
InterpolateFilter: &interpolate.RenderFilter{
|
||||
Exclude: []string{
|
||||
"run_command",
|
||||
},
|
||||
},
|
||||
}, raws...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
c.tpl, err = packer.NewConfigTemplate()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
c.tpl.UserVars = c.PackerUserVars
|
||||
|
||||
// Defaults
|
||||
if len(c.RunCommand) == 0 {
|
||||
c.RunCommand = []string{
|
||||
"-d", "-i", "-t",
|
||||
"{{.Image}}",
|
||||
"/bin/bash",
|
||||
}
|
||||
c.RunCommand = []string{"-d", "-i", "-t", "{{.Image}}", "/bin/bash"}
|
||||
}
|
||||
|
||||
// Default Pull if it wasn't set
|
||||
|
@ -61,45 +79,31 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
|
|||
c.Pull = true
|
||||
}
|
||||
|
||||
errs := common.CheckUnusedConfig(md)
|
||||
|
||||
templates := map[string]*string{
|
||||
"export_path": &c.ExportPath,
|
||||
"image": &c.Image,
|
||||
"login_email": &c.LoginEmail,
|
||||
"login_username": &c.LoginUsername,
|
||||
"login_password": &c.LoginPassword,
|
||||
"login_server": &c.LoginServer,
|
||||
// Default to the normal Docker type
|
||||
if c.Comm.Type == "" {
|
||||
c.Comm.Type = "docker"
|
||||
}
|
||||
|
||||
for n, ptr := range templates {
|
||||
var err error
|
||||
*ptr, err = c.tpl.Process(*ptr, nil)
|
||||
if err != nil {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, fmt.Errorf("Error processing %s: %s", n, err))
|
||||
var errs *packer.MultiError
|
||||
if es := c.Comm.Prepare(&c.ctx); len(es) > 0 {
|
||||
errs = packer.MultiErrorAppend(errs, es...)
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range c.Volumes {
|
||||
var err error
|
||||
v, err = c.tpl.Process(v, nil)
|
||||
if err != nil {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, fmt.Errorf("Error processing volumes[%s]: %s", k, err))
|
||||
}
|
||||
|
||||
c.Volumes[k] = v
|
||||
}
|
||||
|
||||
if c.Image == "" {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
fmt.Errorf("image must be specified"))
|
||||
errs = packer.MultiErrorAppend(errs, errImageNotSpecified)
|
||||
}
|
||||
|
||||
if c.ExportPath != "" && c.Commit {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
fmt.Errorf("both commit and export_path cannot be set"))
|
||||
if (c.ExportPath != "" && c.Commit) || (c.ExportPath != "" && c.Discard) || (c.Commit && c.Discard) {
|
||||
errs = packer.MultiErrorAppend(errs, errArtifactUseConflict)
|
||||
}
|
||||
|
||||
if c.ExportPath == "" && !c.Commit && !c.Discard {
|
||||
errs = packer.MultiErrorAppend(errs, errArtifactNotUsed)
|
||||
}
|
||||
|
||||
if c.ExportPath != "" {
|
||||
if fi, err := os.Stat(c.ExportPath); err == nil && fi.IsDir() {
|
||||
errs = packer.MultiErrorAppend(errs, errExportPathNotFile)
|
||||
}
|
||||
}
|
||||
|
||||
if errs != nil && len(errs.Errors) > 0 {
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
|
@ -42,29 +44,66 @@ func testConfigOk(t *testing.T, warns []string, err error) {
|
|||
}
|
||||
|
||||
func TestConfigPrepare_exportPath(t *testing.T) {
|
||||
td, err := ioutil.TempDir("", "packer")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(td)
|
||||
|
||||
raw := testConfig()
|
||||
|
||||
// No export path
|
||||
// No export path. This is invalid. Previously this would not error during
|
||||
// validation and as a result the failure would happen at build time.
|
||||
delete(raw, "export_path")
|
||||
_, warns, errs := NewConfig(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
testConfigErr(t, warns, errs)
|
||||
|
||||
// Good export path
|
||||
raw["export_path"] = "good"
|
||||
_, warns, errs = NewConfig(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
|
||||
// Bad export path (directory)
|
||||
raw["export_path"] = td
|
||||
_, warns, errs = NewConfig(raw)
|
||||
testConfigErr(t, warns, errs)
|
||||
}
|
||||
|
||||
func TestConfigPrepare_exportPathAndCommit(t *testing.T) {
|
||||
raw := testConfig()
|
||||
raw["commit"] = true
|
||||
|
||||
// No export path
|
||||
// Export but no commit (explicit default)
|
||||
raw["commit"] = false
|
||||
_, warns, errs := NewConfig(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
|
||||
// Commit AND export specified (invalid)
|
||||
raw["commit"] = true
|
||||
_, warns, errs = NewConfig(raw)
|
||||
testConfigErr(t, warns, errs)
|
||||
|
||||
// No commit
|
||||
raw["commit"] = false
|
||||
// Commit but no export
|
||||
delete(raw, "export_path")
|
||||
_, warns, errs = NewConfig(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
}
|
||||
|
||||
func TestConfigPrepare_exportDiscard(t *testing.T) {
|
||||
raw := testConfig()
|
||||
|
||||
// Export but no discard (explicit default)
|
||||
raw["discard"] = false
|
||||
_, warns, errs := NewConfig(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
|
||||
// Discard AND export (invalid)
|
||||
raw["discard"] = true
|
||||
_, warns, errs = NewConfig(raw)
|
||||
testConfigErr(t, warns, errs)
|
||||
|
||||
// Discard but no export
|
||||
raw["discard"] = true
|
||||
delete(raw, "export_path")
|
||||
_, warns, errs = NewConfig(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
}
|
||||
|
|
|
@ -2,6 +2,8 @@ package docker
|
|||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/hashicorp/go-version"
|
||||
)
|
||||
|
||||
// Driver is the interface that has to be implemented to communicate with
|
||||
|
@ -20,6 +22,10 @@ type Driver interface {
|
|||
// Import imports a container from a tar file
|
||||
Import(path, repo string) (string, error)
|
||||
|
||||
// IPAddress returns the address of the container that can be used
|
||||
// for external access.
|
||||
IPAddress(id string) (string, error)
|
||||
|
||||
// Login. This will lock the driver from performing another Login
|
||||
// until Logout is called. Therefore, any users MUST call Logout.
|
||||
Login(repo, email, username, password string) error
|
||||
|
@ -44,10 +50,13 @@ type Driver interface {
|
|||
StopContainer(id string) error
|
||||
|
||||
// TagImage tags the image with the given ID
|
||||
TagImage(id string, repo string) error
|
||||
TagImage(id string, repo string, force bool) error
|
||||
|
||||
// Verify verifies that the driver can run
|
||||
Verify() error
|
||||
|
||||
// Version reads the Docker version
|
||||
Version() (*version.Version, error)
|
||||
}
|
||||
|
||||
// ContainerConfig is the configuration used to start a container.
|
||||
|
|
|
@ -7,15 +7,18 @@ import (
|
|||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/go-version"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"github.com/mitchellh/packer/template/interpolate"
|
||||
)
|
||||
|
||||
type DockerDriver struct {
|
||||
Ui packer.Ui
|
||||
Tpl *packer.ConfigTemplate
|
||||
Ctx *interpolate.Context
|
||||
|
||||
l sync.Mutex
|
||||
}
|
||||
|
@ -113,6 +116,23 @@ func (d *DockerDriver) Import(path string, repo string) (string, error) {
|
|||
return strings.TrimSpace(stdout.String()), nil
|
||||
}
|
||||
|
||||
func (d *DockerDriver) IPAddress(id string) (string, error) {
|
||||
var stderr, stdout bytes.Buffer
|
||||
cmd := exec.Command(
|
||||
"docker",
|
||||
"inspect",
|
||||
"--format",
|
||||
"{{ .NetworkSettings.IPAddress }}",
|
||||
id)
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return "", fmt.Errorf("Error: %s\n\nStderr: %s", err, stderr.String())
|
||||
}
|
||||
|
||||
return strings.TrimSpace(stdout.String()), nil
|
||||
}
|
||||
|
||||
func (d *DockerDriver) Login(repo, email, user, pass string) error {
|
||||
d.l.Lock()
|
||||
|
||||
|
@ -185,6 +205,8 @@ func (d *DockerDriver) StartContainer(config *ContainerConfig) (string, error) {
|
|||
// Build up the template data
|
||||
var tplData startContainerTemplate
|
||||
tplData.Image = config.Image
|
||||
ctx := *d.Ctx
|
||||
ctx.Data = &tplData
|
||||
|
||||
// Args that we're going to pass to Docker
|
||||
args := []string{"run"}
|
||||
|
@ -192,7 +214,7 @@ func (d *DockerDriver) StartContainer(config *ContainerConfig) (string, error) {
|
|||
args = append(args, "-v", fmt.Sprintf("%s:%s", host, guest))
|
||||
}
|
||||
for _, v := range config.RunCommand {
|
||||
v, err := d.Tpl.Process(v, &tplData)
|
||||
v, err := interpolate.Render(v, &ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -235,9 +257,15 @@ func (d *DockerDriver) StopContainer(id string) error {
|
|||
return exec.Command("docker", "rm", id).Run()
|
||||
}
|
||||
|
||||
func (d *DockerDriver) TagImage(id string, repo string) error {
|
||||
func (d *DockerDriver) TagImage(id string, repo string, force bool) error {
|
||||
args := []string{"tag"}
|
||||
if force {
|
||||
args = append(args, "-f")
|
||||
}
|
||||
args = append(args, id, repo)
|
||||
|
||||
var stderr bytes.Buffer
|
||||
cmd := exec.Command("docker", "tag", id, repo)
|
||||
cmd := exec.Command("docker", args...)
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
|
@ -260,3 +288,17 @@ func (d *DockerDriver) Verify() error {
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *DockerDriver) Version() (*version.Version, error) {
|
||||
output, err := exec.Command("docker", "-v").Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
match := regexp.MustCompile(version.VersionRegexpRaw).FindSubmatch(output)
|
||||
if match == nil {
|
||||
return nil, fmt.Errorf("unknown version: %s", output)
|
||||
}
|
||||
|
||||
return version.NewVersion(string(match[0]))
|
||||
}
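// exampleDockerVersion is an illustrative sketch, not part of this commit: it
// shows what Version() parses. `docker -v` prints a line such as
// "Docker version 1.9.1, build a34a1d5" (sample output, an assumption), and
// version.VersionRegexpRaw extracts "1.9.1", which canExec() later checks
// against the ">= 1.4.0" constraint.
func exampleDockerVersion() (*version.Version, error) {
	output := []byte("Docker version 1.9.1, build a34a1d5")
	match := regexp.MustCompile(version.VersionRegexpRaw).FindSubmatch(output)
	if match == nil {
		return nil, fmt.Errorf("unknown version: %s", output)
	}
	return version.NewVersion(string(match[0]))
}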
|
||||
|
|
|
@ -2,6 +2,8 @@ package docker
|
|||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/hashicorp/go-version"
|
||||
)
|
||||
|
||||
// MockDriver is a driver implementation that can be used for tests.
|
||||
|
@ -21,6 +23,11 @@ type MockDriver struct {
|
|||
ImportId string
|
||||
ImportErr error
|
||||
|
||||
IPAddressCalled bool
|
||||
IPAddressID string
|
||||
IPAddressResult string
|
||||
IPAddressErr error
|
||||
|
||||
LoginCalled bool
|
||||
LoginEmail string
|
||||
LoginUsername string
|
||||
|
@ -44,6 +51,7 @@ type MockDriver struct {
|
|||
TagImageCalled bool
|
||||
TagImageImageId string
|
||||
TagImageRepo string
|
||||
TagImageForce bool
|
||||
TagImageErr error
|
||||
|
||||
ExportReader io.Reader
|
||||
|
@ -63,6 +71,9 @@ type MockDriver struct {
|
|||
StopCalled bool
|
||||
StopID string
|
||||
VerifyCalled bool
|
||||
|
||||
VersionCalled bool
|
||||
VersionVersion string
|
||||
}
|
||||
|
||||
func (d *MockDriver) Commit(id string) (string, error) {
|
||||
|
@ -98,6 +109,12 @@ func (d *MockDriver) Import(path, repo string) (string, error) {
|
|||
return d.ImportId, d.ImportErr
|
||||
}
|
||||
|
||||
func (d *MockDriver) IPAddress(id string) (string, error) {
|
||||
d.IPAddressCalled = true
|
||||
d.IPAddressID = id
|
||||
return d.IPAddressResult, d.IPAddressErr
|
||||
}
|
||||
|
||||
func (d *MockDriver) Login(r, e, u, p string) error {
|
||||
d.LoginCalled = true
|
||||
d.LoginRepo = r
|
||||
|
@ -151,10 +168,11 @@ func (d *MockDriver) StopContainer(id string) error {
|
|||
return d.StopError
|
||||
}
|
||||
|
||||
func (d *MockDriver) TagImage(id string, repo string) error {
|
||||
func (d *MockDriver) TagImage(id string, repo string, force bool) error {
|
||||
d.TagImageCalled = true
|
||||
d.TagImageImageId = id
|
||||
d.TagImageRepo = repo
|
||||
d.TagImageForce = force
|
||||
return d.TagImageErr
|
||||
}
|
||||
|
||||
|
@ -162,3 +180,8 @@ func (d *MockDriver) Verify() error {
|
|||
d.VerifyCalled = true
|
||||
return d.VerifyError
|
||||
}
|
||||
|
||||
func (d *MockDriver) Version() (*version.Version, error) {
|
||||
d.VersionCalled = true
|
||||
return version.NewVersion(d.VersionVersion)
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,36 @@
+package docker
+
+import (
+    "github.com/mitchellh/multistep"
+)
+
+type StepConnectDocker struct{}
+
+func (s *StepConnectDocker) Run(state multistep.StateBag) multistep.StepAction {
+    config := state.Get("config").(*Config)
+    containerId := state.Get("container_id").(string)
+    driver := state.Get("driver").(Driver)
+    tempDir := state.Get("temp_dir").(string)
+
+    // Get the version so we can pass it to the communicator
+    version, err := driver.Version()
+    if err != nil {
+        state.Put("error", err)
+        return multistep.ActionHalt
+    }
+
+    // Create the communicator that talks to Docker via various
+    // os/exec tricks.
+    comm := &Communicator{
+        ContainerId:  containerId,
+        HostDir:      tempDir,
+        ContainerDir: "/packer-files",
+        Version:      version,
+        Config:       config,
+    }
+
+    state.Put("communicator", comm)
+    return multistep.ActionContinue
+}
+
+func (s *StepConnectDocker) Cleanup(state multistep.StateBag) {}
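This step replaces the builder-local provisioning shim (removed further below): it stores a fully configured Communicator in the state bag so the shared provision step can pick it up. A hypothetical helper showing that intended ordering (the real step list lives in the docker builder, outside this hunk):

package docker

import (
    "github.com/mitchellh/multistep"
    "github.com/mitchellh/packer/common"
)

// Hypothetical helper: connect first so "communicator" is in the state bag,
// then run the shared provision step, which consumes it.
func provisionSteps() []multistep.Step {
    return []multistep.Step{
        &StepConnectDocker{},
        new(common.StepProvision),
    }
}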
@@ -2,9 +2,10 @@ package docker

 import (
     "fmt"
+    "os"
+
     "github.com/mitchellh/multistep"
     "github.com/mitchellh/packer/packer"
-    "os"
 )

 // StepExport exports the container to a flat tar file.

@@ -17,6 +18,14 @@ func (s *StepExport) Run(state multistep.StateBag) multistep.StepAction {
     containerId := state.Get("container_id").(string)
     ui := state.Get("ui").(packer.Ui)

+    // We should catch this in validation, but guard anyway
+    if config.ExportPath == "" {
+        err := fmt.Errorf("No output file specified, we can't export anything")
+        state.Put("error", err)
+        ui.Error(err.Error())
+        return multistep.ActionHalt
+    }
+
     // Open the file that we're going to write to
     f, err := os.Create(config.ExportPath)
     if err != nil {
@@ -1,26 +0,0 @@
-package docker
-
-import (
-    "github.com/mitchellh/multistep"
-    "github.com/mitchellh/packer/common"
-)
-
-type StepProvision struct{}
-
-func (s *StepProvision) Run(state multistep.StateBag) multistep.StepAction {
-    containerId := state.Get("container_id").(string)
-    tempDir := state.Get("temp_dir").(string)
-
-    // Create the communicator that talks to Docker via various
-    // os/exec tricks.
-    comm := &Communicator{
-        ContainerId:  containerId,
-        HostDir:      tempDir,
-        ContainerDir: "/packer-files",
-    }
-
-    prov := common.StepProvision{Comm: comm}
-    return prov.Run(state)
-}
-
-func (s *StepProvision) Cleanup(state multistep.StateBag) {}
@@ -18,7 +18,14 @@ func (s *StepTempDir) Run(state multistep.StateBag) multistep.StepAction {
     ui := state.Get("ui").(packer.Ui)

     ui.Say("Creating a temporary directory for sharing data...")
-    td, err := ioutil.TempDir("", "packer-docker")
+
+    var err error
+    var tempdir string
+
+    configTmpDir, err := packer.ConfigTmpDir()
+    if err == nil {
+        tempdir, err = ioutil.TempDir(configTmpDir, "packer-docker")
+    }
     if err != nil {
         err := fmt.Errorf("Error making temp dir: %s", err)
         state.Put("error", err)

@@ -26,7 +33,7 @@ func (s *StepTempDir) Run(state multistep.StateBag) multistep.StepAction {
         return multistep.ActionHalt
     }

-    s.tempDir = td
+    s.tempDir = tempdir
     state.Put("temp_dir", s.tempDir)
     return multistep.ActionContinue
 }
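The tests that follow exercise this through the PACKER_TMP_DIR environment variable. A rough sketch of the same lookup outside the step, under the assumption that packer.ConfigTmpDir honours that variable (as the tests below suggest):

package docker

import (
    "io/ioutil"
    "log"

    "github.com/mitchellh/packer/packer"
)

// exampleTempDir mirrors StepTempDir.Run: ask Packer for its scratch
// location (optionally redirected via PACKER_TMP_DIR) and create the
// build's working directory underneath it.
func exampleTempDir() string {
    base, err := packer.ConfigTmpDir()
    if err != nil {
        log.Fatalf("Error locating temp dir: %s", err)
    }

    dir, err := ioutil.TempDir(base, "packer-docker")
    if err != nil {
        log.Fatalf("Error making temp dir: %s", err)
    }
    return dir
}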
@ -1,16 +1,19 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"github.com/mitchellh/multistep"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
func TestStepTempDir_impl(t *testing.T) {
|
||||
var _ multistep.Step = new(StepTempDir)
|
||||
}
|
||||
|
||||
func TestStepTempDir(t *testing.T) {
|
||||
func testStepTempDir_impl(t *testing.T) string {
|
||||
state := testState(t)
|
||||
step := new(StepTempDir)
|
||||
defer step.Cleanup(state)
|
||||
|
@ -41,4 +44,53 @@ func TestStepTempDir(t *testing.T) {
|
|||
if _, err := os.Stat(dir); err == nil {
|
||||
t.Fatalf("dir should be gone")
|
||||
}
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
func TestStepTempDir(t *testing.T) {
|
||||
testStepTempDir_impl(t)
|
||||
}
|
||||
|
||||
func TestStepTempDir_notmpdir(t *testing.T) {
|
||||
tempenv := "PACKER_TMP_DIR"
|
||||
|
||||
oldenv := os.Getenv(tempenv)
|
||||
defer os.Setenv(tempenv, oldenv)
|
||||
os.Setenv(tempenv, "")
|
||||
|
||||
dir1 := testStepTempDir_impl(t)
|
||||
|
||||
cd, err := packer.ConfigDir()
|
||||
if err != nil {
|
||||
t.Fatalf("bad ConfigDir")
|
||||
}
|
||||
td := filepath.Join(cd, "tmp")
|
||||
os.Setenv(tempenv, td)
|
||||
|
||||
dir2 := testStepTempDir_impl(t)
|
||||
|
||||
if filepath.Dir(dir1) != filepath.Dir(dir2) {
|
||||
t.Fatalf("temp base directories do not match: %s %s", filepath.Dir(dir1), filepath.Dir(dir2))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepTempDir_packertmpdir(t *testing.T) {
|
||||
tempenv := "PACKER_TMP_DIR"
|
||||
|
||||
oldenv := os.Getenv(tempenv)
|
||||
defer os.Setenv(tempenv, oldenv)
|
||||
os.Setenv(tempenv, ".")
|
||||
|
||||
dir1 := testStepTempDir_impl(t)
|
||||
|
||||
abspath, err := filepath.Abs(".")
|
||||
if err != nil {
|
||||
t.Fatalf("bad absolute path")
|
||||
}
|
||||
dir2 := filepath.Join(abspath, "tmp")
|
||||
|
||||
if filepath.Dir(dir1) != filepath.Dir(dir2) {
|
||||
t.Fatalf("temp base directories do not match: %s %s", filepath.Dir(dir1), filepath.Dir(dir2))
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1 @@
+chocolate!

@@ -0,0 +1 @@
+vanilla!

@@ -0,0 +1 @@
+strawberry!
@@ -0,0 +1,36 @@
+package file
+
+import (
+    "fmt"
+    "log"
+    "os"
+)
+
+type FileArtifact struct {
+    filename string
+}
+
+func (*FileArtifact) BuilderId() string {
+    return BuilderId
+}
+
+func (a *FileArtifact) Files() []string {
+    return []string{a.filename}
+}
+
+func (a *FileArtifact) Id() string {
+    return "File"
+}
+
+func (a *FileArtifact) String() string {
+    return fmt.Sprintf("Stored file: %s", a.filename)
+}
+
+func (a *FileArtifact) State(name string) interface{} {
+    return nil
+}
+
+func (a *FileArtifact) Destroy() error {
+    log.Printf("Deleting %s", a.filename)
+    return os.Remove(a.filename)
+}
@@ -0,0 +1,11 @@
+package file
+
+import (
+    "testing"
+
+    "github.com/mitchellh/packer/packer"
+)
+
+func TestNullArtifact(t *testing.T) {
+    var _ packer.Artifact = new(FileArtifact)
+}
@@ -0,0 +1,77 @@
+package file
+
+/*
+The File builder creates an artifact from a file. Because it does not require
+any virtualization or network resources, it's very fast and useful for testing.
+*/
+
+import (
+    "fmt"
+    "io"
+    "io/ioutil"
+    "os"
+
+    "github.com/mitchellh/multistep"
+    "github.com/mitchellh/packer/packer"
+)
+
+const BuilderId = "packer.file"
+
+type Builder struct {
+    config *Config
+    runner multistep.Runner
+}
+
+func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
+    c, warnings, errs := NewConfig(raws...)
+    if errs != nil {
+        return warnings, errs
+    }
+    b.config = c
+
+    return warnings, nil
+}
+
+// Run is where the actual build should take place. It takes a Build and a Ui.
+func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
+    artifact := new(FileArtifact)
+
+    if b.config.Source != "" {
+        source, err := os.Open(b.config.Source)
+        defer source.Close()
+        if err != nil {
+            return nil, err
+        }
+
+        // Create will truncate an existing file
+        target, err := os.Create(b.config.Target)
+        defer target.Close()
+        if err != nil {
+            return nil, err
+        }
+
+        ui.Say(fmt.Sprintf("Copying %s to %s", source.Name(), target.Name()))
+        bytes, err := io.Copy(target, source)
+        if err != nil {
+            return nil, err
+        }
+        ui.Say(fmt.Sprintf("Copied %d bytes", bytes))
+        artifact.filename = target.Name()
+    } else {
+        // We're going to write Contents; if it's empty we'll just create an
+        // empty file.
+        err := ioutil.WriteFile(b.config.Target, []byte(b.config.Content), 0600)
+        if err != nil {
+            return nil, err
+        }
+        artifact.filename = b.config.Target
+    }
+
+    return artifact, nil
+}
+
+// Cancel cancels a possibly running Builder. This should block until
+// the builder actually cancels and cleans up after itself.
+func (b *Builder) Cancel() {
+    b.runner.Cancel()
+}
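The acceptance tests below drive this builder through Packer's test harness; as a rough sketch, the same two modes can also be exercised directly. The BasicUi wiring here is only illustrative:

package main

import (
    "fmt"
    "os"

    "github.com/mitchellh/packer/builder/file"
    "github.com/mitchellh/packer/packer"
)

func main() {
    b := &file.Builder{}

    // "content" mode; "source" mode would pass a source path instead.
    warns, err := b.Prepare(map[string]interface{}{
        "target":  "hello.txt",
        "content": "hello world!",
    })
    if err != nil {
        panic(err)
    }
    for _, w := range warns {
        fmt.Println("warning:", w)
    }

    ui := &packer.BasicUi{Reader: os.Stdin, Writer: os.Stdout}
    artifact, err := b.Run(ui, nil, nil)
    if err != nil {
        panic(err)
    }
    fmt.Println("artifact:", artifact.Files()[0])
}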
@ -0,0 +1,78 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
builderT "github.com/mitchellh/packer/helper/builder/testing"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
)
|
||||
|
||||
func TestBuilder_implBuilder(t *testing.T) {
|
||||
var _ packer.Builder = new(Builder)
|
||||
}
|
||||
|
||||
func TestBuilderFileAcc_content(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
Builder: &Builder{},
|
||||
Template: fileContentTest,
|
||||
Check: checkContent,
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderFileAcc_copy(t *testing.T) {
|
||||
builderT.Test(t, builderT.TestCase{
|
||||
Builder: &Builder{},
|
||||
Template: fileCopyTest,
|
||||
Check: checkCopy,
|
||||
})
|
||||
}
|
||||
|
||||
func checkContent(artifacts []packer.Artifact) error {
|
||||
content, err := ioutil.ReadFile("contentTest.txt")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
contentString := string(content)
|
||||
if contentString != "hello world!" {
|
||||
return fmt.Errorf("Unexpected file contents: %s", contentString)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkCopy(artifacts []packer.Artifact) error {
|
||||
content, err := ioutil.ReadFile("copyTest.txt")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
contentString := string(content)
|
||||
if contentString != "Hello world.\n" {
|
||||
return fmt.Errorf("Unexpected file contents: %s", contentString)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const fileContentTest = `
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type":"test",
|
||||
"target":"contentTest.txt",
|
||||
"content":"hello world!"
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
||||
const fileCopyTest = `
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type":"test",
|
||||
"target":"copyTest.txt",
|
||||
"source":"test-fixtures/artifact.txt"
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
|
@@ -0,0 +1,56 @@
+package file
+
+import (
+    "fmt"
+
+    "github.com/mitchellh/packer/common"
+    "github.com/mitchellh/packer/helper/config"
+    "github.com/mitchellh/packer/packer"
+    "github.com/mitchellh/packer/template/interpolate"
+)
+
+var ErrTargetRequired = fmt.Errorf("target required")
+var ErrContentSourceConflict = fmt.Errorf("Cannot specify source file AND content")
+
+type Config struct {
+    common.PackerConfig `mapstructure:",squash"`
+
+    Source  string `mapstructure:"source"`
+    Target  string `mapstructure:"target"`
+    Content string `mapstructure:"content"`
+}
+
+func NewConfig(raws ...interface{}) (*Config, []string, error) {
+    c := new(Config)
+    warnings := []string{}
+
+    err := config.Decode(c, &config.DecodeOpts{
+        Interpolate: true,
+        InterpolateFilter: &interpolate.RenderFilter{
+            Exclude: []string{},
+        },
+    }, raws...)
+    if err != nil {
+        return nil, warnings, err
+    }
+
+    var errs *packer.MultiError
+
+    if c.Target == "" {
+        errs = packer.MultiErrorAppend(errs, ErrTargetRequired)
+    }
+
+    if c.Content == "" && c.Source == "" {
+        warnings = append(warnings, "Both source file and contents are blank; target will have no content")
+    }
+
+    if c.Content != "" && c.Source != "" {
+        errs = packer.MultiErrorAppend(errs, ErrContentSourceConflict)
+    }
+
+    if errs != nil && len(errs.Errors) > 0 {
+        return nil, warnings, errs
+    }
+
+    return c, warnings, nil
+}
@@ -0,0 +1,45 @@
+package file
+
+import (
+    "strings"
+    "testing"
+)
+
+func testConfig() map[string]interface{} {
+    return map[string]interface{}{
+        "source":  "src.txt",
+        "target":  "dst.txt",
+        "content": "Hello, world!",
+    }
+}
+
+func TestContentSourceConflict(t *testing.T) {
+    raw := testConfig()
+
+    _, _, errs := NewConfig(raw)
+    if !strings.Contains(errs.Error(), ErrContentSourceConflict.Error()) {
+        t.Errorf("Expected config error: %s", ErrContentSourceConflict.Error())
+    }
+}
+
+func TestNoFilename(t *testing.T) {
+    raw := testConfig()
+
+    delete(raw, "filename")
+    _, _, errs := NewConfig(raw)
+    if errs == nil {
+        t.Errorf("Expected config error: %s", ErrTargetRequired.Error())
+    }
+}
+
+func TestNoContent(t *testing.T) {
+    raw := testConfig()
+
+    delete(raw, "content")
+    delete(raw, "source")
+    _, warns, _ := NewConfig(raw)
+
+    if len(warns) == 0 {
+        t.Error("Expected config warning without any content")
+    }
+}
@@ -0,0 +1 @@
+Hello world.
@@ -2,7 +2,10 @@ package googlecompute

 import (
     "encoding/json"
+    "fmt"
+    "io/ioutil"
     "os"
+    "strings"
 )

 // accountFile represents the structure of the account file JSON file.

@@ -13,13 +16,37 @@ type accountFile struct {
     ClientId string `json:"client_id"`
 }

-func loadJSON(result interface{}, path string) error {
-    f, err := os.Open(path)
-    if err != nil {
-        return err
-    }
-    defer f.Close()
-
-    dec := json.NewDecoder(f)
+func parseJSON(result interface{}, text string) error {
+    r := strings.NewReader(text)
+    dec := json.NewDecoder(r)
     return dec.Decode(result)
 }
+
+func processAccountFile(account_file *accountFile, text string) error {
+    // Assume text is a JSON string
+    if err := parseJSON(account_file, text); err != nil {
+        // If text was not JSON, assume it is a file path instead
+        if _, err := os.Stat(text); os.IsNotExist(err) {
+            return fmt.Errorf(
+                "account_file path does not exist: %s",
+                text)
+        }
+
+        b, err := ioutil.ReadFile(text)
+        if err != nil {
+            return fmt.Errorf(
+                "Error reading account_file from path '%s': %s",
+                text, err)
+        }
+
+        contents := string(b)
+
+        if err := parseJSON(account_file, contents); err != nil {
+            return fmt.Errorf(
+                "Error parsing account file '%s': %s",
+                contents, err)
+        }
+    }
+
+    return nil
+}
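processAccountFile now accepts either an inline JSON document or a path to one, which is what lets account_file be passed as a JSON string in templates. A small sketch of both forms; the values are dummies and the JSON keys beyond client_id are assumptions:

package googlecompute

import (
    "io/ioutil"
    "os"
    "testing"
)

func TestProcessAccountFileForms(t *testing.T) {
    js := `{"client_id": "dummy-id", "client_email": "dummy@example.com", "private_key": ""}`

    // Form 1: the raw JSON document itself.
    var inline accountFile
    if err := processAccountFile(&inline, js); err != nil {
        t.Fatalf("inline JSON should parse: %s", err)
    }

    // Form 2: a path to a file containing the same JSON.
    f, err := ioutil.TempFile("", "account")
    if err != nil {
        t.Fatal(err)
    }
    defer os.Remove(f.Name())
    if _, err := f.WriteString(js); err != nil {
        t.Fatal(err)
    }
    f.Close()

    var fromPath accountFile
    if err := processAccountFile(&fromPath, f.Name()); err != nil {
        t.Fatalf("path form should parse: %s", err)
    }
}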
@ -4,11 +4,12 @@ package googlecompute
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/mitchellh/multistep"
|
||||
"github.com/mitchellh/packer/common"
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The unique ID for this builder.
|
||||
|
@ -60,10 +61,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
|
|||
&StepInstanceInfo{
|
||||
Debug: b.config.PackerDebug,
|
||||
},
|
||||
&common.StepConnectSSH{
|
||||
SSHAddress: sshAddress,
|
||||
&communicator.StepConnect{
|
||||
Config: &b.config.Comm,
|
||||
Host: commHost,
|
||||
SSHConfig: sshConfig,
|
||||
SSHWaitTimeout: 5 * time.Minute,
|
||||
},
|
||||
new(common.StepProvision),
|
||||
new(StepTeardownInstance),
|
||||
|
|
|
@ -7,7 +7,10 @@ import (
|
|||
|
||||
"github.com/mitchellh/packer/common"
|
||||
"github.com/mitchellh/packer/common/uuid"
|
||||
"github.com/mitchellh/packer/helper/communicator"
|
||||
"github.com/mitchellh/packer/helper/config"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
"github.com/mitchellh/packer/template/interpolate"
|
||||
)
|
||||
|
||||
// Config is the configuration structure for the GCE builder. It stores
|
||||
|
@ -15,6 +18,7 @@ import (
|
|||
// state of the config object.
|
||||
type Config struct {
|
||||
common.PackerConfig `mapstructure:",squash"`
|
||||
Comm communicator.Config `mapstructure:",squash"`
|
||||
|
||||
AccountFile string `mapstructure:"account_file"`
|
||||
ProjectId string `mapstructure:"project_id"`
|
||||
|
@ -27,37 +31,36 @@ type Config struct {
|
|||
MachineType string `mapstructure:"machine_type"`
|
||||
Metadata map[string]string `mapstructure:"metadata"`
|
||||
Network string `mapstructure:"network"`
|
||||
Preemptible bool `mapstructure:"preemptible"`
|
||||
SourceImage string `mapstructure:"source_image"`
|
||||
SourceImageProjectId string `mapstructure:"source_image_project_id"`
|
||||
SSHUsername string `mapstructure:"ssh_username"`
|
||||
SSHPort uint `mapstructure:"ssh_port"`
|
||||
RawSSHTimeout string `mapstructure:"ssh_timeout"`
|
||||
RawStateTimeout string `mapstructure:"state_timeout"`
|
||||
Tags []string `mapstructure:"tags"`
|
||||
UseInternalIP bool `mapstructure:"use_internal_ip"`
|
||||
Zone string `mapstructure:"zone"`
|
||||
|
||||
account accountFile
|
||||
privateKeyBytes []byte
|
||||
sshTimeout time.Duration
|
||||
stateTimeout time.Duration
|
||||
tpl *packer.ConfigTemplate
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
func NewConfig(raws ...interface{}) (*Config, []string, error) {
|
||||
c := new(Config)
|
||||
md, err := common.DecodeConfig(c, raws...)
|
||||
err := config.Decode(c, &config.DecodeOpts{
|
||||
Interpolate: true,
|
||||
InterpolateContext: &c.ctx,
|
||||
InterpolateFilter: &interpolate.RenderFilter{
|
||||
Exclude: []string{
|
||||
"run_command",
|
||||
},
|
||||
},
|
||||
}, raws...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
c.tpl, err = packer.NewConfigTemplate()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
c.tpl.UserVars = c.PackerUserVars
|
||||
|
||||
// Prepare the errors
|
||||
errs := common.CheckUnusedConfig(md)
|
||||
var errs *packer.MultiError
|
||||
|
||||
// Set defaults.
|
||||
if c.Network == "" {
|
||||
|
@ -73,7 +76,12 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
|
|||
}
|
||||
|
||||
if c.ImageName == "" {
|
||||
c.ImageName = "packer-{{timestamp}}"
|
||||
img, err := interpolate.Render("packer-{{timestamp}}", nil)
|
||||
if err != nil {
|
||||
errs = packer.MultiErrorAppend(errs,
|
||||
fmt.Errorf("Unable to parse image name: %s ", err))
|
||||
c.ImageName = img
|
||||
}
|
||||
}
|
||||
|
||||
if c.InstanceName == "" {
|
||||
|
@ -88,48 +96,16 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
|
|||
c.MachineType = "n1-standard-1"
|
||||
}
|
||||
|
||||
if c.RawSSHTimeout == "" {
|
||||
c.RawSSHTimeout = "5m"
|
||||
}
|
||||
|
||||
if c.RawStateTimeout == "" {
|
||||
c.RawStateTimeout = "5m"
|
||||
}
|
||||
|
||||
if c.SSHUsername == "" {
|
||||
c.SSHUsername = "root"
|
||||
if c.Comm.SSHUsername == "" {
|
||||
c.Comm.SSHUsername = "root"
|
||||
}
|
||||
|
||||
if c.SSHPort == 0 {
|
||||
c.SSHPort = 22
|
||||
}
|
||||
|
||||
// Process Templates
|
||||
templates := map[string]*string{
|
||||
"account_file": &c.AccountFile,
|
||||
|
||||
"disk_name": &c.DiskName,
|
||||
"image_name": &c.ImageName,
|
||||
"image_description": &c.ImageDescription,
|
||||
"instance_name": &c.InstanceName,
|
||||
"machine_type": &c.MachineType,
|
||||
"network": &c.Network,
|
||||
"project_id": &c.ProjectId,
|
||||
"source_image": &c.SourceImage,
|
||||
"source_image_project_id": &c.SourceImageProjectId,
|
||||
"ssh_username": &c.SSHUsername,
|
||||
"ssh_timeout": &c.RawSSHTimeout,
|
||||
"state_timeout": &c.RawStateTimeout,
|
||||
"zone": &c.Zone,
|
||||
}
|
||||
|
||||
for n, ptr := range templates {
|
||||
var err error
|
||||
*ptr, err = c.tpl.Process(*ptr, nil)
|
||||
if err != nil {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, fmt.Errorf("Error processing %s: %s", n, err))
|
||||
}
|
||||
if es := c.Comm.Prepare(&c.ctx); len(es) > 0 {
|
||||
errs = packer.MultiErrorAppend(errs, es...)
|
||||
}
|
||||
|
||||
// Process required parameters.
|
||||
|
@ -148,14 +124,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
|
|||
errs, errors.New("a zone must be specified"))
|
||||
}
|
||||
|
||||
// Process timeout settings.
|
||||
sshTimeout, err := time.ParseDuration(c.RawSSHTimeout)
|
||||
if err != nil {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err))
|
||||
}
|
||||
c.sshTimeout = sshTimeout
|
||||
|
||||
stateTimeout, err := time.ParseDuration(c.RawStateTimeout)
|
||||
if err != nil {
|
||||
errs = packer.MultiErrorAppend(
|
||||
|
@ -164,9 +132,8 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
|
|||
c.stateTimeout = stateTimeout
|
||||
|
||||
if c.AccountFile != "" {
|
||||
if err := loadJSON(&c.account, c.AccountFile); err != nil {
|
||||
errs = packer.MultiErrorAppend(
|
||||
errs, fmt.Errorf("Failed parsing account file: %s", err))
|
||||
if err := processAccountFile(&c.account, c.AccountFile); err != nil {
|
||||
errs = packer.MultiErrorAppend(errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -2,48 +2,10 @@ package googlecompute
|
|||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testConfig(t *testing.T) map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"account_file": testAccountFile(t),
|
||||
"project_id": "hashicorp",
|
||||
"source_image": "foo",
|
||||
"zone": "us-east-1a",
|
||||
}
|
||||
}
|
||||
|
||||
func testConfigStruct(t *testing.T) *Config {
|
||||
c, warns, errs := NewConfig(testConfig(t))
|
||||
if len(warns) > 0 {
|
||||
t.Fatalf("bad: %#v", len(warns))
|
||||
}
|
||||
if errs != nil {
|
||||
t.Fatalf("bad: %#v", errs)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func testConfigErr(t *testing.T, warns []string, err error, extra string) {
|
||||
if len(warns) > 0 {
|
||||
t.Fatalf("bad: %#v", warns)
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatalf("should error: %s", extra)
|
||||
}
|
||||
}
|
||||
|
||||
func testConfigOk(t *testing.T, warns []string, err error) {
|
||||
if len(warns) > 0 {
|
||||
t.Fatalf("bad: %#v", warns)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigPrepare(t *testing.T) {
|
||||
cases := []struct {
|
||||
Key string
|
||||
|
@ -116,6 +78,36 @@ func TestConfigPrepare(t *testing.T) {
|
|||
"5s",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"use_internal_ip",
|
||||
nil,
|
||||
false,
|
||||
},
|
||||
{
|
||||
"use_internal_ip",
|
||||
false,
|
||||
false,
|
||||
},
|
||||
{
|
||||
"use_internal_ip",
|
||||
"SO VERY BAD",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"preemptible",
|
||||
nil,
|
||||
false,
|
||||
},
|
||||
{
|
||||
"preemptible",
|
||||
false,
|
||||
false,
|
||||
},
|
||||
{
|
||||
"preemptible",
|
||||
"SO VERY BAD",
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
|
@ -137,6 +129,83 @@ func TestConfigPrepare(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConfigDefaults(t *testing.T) {
|
||||
cases := []struct {
|
||||
Read func(c *Config) interface{}
|
||||
Value interface{}
|
||||
}{
|
||||
{
|
||||
func(c *Config) interface{} { return c.Comm.Type },
|
||||
"ssh",
|
||||
},
|
||||
|
||||
{
|
||||
func(c *Config) interface{} { return c.Comm.SSHPort },
|
||||
22,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
raw := testConfig(t)
|
||||
|
||||
c, warns, errs := NewConfig(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
|
||||
actual := tc.Read(c)
|
||||
if actual != tc.Value {
|
||||
t.Fatalf("bad: %#v", actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageName(t *testing.T) {
|
||||
c, _, _ := NewConfig(testConfig(t))
|
||||
if strings.Contains(c.ImageName, "{{timestamp}}") {
|
||||
t.Errorf("ImageName should be interpolated; found %s", c.ImageName)
|
||||
}
|
||||
}
|
||||
|
||||
// Helper stuff below
|
||||
|
||||
func testConfig(t *testing.T) map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"account_file": testAccountFile(t),
|
||||
"project_id": "hashicorp",
|
||||
"source_image": "foo",
|
||||
"zone": "us-east-1a",
|
||||
}
|
||||
}
|
||||
|
||||
func testConfigStruct(t *testing.T) *Config {
|
||||
c, warns, errs := NewConfig(testConfig(t))
|
||||
if len(warns) > 0 {
|
||||
t.Fatalf("bad: %#v", len(warns))
|
||||
}
|
||||
if errs != nil {
|
||||
t.Fatalf("bad: %#v", errs)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func testConfigErr(t *testing.T, warns []string, err error, extra string) {
|
||||
if len(warns) > 0 {
|
||||
t.Fatalf("bad: %#v", warns)
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatalf("should error: %s", extra)
|
||||
}
|
||||
}
|
||||
|
||||
func testConfigOk(t *testing.T, warns []string, err error) {
|
||||
if len(warns) > 0 {
|
||||
t.Fatalf("bad: %#v", warns)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func testAccountFile(t *testing.T) string {
|
||||
tf, err := ioutil.TempFile("", "packer")
|
||||
if err != nil {
|
||||
|
|
|
@@ -24,6 +24,9 @@ type Driver interface {
     // GetNatIP gets the NAT IP address for the instance.
     GetNatIP(zone, name string) (string, error)

+    // GetInternalIP gets the GCE-internal IP address for the instance.
+    GetInternalIP(zone, name string) (string, error)
+
     // RunInstance takes the given config and launches an instance.
     RunInstance(*InstanceConfig) (<-chan error, error)


@@ -44,6 +47,7 @@ type InstanceConfig struct {
     Metadata    map[string]string
     Name        string
     Network     string
+    Preemptible bool
     Tags        []string
     Zone        string
 }
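GetInternalIP complements GetNatIP so that builds with use_internal_ip set can reach the instance over its private address. The call site is not part of this hunk; a hypothetical selection helper would look like:

package googlecompute

// instanceIP is a hypothetical helper mirroring how the builder is expected
// to pick an address once use_internal_ip is set in the config.
func instanceIP(d Driver, c *Config, zone, name string) (string, error) {
    if c.UseInternalIP {
        return d.GetInternalIP(zone, name)
    }
    return d.GetNatIP(zone, name)
}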
@ -4,16 +4,15 @@ import (
|
|||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"code.google.com/p/google-api-go-client/compute/v1"
|
||||
"github.com/mitchellh/packer/packer"
|
||||
|
||||
// oauth2 "github.com/rasa/oauth2-fork-b3f9a68"
|
||||
"github.com/rasa/oauth2-fork-b3f9a68"
|
||||
|
||||
// oauth2 "github.com/rasa/oauth2-fork-b3f9a68/google"
|
||||
"github.com/rasa/oauth2-fork-b3f9a68/google"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"golang.org/x/oauth2/jwt"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// driverGCE is a Driver implementation that actually talks to GCE.
|
||||
|
@ -27,9 +26,10 @@ type driverGCE struct {
|
|||
var DriverScopes = []string{"https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/devstorage.full_control"}
|
||||
|
||||
func NewDriverGCE(ui packer.Ui, p string, a *accountFile) (Driver, error) {
|
||||
var f *oauth2.Options
|
||||
var err error
|
||||
|
||||
var client *http.Client
|
||||
|
||||
// Auth with AccountFile first if provided
|
||||
if a.PrivateKey != "" {
|
||||
log.Printf("[INFO] Requesting Google token via AccountFile...")
|
||||
|
@ -37,22 +37,45 @@ func NewDriverGCE(ui packer.Ui, p string, a *accountFile) (Driver, error) {
|
|||
log.Printf("[INFO] -- Scopes: %s", DriverScopes)
|
||||
log.Printf("[INFO] -- Private Key Length: %d", len(a.PrivateKey))
|
||||
|
||||
f, err = oauth2.New(
|
||||
oauth2.JWTClient(a.ClientEmail, []byte(a.PrivateKey)),
|
||||
oauth2.Scope(DriverScopes...),
|
||||
google.JWTEndpoint())
|
||||
conf := jwt.Config{
|
||||
Email: a.ClientEmail,
|
||||
PrivateKey: []byte(a.PrivateKey),
|
||||
Scopes: DriverScopes,
|
||||
TokenURL: "https://accounts.google.com/o/oauth2/token",
|
||||
}
|
||||
|
||||
// Initiate an http.Client. The following GET request will be
|
||||
// authorized and authenticated on the behalf of
|
||||
// your service account.
|
||||
client = conf.Client(oauth2.NoContext)
|
||||
} else {
|
||||
log.Printf("[INFO] Requesting Google token via GCE Service Role...")
|
||||
|
||||
f, err = oauth2.New(google.ComputeEngineAccount(""))
|
||||
client = &http.Client{
|
||||
Transport: &oauth2.Transport{
|
||||
// Fetch from Google Compute Engine's metadata server to retrieve
|
||||
// an access token for the provided account.
|
||||
// If no account is specified, "default" is used.
|
||||
Source: google.ComputeTokenSource(""),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Printf("[INFO] Instantiating GCE client using...")
|
||||
service, err := compute.New(&http.Client{Transport: f.NewTransport()})
|
||||
log.Printf("[INFO] Instantiating GCE client...")
|
||||
service, err := compute.New(client)
|
||||
// Set UserAgent
|
||||
versionString := "0.0.0"
|
||||
// TODO(dcunnin): Use Packer's version code from version.go
|
||||
// versionString := main.Version
|
||||
// if main.VersionPrerelease != "" {
|
||||
// versionString = fmt.Sprintf("%s-%s", versionString, main.VersionPrerelease)
|
||||
// }
|
||||
service.UserAgent = fmt.Sprintf(
|
||||
"(%s %s) Packer/%s", runtime.GOOS, runtime.GOARCH, versionString)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -134,7 +157,6 @@ func (d *driverGCE) GetNatIP(zone, name string) (string, error) {
|
|||
if ni.AccessConfigs == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, ac := range ni.AccessConfigs {
|
||||
if ac.NatIP != "" {
|
||||
return ac.NatIP, nil
|
||||
|
@ -145,6 +167,22 @@ func (d *driverGCE) GetNatIP(zone, name string) (string, error) {
|
|||
return "", nil
|
||||
}
|
||||
|
||||
func (d *driverGCE) GetInternalIP(zone, name string) (string, error) {
|
||||
instance, err := d.service.Instances.Get(d.projectId, zone, name).Do()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, ni := range instance.NetworkInterfaces {
|
||||
if ni.NetworkIP == "" {
|
||||
continue
|
||||
}
|
||||
return ni.NetworkIP, nil
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) {
|
||||
// Get the zone
|
||||
d.ui.Message(fmt.Sprintf("Loading zone: %s", c.Zone))
|
||||
|
@ -181,7 +219,7 @@ func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) {
|
|||
for k, v := range c.Metadata {
|
||||
metadata = append(metadata, &compute.MetadataItems{
|
||||
Key: k,
|
||||
Value: v,
|
||||
Value: &v,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -217,6 +255,9 @@ func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) {
|
|||
Network: network.SelfLink,
|
||||
},
|
||||
},
|
||||
Scheduling: &compute.Scheduling{
|
||||
Preemptible: c.Preemptible,
|
||||
},
|
||||
ServiceAccounts: []*compute.ServiceAccount{
|
||||
&compute.ServiceAccount{
|
||||
Email: "default",
|
||||
|
|