Merge pull request #2 from mitchellh/master

Update skip_nat_port to latest master.
pleschev 2014-11-26 07:52:42 +11:00
commit 431647997c
512 changed files with 16965 additions and 6184 deletions

1
.gitignore vendored

@@ -6,5 +6,4 @@
/website/build
.DS_Store
.vagrant
-Vagrantfile
test/.env


@@ -2,20 +2,14 @@ language: go
go:
  - 1.2
+  - 1.3
  - tip

-install: make deps
+install: make updatedeps

script:
-  - go test ./...
+  - GOMAXPROCS=2 make test
  #- go test -race ./...

-notifications:
-  irc:
-    channels:
-      - "chat.freenode.net#packer-tool"
-    on_success: change
-    on_failure: always

matrix:
  allow_failures:
    - go: tip


@@ -1,9 +1,238 @@
-## 0.6.0 (unreleased)
+## 0.8.0 (unreleased)
## 0.7.2 (October 28, 2014)
FEATURES:
* builder/digitalocean: API V2 support. [GH-1463]
* builder/parallels: Don't depend on _prl-utils_ [GH-1499]
IMPROVEMENTS:
* builder/amazon/all: Support new AWS Frankfurt region.
* builder/docker: Allow remote `DOCKER_HOST`, which works as long as
volumes work. [GH-1594]
* builder/qemu: Can set cache mode for main disk. [GH-1558]
* builder/qemu: Can build from pre-existing disk. [GH-1342]
* builder/vmware: Can specify path to Fusion installation with the environment
variable `FUSION_APP_PATH`. [GH-1552]
* builder/vmware: Can specify the HW version for the VMX. [GH-1530]
* builder/vmware/esxi: Will now cache ISOs/floppies remotely. [GH-1479]
* builder/vmware/vmx: Source VMX can have a disk connected via SATA. [GH-1604]
* post-processors/vagrant: Support Qemu (libvirt) boxes. [GH-1330]
* post-processors/vagrantcloud: Support self-hosted box URLs.
BUG FIXES:
* core: Fix loading plugins from pwd. [GH-1521]
* builder/amazon: Prefer token in config if given. [GH-1544]
* builder/amazon/all: Extended timeout for waiting for AMI. [GH-1533]
* builder/virtualbox: Can read VirtualBox version on FreeBSD. [GH-1570]
* builder/virtualbox: More robust reading of guest additions URL. [GH-1509]
* builder/vmware: Always remove floppies/drives. [GH-1504]
* builder/vmware: Wait some time so that post-VMX updates aren't
overwritten. [GH-1504]
* builder/vmware/esxi: Retry power on if it fails. [GH-1334]
* builder/vmware-vmx: Fix issue with order of boot command support [GH-1492]
* builder/amazon: Extend timeout and allow user override [GH-1533]
* builder/parallels: Ignore 'The fdd0 device does not exist' [GH-1501]
* builder/parallels: Rely on Cleanup functions to detach devices [GH-1502]
* builder/parallels: Create VM without hdd and then add it later [GH-1548]
* builder/parallels: Disconnect cdrom0 [GH-1605]
* builder/qemu: Don't use `-redir` flag anymore, replace with
`hostfwd` options. [GH-1561]
* builder/qemu: Use `pc` as default machine type instead of `pc-1.0`.
* providers/aws: Ignore transient network errors. [GH-1579]
* provisioner/ansible: Don't buffer output so output streams in. [GH-1585]
* provisioner/ansible: Always use an inventory file to avoid a potentially
  deprecated feature. [GH-1562]
* provisioner/shell: Quote environmental variables. [GH-1568]
* provisioner/salt: Bootstrap over SSL. [GH-1608]
* post-processors/docker-push: Work with docker-tag artifacts. [GH-1526]
* post-processors/vsphere: Append "/" to object address. [GH-1615]
## 0.7.1 (September 10, 2014)
FEATURES:
* builder/vmware: VMware Fusion Pro 7 is now supported. [GH-1478]
BUG FIXES:
* core: SSH will connect slightly faster if it is ready immediately.
* provisioner/file: directory uploads no longer hang. [GH-1484]
* provisioner/file: fixed crash on large files. [GH-1473]
* scripts: Windows executable renamed to packer.exe. [GH-1483]
## 0.7.0 (September 8, 2014)
BACKWARDS INCOMPATIBILITIES:
* The authentication configuration for Google Compute Engine has changed.
The new method is much simpler, but is not backwards compatible.
`packer fix` will _not_ fix this. Please read the updated GCE docs.
FEATURES:
* **New Post-Processor: `compress`** - Gzip compresses artifacts with files.
* **New Post-Processor: `docker-save`** - Save an image. This is similar to
export, but preserves the image hierarchy.
* **New Post-Processor: `docker-tag`** - Tag a created image.
* **New Template Functions: `upper`, `lower`** - See documentation for
more details.
* core: Plugins are automatically discovered if they're named properly.
Packer will look in the PWD and the directory with `packer` for
binaries named `packer-TYPE-NAME`.
* core: Plugins placed in `~/.packer.d/plugins` are now automatically
discovered.
* builder/amazon: Spot instances can now be used to build EBS backed and
instance store images. [GH-1139]
* builder/docker: Images can now be committed instead of exported. [GH-1198]
* builder/virtualbox-ovf: New `import_flags` setting can be used to add
new command line flags to `VBoxManage import` to allow things such
as EULAs to be accepted. [GH-1383]
* builder/virtualbox-ovf: Boot commands and the HTTP server are supported.
[GH-1169]
* builder/vmware: VMware Player 6 is now supported. [GH-1168]
* builder/vmware-vmx: Boot commands and the HTTP server are supported.
[GH-1169]
IMPROVEMENTS:
* core: `isotime` function can take a format. [GH-1126]
* builder/amazon/all: `AWS_SECURITY_TOKEN` is read and can also be
set with the `token` configuration. [GH-1236]
* builder/amazon/all: Can force SSH on the private IP address with
`ssh_private_ip`. [GH-1229]
* builder/amazon/all: String fields in device mappings can use variables. [GH-1090]
* builder/amazon-instance: EBS AMIs can be used as a source. [GH-1453]
* builder/digitalocean: Can set API URL endpoint. [GH-1448]
* builder/digitalocean: Region supports variables. [GH-1452]
* builder/docker: Can now specify login credentials to pull images.
* builder/docker: Support mounting additional volumes. [GH-1430]
* builder/parallels/all: Path to tools ISO is calculated automatically. [GH-1455]
* builder/parallels-pvm: `reassign_mac` option to choose whether or not
to generate a new MAC address. [GH-1461]
* builder/qemu: Can specify "none" acceleration type. [GH-1395]
* builder/qemu: Can specify "tcg" acceleration type. [GH-1395]
* builder/virtualbox/all: `iso_interface` option to mount ISO with SATA. [GH-1200]
* builder/vmware-vmx: Proper `floppy_files` support. [GH-1057]
* command/build: Add `-color=false` flag to disable color. [GH-1433]
* post-processor/docker-push: Can now specify login credentials. [GH-1243]
* provisioner/chef-client: Support `chef_environment`. [GH-1190]
BUG FIXES:
* core: nicer error message if an encrypted private key is used for
SSH. [GH-1445]
* core: Fix crash that could happen with a well timed double Ctrl-C.
[GH-1328] [GH-1314]
* core: SSH TCP keepalive period is now 5 seconds (shorter). [GH-1232]
* builder/amazon-chroot: Can properly build HVM images now. [GH-1360]
* builder/amazon-chroot: Fix crash in root device check. [GH-1360]
* builder/amazon-chroot: Add description that Packer made the snapshot
with a time. [GH-1388]
* builder/amazon-ebs: AMI is deregistered if an error occurs. [GH-1186]
* builder/amazon-instance: Fix deprecation warning for `ec2-bundle-vol`
[GH-1424]
* builder/amazon-instance: Add `--no-filter` to the `ec2-bundle-vol`
command by default to avoid corrupting data by removing package
manager certs. [GH-1137]
* builder/amazon/all: `delete_on_termination` set to false will work.
* builder/amazon/all: Fix race condition on setting tags. [GH-1367]
* builder/amazon/all: More descriptive error messages if Amazon only
sends an error code. [GH-1189]
* builder/docker: Error if `DOCKER_HOST` is set.
* builder/docker: Remove the container during cleanup. [GH-1206]
* builder/docker: Fix case where not all output would show up from
provisioners.
* builder/googlecompute: add `disk_size` option. [GH-1397]
* builder/googlecompute: Auth works with latest formats on Google Cloud
Console. [GH-1344]
* builder/openstack: Region is not required. [GH-1418]
* builder/parallels-iso: ISO not removed from VM after install [GH-1338]
* builder/parallels/all: Add support for Parallels Desktop 10 [GH-1438]
* builder/parallels/all: Added some navigation keys [GH-1442]
* builder/qemu: If headless, sdl display won't be used. [GH-1395]
* builder/qemu: Use `512M` as `-m` default. [GH-1444]
* builder/virtualbox/all: Search `VBOX_MSI_INSTALL_PATH` for path to
`VBoxManage` on Windows. [GH-1337]
* builder/virtualbox/all: Seed RNG to avoid same ports. [GH-1386]
* builder/virtualbox/all: Better error if guest additions URL couldn't be
detected. [GH-1439]
* builder/virtualbox/all: Detect errors even when `VBoxManage` exits
with a zero exit code. [GH-1119]
* builder/virtualbox/iso: Append timestamp to default name for parallel
builds. [GH-1365]
* builder/vmware/all: No more error when Packer stops an already-stopped
VM. [GH-1300]
* builder/vmware/all: `ssh_host` accepts templates. [GH-1396]
* builder/vmware/all: Don't remount floppy in VMX post step. [GH-1239]
* builder/vmware/vmx: Do not re-add floppy disk files to VMX [GH-1361]
* builder/vmware-iso: Fix crash when `vnc_port_min` and max were the
same value. [GH-1288]
* builder/vmware-iso: Finding an available VNC port on Windows works. [GH-1372]
* builder/vmware-vmx: Nice error if Clone is not supported (not VMware
Fusion Pro). [GH-787]
* post-processor/vagrant: Can supply your own metadata.json. [GH-1143]
* provisioner/ansible-local: Use proper path on Windows. [GH-1375]
* provisioner/file: Mode will now be preserved. [GH-1064]
## 0.6.1 (July 20, 2014)
FEATURES:
* **New post processor:** `vagrant-cloud` - Push box files generated by
vagrant post processor to Vagrant Cloud. [GH-1289]
* Vagrant post-processor can now package Hyper-V boxes.
IMPROVEMENTS:
* builder/amazon: Support for enhanced networking on HVM images. [GH-1228]
* builder/amazon-ebs: Support encrypted EBS volumes [GH-1194]
* builder/ansible: Add `playbook_dir` option. [GH-1000]
* builder/openstack: Add ability to configure networks. [GH-1261]
* builder/openstack: Skip certificate verification. [GH-1121]
* builder/parallels/all: Add ability to select interface to connect to.
* builder/parallels/pvm: Support `boot_command`. [GH-1082]
* builder/virtualbox/all: Attempt to use local guest additions ISO
before downloading from internet. [GH-1123]
* builder/virtualbox/ovf: Supports `guest_additions_mode` [GH-1035]
* builder/vmware/all: Increase cleanup timeout to 120 seconds [GH-1167]
* builder/vmware/all: Add `vmx_data_post` for modifying VMX data
after shutdown. [GH-1149]
* builder/vmware/vmx: Supports tools uploading. [GH-1154]
BUG FIXES:
* core: `isotime` is the same time during the entire build. [GH-1153]
* builder/amazon-common: Sort AMI strings before outputting [GH-1305]
* builder/amazon: User data can use templates/variables. [GH-1343]
* builder/amazon: Can now build AMIs in GovCloud.
* builder/null: SSH info can use templates/variables. [GH-1343]
* builder/openstack: Workaround for gophercloud.ServerById crashing [GH-1257]
* builder/openstack: Force IPv4 addresses from address pools [GH-1258]
* builder/parallels: Do not delete entire CDROM device. [GH-1115]
* builder/parallels: Errors while creating floppy disk. [GH-1225]
* builder/parallels: Errors while removing floppy drive. [GH-1226]
* builder/virtualbox-ovf: Supports guest additions options. [GH-1120]
* builder/vmware-iso: Fix esx5 path separator in windows. [GH-1316]
* builder/vmware: Remote ESXi builder now uploads floppy. [GH-1106]
* builder/vmware: Remote ESXi builder no longer re-uploads ISO every
time. [GH-1244]
* post-processor/vsphere: Accept DOMAIN\account usernames [GH-1178]
* provisioner/chef-*: Fix remotePaths for Windows [GH-394]
+## 0.6.0 (May 2, 2014)
FEATURES:
* **New builder:** `null` - The null builder does not produce any
  artifacts, but is useful for debugging provisioning scripts. [GH-970]
+* **New builder:** `parallels-iso` and `parallels-pvm` - These can be
+  used to build Parallels virtual machines. [GH-1101]
* **New provisioner:** `chef-client` - Provision using the `chef-client`
  command, which talks to a Chef Server. [GH-855]
* **New provisioner:** `puppet-server` - Provision using Puppet by
@@ -19,17 +248,28 @@ IMPROVEMENTS:
  array configurations. [GH-950]
* builder/amazon: Added `ssh_private_key_file` option [GH-971]
* builder/amazon: Added `ami_virtualization_type` option [GH-1021]
+* builder/digitalocean: Regions, image names, and sizes can be
+  names that are looked up for their valid ID. [GH-960]
* builder/googlecompute: Configurable instance name. [GH-1065]
* builder/openstack: Support for conventional OpenStack environmental
  variables such as `OS_USERNAME`, `OS_PASSWORD`, etc. [GH-768]
* builder/openstack: Support `openstack_provider` option to automatically
  fill defaults for different OpenStack variants. [GH-912]
+* builder/openstack: Support security groups. [GH-848]
* builder/qemu: User variable expansion in `ssh_key_path` [GH-918]
+* builder/qemu: Floppy disk files list can also include globs
+  and directories. [GH-1086]
* builder/virtualbox: Support an `export_opts` option which allows
  specifying arbitrary arguments when exporting the VM. [GH-945]
+* builder/virtualbox: Added `vboxmanage_post` option to run vboxmanage
+  commands just before exporting [GH-664]
+* builder/virtualbox: Floppy disk files list can also include globs
+  and directories. [GH-1086]
* builder/vmware: Workstation 10 support for Linux. [GH-900]
* builder/vmware: add cloning support on Windows [GH-824]
+* builder/vmware: Floppy disk files list can also include globs
+  and directories. [GH-1086]
-* command/build: Added '-parallel' flag so you can disable parallelization
+* command/build: Added `-parallel` flag so you can disable parallelization
  with `-no-parallel`. [GH-924]
* post-processors/vsphere: `disk_mode` option. [GH-778]
* provisioner/ansible: Add `inventory_file` option [GH-1006]
@@ -52,10 +292,13 @@ BUG FIXES:
  Windows [GH-963]
* provisioner/ansible: set cwd to staging directory [GH-1016]
* provisioners/chef-client: Don't chown directory with Ubuntu. [GH-939]
+* provisioners/chef-solo: Deeply nested JSON works properly. [GH-1076]
* provisioners/shell: Env var values can have equal signs. [GH-1045]
* provisioners/shell: chmod the uploaded script file to 0777. [GH-994]
* post-processor/docker-push: Allow repositories with ports. [GH-923]
* post-processor/vagrant: Create parent directories for `output` path [GH-1059]
+* post-processor/vsphere: datastore, network, and folder are no longer
+  required. [GH-1091]
## 0.5.2 (02/21/2014)


@@ -56,19 +56,32 @@ following steps in order to be able to compile and test Packer.
1. Install Go. Make sure the Go version is at least Go 1.2. Packer will not work with anything less than
   Go 1.2. On a Mac, you can `brew install go` to install Go 1.2.

-2. Set and export the `GOPATH` environment variable. For example, you can
-   add `export GOPATH=$HOME/Documents/golang` to your `.bash_profile`.
+2. Set and export the `GOPATH` environment variable and update your `PATH`.
+   For example, you can add to your `.bash_profile`.
+
+   ```
+   export GOPATH=$HOME/Documents/golang
+   export PATH=$PATH:$GOPATH/bin
+   ```

-3. Download the Packer source (and its dependencies) by running
+3. Install and build `gox` with
+
+   ```
+   go get github.com/mitchellh/gox
+   cd $GOPATH/src/github.com/mitchellh/gox
+   go build
+   ```
+
+4. Download the Packer source (and its dependencies) by running
   `go get github.com/mitchellh/packer`. This will download the Packer
   source to `$GOPATH/src/github.com/mitchellh/packer`.

-4. Make your changes to the Packer source. You can run `make` from the main
+5. Make your changes to the Packer source. You can run `make` from the main
   source directory to recompile all the binaries. Any compilation errors
   will be shown when the binaries are rebuilding.

-5. Test your changes by running `make test` and then running
+6. Test your changes by running `make test` and then running
   `$GOPATH/src/github.com/mitchellh/packer/bin/packer` to build a machine.

-6. If everything works well and the tests pass, run `go fmt` on your code
+7. If everything works well and the tests pass, run `go fmt` on your code
   before submitting a pull request.


@@ -1,38 +1,20 @@
-NO_COLOR=\033[0m
-OK_COLOR=\033[32;01m
-ERROR_COLOR=\033[31;01m
-WARN_COLOR=\033[33;01m
-
-DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...)
-
-UNAME := $(shell uname -s)
-ifeq ($(UNAME),Darwin)
-    ECHO=echo
-else
-    ECHO=/bin/echo -e
-endif
+TEST?=./...

-all: deps
-    @mkdir -p bin/
-    @$(ECHO) "$(OK_COLOR)==> Building$(NO_COLOR)"
-    @bash --norc -i ./scripts/devcompile.sh
+default: test

-deps:
-    @$(ECHO) "$(OK_COLOR)==> Installing dependencies$(NO_COLOR)"
-    @go get -d -v ./...
-    @echo $(DEPS) | xargs -n1 go get -d
+bin:
+    @sh -c "$(CURDIR)/scripts/build.sh"
+
+dev:
+    @TF_DEV=1 sh -c "$(CURDIR)/scripts/build.sh"
+
+test:
+    go test $(TEST) $(TESTARGS) -timeout=10s
+
+testrace:
+    go test -race $(TEST) $(TESTARGS)

updatedeps:
-    @$(ECHO) "$(OK_COLOR)==> Updating all dependencies$(NO_COLOR)"
-    @go get -d -v -u ./...
-    @echo $(DEPS) | xargs -n1 go get -d -u
+    go get -d -v -p 2 ./...

-clean:
-    @rm -rf bin/ local/ pkg/ src/ website/.sass-cache website/build
-
-format:
-    go fmt ./...
-
-test: deps
-    @$(ECHO) "$(OK_COLOR)==> Testing Packer...$(NO_COLOR)"
-    go test ./...
-
-.PHONY: all clean deps format test updatedeps
+.PHONY: bin default test updatedeps


@@ -9,9 +9,18 @@ from a single source configuration.
Packer is lightweight, runs on every major operating system, and is highly
performant, creating machine images for multiple platforms in parallel.
-Packer comes out of the box with support for creating AMIs (EC2), VMware
-images, and VirtualBox images. Support for more platforms can be added via
-plugins.
+Packer comes out of the box with support for the following platforms:
+
+* Amazon EC2 (AMI). Both EBS-backed and instance-store AMIs
+* DigitalOcean
+* Docker
+* Google Compute Engine
+* OpenStack
+* Parallels
+* QEMU. Both KVM and Xen images.
+* VirtualBox
+* VMware
+
+Support for other platforms can be added via plugins.

The images that Packer creates can easily be turned into
[Vagrant](http://www.vagrantup.com) boxes.
@@ -69,40 +78,44 @@ http://www.packer.io/docs
## Developing Packer

-If you wish to work on Packer itself, you'll first need [Go](http://golang.org)
-installed (version 1.2+ is _required_). Make sure you have Go properly installed,
-including setting up your [GOPATH](http://golang.org/doc/code.html#GOPATH).
+If you wish to work on Packer itself or any of its built-in providers,
+you'll first need [Go](http://www.golang.org) installed (version 1.2+ is
+_required_). Make sure Go is properly installed, including setting up
+a [GOPATH](http://golang.org/doc/code.html#GOPATH).

-For some additional dependencies, Go needs [Mercurial](http://mercurial.selenic.com/)
-and [Bazaar](http://bazaar.canonical.com/en/) to be installed.
-Packer itself doesn't require these, but a dependency of a dependency does.
+Next, install the following software packages, which are needed for some dependencies:

-You'll also need [`gox`](https://github.com/mitchellh/gox)
-to compile packer. You can install that with:
+- [Bazaar](http://bazaar.canonical.com/en/)
+- [Git](http://git-scm.com/)
+- [Mercurial](http://mercurial.selenic.com/)

-```
-$ go get -u github.com/mitchellh/gox
-```
+Then, install [Gox](https://github.com/mitchellh/gox), which is used
+as a compilation tool on top of Go:

-Next, clone this repository into `$GOPATH/src/github.com/mitchellh/packer` and
-then just type `make`. In a few moments, you'll have a working `packer` executable:
+    $ go get -u github.com/mitchellh/gox

-```
-$ make
-...
-$ bin/packer
-...
-```
+Next, clone this repository into `$GOPATH/src/github.com/mitchellh/packer`.
+Install the necessary dependencies by running `make updatedeps` and then just
+type `make`. This will compile some more dependencies and then run the tests. If
+this exits with exit status 0, then everything is working!

-If you need to cross-compile Packer for other platforms, take a look at
-`scripts/dist.sh`.
+    $ make updatedeps
+    ...
+    $ make
+    ...

-You can run tests by typing `make test`.
+To compile a development version of Packer and the built-in plugins,
+run `make dev`. This will put Packer binaries in the `bin` folder:

-This will run tests for Packer core along with all the core builders and commands and such that come with Packer.
+    $ make dev
+    ...
+    $ bin/packer
+    ...

-If you make any changes to the code, run `make format` in order to automatically
-format the code according to Go standards.
-
-When new dependencies are added to packer you can use `make updatedeps` to
-get the latest and subsequently use `make` to compile and generate the `packer` binary.
+If you're developing a specific package, you can run tests for just that
+package by specifying the `TEST` variable. For example below, only
+`packer` package tests will be run.
+
+    $ make test TEST=./packer
+    ...

45
Vagrantfile vendored Normal file

@@ -0,0 +1,45 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
$script = <<SCRIPT
SRCROOT="/opt/go"
# Install Go
sudo apt-get update
sudo apt-get install -y build-essential mercurial
sudo hg clone -u release https://code.google.com/p/go ${SRCROOT}
cd ${SRCROOT}/src
sudo ./all.bash
# Setup the GOPATH
sudo mkdir -p /opt/gopath
cat <<EOF >/tmp/gopath.sh
export GOPATH="/opt/gopath"
export PATH="/opt/go/bin:\$GOPATH/bin:\$PATH"
EOF
sudo mv /tmp/gopath.sh /etc/profile.d/gopath.sh
sudo chmod 0755 /etc/profile.d/gopath.sh
# Make sure the gopath is usable by vagrant
sudo chown -R vagrant:vagrant $SRCROOT
sudo chown -R vagrant:vagrant /opt/gopath
# Install some other stuff we need
sudo apt-get install -y curl git-core zip
SCRIPT
Vagrant.configure(2) do |config|
config.vm.box = "chef/ubuntu-12.04"
config.vm.provision "shell", inline: $script
config.vm.synced_folder ".", "/vagrant", disabled: true
["vmware_fusion", "vmware_workstation"].each do |p|
config.vm.provider "p" do |v|
v.vmx["memsize"] = "2048"
v.vmx["numvcpus"] = "2"
v.vmx["cpuid.coresPerSocket"] = "1"
end
end
end


@@ -7,13 +7,14 @@ package chroot
import (
    "errors"
    "fmt"
+   "log"
+   "runtime"
+
    "github.com/mitchellh/goamz/ec2"
    "github.com/mitchellh/multistep"
    awscommon "github.com/mitchellh/packer/builder/amazon/common"
    "github.com/mitchellh/packer/common"
    "github.com/mitchellh/packer/packer"
-   "log"
-   "runtime"
)

// The unique ID for this builder
@@ -182,7 +183,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
    // Build the steps
    steps := []multistep.Step{
        &StepInstanceInfo{},
-       &StepSourceAMIInfo{},
+       &awscommon.StepSourceAMIInfo{
+           SourceAmi:          b.config.SourceAmi,
+           EnhancedNetworking: b.config.AMIEnhancedNetworking,
+       },
+       &StepCheckRootDevice{},
        &StepFlock{},
        &StepPrepareDevice{},
        &StepCreateVolume{},


@@ -60,7 +60,7 @@ func (c *Communicator) Start(cmd *packer.RemoteCmd) error {
    return nil
}

-func (c *Communicator) Upload(dst string, r io.Reader) error {
+func (c *Communicator) Upload(dst string, r io.Reader, fi *os.FileInfo) error {
    dst = filepath.Join(c.Chroot, dst)
    log.Printf("Uploading to chroot dir: %s", dst)
    tf, err := ioutil.TempFile("", "packer-amazon-chroot")
@@ -79,18 +79,27 @@ func (c *Communicator) Upload(dst string, r io.Reader) error {
}

func (c *Communicator) UploadDir(dst string, src string, exclude []string) error {
+   // If src ends with a trailing "/", copy from "src/." so that
+   // directory contents (including hidden files) are copied, but the
+   // directory "src" is omitted. BSD does this automatically when
+   // the source contains a trailing slash, but linux does not.
+   if src[len(src)-1] == '/' {
+       src = src + "."
+   }
+
    // TODO: remove any file copied if it appears in `exclude`
    chrootDest := filepath.Join(c.Chroot, dst)

    log.Printf("Uploading directory '%s' to '%s'", src, chrootDest)
-   cpCmd, err := c.CmdWrapper(fmt.Sprintf("cp -R %s* %s", src, chrootDest))
+   cpCmd, err := c.CmdWrapper(fmt.Sprintf("cp -R '%s' %s", src, chrootDest))
    if err != nil {
        return err
    }

    var stderr bytes.Buffer
    cmd := ShellCommand(cpCmd)
-   cmd.Env = append(cmd.Env, os.Environ()...)
    cmd.Env = append(cmd.Env, "LANG=C")
+   cmd.Env = append(cmd.Env, os.Environ()...)
    cmd.Stderr = &stderr
    err = cmd.Run()
    if err == nil {
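The trailing-slash rule added to `UploadDir` above is easy to get backwards, so here is a minimal, self-contained sketch of just that normalization. The `normalizeSrc` helper is illustrative only and not part of Packer:

```go
package main

import "fmt"

// normalizeSrc mirrors the rule from the UploadDir change above:
// "dir/" becomes "dir/.", so cp copies the directory's contents
// (including hidden files) rather than the directory itself.
func normalizeSrc(src string) string {
	if len(src) > 0 && src[len(src)-1] == '/' {
		return src + "."
	}
	return src
}

func main() {
	fmt.Println(normalizeSrc("/tmp/files/")) // /tmp/files/.
	fmt.Println(normalizeSrc("/tmp/files"))  // /tmp/files
}
```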


@@ -27,11 +27,12 @@ func AvailableDevice() (string, error) {
            continue
        }

-       for i := 1; i < 16; i++ {
-           device := fmt.Sprintf("/dev/%s%c%d", prefix, letter, i)
-           if _, err := os.Stat(device); err != nil {
-               return device, nil
-           }
+       // To be able to build both Paravirtual and HVM images, the unnumbered
+       // device and the first numbered one must be available.
+       // E.g. /dev/xvdf and /dev/xvdf1
+       numbered_device := fmt.Sprintf("%s%d", device, 1)
+       if _, err := os.Stat(numbered_device); err != nil {
+           return device, nil
        }
    }


@@ -0,0 +1,31 @@
package chroot
import (
"fmt"
"github.com/mitchellh/goamz/ec2"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// StepCheckRootDevice makes sure the root device on the AMI is EBS-backed.
type StepCheckRootDevice struct{}
func (s *StepCheckRootDevice) Run(state multistep.StateBag) multistep.StepAction {
image := state.Get("source_image").(*ec2.Image)
ui := state.Get("ui").(packer.Ui)
ui.Say("Checking the root device on source AMI...")
// It must be EBS-backed otherwise the build won't work
if image.RootDeviceType != "ebs" {
err := fmt.Errorf("The root device of the source AMI must be EBS-backed.")
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
return multistep.ActionContinue
}
func (s *StepCheckRootDevice) Cleanup(multistep.StateBag) {}


@@ -3,6 +3,7 @@ package chroot
import (
    "bytes"
    "fmt"
+   "github.com/mitchellh/goamz/ec2"
    "github.com/mitchellh/multistep"
    "github.com/mitchellh/packer/packer"
    "log"
@@ -26,6 +27,7 @@ type StepMountDevice struct {
func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction {
    config := state.Get("config").(*Config)
    ui := state.Get("ui").(packer.Ui)
+   image := state.Get("source_image").(*ec2.Image)
    device := state.Get("device").(string)
    wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
@@ -57,10 +59,17 @@ func (s *StepMountDevice) Run(state multistep.StateBag) multistep.StepAction {
        return multistep.ActionHalt
    }

+   log.Printf("Source image virtualization type is: %s", image.VirtualizationType)
+   deviceMount := device
+   if image.VirtualizationType == "hvm" {
+       deviceMount = fmt.Sprintf("%s%d", device, 1)
+   }
+   state.Put("deviceMount", deviceMount)
+
    ui.Say("Mounting the root device...")
    stderr := new(bytes.Buffer)
    mountCommand, err := wrappedCommand(
-       fmt.Sprintf("mount %s %s", device, mountPath))
+       fmt.Sprintf("mount %s %s", deviceMount, mountPath))
    if err != nil {
        err := fmt.Errorf("Error creating mount command: %s", err)
        state.Put("error", err)


@@ -2,6 +2,7 @@ package chroot
import (
    "fmt"
+
    "github.com/mitchellh/goamz/ec2"
    "github.com/mitchellh/multistep"
    awscommon "github.com/mitchellh/packer/builder/amazon/common"
@@ -29,13 +30,11 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
        blockDevices[i] = newDevice
    }

-   registerOpts := &ec2.RegisterImage{
-       Name:           config.AMIName,
-       Architecture:   image.Architecture,
-       KernelId:       image.KernelId,
-       RamdiskId:      image.RamdiskId,
-       RootDeviceName: image.RootDeviceName,
-       BlockDevices:   blockDevices,
+   registerOpts := buildRegisterOpts(config, image, blockDevices)
+
+   // Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
+   if config.AMIEnhancedNetworking {
+       registerOpts.SriovNetSupport = "simple"
    }

    registerResp, err := ec2conn.RegisterImage(registerOpts)
@@ -71,3 +70,20 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
}

func (s *StepRegisterAMI) Cleanup(state multistep.StateBag) {}
func buildRegisterOpts(config *Config, image *ec2.Image, blockDevices []ec2.BlockDeviceMapping) *ec2.RegisterImage {
registerOpts := &ec2.RegisterImage{
Name: config.AMIName,
Architecture: image.Architecture,
RootDeviceName: image.RootDeviceName,
BlockDevices: blockDevices,
VirtType: config.AMIVirtType,
}
if config.AMIVirtType != "hvm" {
registerOpts.KernelId = image.KernelId
registerOpts.RamdiskId = image.RamdiskId
}
return registerOpts
}


@@ -0,0 +1,73 @@
package chroot
import (
"github.com/mitchellh/goamz/ec2"
"testing"
)
func testImage() ec2.Image {
return ec2.Image{
Id: "ami-abcd1234",
Name: "ami_test_name",
Architecture: "x86_64",
KernelId: "aki-abcd1234",
}
}
func TestStepRegisterAmi_buildRegisterOpts_pv(t *testing.T) {
config := Config{}
config.AMIName = "test_ami_name"
config.AMIDescription = "test_ami_description"
config.AMIVirtType = "paravirtual"
image := testImage()
blockDevices := []ec2.BlockDeviceMapping{}
opts := buildRegisterOpts(&config, &image, blockDevices)
expected := config.AMIVirtType
if opts.VirtType != expected {
t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, opts.VirtType)
}
expected = config.AMIName
if opts.Name != expected {
t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, opts.Name)
}
expected = image.KernelId
if opts.KernelId != expected {
t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, opts.KernelId)
}
}
func TestStepRegisterAmi_buildRegisterOpts_hvm(t *testing.T) {
config := Config{}
config.AMIName = "test_ami_name"
config.AMIDescription = "test_ami_description"
config.AMIVirtType = "hvm"
image := testImage()
blockDevices := []ec2.BlockDeviceMapping{}
opts := buildRegisterOpts(&config, &image, blockDevices)
expected := config.AMIVirtType
if opts.VirtType != expected {
t.Fatalf("Unexpected VirtType value: expected %s got %s\n", expected, opts.VirtType)
}
expected = config.AMIName
if opts.Name != expected {
t.Fatalf("Unexpected Name value: expected %s got %s\n", expected, opts.Name)
}
expected = ""
if opts.KernelId != expected {
t.Fatalf("Unexpected KernelId value: expected %s got %s\n", expected, opts.KernelId)
}
}


@@ -3,6 +3,8 @@ package chroot
import (
    "errors"
    "fmt"
+   "time"
+
    "github.com/mitchellh/goamz/ec2"
    "github.com/mitchellh/multistep"
    awscommon "github.com/mitchellh/packer/builder/amazon/common"
@@ -23,7 +25,9 @@ func (s *StepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
    volumeId := state.Get("volume_id").(string)

    ui.Say("Creating snapshot...")
-   createSnapResp, err := ec2conn.CreateSnapshot(volumeId, "")
+   createSnapResp, err := ec2conn.CreateSnapshot(
+       volumeId,
+       fmt.Sprintf("Packer: %s", time.Now().String()))
    if err != nil {
        err := fmt.Errorf("Error creating snapshot: %s", err)
        state.Put("error", err)


@@ -13,6 +13,7 @@ type AccessConfig struct {
    AccessKey string `mapstructure:"access_key"`
    SecretKey string `mapstructure:"secret_key"`
    RawRegion string `mapstructure:"region"`
+   Token     string `mapstructure:"token"`
}

// Auth returns a valid aws.Auth object for access to AWS services, or
@@ -23,6 +24,10 @@ func (c *AccessConfig) Auth() (aws.Auth, error) {
        // Store the accesskey and secret that we got...
        c.AccessKey = auth.AccessKey
        c.SecretKey = auth.SecretKey
+       c.Token = auth.Token
+   }
+
+   if c.Token != "" {
+       auth.Token = c.Token
    }

    return auth, err


@@ -2,20 +2,22 @@ package common
import (
+   "fmt"
    "github.com/mitchellh/goamz/aws"
    "github.com/mitchellh/packer/packer"
)

// AMIConfig is for common configuration related to creating AMIs.
type AMIConfig struct {
    AMIName               string            `mapstructure:"ami_name"`
    AMIDescription        string            `mapstructure:"ami_description"`
    AMIVirtType           string            `mapstructure:"ami_virtualization_type"`
    AMIUsers              []string          `mapstructure:"ami_users"`
    AMIGroups             []string          `mapstructure:"ami_groups"`
    AMIProductCodes       []string          `mapstructure:"ami_product_codes"`
    AMIRegions            []string          `mapstructure:"ami_regions"`
    AMITags               map[string]string `mapstructure:"tags"`
+   AMIEnhancedNetworking bool              `mapstructure:"enhanced_networking"`
}

func (c *AMIConfig) Prepare(t *packer.ConfigTemplate) []error {


@@ -6,6 +6,7 @@ import (
    "github.com/mitchellh/goamz/ec2"
    "github.com/mitchellh/packer/packer"
    "log"
+   "sort"
    "strings"
)
@@ -36,6 +37,7 @@ func (a *Artifact) Id() string {
        parts = append(parts, fmt.Sprintf("%s:%s", region, amiId))
    }

+   sort.Strings(parts)
    return strings.Join(parts, ",")
}
@@ -46,9 +48,14 @@ func (a *Artifact) String() string {
        amiStrings = append(amiStrings, single)
    }

+   sort.Strings(amiStrings)
    return fmt.Sprintf("AMIs were created:\n\n%s", strings.Join(amiStrings, "\n"))
}

+func (a *Artifact) State(name string) interface{} {
+   return nil
+}
+
func (a *Artifact) Destroy() error {
    errors := make([]error, 0)


@@ -1,19 +1,23 @@
package common

import (
+   "fmt"
+
    "github.com/mitchellh/goamz/ec2"
+   "github.com/mitchellh/packer/packer"
)

// BlockDevice
type BlockDevice struct {
-   DeviceName          string `mapstructure:"device_name"`
-   VirtualName         string `mapstructure:"virtual_name"`
-   SnapshotId          string `mapstructure:"snapshot_id"`
-   VolumeType          string `mapstructure:"volume_type"`
-   VolumeSize          int64  `mapstructure:"volume_size"`
    DeleteOnTermination bool   `mapstructure:"delete_on_termination"`
+   DeviceName          string `mapstructure:"device_name"`
+   Encrypted           bool   `mapstructure:"encrypted"`
    IOPS                int64  `mapstructure:"iops"`
    NoDevice            bool   `mapstructure:"no_device"`
+   SnapshotId          string `mapstructure:"snapshot_id"`
+   VirtualName         string `mapstructure:"virtual_name"`
+   VolumeType          string `mapstructure:"volume_type"`
+   VolumeSize          int64  `mapstructure:"volume_size"`
}

type BlockDevices struct {
@@ -34,11 +38,57 @@ func buildBlockDevices(b []BlockDevice) []ec2.BlockDeviceMapping {
            DeleteOnTermination: blockDevice.DeleteOnTermination,
            IOPS:                blockDevice.IOPS,
            NoDevice:            blockDevice.NoDevice,
+           Encrypted:           blockDevice.Encrypted,
        })
    }
    return blockDevices
}
func (b *BlockDevices) Prepare(t *packer.ConfigTemplate) []error {
if t == nil {
var err error
t, err = packer.NewConfigTemplate()
if err != nil {
return []error{err}
}
}
lists := map[string][]BlockDevice{
"ami_block_device_mappings": b.AMIMappings,
"launch_block_device_mappings": b.LaunchMappings,
}
var errs []error
for outer, bds := range lists {
for i, bd := range bds {
templates := map[string]*string{
"device_name": &bd.DeviceName,
"snapshot_id": &bd.SnapshotId,
"virtual_name": &bd.VirtualName,
"volume_type": &bd.VolumeType,
}
for n, ptr := range templates {
var err error
*ptr, err = t.Process(*ptr, nil)
if err != nil {
errs = append(
errs, fmt.Errorf(
"Error processing %s[%d].%s: %s",
outer, i, n, err))
}
}
}
}
if len(errs) > 0 {
return errs
}
return nil
}
func (b *BlockDevices) BuildAMIDevices() []ec2.BlockDeviceMapping {
    return buildBlockDevices(b.AMIMappings)
}
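As a rough illustration of how the new `encrypted` flag flows through `BuildAMIDevices`, here is a sketch in the style of the test file that follows. It assumes it sits in the same `common` package (e.g. next to `block_device_test.go`), and the `gp2` volume type is just an example value, not something this commit prescribes:

```go
package common

import "fmt"

// ExampleBlockDevices_encrypted is an illustrative sketch (not part of this
// commit) showing a BlockDevice config, including the new Encrypted field,
// being turned into a goamz ec2.BlockDeviceMapping.
func ExampleBlockDevices_encrypted() {
	devices := BlockDevices{
		AMIMappings: []BlockDevice{
			{
				DeviceName:          "/dev/sdb",
				VolumeType:          "gp2", // example value only
				VolumeSize:          8,
				Encrypted:           true, // field added in this change
				DeleteOnTermination: true,
			},
		},
	}

	for _, m := range devices.BuildAMIDevices() {
		fmt.Printf("%s encrypted=%v\n", m.DeviceName, m.Encrypted)
	}
	// Output: /dev/sdb encrypted=true
}
```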


@@ -7,38 +7,47 @@ import (
)

func TestBlockDevice(t *testing.T) {
-   ec2Mapping := []ec2.BlockDeviceMapping{
-       ec2.BlockDeviceMapping{
-           DeviceName:          "/dev/sdb",
-           VirtualName:         "ephemeral0",
-           SnapshotId:          "snap-1234",
-           VolumeType:          "standard",
-           VolumeSize:          8,
-           DeleteOnTermination: true,
-           IOPS:                1000,
+   cases := []struct {
+       Config *BlockDevice
+       Result *ec2.BlockDeviceMapping
+   }{
+       {
+           Config: &BlockDevice{
+               DeviceName:          "/dev/sdb",
+               VirtualName:         "ephemeral0",
+               SnapshotId:          "snap-1234",
+               VolumeType:          "standard",
+               VolumeSize:          8,
+               DeleteOnTermination: true,
+               IOPS:                1000,
+           },
+
+           Result: &ec2.BlockDeviceMapping{
+               DeviceName:          "/dev/sdb",
+               VirtualName:         "ephemeral0",
+               SnapshotId:          "snap-1234",
+               VolumeType:          "standard",
+               VolumeSize:          8,
+               DeleteOnTermination: true,
+               IOPS:                1000,
+           },
        },
    }

-   blockDevice := BlockDevice{
-       DeviceName:          "/dev/sdb",
-       VirtualName:         "ephemeral0",
-       SnapshotId:          "snap-1234",
-       VolumeType:          "standard",
-       VolumeSize:          8,
-       DeleteOnTermination: true,
-       IOPS:                1000,
-   }
-
-   blockDevices := BlockDevices{
-       AMIMappings:    []BlockDevice{blockDevice},
-       LaunchMappings: []BlockDevice{blockDevice},
-   }
+   for _, tc := range cases {
+       blockDevices := BlockDevices{
+           AMIMappings:    []BlockDevice{*tc.Config},
+           LaunchMappings: []BlockDevice{*tc.Config},
+       }

-   if !reflect.DeepEqual(ec2Mapping, blockDevices.BuildAMIDevices()) {
-       t.Fatalf("bad: %#v", ec2Mapping)
-   }
-   if !reflect.DeepEqual(ec2Mapping, blockDevices.BuildLaunchDevices()) {
-       t.Fatalf("bad: %#v", ec2Mapping)
+       expected := []ec2.BlockDeviceMapping{*tc.Result}
+       if !reflect.DeepEqual(expected, blockDevices.BuildAMIDevices()) {
+           t.Fatalf("bad: %#v", expected)
+       }
+       if !reflect.DeepEqual(expected, blockDevices.BuildLaunchDevices()) {
+           t.Fatalf("bad: %#v", expected)
+       }
    }
}


@@ -3,9 +3,11 @@ package common
import (
    "errors"
    "fmt"
-   "github.com/mitchellh/packer/packer"
    "os"
    "time"
+
+   "github.com/mitchellh/packer/common/uuid"
+   "github.com/mitchellh/packer/packer"
)

// RunConfig contains configuration for running an instance from a source
@@ -17,9 +19,12 @@ type RunConfig struct {
    InstanceType         string            `mapstructure:"instance_type"`
    RunTags              map[string]string `mapstructure:"run_tags"`
    SourceAmi            string            `mapstructure:"source_ami"`
+   SpotPrice            string            `mapstructure:"spot_price"`
+   SpotPriceAutoProduct string            `mapstructure:"spot_price_auto_product"`
    RawSSHTimeout        string            `mapstructure:"ssh_timeout"`
    SSHUsername          string            `mapstructure:"ssh_username"`
    SSHPrivateKeyFile    string            `mapstructure:"ssh_private_key_file"`
+   SSHPrivateIp         bool              `mapstructure:"ssh_private_ip"`
    SSHPort              int               `mapstructure:"ssh_port"`
    SecurityGroupId      string            `mapstructure:"security_group_id"`
    SecurityGroupIds     []string          `mapstructure:"security_group_ids"`
@@ -42,6 +47,34 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {
        }
    }
templates := map[string]*string{
"iam_instance_profile": &c.IamInstanceProfile,
"instance_type": &c.InstanceType,
"spot_price": &c.SpotPrice,
"spot_price_auto_product": &c.SpotPriceAutoProduct,
"ssh_timeout": &c.RawSSHTimeout,
"ssh_username": &c.SSHUsername,
"ssh_private_key_file": &c.SSHPrivateKeyFile,
"source_ami": &c.SourceAmi,
"subnet_id": &c.SubnetId,
"temporary_key_pair_name": &c.TemporaryKeyPairName,
"vpc_id": &c.VpcId,
"availability_zone": &c.AvailabilityZone,
"user_data": &c.UserData,
"user_data_file": &c.UserDataFile,
"security_group_id": &c.SecurityGroupId,
}
errs := make([]error, 0)
for n, ptr := range templates {
var err error
*ptr, err = t.Process(*ptr, nil)
if err != nil {
errs = append(
errs, fmt.Errorf("Error processing %s: %s", n, err))
}
}
    // Defaults
    if c.SSHPort == 0 {
        c.SSHPort = 22
@@ -52,12 +85,12 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {
    }

    if c.TemporaryKeyPairName == "" {
-       c.TemporaryKeyPairName = "packer {{uuid}}"
+       c.TemporaryKeyPairName = fmt.Sprintf(
+           "packer %s", uuid.TimeOrderedUUID())
    }

    // Validation
    var err error
-   errs := make([]error, 0)
    if c.SourceAmi == "" {
        errs = append(errs, errors.New("A source_ami must be specified"))
    }
@@ -66,6 +99,13 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {
        errs = append(errs, errors.New("An instance_type must be specified"))
    }
if c.SpotPrice == "auto" {
if c.SpotPriceAutoProduct == "" {
errs = append(errs, errors.New(
"spot_price_auto_product must be specified when spot_price is auto"))
}
}
if c.SSHUsername == "" { if c.SSHUsername == "" {
errs = append(errs, errors.New("An ssh_username must be specified")) errs = append(errs, errors.New("An ssh_username must be specified"))
} }
@ -87,28 +127,6 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {
} }
} }
templates := map[string]*string{
"iam_instance_profile": &c.IamInstanceProfile,
"instance_type": &c.InstanceType,
"ssh_timeout": &c.RawSSHTimeout,
"ssh_username": &c.SSHUsername,
"ssh_private_key_file": &c.SSHPrivateKeyFile,
"source_ami": &c.SourceAmi,
"subnet_id": &c.SubnetId,
"temporary_key_pair_name": &c.TemporaryKeyPairName,
"vpc_id": &c.VpcId,
"availability_zone": &c.AvailabilityZone,
}
for n, ptr := range templates {
var err error
*ptr, err = t.Process(*ptr, nil)
if err != nil {
errs = append(
errs, fmt.Errorf("Error processing %s: %s", n, err))
}
}
sliceTemplates := map[string][]string{ sliceTemplates := map[string][]string{
"security_group_ids": c.SecurityGroupIds, "security_group_ids": c.SecurityGroupIds,
} }


@@ -47,6 +47,19 @@ func TestRunConfigPrepare_SourceAmi(t *testing.T) {
    }
}
func TestRunConfigPrepare_SpotAuto(t *testing.T) {
c := testConfig()
c.SpotPrice = "auto"
if err := c.Prepare(nil); len(err) != 1 {
t.Fatalf("err: %s", err)
}
c.SpotPriceAutoProduct = "foo"
if err := c.Prepare(nil); len(err) != 0 {
t.Fatalf("err: %s", err)
}
}
func TestRunConfigPrepare_SSHPort(t *testing.T) {
    c := testConfig()
    c.SSHPort = 0


@@ -11,7 +11,7 @@ import (

// SSHAddress returns a function that can be given to the SSH communicator
// for determining the SSH address based on the instance DNS name.
-func SSHAddress(e *ec2.EC2, port int) func(multistep.StateBag) (string, error) {
+func SSHAddress(e *ec2.EC2, port int, private bool) func(multistep.StateBag) (string, error) {
    return func(state multistep.StateBag) (string, error) {
        for j := 0; j < 2; j++ {
            var host string
@@ -19,7 +19,7 @@ func SSHAddress(e *ec2.EC2, port int) func(multistep.StateBag) (string, error) {
            if i.DNSName != "" {
                host = i.DNSName
            } else if i.VpcId != "" {
-               if i.PublicIpAddress != "" {
+               if i.PublicIpAddress != "" && !private {
                    host = i.PublicIpAddress
                } else {
                    host = i.PrivateIpAddress
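For readers skimming the `SSHAddress` change, here is a simplified, self-contained sketch of the host-selection rule it implements. It deliberately ignores the surrounding retry and VPC checks (in the real code the public/private choice only applies when `VpcId` is set), and `pickHost` is a hypothetical helper, not Packer code:

```go
package main

import "fmt"

// pickHost mirrors the selection logic in SSHAddress above: prefer the DNS
// name, otherwise use the public IP unless it is empty or the new `private`
// flag (wired from `ssh_private_ip`) forces the private address.
func pickHost(dnsName, publicIP, privateIP string, private bool) string {
	if dnsName != "" {
		return dnsName
	}
	if publicIP != "" && !private {
		return publicIP
	}
	return privateIP
}

func main() {
	fmt.Println(pickHost("", "54.0.0.1", "10.0.0.5", false)) // 54.0.0.1
	fmt.Println(pickHost("", "54.0.0.1", "10.0.0.5", true))  // 10.0.0.5
}
```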


@@ -6,6 +6,9 @@ import (
    "github.com/mitchellh/goamz/ec2"
    "github.com/mitchellh/multistep"
    "log"
+   "net"
+   "os"
+   "strconv"
    "time"
)
@@ -38,6 +41,9 @@ func AMIStateRefreshFunc(conn *ec2.EC2, imageId string) StateRefreshFunc {
            if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidAMIID.NotFound" {
                // Set this to nil as if we didn't find anything.
                resp = nil
+           } else if isTransientNetworkError(err) {
+               // Transient network error, treat it as if we didn't find anything
+               resp = nil
            } else {
                log.Printf("Error on AMIStateRefresh: %s", err)
                return nil, "", err
@@ -64,6 +70,9 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc {
            if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidInstanceID.NotFound" {
                // Set this to nil as if we didn't find anything.
                resp = nil
+           } else if isTransientNetworkError(err) {
+               // Transient network error, treat it as if we didn't find anything
+               resp = nil
            } else {
                log.Printf("Error on InstanceStateRefresh: %s", err)
                return nil, "", err
@@ -81,11 +90,42 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, i *ec2.Instance) StateRefreshFunc {
    }
}
// SpotRequestStateRefreshFunc returns a StateRefreshFunc that is used to watch
// a spot request for state changes.
func SpotRequestStateRefreshFunc(conn *ec2.EC2, spotRequestId string) StateRefreshFunc {
return func() (interface{}, string, error) {
resp, err := conn.DescribeSpotRequests([]string{spotRequestId}, ec2.NewFilter())
if err != nil {
if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidSpotInstanceRequestID.NotFound" {
// Set this to nil as if we didn't find anything.
resp = nil
} else if isTransientNetworkError(err) {
// Transient network error, treat it as if we didn't find anything
resp = nil
} else {
log.Printf("Error on SpotRequestStateRefresh: %s", err)
return nil, "", err
}
}
if resp == nil || len(resp.SpotRequestResults) == 0 {
// Sometimes AWS has consistency issues and doesn't see the
// SpotRequest. Return an empty state.
return nil, "", nil
}
i := resp.SpotRequestResults[0]
return i, i.State, nil
}
}
// WaitForState watches an object and waits for it to achieve a certain
// state.
func WaitForState(conf *StateChangeConf) (i interface{}, err error) {
    log.Printf("Waiting for state to become: %s", conf.Target)

+   sleepSeconds := 2
+   maxTicks := int(TimeoutSeconds()/sleepSeconds) + 1
    notfoundTick := 0

    for {
@@ -99,7 +139,7 @@ func WaitForState(conf *StateChangeConf) (i interface{}, err error) {
            // If we didn't find the resource, check if we have been
            // not finding it for awhile, and if so, report an error.
            notfoundTick += 1
-           if notfoundTick > 20 {
+           if notfoundTick > maxTicks {
                return nil, errors.New("couldn't find resource")
            }
        } else {
@@ -125,13 +165,41 @@ func WaitForState(conf *StateChangeConf) (i interface{}, err error) {
            }

            if !found {
-               fmt.Errorf("unexpected state '%s', wanted target '%s'", currentState, conf.Target)
-               return
+               err := fmt.Errorf("unexpected state '%s', wanted target '%s'", currentState, conf.Target)
+               return nil, err
            }
        }

-       time.Sleep(2 * time.Second)
+       time.Sleep(time.Duration(sleepSeconds) * time.Second)
    }

    return
}
func isTransientNetworkError(err error) bool {
if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
return true
}
return false
}
// Returns 300 seconds (5 minutes) by default
// Some AWS operations, like copying an AMI to a distant region, take a very long time
// Allow user to override with AWS_TIMEOUT_SECONDS environment variable
func TimeoutSeconds() (seconds int) {
seconds = 300
override := os.Getenv("AWS_TIMEOUT_SECONDS")
if override != "" {
n, err := strconv.Atoi(override)
if err != nil {
log.Printf("Invalid timeout seconds '%s', using default", override)
} else {
seconds = n
}
}
log.Printf("Allowing %ds to complete (change with AWS_TIMEOUT_SECONDS)", seconds)
return seconds
}
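A small, self-contained sketch of the timeout/tick arithmetic introduced above. The `timeoutSeconds` function here re-implements `TimeoutSeconds` for illustration only, so the example compiles on its own:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// timeoutSeconds mirrors the override logic above: default to 300 seconds
// unless AWS_TIMEOUT_SECONDS parses as an integer.
func timeoutSeconds() int {
	seconds := 300
	if override := os.Getenv("AWS_TIMEOUT_SECONDS"); override != "" {
		if n, err := strconv.Atoi(override); err == nil {
			seconds = n
		}
	}
	return seconds
}

func main() {
	os.Setenv("AWS_TIMEOUT_SECONDS", "900")
	sleepSeconds := 2
	// Same arithmetic WaitForState uses for its "not found" budget.
	maxTicks := timeoutSeconds()/sleepSeconds + 1
	fmt.Println(maxTicks) // 451
}
```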


@@ -2,10 +2,14 @@ package common

import (
    "fmt"
+   "io/ioutil"
+   "log"
+   "strconv"
+   "time"

    "github.com/mitchellh/goamz/ec2"
    "github.com/mitchellh/multistep"
    "github.com/mitchellh/packer/packer"
-   "io/ioutil"
)

type StepRunSourceInstance struct {
@@ -17,12 +21,15 @@ type StepRunSourceInstance struct {
    InstanceType       string
    IamInstanceProfile string
    SourceAMI          string
+   SpotPrice          string
+   SpotPriceProduct   string
    SubnetId           string
    Tags               map[string]string
    UserData           string
    UserDataFile       string

    instance    *ec2.Instance
+   spotRequest *ec2.SpotRequestResult
}

func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepAction {
@@ -47,21 +54,6 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
        securityGroups[n] = ec2.SecurityGroup{Id: securityGroupId}
    }

-   runOpts := &ec2.RunInstances{
-       KeyName:                  keyName,
-       ImageId:                  s.SourceAMI,
-       InstanceType:             s.InstanceType,
-       UserData:                 []byte(userData),
-       MinCount:                 0,
-       MaxCount:                 0,
-       SecurityGroups:           securityGroups,
-       IamInstanceProfile:       s.IamInstanceProfile,
-       SubnetId:                 s.SubnetId,
-       AssociatePublicIpAddress: s.AssociatePublicIpAddress,
-       BlockDevices:             s.BlockDevices.BuildLaunchDevices(),
-       AvailZone:                s.AvailabilityZone,
-   }
-
    ui.Say("Launching a source AWS instance...")
    imageResp, err := ec2conn.Images([]string{s.SourceAMI}, ec2.NewFilter())
    if err != nil {
@@ -82,29 +74,137 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
        return multistep.ActionHalt
    }

-   runResp, err := ec2conn.RunInstances(runOpts)
+   spotPrice := s.SpotPrice
if spotPrice == "auto" {
ui.Message(fmt.Sprintf(
"Finding spot price for %s %s...",
s.SpotPriceProduct, s.InstanceType))
// Detect the spot price
startTime := time.Now().Add(-1 * time.Hour)
resp, err := ec2conn.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistory{
InstanceType: []string{s.InstanceType},
ProductDescription: []string{s.SpotPriceProduct},
AvailabilityZone: s.AvailabilityZone,
StartTime: startTime,
})
if err != nil {
err := fmt.Errorf("Error finding spot price: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
var price float64
for _, history := range resp.History {
log.Printf("[INFO] Candidate spot price: %s", history.SpotPrice)
current, err := strconv.ParseFloat(history.SpotPrice, 64)
if err != nil {
log.Printf("[ERR] Error parsing spot price: %s", err)
continue
}
if price == 0 || current < price {
price = current
}
}
if price == 0 {
err := fmt.Errorf("No candidate spot prices found!")
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
spotPrice = strconv.FormatFloat(price, 'f', -1, 64)
}
var instanceId string
if spotPrice == "" {
runOpts := &ec2.RunInstances{
KeyName: keyName,
ImageId: s.SourceAMI,
InstanceType: s.InstanceType,
UserData: []byte(userData),
MinCount: 0,
MaxCount: 0,
SecurityGroups: securityGroups,
IamInstanceProfile: s.IamInstanceProfile,
SubnetId: s.SubnetId,
AssociatePublicIpAddress: s.AssociatePublicIpAddress,
BlockDevices: s.BlockDevices.BuildLaunchDevices(),
AvailZone: s.AvailabilityZone,
}
runResp, err := ec2conn.RunInstances(runOpts)
if err != nil {
err := fmt.Errorf("Error launching source instance: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
instanceId = runResp.Instances[0].InstanceId
} else {
ui.Message(fmt.Sprintf(
"Requesting spot instance '%s' for: %s",
s.InstanceType, spotPrice))
runOpts := &ec2.RequestSpotInstances{
SpotPrice: spotPrice,
KeyName: keyName,
ImageId: s.SourceAMI,
InstanceType: s.InstanceType,
UserData: []byte(userData),
SecurityGroups: securityGroups,
IamInstanceProfile: s.IamInstanceProfile,
SubnetId: s.SubnetId,
AssociatePublicIpAddress: s.AssociatePublicIpAddress,
BlockDevices: s.BlockDevices.BuildLaunchDevices(),
AvailZone: s.AvailabilityZone,
}
runSpotResp, err := ec2conn.RequestSpotInstances(runOpts)
if err != nil {
err := fmt.Errorf("Error launching source spot instance: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
s.spotRequest = &runSpotResp.SpotRequestResults[0]
spotRequestId := s.spotRequest.SpotRequestId
ui.Message(fmt.Sprintf("Waiting for spot request (%s) to become active...", spotRequestId))
stateChange := StateChangeConf{
Pending: []string{"open"},
Target: "active",
Refresh: SpotRequestStateRefreshFunc(ec2conn, spotRequestId),
StepState: state,
}
_, err = WaitForState(&stateChange)
if err != nil {
err := fmt.Errorf("Error waiting for spot request (%s) to become ready: %s", spotRequestId, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
spotResp, err := ec2conn.DescribeSpotRequests([]string{spotRequestId}, nil)
if err != nil {
err := fmt.Errorf("Error finding spot request (%s): %s", spotRequestId, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
instanceId = spotResp.SpotRequestResults[0].InstanceId
}
instanceResp, err := ec2conn.Instances([]string{instanceId}, nil)
if err != nil { if err != nil {
err := fmt.Errorf("Error launching source instance: %s", err) err := fmt.Errorf("Error finding source instance (%s): %s", instanceId, err)
state.Put("error", err) state.Put("error", err)
ui.Error(err.Error()) ui.Error(err.Error())
return multistep.ActionHalt return multistep.ActionHalt
} }
s.instance = &instanceResp.Reservations[0].Instances[0]
s.instance = &runResp.Instances[0]
ui.Message(fmt.Sprintf("Instance ID: %s", s.instance.InstanceId)) ui.Message(fmt.Sprintf("Instance ID: %s", s.instance.InstanceId))
ec2Tags := make([]ec2.Tag, 1, len(s.Tags)+1)
ec2Tags[0] = ec2.Tag{"Name", "Packer Builder"}
for k, v := range s.Tags {
ec2Tags = append(ec2Tags, ec2.Tag{k, v})
}
_, err = ec2conn.CreateTags([]string{s.instance.InstanceId}, ec2Tags)
if err != nil {
ui.Message(
fmt.Sprintf("Failed to tag a Name on the builder instance: %s", err))
}
ui.Say(fmt.Sprintf("Waiting for instance (%s) to become ready...", s.instance.InstanceId)) ui.Say(fmt.Sprintf("Waiting for instance (%s) to become ready...", s.instance.InstanceId))
stateChange := StateChangeConf{ stateChange := StateChangeConf{
Pending: []string{"pending"}, Pending: []string{"pending"},
@ -122,6 +222,18 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
s.instance = latestInstance.(*ec2.Instance) s.instance = latestInstance.(*ec2.Instance)
ec2Tags := make([]ec2.Tag, 1, len(s.Tags)+1)
ec2Tags[0] = ec2.Tag{"Name", "Packer Builder"}
for k, v := range s.Tags {
ec2Tags = append(ec2Tags, ec2.Tag{k, v})
}
_, err = ec2conn.CreateTags([]string{s.instance.InstanceId}, ec2Tags)
if err != nil {
ui.Message(
fmt.Sprintf("Failed to tag a Name on the builder instance: %s", err))
}
if s.Debug { if s.Debug {
if s.instance.DNSName != "" { if s.instance.DNSName != "" {
ui.Message(fmt.Sprintf("Public DNS: %s", s.instance.DNSName)) ui.Message(fmt.Sprintf("Public DNS: %s", s.instance.DNSName))
@ -142,24 +254,41 @@ func (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepActi
} }
func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) { func (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) {
if s.instance == nil {
return
}
ec2conn := state.Get("ec2").(*ec2.EC2) ec2conn := state.Get("ec2").(*ec2.EC2)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
ui.Say("Terminating the source AWS instance...") // Cancel the spot request if it exists
if _, err := ec2conn.TerminateInstances([]string{s.instance.InstanceId}); err != nil { if s.spotRequest != nil {
ui.Error(fmt.Sprintf("Error terminating instance, may still be around: %s", err)) ui.Say("Cancelling the spot request...")
return if _, err := ec2conn.CancelSpotRequests([]string{s.spotRequest.SpotRequestId}); err != nil {
ui.Error(fmt.Sprintf("Error cancelling the spot request, may still be around: %s", err))
return
}
stateChange := StateChangeConf{
Pending: []string{"active", "open"},
Refresh: SpotRequestStateRefreshFunc(ec2conn, s.spotRequest.SpotRequestId),
Target: "cancelled",
}
WaitForState(&stateChange)
} }
stateChange := StateChangeConf{ // Terminate the source instance if it exists
Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"}, if s.instance != nil {
Refresh: InstanceStateRefreshFunc(ec2conn, s.instance),
Target: "terminated",
}
WaitForState(&stateChange) ui.Say("Terminating the source AWS instance...")
if _, err := ec2conn.TerminateInstances([]string{s.instance.InstanceId}); err != nil {
ui.Error(fmt.Sprintf("Error terminating instance, may still be around: %s", err))
return
}
stateChange := StateChangeConf{
Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"},
Refresh: InstanceStateRefreshFunc(ec2conn, s.instance),
Target: "terminated",
}
WaitForState(&stateChange)
}
} }
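When SpotPrice is set to "auto", the hunk above scans the last hour of spot price history and keeps the lowest price it can parse; any non-empty price then switches the launch path from RunInstances to RequestSpotInstances, with a wait for the request to move from "open" to "active" before the instance ID is known. A minimal standalone sketch of just the price-selection part, with the goamz DescribeSpotPriceHistory call elided and prices passed in as the decimal strings the API returns:

package main

import (
    "fmt"
    "strconv"
)

// lowestSpotPrice mirrors the "auto" branch above: entries that fail to parse
// are skipped, the smallest value wins, and no usable entry is an error.
func lowestSpotPrice(history []string) (string, error) {
    var price float64
    for _, h := range history {
        current, err := strconv.ParseFloat(h, 64)
        if err != nil {
            continue
        }
        if price == 0 || current < price {
            price = current
        }
    }
    if price == 0 {
        return "", fmt.Errorf("No candidate spot prices found!")
    }
    return strconv.FormatFloat(price, 'f', -1, 64), nil
}

func main() {
    bid, err := lowestSpotPrice([]string{"0.0421", "0.0377", "n/a", "0.0505"})
    fmt.Println(bid, err) // 0.0377 <nil>
}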

View File

@ -1,7 +1,8 @@
package chroot package common
import ( import (
"fmt" "fmt"
"github.com/mitchellh/goamz/ec2" "github.com/mitchellh/goamz/ec2"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
@ -12,15 +13,17 @@ import (
// //
// Produces: // Produces:
// source_image *ec2.Image - the source AMI info // source_image *ec2.Image - the source AMI info
type StepSourceAMIInfo struct{} type StepSourceAMIInfo struct {
SourceAmi string
EnhancedNetworking bool
}
func (s *StepSourceAMIInfo) Run(state multistep.StateBag) multistep.StepAction { func (s *StepSourceAMIInfo) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config)
ec2conn := state.Get("ec2").(*ec2.EC2) ec2conn := state.Get("ec2").(*ec2.EC2)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
ui.Say("Inspecting the source AMI...") ui.Say("Inspecting the source AMI...")
imageResp, err := ec2conn.Images([]string{config.SourceAmi}, ec2.NewFilter()) imageResp, err := ec2conn.Images([]string{s.SourceAmi}, ec2.NewFilter())
if err != nil { if err != nil {
err := fmt.Errorf("Error querying AMI: %s", err) err := fmt.Errorf("Error querying AMI: %s", err)
state.Put("error", err) state.Put("error", err)
@ -29,7 +32,7 @@ func (s *StepSourceAMIInfo) Run(state multistep.StateBag) multistep.StepAction {
} }
if len(imageResp.Images) == 0 { if len(imageResp.Images) == 0 {
err := fmt.Errorf("Source AMI '%s' was not found!", config.SourceAmi) err := fmt.Errorf("Source AMI '%s' was not found!", s.SourceAmi)
state.Put("error", err) state.Put("error", err)
ui.Error(err.Error()) ui.Error(err.Error())
return multistep.ActionHalt return multistep.ActionHalt
@ -37,9 +40,10 @@ func (s *StepSourceAMIInfo) Run(state multistep.StateBag) multistep.StepAction {
image := &imageResp.Images[0] image := &imageResp.Images[0]
// It must be EBS-backed otherwise the build won't work // Enhanced Networking (SriovNetSupport) can only be enabled on HVM AMIs.
if image.RootDeviceType != "ebs" { // See http://goo.gl/icuXh5
err := fmt.Errorf("The root device of the source AMI must be EBS-backed.") if s.EnhancedNetworking && image.VirtualizationType != "hvm" {
err := fmt.Errorf("Cannot enable enhanced networking, source AMI '%s' is not HVM", s.SourceAmi)
state.Put("error", err) state.Put("error", err)
ui.Error(err.Error()) ui.Error(err.Error())
return multistep.ActionHalt return multistep.ActionHalt
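This step now takes its inputs from the builder rather than reading the chroot builder's *Config out of the state bag, and the old EBS-only root device check gives way to an HVM guard: SriovNetSupport can only be turned on for HVM AMIs. A rough standalone sketch of the guard (the AMI ID in main is just a placeholder; the step wiring appears in the builders below):

package main

import "fmt"

// checkEnhancedNetworking reproduces the check above: enhanced networking
// requires an HVM source AMI, so anything else is rejected before launch.
func checkEnhancedNetworking(sourceAmi, virtType string, enhanced bool) error {
    if enhanced && virtType != "hvm" {
        return fmt.Errorf("Cannot enable enhanced networking, source AMI '%s' is not HVM", sourceAmi)
    }
    return nil
}

func main() {
    fmt.Println(checkEnhancedNetworking("ami-00000000", "paravirtual", true)) // rejected
    fmt.Println(checkEnhancedNetworking("ami-00000000", "hvm", true))         // <nil>
}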

View File

@ -7,12 +7,13 @@ package ebs
import ( import (
"fmt" "fmt"
"log"
"github.com/mitchellh/goamz/ec2" "github.com/mitchellh/goamz/ec2"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
awscommon "github.com/mitchellh/packer/builder/amazon/common" awscommon "github.com/mitchellh/packer/builder/amazon/common"
"github.com/mitchellh/packer/common" "github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
"log"
) )
// The unique ID for this builder // The unique ID for this builder
@ -49,6 +50,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
// Accumulate any errors // Accumulate any errors
errs := common.CheckUnusedConfig(md) errs := common.CheckUnusedConfig(md)
errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...)
errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(b.config.tpl)...)
errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...)
errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...)
@ -82,6 +84,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
// Build the steps // Build the steps
steps := []multistep.Step{ steps := []multistep.Step{
&awscommon.StepSourceAMIInfo{
SourceAmi: b.config.SourceAmi,
EnhancedNetworking: b.config.AMIEnhancedNetworking,
},
&awscommon.StepKeyPair{ &awscommon.StepKeyPair{
Debug: b.config.PackerDebug, Debug: b.config.PackerDebug,
DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName),
@ -96,6 +102,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
&awscommon.StepRunSourceInstance{ &awscommon.StepRunSourceInstance{
Debug: b.config.PackerDebug, Debug: b.config.PackerDebug,
ExpectedRootDevice: "ebs", ExpectedRootDevice: "ebs",
SpotPrice: b.config.SpotPrice,
SpotPriceProduct: b.config.SpotPriceAutoProduct,
InstanceType: b.config.InstanceType, InstanceType: b.config.InstanceType,
UserData: b.config.UserData, UserData: b.config.UserData,
UserDataFile: b.config.UserDataFile, UserDataFile: b.config.UserDataFile,
@ -108,12 +116,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
Tags: b.config.RunTags, Tags: b.config.RunTags,
}, },
&common.StepConnectSSH{ &common.StepConnectSSH{
SSHAddress: awscommon.SSHAddress(ec2conn, b.config.SSHPort), SSHAddress: awscommon.SSHAddress(
ec2conn, b.config.SSHPort, b.config.SSHPrivateIp),
SSHConfig: awscommon.SSHConfig(b.config.SSHUsername), SSHConfig: awscommon.SSHConfig(b.config.SSHUsername),
SSHWaitTimeout: b.config.SSHTimeout(), SSHWaitTimeout: b.config.SSHTimeout(),
}, },
&common.StepProvision{}, &common.StepProvision{},
&stepStopInstance{}, &stepStopInstance{SpotPrice: b.config.SpotPrice},
// TODO(mitchellh): verify works with spots
&stepModifyInstance{},
&stepCreateAMI{}, &stepCreateAMI{},
&awscommon.StepAMIRegionCopy{ &awscommon.StepAMIRegionCopy{
Regions: b.config.AMIRegions, Regions: b.config.AMIRegions,

View File

@ -1,4 +0,0 @@
package ebs
// This hook is fired prior to launching the EC2 instance.
const HookPreLaunch = "amazonebs_pre_launch"

View File

@ -8,7 +8,9 @@ import (
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
) )
type stepCreateAMI struct{} type stepCreateAMI struct {
image *ec2.Image
}
func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction { func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(config) config := state.Get("config").(config)
@ -54,9 +56,38 @@ func (s *stepCreateAMI) Run(state multistep.StateBag) multistep.StepAction {
return multistep.ActionHalt return multistep.ActionHalt
} }
imagesResp, err := ec2conn.Images([]string{createResp.ImageId}, nil)
if err != nil {
err := fmt.Errorf("Error searching for AMI: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
s.image = &imagesResp.Images[0]
return multistep.ActionContinue return multistep.ActionContinue
} }
func (s *stepCreateAMI) Cleanup(multistep.StateBag) { func (s *stepCreateAMI) Cleanup(state multistep.StateBag) {
// No cleanup... if s.image == nil {
return
}
_, cancelled := state.GetOk(multistep.StateCancelled)
_, halted := state.GetOk(multistep.StateHalted)
if !cancelled && !halted {
return
}
ec2conn := state.Get("ec2").(*ec2.EC2)
ui := state.Get("ui").(packer.Ui)
ui.Say("Deregistering the AMI because cancelation or error...")
if resp, err := ec2conn.DeregisterImage(s.image.Id); err != nil {
ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", err))
return
} else if resp.Return == false {
ui.Error(fmt.Sprintf("Error deregistering AMI, may still be around: %s", resp.Return))
return
}
} }
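stepCreateAMI now keeps a handle on the image it registered so Cleanup can deregister it, but only when the build was cancelled or halted; a successful run leaves the AMI in place. A small sketch of that gate, using only the multistep calls already visible in this diff:

package main

import (
    "fmt"

    "github.com/mitchellh/multistep"
)

// cleanupNeeded reports whether a step's Cleanup should undo its work:
// only on cancellation or halt, never after a successful build.
func cleanupNeeded(state multistep.StateBag) bool {
    _, cancelled := state.GetOk(multistep.StateCancelled)
    _, halted := state.GetOk(multistep.StateHalted)
    return cancelled || halted
}

func main() {
    state := new(multistep.BasicStateBag)
    fmt.Println(cleanupNeeded(state)) // false: normal completion, keep the AMI

    state.Put(multistep.StateCancelled, true)
    fmt.Println(cleanupNeeded(state)) // true: deregister the AMI
}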

View File

@ -0,0 +1,39 @@
package ebs
import (
"fmt"
"github.com/mitchellh/goamz/ec2"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
type stepModifyInstance struct{}
func (s *stepModifyInstance) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(config)
ec2conn := state.Get("ec2").(*ec2.EC2)
instance := state.Get("instance").(*ec2.Instance)
ui := state.Get("ui").(packer.Ui)
// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
if config.AMIEnhancedNetworking {
ui.Say("Enabling Enhanced Networking...")
_, err := ec2conn.ModifyInstance(
instance.InstanceId,
&ec2.ModifyInstance{SriovNetSupport: true},
)
if err != nil {
err := fmt.Errorf("Error enabling Enhanced Networking on %s: %s", instance.InstanceId, err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
}
return multistep.ActionContinue
}
func (s *stepModifyInstance) Cleanup(state multistep.StateBag) {
// No cleanup...
}

View File

@ -8,13 +8,20 @@ import (
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
) )
type stepStopInstance struct{} type stepStopInstance struct {
SpotPrice string
}
func (s *stepStopInstance) Run(state multistep.StateBag) multistep.StepAction { func (s *stepStopInstance) Run(state multistep.StateBag) multistep.StepAction {
ec2conn := state.Get("ec2").(*ec2.EC2) ec2conn := state.Get("ec2").(*ec2.EC2)
instance := state.Get("instance").(*ec2.Instance) instance := state.Get("instance").(*ec2.Instance)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
// Skip when it is a spot instance
if s.SpotPrice != "" {
return multistep.ActionContinue
}
// Stop the instance so we can create an AMI from it // Stop the instance so we can create an AMI from it
ui.Say("Stopping the source instance...") ui.Say("Stopping the source instance...")
_, err := ec2conn.StopInstances(instance.InstanceId) _, err := ec2conn.StopInstances(instance.InstanceId)
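The early return above skips the stop step for spot-backed builds, since EC2 does not support stopping a spot instance; it can only be terminated. The guard, reduced to a sketch:

package main

import "fmt"

// shouldStop mirrors the new guard: only on-demand builds (empty SpotPrice)
// stop the source instance before imaging.
func shouldStop(spotPrice string) bool {
    return spotPrice == ""
}

func main() {
    fmt.Println(shouldStop(""))     // true: on-demand, stop before create-image
    fmt.Println(shouldStop("auto")) // false: spot, skip the stop entirely
}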

View File

@ -5,14 +5,15 @@ package instance
import ( import (
"errors" "errors"
"fmt" "fmt"
"log"
"os"
"strings"
"github.com/mitchellh/goamz/ec2" "github.com/mitchellh/goamz/ec2"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
awscommon "github.com/mitchellh/packer/builder/amazon/common" awscommon "github.com/mitchellh/packer/builder/amazon/common"
"github.com/mitchellh/packer/common" "github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
"log"
"os"
"strings"
) )
// The unique ID for this builder // The unique ID for this builder
@ -74,7 +75,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
"-s {{.SecretKey}} " + "-s {{.SecretKey}} " +
"-d {{.BundleDirectory}} " + "-d {{.BundleDirectory}} " +
"--batch " + "--batch " +
"--url {{.S3Endpoint}} " + "--region {{.Region}} " +
"--retry" "--retry"
} }
@ -87,7 +88,8 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
"-e {{.PrivatePath}}/* " + "-e {{.PrivatePath}}/* " +
"-d {{.Destination}} " + "-d {{.Destination}} " +
"-p {{.Prefix}} " + "-p {{.Prefix}} " +
"--batch" "--batch " +
"--no-filter"
} }
if b.config.X509UploadPath == "" { if b.config.X509UploadPath == "" {
@ -97,6 +99,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
// Accumulate any errors // Accumulate any errors
errs := common.CheckUnusedConfig(md) errs := common.CheckUnusedConfig(md)
errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...)
errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(b.config.tpl)...)
errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...)
errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...)
@ -186,6 +189,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
// Build the steps // Build the steps
steps := []multistep.Step{ steps := []multistep.Step{
&awscommon.StepSourceAMIInfo{
SourceAmi: b.config.SourceAmi,
EnhancedNetworking: b.config.AMIEnhancedNetworking,
},
&awscommon.StepKeyPair{ &awscommon.StepKeyPair{
Debug: b.config.PackerDebug, Debug: b.config.PackerDebug,
DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName), DebugKeyPath: fmt.Sprintf("ec2_%s.pem", b.config.PackerBuildName),
@ -199,7 +206,8 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
}, },
&awscommon.StepRunSourceInstance{ &awscommon.StepRunSourceInstance{
Debug: b.config.PackerDebug, Debug: b.config.PackerDebug,
ExpectedRootDevice: "instance-store", SpotPrice: b.config.SpotPrice,
SpotPriceProduct: b.config.SpotPriceAutoProduct,
InstanceType: b.config.InstanceType, InstanceType: b.config.InstanceType,
IamInstanceProfile: b.config.IamInstanceProfile, IamInstanceProfile: b.config.IamInstanceProfile,
UserData: b.config.UserData, UserData: b.config.UserData,
@ -212,14 +220,19 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
Tags: b.config.RunTags, Tags: b.config.RunTags,
}, },
&common.StepConnectSSH{ &common.StepConnectSSH{
SSHAddress: awscommon.SSHAddress(ec2conn, b.config.SSHPort), SSHAddress: awscommon.SSHAddress(
ec2conn, b.config.SSHPort, b.config.SSHPrivateIp),
SSHConfig: awscommon.SSHConfig(b.config.SSHUsername), SSHConfig: awscommon.SSHConfig(b.config.SSHUsername),
SSHWaitTimeout: b.config.SSHTimeout(), SSHWaitTimeout: b.config.SSHTimeout(),
}, },
&common.StepProvision{}, &common.StepProvision{},
&StepUploadX509Cert{}, &StepUploadX509Cert{},
&StepBundleVolume{}, &StepBundleVolume{
&StepUploadBundle{}, Debug: b.config.PackerDebug,
},
&StepUploadBundle{
Debug: b.config.PackerDebug,
},
&StepRegisterAMI{}, &StepRegisterAMI{},
&awscommon.StepAMIRegionCopy{ &awscommon.StepAMIRegionCopy{
Regions: b.config.AMIRegions, Regions: b.config.AMIRegions,

View File

@ -2,6 +2,7 @@ package instance
import ( import (
"fmt" "fmt"
"github.com/mitchellh/goamz/ec2" "github.com/mitchellh/goamz/ec2"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
@ -17,7 +18,9 @@ type bundleCmdData struct {
PrivatePath string PrivatePath string
} }
type StepBundleVolume struct{} type StepBundleVolume struct {
Debug bool
}
func (s *StepBundleVolume) Run(state multistep.StateBag) multistep.StepAction { func (s *StepBundleVolume) Run(state multistep.StateBag) multistep.StepAction {
comm := state.Get("communicator").(packer.Communicator) comm := state.Get("communicator").(packer.Communicator)
@ -48,6 +51,11 @@ func (s *StepBundleVolume) Run(state multistep.StateBag) multistep.StepAction {
ui.Say("Bundling the volume...") ui.Say("Bundling the volume...")
cmd := new(packer.RemoteCmd) cmd := new(packer.RemoteCmd)
cmd.Command = config.BundleVolCommand cmd.Command = config.BundleVolCommand
if s.Debug {
ui.Say(fmt.Sprintf("Running: %s", config.BundleVolCommand))
}
if err := cmd.StartWithUi(comm, ui); err != nil { if err := cmd.StartWithUi(comm, ui); err != nil {
state.Put("error", fmt.Errorf("Error bundling volume: %s", err)) state.Put("error", fmt.Errorf("Error bundling volume: %s", err))
ui.Error(state.Get("error").(error).Error()) ui.Error(state.Get("error").(error).Error())

View File

@ -2,6 +2,7 @@ package instance
import ( import (
"fmt" "fmt"
"github.com/mitchellh/goamz/ec2" "github.com/mitchellh/goamz/ec2"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
awscommon "github.com/mitchellh/packer/builder/amazon/common" awscommon "github.com/mitchellh/packer/builder/amazon/common"
@ -24,6 +25,11 @@ func (s *StepRegisterAMI) Run(state multistep.StateBag) multistep.StepAction {
VirtType: config.AMIVirtType, VirtType: config.AMIVirtType,
} }
// Set SriovNetSupport to "simple". See http://goo.gl/icuXh5
if config.AMIEnhancedNetworking {
registerOpts.SriovNetSupport = "simple"
}
registerResp, err := ec2conn.RegisterImage(registerOpts) registerResp, err := ec2conn.RegisterImage(registerOpts)
if err != nil { if err != nil {
state.Put("error", fmt.Errorf("Error registering AMI: %s", err)) state.Put("error", fmt.Errorf("Error registering AMI: %s", err))

View File

@ -2,6 +2,7 @@ package instance
import ( import (
"fmt" "fmt"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
) )
@ -11,11 +12,13 @@ type uploadCmdData struct {
BucketName string BucketName string
BundleDirectory string BundleDirectory string
ManifestPath string ManifestPath string
S3Endpoint string Region string
SecretKey string SecretKey string
} }
type StepUploadBundle struct{} type StepUploadBundle struct {
Debug bool
}
func (s *StepUploadBundle) Run(state multistep.StateBag) multistep.StepAction { func (s *StepUploadBundle) Run(state multistep.StateBag) multistep.StepAction {
comm := state.Get("communicator").(packer.Communicator) comm := state.Get("communicator").(packer.Communicator)
@ -37,7 +40,7 @@ func (s *StepUploadBundle) Run(state multistep.StateBag) multistep.StepAction {
BucketName: config.S3Bucket, BucketName: config.S3Bucket,
BundleDirectory: config.BundleDestination, BundleDirectory: config.BundleDestination,
ManifestPath: manifestPath, ManifestPath: manifestPath,
S3Endpoint: region.S3Endpoint, Region: region.Name,
SecretKey: config.SecretKey, SecretKey: config.SecretKey,
}) })
if err != nil { if err != nil {
@ -49,6 +52,11 @@ func (s *StepUploadBundle) Run(state multistep.StateBag) multistep.StepAction {
ui.Say("Uploading the bundle...") ui.Say("Uploading the bundle...")
cmd := &packer.RemoteCmd{Command: config.BundleUploadCommand} cmd := &packer.RemoteCmd{Command: config.BundleUploadCommand}
if s.Debug {
ui.Say(fmt.Sprintf("Running: %s", config.BundleUploadCommand))
}
if err := cmd.StartWithUi(comm, ui); err != nil { if err := cmd.StartWithUi(comm, ui); err != nil {
state.Put("error", fmt.Errorf("Error uploading volume: %s", err)) state.Put("error", fmt.Errorf("Error uploading volume: %s", err))
ui.Error(state.Get("error").(error).Error()) ui.Error(state.Get("error").(error).Error())
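The template data for the upload command now carries the region name instead of the raw S3 endpoint, matching the switch to --region {{.Region}} in the builder's default command. As a rough illustration (not Packer's own template engine, and the sample values are placeholders), here is the tail of that command pushed through text/template:

package main

import (
    "os"
    "text/template"
)

// Only the fields exercised by this fragment; the real uploadCmdData has more.
type uploadCmdData struct {
    BundleDirectory string
    Region          string
    SecretKey       string
}

func main() {
    const tail = "-s {{.SecretKey}} -d {{.BundleDirectory}} --batch --region {{.Region}} --retry\n"
    t := template.Must(template.New("upload").Parse(tail))
    err := t.Execute(os.Stdout, uploadCmdData{
        BundleDirectory: "/tmp/bundle",
        Region:          "us-east-1",
        SecretKey:       "SECRET",
    })
    if err != nil {
        panic(err)
    }
    // Prints: -s SECRET -d /tmp/bundle --batch --region us-east-1 --retry
}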

View File

@ -45,5 +45,5 @@ func (s *StepUploadX509Cert) uploadSingle(comm packer.Communicator, dst, src str
} }
defer f.Close() defer f.Close()
return comm.Upload(dst, f) return comm.Upload(dst, f, nil)
} }

View File

@ -4,293 +4,65 @@
package digitalocean package digitalocean
import (
"encoding/json"
"errors"
"fmt"
"github.com/mitchellh/mapstructure"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"time"
)
const DIGITALOCEAN_API_URL = "https://api.digitalocean.com"
type Image struct {
Id uint
Name string
Distribution string
}
type ImagesResp struct {
Images []Image
}
type Region struct { type Region struct {
Id uint Id uint `json:"id,omitempty"` //only in v1 api
Name string Slug string `json:"slug"` //present in both APIs
Name string `json:"name"` //present in both APIs
Sizes []string `json:"sizes,omitempty"` //only in v2 api
Available bool `json:"available,omitempty"` //only in v2 api
Features []string `json:"features,omitempty"` //only in v2 api
} }
type RegionsResp struct { type RegionsResp struct {
Regions []Region Regions []Region
} }
type DigitalOceanClient struct { type Size struct {
// The http client for communicating Id uint `json:"id,omitempty"` //only in v1 api
client *http.Client Name string `json:"name,omitempty"` //only in v1 api
Slug string `json:"slug"` //presen in both api
// The base URL of the API Memory uint `json:"memory,omitempty"` //only in v2 api
BaseURL string VCPUS uint `json:"vcpus,omitempty"` //only in v2 api
Disk uint `json:"disk,omitempty"` //only in v2 api
// Credentials Transfer float64 `json:"transfer,omitempty"` //only in v2 api
ClientID string PriceMonthly float64 `json:"price_monthly,omitempty"` //only in v2 api
APIKey string PriceHourly float64 `json:"price_hourly,omitempty"` //only in v2 api
Regions []string `json:"regions,omitempty"` //only in v2 api
} }
// Creates a new client for communicating with DO type SizesResp struct {
func (d DigitalOceanClient) New(client string, key string) *DigitalOceanClient { Sizes []Size
c := &DigitalOceanClient{
client: &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
},
BaseURL: DIGITALOCEAN_API_URL,
ClientID: client,
APIKey: key,
}
return c
} }
// Creates an SSH Key and returns its ID type Image struct {
func (d DigitalOceanClient) CreateKey(name string, pub string) (uint, error) { Id uint `json:"id"` //present in both APIs
params := url.Values{} Name string `json:"name"` //present in both APIs
params.Set("name", name) Slug string `json:"slug"` //present in both APIs
params.Set("ssh_pub_key", pub) Distribution string `json:"distribution"` //present in both APIs
Public bool `json:"public,omitempty"` //only in v2 api
body, err := NewRequest(d, "ssh_keys/new", params) Regions []string `json:"regions,omitempty"` //only in v2 api
if err != nil { ActionIds []string `json:"action_ids,omitempty"` //only in v2 api
return 0, err CreatedAt string `json:"created_at,omitempty"` //only in v2 api
}
// Read the SSH key's ID we just created
key := body["ssh_key"].(map[string]interface{})
keyId := key["id"].(float64)
return uint(keyId), nil
} }
// Destroys an SSH key type ImagesResp struct {
func (d DigitalOceanClient) DestroyKey(id uint) error { Images []Image
path := fmt.Sprintf("ssh_keys/%v/destroy", id)
_, err := NewRequest(d, path, url.Values{})
return err
} }
// Creates a droplet and returns its ID type DigitalOceanClient interface {
func (d DigitalOceanClient) CreateDroplet(name string, size uint, image uint, region uint, keyId uint, privateNetworking bool) (uint, error) { CreateKey(string, string) (uint, error)
params := url.Values{} DestroyKey(uint) error
params.Set("name", name) CreateDroplet(string, string, string, string, uint, bool) (uint, error)
params.Set("size_id", fmt.Sprintf("%v", size)) DestroyDroplet(uint) error
params.Set("image_id", fmt.Sprintf("%v", image)) PowerOffDroplet(uint) error
params.Set("region_id", fmt.Sprintf("%v", region)) ShutdownDroplet(uint) error
params.Set("ssh_key_ids", fmt.Sprintf("%v", keyId)) CreateSnapshot(uint, string) error
params.Set("private_networking", fmt.Sprintf("%v", privateNetworking)) Images() ([]Image, error)
DestroyImage(uint) error
body, err := NewRequest(d, "droplets/new", params) DropletStatus(uint) (string, string, error)
if err != nil { Image(string) (Image, error)
return 0, err Regions() ([]Region, error)
} Region(string) (Region, error)
Sizes() ([]Size, error)
// Read the Droplets ID Size(string) (Size, error)
droplet := body["droplet"].(map[string]interface{})
dropletId := droplet["id"].(float64)
return uint(dropletId), err
}
// Destroys a droplet
func (d DigitalOceanClient) DestroyDroplet(id uint) error {
path := fmt.Sprintf("droplets/%v/destroy", id)
_, err := NewRequest(d, path, url.Values{})
return err
}
// Powers off a droplet
func (d DigitalOceanClient) PowerOffDroplet(id uint) error {
path := fmt.Sprintf("droplets/%v/power_off", id)
_, err := NewRequest(d, path, url.Values{})
return err
}
// Shuts down a droplet. This is a "soft" shutdown.
func (d DigitalOceanClient) ShutdownDroplet(id uint) error {
path := fmt.Sprintf("droplets/%v/shutdown", id)
_, err := NewRequest(d, path, url.Values{})
return err
}
// Creates a snapshot of a droplet by its ID
func (d DigitalOceanClient) CreateSnapshot(id uint, name string) error {
path := fmt.Sprintf("droplets/%v/snapshot", id)
params := url.Values{}
params.Set("name", name)
_, err := NewRequest(d, path, params)
return err
}
// Returns all available images.
func (d DigitalOceanClient) Images() ([]Image, error) {
resp, err := NewRequest(d, "images", url.Values{})
if err != nil {
return nil, err
}
var result ImagesResp
if err := mapstructure.Decode(resp, &result); err != nil {
return nil, err
}
return result.Images, nil
}
// Destroys an image by its ID.
func (d DigitalOceanClient) DestroyImage(id uint) error {
path := fmt.Sprintf("images/%d/destroy", id)
_, err := NewRequest(d, path, url.Values{})
return err
}
// Returns DO's string representation of status "off" "new" "active" etc.
func (d DigitalOceanClient) DropletStatus(id uint) (string, string, error) {
path := fmt.Sprintf("droplets/%v", id)
body, err := NewRequest(d, path, url.Values{})
if err != nil {
return "", "", err
}
var ip string
// Read the droplet's "status"
droplet := body["droplet"].(map[string]interface{})
status := droplet["status"].(string)
if droplet["ip_address"] != nil {
ip = droplet["ip_address"].(string)
}
return ip, status, err
}
// Sends an api request and returns a generic map[string]interface of
// the response.
func NewRequest(d DigitalOceanClient, path string, params url.Values) (map[string]interface{}, error) {
client := d.client
// Add the authentication parameters
params.Set("client_id", d.ClientID)
params.Set("api_key", d.APIKey)
url := fmt.Sprintf("%s/%s?%s", DIGITALOCEAN_API_URL, path, params.Encode())
// Do some basic scrubbing so sensitive information doesn't appear in logs
scrubbedUrl := strings.Replace(url, d.ClientID, "CLIENT_ID", -1)
scrubbedUrl = strings.Replace(scrubbedUrl, d.APIKey, "API_KEY", -1)
log.Printf("sending new request to digitalocean: %s", scrubbedUrl)
var lastErr error
for attempts := 1; attempts < 10; attempts++ {
resp, err := client.Get(url)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return nil, err
}
log.Printf("response from digitalocean: %s", body)
var decodedResponse map[string]interface{}
err = json.Unmarshal(body, &decodedResponse)
if err != nil {
err = errors.New(fmt.Sprintf("Failed to decode JSON response (HTTP %v) from DigitalOcean: %s",
resp.StatusCode, body))
return decodedResponse, err
}
// Check for errors sent by digitalocean
status := decodedResponse["status"].(string)
if status == "OK" {
return decodedResponse, nil
}
if status == "ERROR" {
statusRaw, ok := decodedResponse["error_message"]
if ok {
status = statusRaw.(string)
} else {
status = fmt.Sprintf(
"Unknown error. Full response body: %s", body)
}
}
lastErr = errors.New(fmt.Sprintf("Received error from DigitalOcean (%d): %s",
resp.StatusCode, status))
log.Println(lastErr)
if strings.Contains(status, "a pending event") {
// Retry, DigitalOcean sends these dumb "pending event"
// errors all the time.
time.Sleep(5 * time.Second)
continue
}
// Some other kind of error. Just return.
return decodedResponse, lastErr
}
return nil, lastErr
}
// Returns all available regions.
func (d DigitalOceanClient) Regions() ([]Region, error) {
resp, err := NewRequest(d, "regions", url.Values{})
if err != nil {
return nil, err
}
var result RegionsResp
if err := mapstructure.Decode(resp, &result); err != nil {
return nil, err
}
return result.Regions, nil
}
func (d DigitalOceanClient) RegionName(region_id uint) (string, error) {
regions, err := d.Regions()
if err != nil {
return "", err
}
for _, region := range regions {
if region.Id == region_id {
return region.Name, nil
}
}
err = errors.New(fmt.Sprintf("Unknown region id %v", region_id))
return "", err
} }
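The old concrete client is replaced here by shared request/response types plus a DigitalOceanClient interface, so the v1 and v2 implementations added in the next two files are interchangeable. A sketch of a hypothetical helper (not part of the diff) that could live in this package, showing how an implementation is picked from the configured credentials; the actual selection happens in builder.go further down:

package digitalocean

// newClient is a hypothetical convenience wrapper around the constructors
// introduced below: v1 auth uses client_id and api_key query parameters,
// v2 auth uses a single bearer token.
func newClient(clientID, apiKey, apiToken, apiURL string) DigitalOceanClient {
    if apiToken == "" {
        return DigitalOceanClientNewV1(clientID, apiKey, apiURL)
    }
    return DigitalOceanClientNewV2(apiToken, apiURL)
}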

View File

@ -0,0 +1,382 @@
// All of the methods used to communicate with the digital_ocean API
// are here. Their API is on a path to V2, so just plain JSON is used
// in place of a proper client library for now.
package digitalocean
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/mitchellh/mapstructure"
)
type DigitalOceanClientV1 struct {
// The http client for communicating
client *http.Client
// Credentials
ClientID string
APIKey string
// The base URL of the API
APIURL string
}
// Creates a new client for communicating with DO
func DigitalOceanClientNewV1(client string, key string, url string) *DigitalOceanClientV1 {
c := &DigitalOceanClientV1{
client: &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
},
APIURL: url,
ClientID: client,
APIKey: key,
}
return c
}
// Creates an SSH Key and returns its ID
func (d DigitalOceanClientV1) CreateKey(name string, pub string) (uint, error) {
params := url.Values{}
params.Set("name", name)
params.Set("ssh_pub_key", pub)
body, err := NewRequestV1(d, "ssh_keys/new", params)
if err != nil {
return 0, err
}
// Read the SSH key's ID we just created
key := body["ssh_key"].(map[string]interface{})
keyId := key["id"].(float64)
return uint(keyId), nil
}
// Destroys an SSH key
func (d DigitalOceanClientV1) DestroyKey(id uint) error {
path := fmt.Sprintf("ssh_keys/%v/destroy", id)
_, err := NewRequestV1(d, path, url.Values{})
return err
}
// Creates a droplet and returns its ID
func (d DigitalOceanClientV1) CreateDroplet(name string, size string, image string, region string, keyId uint, privateNetworking bool) (uint, error) {
params := url.Values{}
params.Set("name", name)
found_size, err := d.Size(size)
if err != nil {
return 0, fmt.Errorf("Invalid size or lookup failure: '%s': %s", size, err)
}
found_image, err := d.Image(image)
if err != nil {
return 0, fmt.Errorf("Invalid image or lookup failure: '%s': %s", image, err)
}
found_region, err := d.Region(region)
if err != nil {
return 0, fmt.Errorf("Invalid region or lookup failure: '%s': %s", region, err)
}
params.Set("size_slug", found_size.Slug)
params.Set("image_slug", found_image.Slug)
params.Set("region_slug", found_region.Slug)
params.Set("ssh_key_ids", fmt.Sprintf("%v", keyId))
params.Set("private_networking", fmt.Sprintf("%v", privateNetworking))
body, err := NewRequestV1(d, "droplets/new", params)
if err != nil {
return 0, err
}
// Read the Droplets ID
droplet := body["droplet"].(map[string]interface{})
dropletId := droplet["id"].(float64)
return uint(dropletId), err
}
// Destroys a droplet
func (d DigitalOceanClientV1) DestroyDroplet(id uint) error {
path := fmt.Sprintf("droplets/%v/destroy", id)
_, err := NewRequestV1(d, path, url.Values{})
return err
}
// Powers off a droplet
func (d DigitalOceanClientV1) PowerOffDroplet(id uint) error {
path := fmt.Sprintf("droplets/%v/power_off", id)
_, err := NewRequestV1(d, path, url.Values{})
return err
}
// Shuts down a droplet. This is a "soft" shutdown.
func (d DigitalOceanClientV1) ShutdownDroplet(id uint) error {
path := fmt.Sprintf("droplets/%v/shutdown", id)
_, err := NewRequestV1(d, path, url.Values{})
return err
}
// Creates a snapshot of a droplet by its ID
func (d DigitalOceanClientV1) CreateSnapshot(id uint, name string) error {
path := fmt.Sprintf("droplets/%v/snapshot", id)
params := url.Values{}
params.Set("name", name)
_, err := NewRequestV1(d, path, params)
return err
}
// Returns all available images.
func (d DigitalOceanClientV1) Images() ([]Image, error) {
resp, err := NewRequestV1(d, "images", url.Values{})
if err != nil {
return nil, err
}
var result ImagesResp
if err := mapstructure.Decode(resp, &result); err != nil {
return nil, err
}
return result.Images, nil
}
// Destroys an image by its ID.
func (d DigitalOceanClientV1) DestroyImage(id uint) error {
path := fmt.Sprintf("images/%d/destroy", id)
_, err := NewRequestV1(d, path, url.Values{})
return err
}
// Returns DO's string representation of status "off" "new" "active" etc.
func (d DigitalOceanClientV1) DropletStatus(id uint) (string, string, error) {
path := fmt.Sprintf("droplets/%v", id)
body, err := NewRequestV1(d, path, url.Values{})
if err != nil {
return "", "", err
}
var ip string
// Read the droplet's "status"
droplet := body["droplet"].(map[string]interface{})
status := droplet["status"].(string)
if droplet["ip_address"] != nil {
ip = droplet["ip_address"].(string)
}
return ip, status, err
}
// Sends an api request and returns a generic map[string]interface of
// the response.
func NewRequestV1(d DigitalOceanClientV1, path string, params url.Values) (map[string]interface{}, error) {
client := d.client
// Add the authentication parameters
params.Set("client_id", d.ClientID)
params.Set("api_key", d.APIKey)
url := fmt.Sprintf("%s/%s?%s", d.APIURL, path, params.Encode())
// Do some basic scrubbing so sensitive information doesn't appear in logs
scrubbedUrl := strings.Replace(url, d.ClientID, "CLIENT_ID", -1)
scrubbedUrl = strings.Replace(scrubbedUrl, d.APIKey, "API_KEY", -1)
log.Printf("sending new request to digitalocean: %s", scrubbedUrl)
var lastErr error
for attempts := 1; attempts < 10; attempts++ {
resp, err := client.Get(url)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return nil, err
}
log.Printf("response from digitalocean: %s", body)
var decodedResponse map[string]interface{}
err = json.Unmarshal(body, &decodedResponse)
if err != nil {
err = errors.New(fmt.Sprintf("Failed to decode JSON response (HTTP %v) from DigitalOcean: %s",
resp.StatusCode, body))
return decodedResponse, err
}
// Check for errors sent by digitalocean
status := decodedResponse["status"].(string)
if status == "OK" {
return decodedResponse, nil
}
if status == "ERROR" {
statusRaw, ok := decodedResponse["error_message"]
if ok {
status = statusRaw.(string)
} else {
status = fmt.Sprintf(
"Unknown error. Full response body: %s", body)
}
}
lastErr = errors.New(fmt.Sprintf("Received error from DigitalOcean (%d): %s",
resp.StatusCode, status))
log.Println(lastErr)
if strings.Contains(status, "a pending event") {
// Retry, DigitalOcean sends these dumb "pending event"
// errors all the time.
time.Sleep(5 * time.Second)
continue
}
// Some other kind of error. Just return.
return decodedResponse, lastErr
}
return nil, lastErr
}
func (d DigitalOceanClientV1) Image(slug_or_name_or_id string) (Image, error) {
images, err := d.Images()
if err != nil {
return Image{}, err
}
for _, image := range images {
if strings.EqualFold(image.Slug, slug_or_name_or_id) {
return image, nil
}
}
for _, image := range images {
if strings.EqualFold(image.Name, slug_or_name_or_id) {
return image, nil
}
}
for _, image := range images {
id, err := strconv.Atoi(slug_or_name_or_id)
if err == nil {
if image.Id == uint(id) {
return image, nil
}
}
}
err = errors.New(fmt.Sprintf("Unknown image '%v'", slug_or_name_or_id))
return Image{}, err
}
// Returns all available regions.
func (d DigitalOceanClientV1) Regions() ([]Region, error) {
resp, err := NewRequestV1(d, "regions", url.Values{})
if err != nil {
return nil, err
}
var result RegionsResp
if err := mapstructure.Decode(resp, &result); err != nil {
return nil, err
}
return result.Regions, nil
}
func (d DigitalOceanClientV1) Region(slug_or_name_or_id string) (Region, error) {
regions, err := d.Regions()
if err != nil {
return Region{}, err
}
for _, region := range regions {
if strings.EqualFold(region.Slug, slug_or_name_or_id) {
return region, nil
}
}
for _, region := range regions {
if strings.EqualFold(region.Name, slug_or_name_or_id) {
return region, nil
}
}
for _, region := range regions {
id, err := strconv.Atoi(slug_or_name_or_id)
if err == nil {
if region.Id == uint(id) {
return region, nil
}
}
}
err = errors.New(fmt.Sprintf("Unknown region '%v'", slug_or_name_or_id))
return Region{}, err
}
// Returns all available sizes.
func (d DigitalOceanClientV1) Sizes() ([]Size, error) {
resp, err := NewRequestV1(d, "sizes", url.Values{})
if err != nil {
return nil, err
}
var result SizesResp
if err := mapstructure.Decode(resp, &result); err != nil {
return nil, err
}
return result.Sizes, nil
}
func (d DigitalOceanClientV1) Size(slug_or_name_or_id string) (Size, error) {
sizes, err := d.Sizes()
if err != nil {
return Size{}, err
}
for _, size := range sizes {
if strings.EqualFold(size.Slug, slug_or_name_or_id) {
return size, nil
}
}
for _, size := range sizes {
if strings.EqualFold(size.Name, slug_or_name_or_id) {
return size, nil
}
}
for _, size := range sizes {
id, err := strconv.Atoi(slug_or_name_or_id)
if err == nil {
if size.Id == uint(id) {
return size, nil
}
}
}
err = errors.New(fmt.Sprintf("Unknown size '%v'", slug_or_name_or_id))
return Size{}, err
}
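Image, Region, and Size lookups above all resolve their argument the same way: try the slug first, then the display name, then a numeric v1-style ID. A compact standalone sketch of that resolution order (the struct here is a stand-in, not one of the package's types):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

type named struct {
    Id   uint
    Name string
    Slug string
}

// lookup applies the same precedence as the client methods above.
func lookup(items []named, key string) (named, error) {
    for _, it := range items {
        if strings.EqualFold(it.Slug, key) {
            return it, nil
        }
    }
    for _, it := range items {
        if strings.EqualFold(it.Name, key) {
            return it, nil
        }
    }
    if id, err := strconv.Atoi(key); err == nil {
        for _, it := range items {
            if it.Id == uint(id) {
                return it, nil
            }
        }
    }
    return named{}, fmt.Errorf("Unknown item '%v'", key)
}

func main() {
    regions := []named{{1, "New York 1", "nyc1"}, {3, "San Francisco 1", "sfo1"}}
    fmt.Println(lookup(regions, "sfo1"))       // matched by slug
    fmt.Println(lookup(regions, "New York 1")) // matched by name
    fmt.Println(lookup(regions, "3"))          // matched by legacy numeric ID
}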

View File

@ -0,0 +1,448 @@
// All of the methods used to communicate with the digital_ocean API
// are here. Their API is on a path to V2, so just plain JSON is used
// in place of a proper client library for now.
package digitalocean
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
)
type DigitalOceanClientV2 struct {
// The http client for communicating
client *http.Client
// Credentials
APIToken string
// The base URL of the API
APIURL string
}
// Creates a new client for communicating with DO
func DigitalOceanClientNewV2(token string, url string) *DigitalOceanClientV2 {
c := &DigitalOceanClientV2{
client: &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
},
APIURL: url,
APIToken: token,
}
return c
}
// Creates an SSH Key and returns its ID
func (d DigitalOceanClientV2) CreateKey(name string, pub string) (uint, error) {
type KeyReq struct {
Name string `json:"name"`
PublicKey string `json:"public_key"`
}
type KeyRes struct {
SSHKey struct {
Id uint
Name string
Fingerprint string
PublicKey string `json:"public_key"`
} `json:"ssh_key"`
}
req := &KeyReq{Name: name, PublicKey: pub}
res := KeyRes{}
err := NewRequestV2(d, "v2/account/keys", "POST", req, &res)
if err != nil {
return 0, err
}
return res.SSHKey.Id, err
}
// Destroys an SSH key
func (d DigitalOceanClientV2) DestroyKey(id uint) error {
path := fmt.Sprintf("v2/account/keys/%v", id)
return NewRequestV2(d, path, "DELETE", nil, nil)
}
// Creates a droplet and returns its ID
func (d DigitalOceanClientV2) CreateDroplet(name string, size string, image string, region string, keyId uint, privateNetworking bool) (uint, error) {
type DropletReq struct {
Name string `json:"name"`
Region string `json:"region"`
Size string `json:"size"`
Image string `json:"image"`
SSHKeys []string `json:"ssh_keys,omitempty"`
Backups bool `json:"backups,omitempty"`
IPv6 bool `json:"ipv6,omitempty"`
PrivateNetworking bool `json:"private_networking,omitempty"`
}
type DropletRes struct {
Droplet struct {
Id uint
Name string
Memory uint
VCPUS uint `json:"vcpus"`
Disk uint
Region Region
Image Image
Size Size
Locked bool
CreateAt string `json:"created_at"`
Status string
Networks struct {
V4 []struct {
IPAddr string `json:"ip_address"`
Netmask string
Gateway string
Type string
} `json:"v4,omitempty"`
V6 []struct {
IPAddr string `json:"ip_address"`
CIDR uint `json:"cidr"`
Gateway string
Type string
} `json:"v6,omitempty"`
}
Kernel struct {
Id uint
Name string
Version string
}
BackupIds []uint
SnapshotIds []uint
ActionIds []uint
Features []string `json:"features,omitempty"`
}
}
req := &DropletReq{Name: name}
res := DropletRes{}
found_size, err := d.Size(size)
if err != nil {
return 0, fmt.Errorf("Invalid size or lookup failure: '%s': %s", size, err)
}
found_image, err := d.Image(image)
if err != nil {
return 0, fmt.Errorf("Invalid image or lookup failure: '%s': %s", image, err)
}
found_region, err := d.Region(region)
if err != nil {
return 0, fmt.Errorf("Invalid region or lookup failure: '%s': %s", region, err)
}
req.Size = found_size.Slug
req.Image = found_image.Slug
req.Region = found_region.Slug
req.SSHKeys = []string{fmt.Sprintf("%v", keyId)}
req.PrivateNetworking = privateNetworking
err = NewRequestV2(d, "v2/droplets", "POST", req, &res)
if err != nil {
return 0, err
}
return res.Droplet.Id, err
}
// Destroys a droplet
func (d DigitalOceanClientV2) DestroyDroplet(id uint) error {
path := fmt.Sprintf("v2/droplets/%v", id)
return NewRequestV2(d, path, "DELETE", nil, nil)
}
// Powers off a droplet
func (d DigitalOceanClientV2) PowerOffDroplet(id uint) error {
type ActionReq struct {
Type string `json:"type"`
}
type ActionRes struct {
}
req := &ActionReq{Type: "power_off"}
path := fmt.Sprintf("v2/droplets/%v/actions", id)
return NewRequestV2(d, path, "POST", req, nil)
}
// Shuts down a droplet. This is a "soft" shutdown.
func (d DigitalOceanClientV2) ShutdownDroplet(id uint) error {
type ActionReq struct {
Type string `json:"type"`
}
type ActionRes struct {
}
req := &ActionReq{Type: "shutdown"}
path := fmt.Sprintf("v2/droplets/%v/actions", id)
return NewRequestV2(d, path, "POST", req, nil)
}
// Creates a snapshot of a droplet by its ID
func (d DigitalOceanClientV2) CreateSnapshot(id uint, name string) error {
type ActionReq struct {
Type string `json:"type"`
Name string `json:"name"`
}
type ActionRes struct {
}
req := &ActionReq{Type: "snapshot", Name: name}
path := fmt.Sprintf("v2/droplets/%v/actions", id)
return NewRequestV2(d, path, "POST", req, nil)
}
// Returns all available images.
func (d DigitalOceanClientV2) Images() ([]Image, error) {
res := ImagesResp{}
err := NewRequestV2(d, "v2/images?per_page=200", "GET", nil, &res)
if err != nil {
return nil, err
}
return res.Images, nil
}
// Destroys an image by its ID.
func (d DigitalOceanClientV2) DestroyImage(id uint) error {
path := fmt.Sprintf("v2/images/%d", id)
return NewRequestV2(d, path, "DELETE", nil, nil)
}
// Returns DO's string representation of status "off" "new" "active" etc.
func (d DigitalOceanClientV2) DropletStatus(id uint) (string, string, error) {
path := fmt.Sprintf("v2/droplets/%v", id)
type DropletRes struct {
Droplet struct {
Id uint
Name string
Memory uint
VCPUS uint `json:"vcpus"`
Disk uint
Region Region
Image Image
Size Size
Locked bool
CreateAt string `json:"created_at"`
Status string
Networks struct {
V4 []struct {
IPAddr string `json:"ip_address"`
Netmask string
Gateway string
Type string
} `json:"v4,omitempty"`
V6 []struct {
IPAddr string `json:"ip_address"`
CIDR uint `json:"cidr"`
Gateway string
Type string
} `json:"v6,omitempty"`
}
Kernel struct {
Id uint
Name string
Version string
}
BackupIds []uint
SnapshotIds []uint
ActionIds []uint
Features []string `json:"features,omitempty"`
}
}
res := DropletRes{}
err := NewRequestV2(d, path, "GET", nil, &res)
if err != nil {
return "", "", err
}
var ip string
if len(res.Droplet.Networks.V4) > 0 {
ip = res.Droplet.Networks.V4[0].IPAddr
}
return ip, res.Droplet.Status, err
}
// Sends an API request and decodes the JSON response into res,
// returning an error if the request or the decoding fails.
func NewRequestV2(d DigitalOceanClientV2, path string, method string, req interface{}, res interface{}) error {
var err error
var request *http.Request
client := d.client
buf := new(bytes.Buffer)
// Build the full request URL
url := fmt.Sprintf("%s/%s", d.APIURL, path)
if req != nil {
enc := json.NewEncoder(buf)
enc.Encode(req)
defer buf.Reset()
request, err = http.NewRequest(method, url, buf)
} else {
request, err = http.NewRequest(method, url, nil)
}
if err != nil {
return err
}
// Add the authentication parameters
request.Header.Add("Authorization", "Bearer "+d.APIToken)
log.Printf("sending new request to digitalocean: %s", url)
resp, err := client.Do(request)
if err != nil {
return err
}
if method == "DELETE" && resp.StatusCode == 204 {
if resp.Body != nil {
resp.Body.Close()
}
return nil
}
if resp.Body == nil {
return errors.New("Request returned empty body")
}
body, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return err
}
log.Printf("response from digitalocean: %s", body)
err = json.Unmarshal(body, &res)
if err != nil {
return errors.New(fmt.Sprintf("Failed to decode JSON response %s (HTTP %v) from DigitalOcean: %s", err.Error(),
resp.StatusCode, body))
}
return nil
}
func (d DigitalOceanClientV2) Image(slug_or_name_or_id string) (Image, error) {
images, err := d.Images()
if err != nil {
return Image{}, err
}
for _, image := range images {
if strings.EqualFold(image.Slug, slug_or_name_or_id) {
return image, nil
}
}
for _, image := range images {
if strings.EqualFold(image.Name, slug_or_name_or_id) {
return image, nil
}
}
for _, image := range images {
id, err := strconv.Atoi(slug_or_name_or_id)
if err == nil {
if image.Id == uint(id) {
return image, nil
}
}
}
err = errors.New(fmt.Sprintf("Unknown image '%v'", slug_or_name_or_id))
return Image{}, err
}
// Returns all available regions.
func (d DigitalOceanClientV2) Regions() ([]Region, error) {
res := RegionsResp{}
err := NewRequestV2(d, "v2/regions?per_page=200", "GET", nil, &res)
if err != nil {
return nil, err
}
return res.Regions, nil
}
func (d DigitalOceanClientV2) Region(slug_or_name_or_id string) (Region, error) {
regions, err := d.Regions()
if err != nil {
return Region{}, err
}
for _, region := range regions {
if strings.EqualFold(region.Slug, slug_or_name_or_id) {
return region, nil
}
}
for _, region := range regions {
if strings.EqualFold(region.Name, slug_or_name_or_id) {
return region, nil
}
}
for _, region := range regions {
id, err := strconv.Atoi(slug_or_name_or_id)
if err == nil {
if region.Id == uint(id) {
return region, nil
}
}
}
err = errors.New(fmt.Sprintf("Unknown region '%v'", slug_or_name_or_id))
return Region{}, err
}
// Returns all available sizes.
func (d DigitalOceanClientV2) Sizes() ([]Size, error) {
res := SizesResp{}
err := NewRequestV2(d, "v2/sizes?per_page=200", "GET", nil, &res)
if err != nil {
return nil, err
}
return res.Sizes, nil
}
func (d DigitalOceanClientV2) Size(slug_or_name_or_id string) (Size, error) {
sizes, err := d.Sizes()
if err != nil {
return Size{}, err
}
for _, size := range sizes {
if strings.EqualFold(size.Slug, slug_or_name_or_id) {
return size, nil
}
}
for _, size := range sizes {
if strings.EqualFold(size.Name, slug_or_name_or_id) {
return size, nil
}
}
for _, size := range sizes {
id, err := strconv.Atoi(slug_or_name_or_id)
if err == nil {
if size.Id == uint(id) {
return size, nil
}
}
}
err = errors.New(fmt.Sprintf("Unknown size '%v'", slug_or_name_or_id))
return Size{}, err
}
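NewRequestV2 differs from the v1 helper in two ways: authentication moves from client_id/api_key query parameters to an Authorization bearer header, and responses are decoded straight into typed structs rather than a generic map. A bare-bones sketch of the request shape using only net/http; the endpoint and per_page value come from the calls above, and the token is read from the DIGITALOCEAN_API_TOKEN environment variable the builder also honors:

package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "net/http"
    "os"
)

func main() {
    req, err := http.NewRequest("GET", "https://api.digitalocean.com/v2/regions?per_page=200", nil)
    if err != nil {
        log.Fatal(err)
    }
    // v2 auth: a single bearer token replaces the v1 client_id/api_key pair.
    req.Header.Add("Authorization", "Bearer "+os.Getenv("DIGITALOCEAN_API_TOKEN"))

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("HTTP %d: %s\n", resp.StatusCode, body)
}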

View File

@ -15,11 +15,8 @@ type Artifact struct {
// The name of the region // The name of the region
regionName string regionName string
// The ID of the region
regionId uint
// The client for making API calls // The client for making API calls
client *DigitalOceanClient client DigitalOceanClient
} }
func (*Artifact) BuilderId() string { func (*Artifact) BuilderId() string {
@ -40,6 +37,10 @@ func (a *Artifact) String() string {
return fmt.Sprintf("A snapshot was created: '%v' in region '%v'", a.snapshotName, a.regionName) return fmt.Sprintf("A snapshot was created: '%v' in region '%v'", a.snapshotName, a.regionName)
} }
func (a *Artifact) State(name string) interface{} {
return nil
}
func (a *Artifact) Destroy() error { func (a *Artifact) Destroy() error {
log.Printf("Destroying image: %d (%s)", a.snapshotId, a.snapshotName) log.Printf("Destroying image: %d (%s)", a.snapshotId, a.snapshotName)
return a.client.DestroyImage(a.snapshotId) return a.client.DestroyImage(a.snapshotId)

View File

@ -14,7 +14,7 @@ func TestArtifact_Impl(t *testing.T) {
} }
func TestArtifactString(t *testing.T) { func TestArtifactString(t *testing.T) {
a := &Artifact{"packer-foobar", 42, "San Francisco", 3, nil} a := &Artifact{"packer-foobar", 42, "San Francisco", nil}
expected := "A snapshot was created: 'packer-foobar' in region 'San Francisco'" expected := "A snapshot was created: 'packer-foobar' in region 'San Francisco'"
if a.String() != expected { if a.String() != expected {

View File

@ -6,15 +6,28 @@ package digitalocean
import ( import (
"errors" "errors"
"fmt" "fmt"
"log"
"os"
"time"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
"github.com/mitchellh/packer/common" "github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/common/uuid" "github.com/mitchellh/packer/common/uuid"
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
"log"
"os"
"time"
) )
// see https://api.digitalocean.com/images/?client_id=[client_id]&api_key=[api_key]
// name="Ubuntu 12.04.4 x64", id=3101045,
const DefaultImage = "ubuntu-12-04-x64"
// see https://api.digitalocean.com/regions/?client_id=[client_id]&api_key=[api_key]
// name="New York", id=1
const DefaultRegion = "nyc1"
// see https://api.digitalocean.com/sizes/?client_id=[client_id]&api_key=[api_key]
// name="512MB", id=66 (the smallest droplet size)
const DefaultSize = "512mb"
// The unique id for the builder // The unique id for the builder
const BuilderId = "pearkes.digitalocean" const BuilderId = "pearkes.digitalocean"
@ -26,10 +39,16 @@ type config struct {
ClientID string `mapstructure:"client_id"` ClientID string `mapstructure:"client_id"`
APIKey string `mapstructure:"api_key"` APIKey string `mapstructure:"api_key"`
APIURL string `mapstructure:"api_url"`
APIToken string `mapstructure:"api_token"`
RegionID uint `mapstructure:"region_id"` RegionID uint `mapstructure:"region_id"`
SizeID uint `mapstructure:"size_id"` SizeID uint `mapstructure:"size_id"`
ImageID uint `mapstructure:"image_id"` ImageID uint `mapstructure:"image_id"`
Region string `mapstructure:"region"`
Size string `mapstructure:"size"`
Image string `mapstructure:"image"`
PrivateNetworking bool `mapstructure:"private_networking"` PrivateNetworking bool `mapstructure:"private_networking"`
SnapshotName string `mapstructure:"snapshot_name"` SnapshotName string `mapstructure:"snapshot_name"`
DropletName string `mapstructure:"droplet_name"` DropletName string `mapstructure:"droplet_name"`
@ -78,19 +97,38 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
b.config.ClientID = os.Getenv("DIGITALOCEAN_CLIENT_ID") b.config.ClientID = os.Getenv("DIGITALOCEAN_CLIENT_ID")
} }
if b.config.RegionID == 0 { if b.config.APIURL == "" {
// Default to Region "New York" // Default to environment variable for api_url, if it exists
b.config.RegionID = 1 b.config.APIURL = os.Getenv("DIGITALOCEAN_API_URL")
} }
if b.config.SizeID == 0 { if b.config.APIToken == "" {
// Default to 512mb, the smallest droplet size // Default to environment variable for api_token, if it exists
b.config.SizeID = 66 b.config.APIToken = os.Getenv("DIGITALOCEAN_API_TOKEN")
} }
if b.config.ImageID == 0 { if b.config.Region == "" {
// Default to base image "Ubuntu 12.04.4 x64 (id: 3101045)" if b.config.RegionID != 0 {
b.config.ImageID = 3101045 b.config.Region = fmt.Sprintf("%v", b.config.RegionID)
} else {
b.config.Region = DefaultRegion
}
}
if b.config.Size == "" {
if b.config.SizeID != 0 {
b.config.Size = fmt.Sprintf("%v", b.config.SizeID)
} else {
b.config.Size = DefaultSize
}
}
if b.config.Image == "" {
if b.config.ImageID != 0 {
b.config.Image = fmt.Sprintf("%v", b.config.ImageID)
} else {
b.config.Image = DefaultImage
}
} }
if b.config.SnapshotName == "" { if b.config.SnapshotName == "" {
@ -126,8 +164,13 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
} }
templates := map[string]*string{ templates := map[string]*string{
"region": &b.config.Region,
"size": &b.config.Size,
"image": &b.config.Image,
"client_id": &b.config.ClientID, "client_id": &b.config.ClientID,
"api_key": &b.config.APIKey, "api_key": &b.config.APIKey,
"api_url": &b.config.APIURL,
"api_token": &b.config.APIToken,
"snapshot_name": &b.config.SnapshotName, "snapshot_name": &b.config.SnapshotName,
"droplet_name": &b.config.DropletName, "droplet_name": &b.config.DropletName,
"ssh_username": &b.config.SSHUsername, "ssh_username": &b.config.SSHUsername,
@ -144,15 +187,21 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
} }
} }
// Required configurations that will display errors if not set if b.config.APIToken == "" {
if b.config.ClientID == "" { // Required configurations that will display errors if not set
errs = packer.MultiErrorAppend( if b.config.ClientID == "" {
errs, errors.New("a client_id must be specified")) errs = packer.MultiErrorAppend(
errs, errors.New("a client_id for v1 auth or api_token for v2 auth must be specified"))
}
if b.config.APIKey == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("a api_key for v1 auth or api_token for v2 auth must be specified"))
}
} }
if b.config.APIKey == "" { if b.config.APIURL == "" {
errs = packer.MultiErrorAppend( b.config.APIURL = "https://api.digitalocean.com"
errs, errors.New("an api_key must be specified"))
} }
sshTimeout, err := time.ParseDuration(b.config.RawSSHTimeout) sshTimeout, err := time.ParseDuration(b.config.RawSSHTimeout)
@ -178,8 +227,13 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
} }
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
var client DigitalOceanClient
// Initialize the DO API client // Initialize the DO API client
client := DigitalOceanClient{}.New(b.config.ClientID, b.config.APIKey) if b.config.APIToken == "" {
client = DigitalOceanClientNewV1(b.config.ClientID, b.config.APIKey, b.config.APIURL)
} else {
client = DigitalOceanClientNewV2(b.config.APIToken, b.config.APIURL)
}
// Set up the state // Set up the state
state := new(multistep.BasicStateBag) state := new(multistep.BasicStateBag)
@ -226,9 +280,18 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
return nil, nil return nil, nil
} }
region_id := state.Get("region_id").(uint) sregion := state.Get("region")
var region string
if sregion != nil {
region = sregion.(string)
} else {
region = fmt.Sprintf("%v", state.Get("region_id").(uint))
}
found_region, err := client.Region(region)
regionName, err := client.RegionName(region_id)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -236,8 +299,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
artifact := &Artifact{ artifact := &Artifact{
snapshotName: state.Get("snapshot_name").(string), snapshotName: state.Get("snapshot_name").(string),
snapshotId: state.Get("snapshot_image_id").(uint), snapshotId: state.Get("snapshot_image_id").(uint),
regionId: region_id, regionName: found_region.Name,
regionName: regionName,
client: client, client: client,
} }
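The Prepare changes above mean the builder now accepts either DigitalOcean v2 credentials (api_token) or the legacy v1 pair (client_id/api_key), and either slug-style region/size/image values or the old numeric *_id fields, which are converted to strings. A minimal sketch of both shapes, in the same map form the builder tests use; the credential strings and slug values are illustrative placeholders, not defaults asserted by this change.

// Sketch only: assumes the digitalocean builder package is in scope.
v2cfg := map[string]interface{}{
	"api_token": "EXAMPLE-V2-TOKEN", // hypothetical placeholder
	"region":    "sfo1",
	"size":      "1024mb",
	"image":     "ubuntu-14-04-x64",
}

v1cfg := map[string]interface{}{
	"client_id": "EXAMPLE-CLIENT-ID", // hypothetical placeholder
	"api_key":   "EXAMPLE-API-KEY",   // hypothetical placeholder
	"region_id": 2,                   // Prepare turns this into the string "2"
	"size_id":   66,
	"image_id":  3101045,
}

var b Builder
if _, err := b.Prepare(v2cfg); err != nil {
	// credential and timeout validation errors land here
}
_ = v1cfg // a fresh Builder{} accepts this legacy form the same way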

View File

@ -142,7 +142,7 @@ func TestBuilderPrepare_InvalidKey(t *testing.T) {
} }
} }
func TestBuilderPrepare_RegionID(t *testing.T) { func TestBuilderPrepare_Region(t *testing.T) {
var b Builder var b Builder
config := testConfig() config := testConfig()
@ -155,12 +155,15 @@ func TestBuilderPrepare_RegionID(t *testing.T) {
t.Fatalf("should not have error: %s", err) t.Fatalf("should not have error: %s", err)
} }
if b.config.RegionID != 1 { if b.config.Region != DefaultRegion {
t.Errorf("invalid: %d", b.config.RegionID) t.Errorf("found %s, expected %s", b.config.Region, DefaultRegion)
} }
expected := "sfo1"
// Test set // Test set
config["region_id"] = 2 config["region_id"] = 0
config["region"] = expected
b = Builder{} b = Builder{}
warnings, err = b.Prepare(config) warnings, err = b.Prepare(config)
if len(warnings) > 0 { if len(warnings) > 0 {
@ -170,12 +173,12 @@ func TestBuilderPrepare_RegionID(t *testing.T) {
t.Fatalf("should not have error: %s", err) t.Fatalf("should not have error: %s", err)
} }
if b.config.RegionID != 2 { if b.config.Region != expected {
t.Errorf("invalid: %d", b.config.RegionID) t.Errorf("found %s, expected %s", b.config.Region, expected)
} }
} }
func TestBuilderPrepare_SizeID(t *testing.T) { func TestBuilderPrepare_Size(t *testing.T) {
var b Builder var b Builder
config := testConfig() config := testConfig()
@ -188,12 +191,15 @@ func TestBuilderPrepare_SizeID(t *testing.T) {
t.Fatalf("should not have error: %s", err) t.Fatalf("should not have error: %s", err)
} }
if b.config.SizeID != 66 { if b.config.Size != DefaultSize {
t.Errorf("invalid: %d", b.config.SizeID) t.Errorf("found %s, expected %s", b.config.Size, DefaultSize)
} }
expected := "1024mb"
// Test set // Test set
config["size_id"] = 67 config["size_id"] = 0
config["size"] = expected
b = Builder{} b = Builder{}
warnings, err = b.Prepare(config) warnings, err = b.Prepare(config)
if len(warnings) > 0 { if len(warnings) > 0 {
@ -203,12 +209,12 @@ func TestBuilderPrepare_SizeID(t *testing.T) {
t.Fatalf("should not have error: %s", err) t.Fatalf("should not have error: %s", err)
} }
if b.config.SizeID != 67 { if b.config.Size != expected {
t.Errorf("invalid: %d", b.config.SizeID) t.Errorf("found %s, expected %s", b.config.Size, expected)
} }
} }
func TestBuilderPrepare_ImageID(t *testing.T) { func TestBuilderPrepare_Image(t *testing.T) {
var b Builder var b Builder
config := testConfig() config := testConfig()
@ -221,12 +227,15 @@ func TestBuilderPrepare_ImageID(t *testing.T) {
t.Fatalf("should not have error: %s", err) t.Fatalf("should not have error: %s", err)
} }
if b.config.SizeID != 66 { if b.config.Image != DefaultImage {
t.Errorf("invalid: %d", b.config.SizeID) t.Errorf("found %s, expected %s", b.config.Image, DefaultImage)
} }
expected := "ubuntu-14-04-x64"
// Test set // Test set
config["size_id"] = 2 config["image_id"] = 0
config["image"] = expected
b = Builder{} b = Builder{}
warnings, err = b.Prepare(config) warnings, err = b.Prepare(config)
if len(warnings) > 0 { if len(warnings) > 0 {
@ -236,8 +245,8 @@ func TestBuilderPrepare_ImageID(t *testing.T) {
t.Fatalf("should not have error: %s", err) t.Fatalf("should not have error: %s", err)
} }
if b.config.SizeID != 2 { if b.config.Image != expected {
t.Errorf("invalid: %d", b.config.SizeID) t.Errorf("found %s, expected %s", b.config.Image, expected)
} }
} }

View File

@ -2,6 +2,7 @@ package digitalocean
import ( import (
"fmt" "fmt"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
) )
@ -11,7 +12,7 @@ type stepCreateDroplet struct {
} }
func (s *stepCreateDroplet) Run(state multistep.StateBag) multistep.StepAction { func (s *stepCreateDroplet) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*DigitalOceanClient) client := state.Get("client").(DigitalOceanClient)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
c := state.Get("config").(config) c := state.Get("config").(config)
sshKeyId := state.Get("ssh_key_id").(uint) sshKeyId := state.Get("ssh_key_id").(uint)
@ -19,7 +20,7 @@ func (s *stepCreateDroplet) Run(state multistep.StateBag) multistep.StepAction {
ui.Say("Creating droplet...") ui.Say("Creating droplet...")
// Create the droplet based on configuration // Create the droplet based on configuration
dropletId, err := client.CreateDroplet(c.DropletName, c.SizeID, c.ImageID, c.RegionID, sshKeyId, c.PrivateNetworking) dropletId, err := client.CreateDroplet(c.DropletName, c.Size, c.Image, c.Region, sshKeyId, c.PrivateNetworking)
if err != nil { if err != nil {
err := fmt.Errorf("Error creating droplet: %s", err) err := fmt.Errorf("Error creating droplet: %s", err)
@ -43,7 +44,7 @@ func (s *stepCreateDroplet) Cleanup(state multistep.StateBag) {
return return
} }
client := state.Get("client").(*DigitalOceanClient) client := state.Get("client").(DigitalOceanClient)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
c := state.Get("config").(config) c := state.Get("config").(config)
@ -53,7 +54,7 @@ func (s *stepCreateDroplet) Cleanup(state multistep.StateBag) {
err := client.DestroyDroplet(s.dropletId) err := client.DestroyDroplet(s.dropletId)
if err != nil { if err != nil {
curlstr := fmt.Sprintf("curl '%v/droplets/%v/destroy?client_id=%v&api_key=%v'", curlstr := fmt.Sprintf("curl '%v/droplets/%v/destroy?client_id=%v&api_key=%v'",
DIGITALOCEAN_API_URL, s.dropletId, c.ClientID, c.APIKey) c.APIURL, s.dropletId, c.ClientID, c.APIKey)
ui.Error(fmt.Sprintf( ui.Error(fmt.Sprintf(
"Error destroying droplet. Please destroy it manually: %v", curlstr)) "Error destroying droplet. Please destroy it manually: %v", curlstr))

View File

@ -1,16 +1,17 @@
package digitalocean package digitalocean
import ( import (
"code.google.com/p/gosshold/ssh"
"crypto/rand" "crypto/rand"
"crypto/rsa" "crypto/rsa"
"crypto/x509" "crypto/x509"
"encoding/pem" "encoding/pem"
"fmt" "fmt"
"log"
"code.google.com/p/gosshold/ssh"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
"github.com/mitchellh/packer/common/uuid" "github.com/mitchellh/packer/common/uuid"
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
"log"
) )
type stepCreateSSHKey struct { type stepCreateSSHKey struct {
@ -18,7 +19,7 @@ type stepCreateSSHKey struct {
} }
func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction { func (s *stepCreateSSHKey) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*DigitalOceanClient) client := state.Get("client").(DigitalOceanClient)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
ui.Say("Creating temporary ssh key for droplet...") ui.Say("Creating temporary ssh key for droplet...")
@ -70,15 +71,14 @@ func (s *stepCreateSSHKey) Cleanup(state multistep.StateBag) {
return return
} }
client := state.Get("client").(*DigitalOceanClient) client := state.Get("client").(DigitalOceanClient)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
c := state.Get("config").(config) c := state.Get("config").(config)
ui.Say("Deleting temporary ssh key...") ui.Say("Deleting temporary ssh key...")
err := client.DestroyKey(s.keyId) err := client.DestroyKey(s.keyId)
curlstr := fmt.Sprintf("curl '%v/ssh_keys/%v/destroy?client_id=%v&api_key=%v'", curlstr := fmt.Sprintf("curl -H 'Authorization: Bearer #TOKEN#' -X DELETE '%v/v2/account/keys/%v'", c.APIURL, s.keyId)
DIGITALOCEAN_API_URL, s.keyId, c.ClientID, c.APIKey)
if err != nil { if err != nil {
log.Printf("Error cleaning up ssh key: %v", err.Error()) log.Printf("Error cleaning up ssh key: %v", err.Error())

View File

@ -2,6 +2,7 @@ package digitalocean
import ( import (
"fmt" "fmt"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
) )
@ -9,7 +10,7 @@ import (
type stepDropletInfo struct{} type stepDropletInfo struct{}
func (s *stepDropletInfo) Run(state multistep.StateBag) multistep.StepAction { func (s *stepDropletInfo) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*DigitalOceanClient) client := state.Get("client").(DigitalOceanClient)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
c := state.Get("config").(config) c := state.Get("config").(config)
dropletId := state.Get("droplet_id").(uint) dropletId := state.Get("droplet_id").(uint)

View File

@ -2,15 +2,16 @@ package digitalocean
import ( import (
"fmt" "fmt"
"log"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
"log"
) )
type stepPowerOff struct{} type stepPowerOff struct{}
func (s *stepPowerOff) Run(state multistep.StateBag) multistep.StepAction { func (s *stepPowerOff) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*DigitalOceanClient) client := state.Get("client").(DigitalOceanClient)
c := state.Get("config").(config) c := state.Get("config").(config)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
dropletId := state.Get("droplet_id").(uint) dropletId := state.Get("droplet_id").(uint)

View File

@ -2,16 +2,17 @@ package digitalocean
import ( import (
"fmt" "fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
"log" "log"
"time" "time"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
) )
type stepShutdown struct{} type stepShutdown struct{}
func (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction { func (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*DigitalOceanClient) client := state.Get("client").(DigitalOceanClient)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
dropletId := state.Get("droplet_id").(uint) dropletId := state.Get("droplet_id").(uint)

View File

@ -3,15 +3,16 @@ package digitalocean
import ( import (
"errors" "errors"
"fmt" "fmt"
"log"
"github.com/mitchellh/multistep" "github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
"log"
) )
type stepSnapshot struct{} type stepSnapshot struct{}
func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction { func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
client := state.Get("client").(*DigitalOceanClient) client := state.Get("client").(DigitalOceanClient)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
c := state.Get("config").(config) c := state.Get("config").(config)
dropletId := state.Get("droplet_id").(uint) dropletId := state.Get("droplet_id").(uint)
@ -62,7 +63,7 @@ func (s *stepSnapshot) Run(state multistep.StateBag) multistep.StepAction {
state.Put("snapshot_image_id", imageId) state.Put("snapshot_image_id", imageId)
state.Put("snapshot_name", c.SnapshotName) state.Put("snapshot_name", c.SnapshotName)
state.Put("region_id", c.RegionID) state.Put("region", c.Region)
return multistep.ActionContinue return multistep.ActionContinue
} }

View File

@ -8,7 +8,7 @@ import (
// waitForState simply blocks until the droplet is in // waitForState simply blocks until the droplet is in
// a state we expect, while eventually timing out. // a state we expect, while eventually timing out.
func waitForDropletState(desiredState string, dropletId uint, client *DigitalOceanClient, timeout time.Duration) error { func waitForDropletState(desiredState string, dropletId uint, client DigitalOceanClient, timeout time.Duration) error {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)

View File

@ -27,6 +27,10 @@ func (a *ExportArtifact) String() string {
return fmt.Sprintf("Exported Docker file: %s", a.path) return fmt.Sprintf("Exported Docker file: %s", a.path)
} }
func (a *ExportArtifact) State(name string) interface{} {
return nil
}
func (a *ExportArtifact) Destroy() error { func (a *ExportArtifact) Destroy() error {
return os.Remove(a.path) return os.Remove(a.path)
} }

View File

@ -28,6 +28,10 @@ func (a *ImportArtifact) String() string {
return fmt.Sprintf("Imported Docker image: %s", a.Id()) return fmt.Sprintf("Imported Docker image: %s", a.Id())
} }
func (*ImportArtifact) State(name string) interface{} {
return nil
}
func (a *ImportArtifact) Destroy() error { func (a *ImportArtifact) Destroy() error {
return a.Driver.DeleteImage(a.Id()) return a.Driver.DeleteImage(a.Id())
} }

View File

@ -8,6 +8,7 @@ import (
) )
const BuilderId = "packer.docker" const BuilderId = "packer.docker"
const BuilderIdImport = "packer.post-processor.docker-import"
type Builder struct { type Builder struct {
config *Config config *Config
@ -35,7 +36,12 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
&StepPull{}, &StepPull{},
&StepRun{}, &StepRun{},
&StepProvision{}, &StepProvision{},
&StepExport{}, }
if b.config.Commit {
steps = append(steps, new(StepCommit))
} else {
steps = append(steps, new(StepExport))
} }
// Setup the state bag and initial state for the steps // Setup the state bag and initial state for the steps
@ -64,8 +70,17 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
return nil, rawErr.(error) return nil, rawErr.(error)
} }
var artifact packer.Artifact
// No errors, must've worked // No errors, must've worked
artifact := &ExportArtifact{path: b.config.ExportPath} if b.config.Commit {
artifact = &ImportArtifact{
IdValue: state.Get("image_id").(string),
BuilderIdValue: BuilderIdImport,
Driver: driver,
}
} else {
artifact = &ExportArtifact{path: b.config.ExportPath}
}
return artifact, nil return artifact, nil
} }

View File

@ -3,8 +3,6 @@ package docker
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"github.com/ActiveState/tail"
"github.com/mitchellh/packer/packer"
"io" "io"
"io/ioutil" "io/ioutil"
"log" "log"
@ -15,6 +13,9 @@ import (
"sync" "sync"
"syscall" "syscall"
"time" "time"
"github.com/ActiveState/tail"
"github.com/mitchellh/packer/packer"
) )
type Communicator struct { type Communicator struct {
@ -56,7 +57,7 @@ func (c *Communicator) Start(remote *packer.RemoteCmd) error {
return nil return nil
} }
func (c *Communicator) Upload(dst string, src io.Reader) error { func (c *Communicator) Upload(dst string, src io.Reader, fi *os.FileInfo) error {
// Create a temporary file to store the upload // Create a temporary file to store the upload
tempfile, err := ioutil.TempFile(c.HostDir, "upload") tempfile, err := ioutil.TempFile(c.HostDir, "upload")
if err != nil { if err != nil {
@ -231,20 +232,42 @@ func (c *Communicator) run(cmd *exec.Cmd, remote *packer.RemoteCmd, stdin_w io.W
stdin_w.Write([]byte(remoteCmd + "\n")) stdin_w.Write([]byte(remoteCmd + "\n"))
}() }()
// Start a goroutine to read all the lines out of the logs // Start a goroutine to read all the lines out of the logs. These channels
// allow us to stop the goroutine and wait for it to be stopped.
stopTailCh := make(chan struct{})
doneCh := make(chan struct{})
go func() { go func() {
for line := range tail.Lines { defer close(doneCh)
if remote.Stdout != nil {
remote.Stdout.Write([]byte(line.Text + "\n")) for {
} else { select {
log.Printf("Command stdout: %#v", line.Text) case <-tail.Dead():
return
case line := <-tail.Lines:
if remote.Stdout != nil {
remote.Stdout.Write([]byte(line.Text + "\n"))
} else {
log.Printf("Command stdout: %#v", line.Text)
}
case <-time.After(2 * time.Second):
// If we're done, then return. Otherwise, keep grabbing
// data. This gives us a chance to flush all the lines
// out of the tailed file.
select {
case <-stopTailCh:
return
default:
}
} }
} }
}() }()
var exitRaw []byte
var exitStatus int
var exitStatusRaw int64
err = cmd.Wait() err = cmd.Wait()
if exitErr, ok := err.(*exec.ExitError); ok { if exitErr, ok := err.(*exec.ExitError); ok {
exitStatus := 1 exitStatus = 1
// There is no process-independent way to get the REAL // There is no process-independent way to get the REAL
// exit status so we just try to go deeper. // exit status so we just try to go deeper.
@ -254,8 +277,7 @@ func (c *Communicator) run(cmd *exec.Cmd, remote *packer.RemoteCmd, stdin_w io.W
// Say that we ended, since if Docker itself failed, then // Say that we ended, since if Docker itself failed, then
// the command must've not run, or so we assume // the command must've not run, or so we assume
remote.SetExited(exitStatus) goto REMOTE_EXIT
return
} }
// Wait for the exit code to appear in our file... // Wait for the exit code to appear in our file...
@ -270,21 +292,27 @@ func (c *Communicator) run(cmd *exec.Cmd, remote *packer.RemoteCmd, stdin_w io.W
} }
// Read the exit code // Read the exit code
exitRaw, err := ioutil.ReadFile(exitCodePath) exitRaw, err = ioutil.ReadFile(exitCodePath)
if err != nil { if err != nil {
log.Printf("Error executing: %s", err) log.Printf("Error executing: %s", err)
remote.SetExited(254) exitStatus = 254
return goto REMOTE_EXIT
} }
exitStatus, err := strconv.ParseInt(string(bytes.TrimSpace(exitRaw)), 10, 0) exitStatusRaw, err = strconv.ParseInt(string(bytes.TrimSpace(exitRaw)), 10, 0)
if err != nil { if err != nil {
log.Printf("Error executing: %s", err) log.Printf("Error executing: %s", err)
remote.SetExited(254) exitStatus = 254
return goto REMOTE_EXIT
} }
exitStatus = int(exitStatusRaw)
log.Printf("Executed command exit status: %d", exitStatus) log.Printf("Executed command exit status: %d", exitStatus)
// Finally, we're done REMOTE_EXIT:
remote.SetExited(int(exitStatus)) // Wait for the tail to finish
close(stopTailCh)
<-doneCh
// Set the exit status which triggers waiters
remote.SetExited(exitStatus)
} }
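The select loop added above is a stop-and-drain pattern: the reader keeps pulling tailed lines and only exits once it has been idle for two seconds and stopTailCh has been closed, so buffered output is flushed before SetExited fires. A stripped-down, self-contained sketch of the same pattern, with a plain string channel standing in for tail.Lines (illustrative only, not code from this change):

package main

import (
	"fmt"
	"time"
)

// drain keeps reading until it has been idle for ~2s *and* stop is closed,
// mirroring the flush behaviour of the communicator's tail goroutine above.
func drain(lines <-chan string, stop <-chan struct{}) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			select {
			case line := <-lines:
				fmt.Println("line:", line)
			case <-time.After(2 * time.Second):
				select {
				case <-stop: // only give up once the caller says so
					return
				default:
				}
			}
		}
	}()
	return done
}

func main() {
	lines := make(chan string, 4)
	stop := make(chan struct{})
	done := drain(lines, stop)

	lines <- "hello"
	lines <- "world"
	close(stop) // plays the role of close(stopTailCh)
	<-done      // plays the role of <-doneCh
}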

View File

@ -9,10 +9,18 @@ import (
type Config struct { type Config struct {
common.PackerConfig `mapstructure:",squash"` common.PackerConfig `mapstructure:",squash"`
Commit bool
ExportPath string `mapstructure:"export_path"` ExportPath string `mapstructure:"export_path"`
Image string Image string
Pull bool Pull bool
RunCommand []string `mapstructure:"run_command"` RunCommand []string `mapstructure:"run_command"`
Volumes map[string]string
Login bool
LoginEmail string `mapstructure:"login_email"`
LoginUsername string `mapstructure:"login_username"`
LoginPassword string `mapstructure:"login_password"`
LoginServer string `mapstructure:"login_server"`
tpl *packer.ConfigTemplate tpl *packer.ConfigTemplate
} }
@ -34,9 +42,7 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
// Defaults // Defaults
if len(c.RunCommand) == 0 { if len(c.RunCommand) == 0 {
c.RunCommand = []string{ c.RunCommand = []string{
"run",
"-d", "-i", "-t", "-d", "-i", "-t",
"-v", "{{.Volumes}}",
"{{.Image}}", "{{.Image}}",
"/bin/bash", "/bin/bash",
} }
@ -58,8 +64,12 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
errs := common.CheckUnusedConfig(md) errs := common.CheckUnusedConfig(md)
templates := map[string]*string{ templates := map[string]*string{
"export_path": &c.ExportPath, "export_path": &c.ExportPath,
"image": &c.Image, "image": &c.Image,
"login_email": &c.LoginEmail,
"login_username": &c.LoginUsername,
"login_password": &c.LoginPassword,
"login_server": &c.LoginServer,
} }
for n, ptr := range templates { for n, ptr := range templates {
@ -71,9 +81,15 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
} }
} }
if c.ExportPath == "" { for k, v := range c.Volumes {
errs = packer.MultiErrorAppend(errs, var err error
fmt.Errorf("export_path must be specified")) v, err = c.tpl.Process(v, nil)
if err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error processing volumes[%s]: %s", k, err))
}
c.Volumes[k] = v
} }
if c.Image == "" { if c.Image == "" {
@ -81,6 +97,11 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
fmt.Errorf("image must be specified")) fmt.Errorf("image must be specified"))
} }
if c.ExportPath != "" && c.Commit {
errs = packer.MultiErrorAppend(errs,
fmt.Errorf("both commit and export_path cannot be set"))
}
if errs != nil && len(errs.Errors) > 0 { if errs != nil && len(errs.Errors) > 0 {
return nil, nil, errs return nil, nil, errs
} }
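Taken together, the config changes above add a commit mode that is mutually exclusive with export_path, template-processed volume mappings, and optional registry login fields. A sketch of a configuration exercising them, in the same map form the config tests use; the image name, paths, registry URL, and credentials are hypothetical placeholders.

// Sketch only: assumes the docker builder package is in scope.
raw := map[string]interface{}{
	"image":  "ubuntu",
	"commit": true, // with commit set, export_path must be left empty
	"volumes": map[string]string{
		"/var/data": "/data", // host path -> container path
	},
	"login":          true,
	"login_server":   "https://registry.example.com/",
	"login_username": "someuser",
	"login_password": "somepassword",
}

if _, _, errs := NewConfig(raw); errs != nil {
	// setting both commit and export_path, or omitting image, surfaces here
}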

View File

@ -47,7 +47,7 @@ func TestConfigPrepare_exportPath(t *testing.T) {
// No export path // No export path
delete(raw, "export_path") delete(raw, "export_path")
_, warns, errs := NewConfig(raw) _, warns, errs := NewConfig(raw)
testConfigErr(t, warns, errs) testConfigOk(t, warns, errs)
// Good export path // Good export path
raw["export_path"] = "good" raw["export_path"] = "good"
@ -55,6 +55,20 @@ func TestConfigPrepare_exportPath(t *testing.T) {
testConfigOk(t, warns, errs) testConfigOk(t, warns, errs)
} }
func TestConfigPrepare_exportPathAndCommit(t *testing.T) {
raw := testConfig()
raw["commit"] = true
// Both export_path and commit set
_, warns, errs := NewConfig(raw)
testConfigErr(t, warns, errs)
// No commit
raw["commit"] = false
_, warns, errs = NewConfig(raw)
testConfigOk(t, warns, errs)
}
func TestConfigPrepare_image(t *testing.T) { func TestConfigPrepare_image(t *testing.T) {
raw := testConfig() raw := testConfig()

View File

@ -8,6 +8,9 @@ import (
// Docker. The Driver interface also allows the steps to be tested since // Docker. The Driver interface also allows the steps to be tested since
// a mock driver can be shimmed in. // a mock driver can be shimmed in.
type Driver interface { type Driver interface {
// Commit the container and return the ID of the resulting image
Commit(id string) (string, error)
// Delete an image that is imported into Docker // Delete an image that is imported into Docker
DeleteImage(id string) error DeleteImage(id string) error
@ -17,12 +20,22 @@ type Driver interface {
// Import imports a container from a tar file // Import imports a container from a tar file
Import(path, repo string) (string, error) Import(path, repo string) (string, error)
// Login. This will lock the driver from performing another Login
// until Logout is called. Therefore, any users MUST call Logout.
Login(repo, email, username, password string) error
// Logout. This can only be called if Login succeeded.
Logout(repo string) error
// Pull should pull down the given image. // Pull should pull down the given image.
Pull(image string) error Pull(image string) error
// Push pushes an image to a Docker index/registry. // Push pushes an image to a Docker index/registry.
Push(name string) error Push(name string) error
// Save an image with the given ID to the given writer.
SaveImage(id string, dst io.Writer) error
// StartContainer starts a container and returns the ID for that container, // StartContainer starts a container and returns the ID for that container,
// along with a potential error. // along with a potential error.
StartContainer(*ContainerConfig) (string, error) StartContainer(*ContainerConfig) (string, error)
@ -30,6 +43,9 @@ type Driver interface {
// StopContainer forcibly stops a container. // StopContainer forcibly stops a container.
StopContainer(id string) error StopContainer(id string) error
// TagImage tags the image with the given ID into the given repository
TagImage(id string, repo string) error
// Verify verifies that the driver can run // Verify verifies that the driver can run
Verify() error Verify() error
} }
@ -43,6 +59,5 @@ type ContainerConfig struct {
// This is the template that is used for the RunCommand in the ContainerConfig. // This is the template that is used for the RunCommand in the ContainerConfig.
type startContainerTemplate struct { type startContainerTemplate struct {
Image string Image string
Volumes string
} }
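The Login/Logout contract above is stateful: Login takes a lock inside the driver and Logout releases it, so every successful Login must be paired with a Logout. A small caller sketch of that pairing (the same shape StepPull uses later in this diff), assuming the standard log package is imported and d is any Driver implementation; the image name is a placeholder.

func pullWithLogin(d Driver, server, email, user, pass string) error {
	if err := d.Login(server, email, user, pass); err != nil {
		return err
	}
	// Logout must run even if the pull fails, or the driver stays locked.
	defer func() {
		if err := d.Logout(server); err != nil {
			log.Printf("docker logout failed: %s", err)
		}
	}()
	return d.Pull("ubuntu")
}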

View File

@ -3,17 +3,21 @@ package docker
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"github.com/mitchellh/packer/packer"
"io" "io"
"log" "log"
"os" "os"
"os/exec" "os/exec"
"strings" "strings"
"sync"
"github.com/mitchellh/packer/packer"
) )
type DockerDriver struct { type DockerDriver struct {
Ui packer.Ui Ui packer.Ui
Tpl *packer.ConfigTemplate Tpl *packer.ConfigTemplate
l sync.Mutex
} }
func (d *DockerDriver) DeleteImage(id string) error { func (d *DockerDriver) DeleteImage(id string) error {
@ -35,6 +39,27 @@ func (d *DockerDriver) DeleteImage(id string) error {
return nil return nil
} }
func (d *DockerDriver) Commit(id string) (string, error) {
var stdout bytes.Buffer
var stderr bytes.Buffer
cmd := exec.Command("docker", "commit", id)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
if err := cmd.Start(); err != nil {
return "", err
}
if err := cmd.Wait(); err != nil {
err = fmt.Errorf("Error committing container: %s\nStderr: %s",
err, stderr.String())
return "", err
}
return strings.TrimSpace(stdout.String()), nil
}
func (d *DockerDriver) Export(id string, dst io.Writer) error { func (d *DockerDriver) Export(id string, dst io.Writer) error {
var stderr bytes.Buffer var stderr bytes.Buffer
cmd := exec.Command("docker", "export", id) cmd := exec.Command("docker", "export", id)
@ -88,6 +113,44 @@ func (d *DockerDriver) Import(path string, repo string) (string, error) {
return strings.TrimSpace(stdout.String()), nil return strings.TrimSpace(stdout.String()), nil
} }
func (d *DockerDriver) Login(repo, email, user, pass string) error {
d.l.Lock()
args := []string{"login"}
if email != "" {
args = append(args, "-e", email)
}
if user != "" {
args = append(args, "-u", user)
}
if pass != "" {
args = append(args, "-p", pass)
}
if repo != "" {
args = append(args, repo)
}
cmd := exec.Command("docker", args...)
err := runAndStream(cmd, d.Ui)
if err != nil {
d.l.Unlock()
}
return err
}
func (d *DockerDriver) Logout(repo string) error {
args := []string{"logout"}
if repo != "" {
args = append(args, repo)
}
cmd := exec.Command("docker", args...)
err := runAndStream(cmd, d.Ui)
d.l.Unlock()
return err
}
func (d *DockerDriver) Pull(image string) error { func (d *DockerDriver) Pull(image string) error {
cmd := exec.Command("docker", "pull", image) cmd := exec.Command("docker", "pull", image)
return runAndStream(cmd, d.Ui) return runAndStream(cmd, d.Ui)
@ -98,27 +161,43 @@ func (d *DockerDriver) Push(name string) error {
return runAndStream(cmd, d.Ui) return runAndStream(cmd, d.Ui)
} }
func (d *DockerDriver) SaveImage(id string, dst io.Writer) error {
var stderr bytes.Buffer
cmd := exec.Command("docker", "save", id)
cmd.Stdout = dst
cmd.Stderr = &stderr
log.Printf("Exporting image: %s", id)
if err := cmd.Start(); err != nil {
return err
}
if err := cmd.Wait(); err != nil {
err = fmt.Errorf("Error exporting: %s\nStderr: %s",
err, stderr.String())
return err
}
return nil
}
func (d *DockerDriver) StartContainer(config *ContainerConfig) (string, error) { func (d *DockerDriver) StartContainer(config *ContainerConfig) (string, error) {
// Build up the template data // Build up the template data
var tplData startContainerTemplate var tplData startContainerTemplate
tplData.Image = config.Image tplData.Image = config.Image
if len(config.Volumes) > 0 {
volumes := make([]string, 0, len(config.Volumes))
for host, guest := range config.Volumes {
volumes = append(volumes, fmt.Sprintf("%s:%s", host, guest))
}
tplData.Volumes = strings.Join(volumes, ",")
}
// Args that we're going to pass to Docker // Args that we're going to pass to Docker
args := config.RunCommand args := []string{"run"}
for i, v := range args { for host, guest := range config.Volumes {
var err error args = append(args, "-v", fmt.Sprintf("%s:%s", host, guest))
args[i], err = d.Tpl.Process(v, &tplData) }
for _, v := range config.RunCommand {
v, err := d.Tpl.Process(v, &tplData)
if err != nil { if err != nil {
return "", err return "", err
} }
args = append(args, v)
} }
d.Ui.Message(fmt.Sprintf( d.Ui.Message(fmt.Sprintf(
"Run command: docker %s", strings.Join(args, " "))) "Run command: docker %s", strings.Join(args, " ")))
@ -149,7 +228,29 @@ func (d *DockerDriver) StartContainer(config *ContainerConfig) (string, error) {
} }
func (d *DockerDriver) StopContainer(id string) error { func (d *DockerDriver) StopContainer(id string) error {
return exec.Command("docker", "kill", id).Run() if err := exec.Command("docker", "kill", id).Run(); err != nil {
return err
}
return exec.Command("docker", "rm", id).Run()
}
func (d *DockerDriver) TagImage(id string, repo string) error {
var stderr bytes.Buffer
cmd := exec.Command("docker", "tag", id, repo)
cmd.Stderr = &stderr
if err := cmd.Start(); err != nil {
return err
}
if err := cmd.Wait(); err != nil {
err = fmt.Errorf("Error tagging image: %s\nStderr: %s",
err, stderr.String())
return err
}
return nil
} }
func (d *DockerDriver) Verify() error { func (d *DockerDriver) Verify() error {

View File

@ -6,6 +6,11 @@ import (
// MockDriver is a driver implementation that can be used for tests. // MockDriver is a driver implementation that can be used for tests.
type MockDriver struct { type MockDriver struct {
CommitCalled bool
CommitContainerId string
CommitImageId string
CommitErr error
DeleteImageCalled bool DeleteImageCalled bool
DeleteImageId string DeleteImageId string
DeleteImageErr error DeleteImageErr error
@ -16,10 +21,31 @@ type MockDriver struct {
ImportId string ImportId string
ImportErr error ImportErr error
LoginCalled bool
LoginEmail string
LoginUsername string
LoginPassword string
LoginRepo string
LoginErr error
LogoutCalled bool
LogoutRepo string
LogoutErr error
PushCalled bool PushCalled bool
PushName string PushName string
PushErr error PushErr error
SaveImageCalled bool
SaveImageId string
SaveImageReader io.Reader
SaveImageError error
TagImageCalled bool
TagImageImageId string
TagImageRepo string
TagImageErr error
ExportReader io.Reader ExportReader io.Reader
ExportError error ExportError error
PullError error PullError error
@ -39,6 +65,12 @@ type MockDriver struct {
VerifyCalled bool VerifyCalled bool
} }
func (d *MockDriver) Commit(id string) (string, error) {
d.CommitCalled = true
d.CommitContainerId = id
return d.CommitImageId, d.CommitErr
}
func (d *MockDriver) DeleteImage(id string) error { func (d *MockDriver) DeleteImage(id string) error {
d.DeleteImageCalled = true d.DeleteImageCalled = true
d.DeleteImageId = id d.DeleteImageId = id
@ -66,6 +98,21 @@ func (d *MockDriver) Import(path, repo string) (string, error) {
return d.ImportId, d.ImportErr return d.ImportId, d.ImportErr
} }
func (d *MockDriver) Login(r, e, u, p string) error {
d.LoginCalled = true
d.LoginRepo = r
d.LoginEmail = e
d.LoginUsername = u
d.LoginPassword = p
return d.LoginErr
}
func (d *MockDriver) Logout(r string) error {
d.LogoutCalled = true
d.LogoutRepo = r
return d.LogoutErr
}
func (d *MockDriver) Pull(image string) error { func (d *MockDriver) Pull(image string) error {
d.PullCalled = true d.PullCalled = true
d.PullImage = image d.PullImage = image
@ -78,6 +125,20 @@ func (d *MockDriver) Push(name string) error {
return d.PushErr return d.PushErr
} }
func (d *MockDriver) SaveImage(id string, dst io.Writer) error {
d.SaveImageCalled = true
d.SaveImageId = id
if d.SaveImageReader != nil {
_, err := io.Copy(dst, d.SaveImageReader)
if err != nil {
return err
}
}
return d.SaveImageError
}
func (d *MockDriver) StartContainer(config *ContainerConfig) (string, error) { func (d *MockDriver) StartContainer(config *ContainerConfig) (string, error) {
d.StartCalled = true d.StartCalled = true
d.StartConfig = config d.StartConfig = config
@ -90,6 +151,13 @@ func (d *MockDriver) StopContainer(id string) error {
return d.StopError return d.StopError
} }
func (d *MockDriver) TagImage(id string, repo string) error {
d.TagImageCalled = true
d.TagImageImageId = id
d.TagImageRepo = repo
return d.TagImageErr
}
func (d *MockDriver) Verify() error { func (d *MockDriver) Verify() error {
d.VerifyCalled = true d.VerifyCalled = true
return d.VerifyError return d.VerifyError

View File

@ -0,0 +1,35 @@
package docker
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
)
// StepCommit commits the container to an image.
type StepCommit struct {
imageId string
}
func (s *StepCommit) Run(state multistep.StateBag) multistep.StepAction {
driver := state.Get("driver").(Driver)
containerId := state.Get("container_id").(string)
ui := state.Get("ui").(packer.Ui)
ui.Say("Committing the container")
imageId, err := driver.Commit(containerId)
if err != nil {
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
// Save the image ID
s.imageId = imageId
state.Put("image_id", s.imageId)
ui.Message(fmt.Sprintf("Image ID: %s", s.imageId))
return multistep.ActionContinue
}
func (s *StepCommit) Cleanup(state multistep.StateBag) {}

View File

@ -0,0 +1,66 @@
package docker
import (
"errors"
"github.com/mitchellh/multistep"
"testing"
)
func testStepCommitState(t *testing.T) multistep.StateBag {
state := testState(t)
state.Put("container_id", "foo")
return state
}
func TestStepCommit_impl(t *testing.T) {
var _ multistep.Step = new(StepCommit)
}
func TestStepCommit(t *testing.T) {
state := testStepCommitState(t)
step := new(StepCommit)
defer step.Cleanup(state)
driver := state.Get("driver").(*MockDriver)
driver.CommitImageId = "bar"
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// verify we did the right thing
if !driver.CommitCalled {
t.Fatal("should've called")
}
// verify the ID is saved
idRaw, ok := state.GetOk("image_id")
if !ok {
t.Fatal("should've saved ID")
}
id := idRaw.(string)
if id != driver.CommitImageId {
t.Fatalf("bad: %#v", id)
}
}
func TestStepCommit_error(t *testing.T) {
state := testStepCommitState(t)
step := new(StepCommit)
defer step.Cleanup(state)
driver := state.Get("driver").(*MockDriver)
driver.CommitErr = errors.New("foo")
// run the step
if action := step.Run(state); action != multistep.ActionHalt {
t.Fatalf("bad action: %#v", action)
}
// verify the ID is not saved
if _, ok := state.GetOk("image_id"); ok {
t.Fatal("shouldn't save image ID")
}
}

View File

@ -12,6 +12,7 @@ type StepExport struct{}
func (s *StepExport) Run(state multistep.StateBag) multistep.StepAction { func (s *StepExport) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config) config := state.Get("config").(*Config)
driver := state.Get("driver").(Driver) driver := state.Get("driver").(Driver)
containerId := state.Get("container_id").(string) containerId := state.Get("container_id").(string)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)

View File

@ -20,6 +20,29 @@ func (s *StepPull) Run(state multistep.StateBag) multistep.StepAction {
} }
ui.Say(fmt.Sprintf("Pulling Docker image: %s", config.Image)) ui.Say(fmt.Sprintf("Pulling Docker image: %s", config.Image))
if config.Login {
ui.Message("Logging in...")
err := driver.Login(
config.LoginServer,
config.LoginEmail,
config.LoginUsername,
config.LoginPassword)
if err != nil {
err := fmt.Errorf("Error logging in: %s", err)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
defer func() {
ui.Message("Logging out...")
if err := driver.Logout(config.LoginServer); err != nil {
ui.Error(fmt.Sprintf("Error logging out: %s", err))
}
}()
}
if err := driver.Pull(config.Image); err != nil { if err := driver.Pull(config.Image); err != nil {
err := fmt.Errorf("Error pulling Docker image: %s", err) err := fmt.Errorf("Error pulling Docker image: %s", err)
state.Put("error", err) state.Put("error", err)

View File

@ -51,6 +51,35 @@ func TestStepPull_error(t *testing.T) {
} }
} }
func TestStepPull_login(t *testing.T) {
state := testState(t)
step := new(StepPull)
defer step.Cleanup(state)
config := state.Get("config").(*Config)
driver := state.Get("driver").(*MockDriver)
config.Login = true
// run the step
if action := step.Run(state); action != multistep.ActionContinue {
t.Fatalf("bad action: %#v", action)
}
// verify we pulled
if !driver.PullCalled {
t.Fatal("should've pulled")
}
// verify we logged in
if !driver.LoginCalled {
t.Fatal("should've logged in")
}
if !driver.LogoutCalled {
t.Fatal("should've logged out")
}
}
func TestStepPull_noPull(t *testing.T) { func TestStepPull_noPull(t *testing.T) {
state := testState(t) state := testState(t)
step := new(StepPull) step := new(StepPull)

View File

@ -19,11 +19,14 @@ func (s *StepRun) Run(state multistep.StateBag) multistep.StepAction {
runConfig := ContainerConfig{ runConfig := ContainerConfig{
Image: config.Image, Image: config.Image,
RunCommand: config.RunCommand, RunCommand: config.RunCommand,
Volumes: map[string]string{ Volumes: make(map[string]string),
tempDir: "/packer-files",
},
} }
for host, container := range config.Volumes {
runConfig.Volumes[host] = container
}
runConfig.Volumes[tempDir] = "/packer-files"
ui.Say("Starting docker container...") ui.Say("Starting docker container...")
containerId, err := driver.StartContainer(&runConfig) containerId, err := driver.StartContainer(&runConfig)
if err != nil { if err != nil {

View File

@ -0,0 +1,25 @@
package googlecompute
import (
"encoding/json"
"os"
)
// accountFile represents the structure of the JSON account file.
type accountFile struct {
PrivateKeyId string `json:"private_key_id"`
PrivateKey string `json:"private_key"`
ClientEmail string `json:"client_email"`
ClientId string `json:"client_id"`
}
func loadJSON(result interface{}, path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
dec := json.NewDecoder(f)
return dec.Decode(result)
}
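The new account_file flow replaces the old client-secrets/private-key pair: the JSON keys of a Google service-account file map directly onto the struct tags above. A sketch of loading one, assuming the googlecompute package and the standard log package; the path is a hypothetical placeholder.

var account accountFile
if err := loadJSON(&account, "/path/to/account.json"); err != nil {
	log.Fatalf("Failed parsing account file: %s", err)
}
log.Printf("Authenticating as %s (key id %s)", account.ClientEmail, account.PrivateKeyId)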

View File

@ -37,3 +37,7 @@ func (a *Artifact) Id() string {
func (a *Artifact) String() string { func (a *Artifact) String() string {
return fmt.Sprintf("A disk image was created: %v", a.imageName) return fmt.Sprintf("A disk image was created: %v", a.imageName)
} }
func (a *Artifact) State(name string) interface{} {
return nil
}

View File

@ -35,7 +35,7 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
// representing a GCE machine image. // representing a GCE machine image.
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
driver, err := NewDriverGCE( driver, err := NewDriverGCE(
ui, b.config.ProjectId, b.config.clientSecrets, b.config.privateKeyBytes) ui, b.config.ProjectId, &b.config.account)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -65,7 +65,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
SSHWaitTimeout: 5 * time.Minute, SSHWaitTimeout: 5 * time.Minute,
}, },
new(common.StepProvision), new(common.StepProvision),
new(StepUpdateGsutil), new(StepUpdateGcloud),
new(StepCreateImage), new(StepCreateImage),
new(StepUploadImage), new(StepUploadImage),
new(StepRegisterImage), new(StepRegisterImage),

View File

@ -1,32 +0,0 @@
package googlecompute
import (
"encoding/json"
"io/ioutil"
)
// clientSecrets represents the client secrets of a GCE service account.
type clientSecrets struct {
Web struct {
AuthURI string `json:"auth_uri"`
ClientEmail string `json:"client_email"`
ClientId string `json:"client_id"`
TokenURI string `json:"token_uri"`
}
}
// loadClientSecrets loads the GCE client secrets file identified by path.
func loadClientSecrets(path string) (*clientSecrets, error) {
var cs *clientSecrets
secretBytes, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
err = json.Unmarshal(secretBytes, &cs)
if err != nil {
return nil, err
}
return cs, nil
}

View File

@ -1,31 +0,0 @@
package googlecompute
import (
"io/ioutil"
"testing"
)
func testClientSecretsFile(t *testing.T) string {
tf, err := ioutil.TempFile("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
defer tf.Close()
if _, err := tf.Write([]byte(testClientSecretsContent)); err != nil {
t.Fatalf("err: %s", err)
}
return tf.Name()
}
func TestLoadClientSecrets(t *testing.T) {
_, err := loadClientSecrets(testClientSecretsFile(t))
if err != nil {
t.Fatalf("err: %s", err)
}
}
// This is just some dummy data that doesn't actually work (it was revoked
// a long time ago).
const testClientSecretsContent = `{"web":{"auth_uri":"https://accounts.google.com/o/oauth2/auth","token_uri":"https://accounts.google.com/o/oauth2/token","client_email":"774313886706-eorlsj0r4eqkh5e7nvea5fuf59ifr873@developer.gserviceaccount.com","client_x509_cert_url":"https://www.googleapis.com/robot/v1/metadata/x509/774313886706-eorlsj0r4eqkh5e7nvea5fuf59ifr873@developer.gserviceaccount.com","client_id":"774313886706-eorlsj0r4eqkh5e7nvea5fuf59ifr873.apps.googleusercontent.com","auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs"}}`

View File

@ -16,26 +16,27 @@ import (
type Config struct { type Config struct {
common.PackerConfig `mapstructure:",squash"` common.PackerConfig `mapstructure:",squash"`
BucketName string `mapstructure:"bucket_name"` AccountFile string `mapstructure:"account_file"`
ClientSecretsFile string `mapstructure:"client_secrets_file"` ProjectId string `mapstructure:"project_id"`
ImageName string `mapstructure:"image_name"`
ImageDescription string `mapstructure:"image_description"`
InstanceName string `mapstructure:"instance_name"`
MachineType string `mapstructure:"machine_type"`
Metadata map[string]string `mapstructure:"metadata"`
Network string `mapstructure:"network"`
Passphrase string `mapstructure:"passphrase"`
PrivateKeyFile string `mapstructure:"private_key_file"`
ProjectId string `mapstructure:"project_id"`
SourceImage string `mapstructure:"source_image"`
SSHUsername string `mapstructure:"ssh_username"`
SSHPort uint `mapstructure:"ssh_port"`
RawSSHTimeout string `mapstructure:"ssh_timeout"`
RawStateTimeout string `mapstructure:"state_timeout"`
Tags []string `mapstructure:"tags"`
Zone string `mapstructure:"zone"`
clientSecrets *clientSecrets BucketName string `mapstructure:"bucket_name"`
DiskSizeGb int64 `mapstructure:"disk_size"`
ImageName string `mapstructure:"image_name"`
ImageDescription string `mapstructure:"image_description"`
InstanceName string `mapstructure:"instance_name"`
MachineType string `mapstructure:"machine_type"`
Metadata map[string]string `mapstructure:"metadata"`
Network string `mapstructure:"network"`
SourceImage string `mapstructure:"source_image"`
SourceImageProjectId string `mapstructure:"source_image_project_id"`
SSHUsername string `mapstructure:"ssh_username"`
SSHPort uint `mapstructure:"ssh_port"`
RawSSHTimeout string `mapstructure:"ssh_timeout"`
RawStateTimeout string `mapstructure:"state_timeout"`
Tags []string `mapstructure:"tags"`
Zone string `mapstructure:"zone"`
account accountFile
instanceName string instanceName string
privateKeyBytes []byte privateKeyBytes []byte
sshTimeout time.Duration sshTimeout time.Duration
@ -64,6 +65,10 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
c.Network = "default" c.Network = "default"
} }
if c.DiskSizeGb == 0 {
c.DiskSizeGb = 10
}
if c.ImageDescription == "" { if c.ImageDescription == "" {
c.ImageDescription = "Created by Packer" c.ImageDescription = "Created by Packer"
} }
@ -98,21 +103,21 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
// Process Templates // Process Templates
templates := map[string]*string{ templates := map[string]*string{
"bucket_name": &c.BucketName, "account_file": &c.AccountFile,
"client_secrets_file": &c.ClientSecretsFile,
"image_name": &c.ImageName, "bucket_name": &c.BucketName,
"image_description": &c.ImageDescription, "image_name": &c.ImageName,
"instance_name": &c.InstanceName, "image_description": &c.ImageDescription,
"machine_type": &c.MachineType, "instance_name": &c.InstanceName,
"network": &c.Network, "machine_type": &c.MachineType,
"passphrase": &c.Passphrase, "network": &c.Network,
"private_key_file": &c.PrivateKeyFile, "project_id": &c.ProjectId,
"project_id": &c.ProjectId, "source_image": &c.SourceImage,
"source_image": &c.SourceImage, "source_image_project_id": &c.SourceImageProjectId,
"ssh_username": &c.SSHUsername, "ssh_username": &c.SSHUsername,
"ssh_timeout": &c.RawSSHTimeout, "ssh_timeout": &c.RawSSHTimeout,
"state_timeout": &c.RawStateTimeout, "state_timeout": &c.RawStateTimeout,
"zone": &c.Zone, "zone": &c.Zone,
} }
for n, ptr := range templates { for n, ptr := range templates {
@ -130,16 +135,6 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
errs, errors.New("a bucket_name must be specified")) errs, errors.New("a bucket_name must be specified"))
} }
if c.ClientSecretsFile == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("a client_secrets_file must be specified"))
}
if c.PrivateKeyFile == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("a private_key_file must be specified"))
}
if c.ProjectId == "" { if c.ProjectId == "" {
errs = packer.MultiErrorAppend( errs = packer.MultiErrorAppend(
errs, errors.New("a project_id must be specified")) errs, errors.New("a project_id must be specified"))
@ -170,22 +165,10 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
} }
c.stateTimeout = stateTimeout c.stateTimeout = stateTimeout
if c.ClientSecretsFile != "" { if c.AccountFile != "" {
// Load the client secrets file. if err := loadJSON(&c.account, c.AccountFile); err != nil {
cs, err := loadClientSecrets(c.ClientSecretsFile)
if err != nil {
errs = packer.MultiErrorAppend( errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Failed parsing client secrets file: %s", err)) errs, fmt.Errorf("Failed parsing account file: %s", err))
}
c.clientSecrets = cs
}
if c.PrivateKeyFile != "" {
// Load the private key.
c.privateKeyBytes, err = processPrivateKeyFile(c.PrivateKeyFile, c.Passphrase)
if err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Failed loading private key file: %s", err))
} }
} }

View File

@ -1,14 +1,14 @@
package googlecompute package googlecompute
import ( import (
"io/ioutil"
"testing" "testing"
) )
func testConfig(t *testing.T) map[string]interface{} { func testConfig(t *testing.T) map[string]interface{} {
return map[string]interface{}{ return map[string]interface{}{
"account_file": testAccountFile(t),
"bucket_name": "foo", "bucket_name": "foo",
"client_secrets_file": testClientSecretsFile(t),
"private_key_file": testPrivateKeyFile(t),
"project_id": "hashicorp", "project_id": "hashicorp",
"source_image": "foo", "source_image": "foo",
"zone": "us-east-1a", "zone": "us-east-1a",
@ -68,32 +68,6 @@ func TestConfigPrepare(t *testing.T) {
false, false,
}, },
{
"client_secrets_file",
nil,
true,
},
{
"client_secrets_file",
testClientSecretsFile(t),
false,
},
{
"client_secrets_file",
"/tmp/i/should/not/exist",
true,
},
{
"private_key_file",
nil,
true,
},
{
"private_key_file",
testPrivateKeyFile(t),
false,
},
{ {
"private_key_file", "private_key_file",
"/tmp/i/should/not/exist", "/tmp/i/should/not/exist",
@ -174,3 +148,21 @@ func TestConfigPrepare(t *testing.T) {
} }
} }
} }
func testAccountFile(t *testing.T) string {
tf, err := ioutil.TempFile("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
defer tf.Close()
if _, err := tf.Write([]byte(testAccountContent)); err != nil {
t.Fatalf("err: %s", err)
}
return tf.Name()
}
// Dummy account file content; an empty JSON object is enough to exercise parsing.
const testAccountContent = `{}`
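For reference, the new configuration surface exercised by these tests boils down to account_file plus the optional disk_size (10 GB when omitted) and source_image_project_id (project_id is used when omitted). A sketch in the same map form as the tests; the names and paths are hypothetical placeholders.

// Sketch only: assumes the googlecompute package is in scope.
raw := map[string]interface{}{
	"account_file":            "/path/to/account.json",
	"project_id":              "my-project",
	"bucket_name":             "my-packer-images",
	"zone":                    "us-central1-a",
	"source_image":            "some-base-image",
	"source_image_project_id": "debian-cloud", // optional; project_id otherwise
	"disk_size":               20,             // GB; defaults to 10
}

if _, _, errs := NewConfig(raw); errs != nil {
	// missing required settings such as project_id or bucket_name are reported here
}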

View File

@ -23,9 +23,15 @@ type Driver interface {
WaitForInstance(state, zone, name string) <-chan error WaitForInstance(state, zone, name string) <-chan error
} }
type Image struct {
Name string
ProjectId string
}
type InstanceConfig struct { type InstanceConfig struct {
Description string Description string
Image string DiskSizeGb int64
Image Image
MachineType string MachineType string
Metadata map[string]string Metadata map[string]string
Name string Name string

View File

@ -6,9 +6,9 @@ import (
"net/http" "net/http"
"time" "time"
"code.google.com/p/goauth2/oauth"
"code.google.com/p/goauth2/oauth/jwt"
"code.google.com/p/google-api-go-client/compute/v1" "code.google.com/p/google-api-go-client/compute/v1"
"github.com/golang/oauth2"
"github.com/golang/oauth2/google"
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
) )
@ -20,40 +20,41 @@ type driverGCE struct {
ui packer.Ui ui packer.Ui
} }
const DriverScopes string = "https://www.googleapis.com/auth/compute " + var DriverScopes = []string{"https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/devstorage.full_control"}
"https://www.googleapis.com/auth/devstorage.full_control"
func NewDriverGCE(ui packer.Ui, p string, a *accountFile) (Driver, error) {
var f *oauth2.Flow
var err error
// Auth with AccountFile first if provided
if a.PrivateKey != "" {
log.Printf("[INFO] Requesting Google token via AccountFile...")
log.Printf("[INFO] -- Email: %s", a.ClientEmail)
log.Printf("[INFO] -- Scopes: %s", DriverScopes)
log.Printf("[INFO] -- Private Key Length: %d", len(a.PrivateKey))
f, err = oauth2.New(
oauth2.JWTClient(a.ClientEmail, []byte(a.PrivateKey)),
oauth2.Scope(DriverScopes...),
google.JWTEndpoint())
} else {
log.Printf("[INFO] Requesting Google token via GCE Service Role...")
f, err = oauth2.New(google.ComputeEngineAccount(""))
}
func NewDriverGCE(ui packer.Ui, projectId string, c *clientSecrets, key []byte) (Driver, error) {
log.Printf("[INFO] Requesting token...")
log.Printf("[INFO] -- Email: %s", c.Web.ClientEmail)
log.Printf("[INFO] -- Scopes: %s", DriverScopes)
log.Printf("[INFO] -- Private Key Length: %d", len(key))
log.Printf("[INFO] -- Token URL: %s", c.Web.TokenURI)
jwtTok := jwt.NewToken(c.Web.ClientEmail, DriverScopes, key)
jwtTok.ClaimSet.Aud = c.Web.TokenURI
token, err := jwtTok.Assert(new(http.Client))
if err != nil { if err != nil {
return nil, err return nil, err
} }
transport := &oauth.Transport{ log.Printf("[INFO] Instantiating GCE client...")
Config: &oauth.Config{ service, err := compute.New(&http.Client{Transport: f.NewTransport()})
ClientId: c.Web.ClientId,
Scope: DriverScopes,
TokenURL: c.Web.TokenURI,
AuthURL: c.Web.AuthURI,
},
Token: token,
}
log.Printf("[INFO] Instantiating client...")
service, err := compute.New(transport.Client())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &driverGCE{ return &driverGCE{
projectId: projectId, projectId: p,
service: service, service: service,
ui: ui, ui: ui,
}, nil }, nil
@ -134,7 +135,7 @@ func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) {
} }
// Get the image // Get the image
d.ui.Message(fmt.Sprintf("Loading image: %s", c.Image)) d.ui.Message(fmt.Sprintf("Loading image: %s in project %s", c.Image.Name, c.Image.ProjectId))
image, err := d.getImage(c.Image) image, err := d.getImage(c.Image)
if err != nil { if err != nil {
return nil, err return nil, err
@ -177,6 +178,7 @@ func (d *driverGCE) RunInstance(c *InstanceConfig) (<-chan error, error) {
AutoDelete: true, AutoDelete: true,
InitializeParams: &compute.AttachedDiskInitializeParams{ InitializeParams: &compute.AttachedDiskInitializeParams{
SourceImage: image.SelfLink, SourceImage: image.SelfLink,
DiskSizeGb: c.DiskSizeGb,
}, },
}, },
}, },
@ -228,20 +230,17 @@ func (d *driverGCE) WaitForInstance(state, zone, name string) <-chan error {
return errCh return errCh
} }
func (d *driverGCE) getImage(name string) (image *compute.Image, err error) { func (d *driverGCE) getImage(img Image) (image *compute.Image, err error) {
projects := []string{d.projectId, "debian-cloud", "centos-cloud"} projects := []string{img.ProjectId, "centos-cloud", "coreos-cloud", "debian-cloud", "google-containers", "opensuse-cloud", "rhel-cloud", "suse-cloud", "ubuntu-os-cloud", "windows-cloud"}
for _, project := range projects { for _, project := range projects {
image, err = d.service.Images.Get(project, name).Do() image, err = d.service.Images.Get(project, img.Name).Do()
if err == nil && image != nil && image.SelfLink != "" { if err == nil && image != nil && image.SelfLink != "" {
return return
} }
image = nil image = nil
} }
if err == nil { err = fmt.Errorf("Image %s could not be found in any of these projects: %s", img.Name, projects)
err = fmt.Errorf("Image could not be found: %s", name)
}
return return
} }

View File

@ -31,8 +31,8 @@ func (s *StepCreateImage) Run(state multistep.StateBag) multistep.StepAction {
ui.Say("Creating image...") ui.Say("Creating image...")
cmd := new(packer.RemoteCmd) cmd := new(packer.RemoteCmd)
cmd.Command = fmt.Sprintf("%s%s --output_file_name %s", cmd.Command = fmt.Sprintf("%s%s --output_file_name %s --fssize %d",
sudoPrefix, imageBundleCmd, imageFilename) sudoPrefix, imageBundleCmd, imageFilename, config.DiskSizeGb*1024*1024*1024)
err := cmd.StartWithUi(comm, ui) err := cmd.StartWithUi(comm, ui)
if err == nil && cmd.ExitStatus != 0 { if err == nil && cmd.ExitStatus != 0 {
err = fmt.Errorf( err = fmt.Errorf(

View File

@ -16,6 +16,33 @@ type StepCreateInstance struct {
instanceName string instanceName string
} }
func (config *Config) getImage() Image {
project := config.ProjectId
if config.SourceImageProjectId != "" {
project = config.SourceImageProjectId
}
return Image{Name: config.SourceImage, ProjectId: project}
}
func (config *Config) getInstanceMetadata(sshPublicKey string) map[string]string {
instanceMetadata := make(map[string]string)
// Copy metadata from config
for k, v := range config.Metadata {
instanceMetadata[k] = v
}
// Merge any existing ssh keys with our public key
sshMetaKey := "sshKeys"
sshKeys := fmt.Sprintf("%s:%s", config.SSHUsername, sshPublicKey)
if confSshKeys, exists := instanceMetadata[sshMetaKey]; exists {
sshKeys = fmt.Sprintf("%s\n%s", sshKeys, confSshKeys)
}
instanceMetadata[sshMetaKey] = sshKeys
return instanceMetadata
}
// Run executes the Packer build step that creates a GCE instance. // Run executes the Packer build step that creates a GCE instance.
func (s *StepCreateInstance) Run(state multistep.StateBag) multistep.StepAction { func (s *StepCreateInstance) Run(state multistep.StateBag) multistep.StepAction {
config := state.Get("config").(*Config) config := state.Get("config").(*Config)
@ -28,15 +55,14 @@ func (s *StepCreateInstance) Run(state multistep.StateBag) multistep.StepAction
errCh, err := driver.RunInstance(&InstanceConfig{ errCh, err := driver.RunInstance(&InstanceConfig{
Description: "New instance created by Packer", Description: "New instance created by Packer",
Image: config.SourceImage, DiskSizeGb: config.DiskSizeGb,
Image: config.getImage(),
MachineType: config.MachineType, MachineType: config.MachineType,
Metadata: map[string]string{ Metadata: config.getInstanceMetadata(sshPublicKey),
"sshKeys": fmt.Sprintf("%s:%s", config.SSHUsername, sshPublicKey), Name: name,
}, Network: config.Network,
Name: name, Tags: config.Tags,
Network: config.Network, Zone: config.Zone,
Tags: config.Tags,
Zone: config.Zone,
}) })
if err == nil { if err == nil {

View File

@ -7,9 +7,9 @@ import (
"github.com/mitchellh/packer/packer" "github.com/mitchellh/packer/packer"
) )
// StepUpdateGsutil represents a Packer build step that updates the gsutil // StepUpdateGcloud represents a Packer build step that updates the gcloud
// utility to the latest version available. // utility to the latest version available.
type StepUpdateGsutil int type StepUpdateGcloud int
// Run executes the Packer build step that updates the gsutil utility to the // Run executes the Packer build step that updates the gsutil utility to the
// latest version available. // latest version available.
@ -17,7 +17,7 @@ type StepUpdateGsutil int
// This step is required to prevent the image creation process from hanging; // This step is required to prevent the image creation process from hanging;
// the image creation process utilizes the gcimagebundle cli tool which will // the image creation process utilizes the gcimagebundle cli tool which will
// prompt to update gsutil if a newer version is available. // prompt to update gsutil if a newer version is available.
func (s *StepUpdateGsutil) Run(state multistep.StateBag) multistep.StepAction { func (s *StepUpdateGcloud) Run(state multistep.StateBag) multistep.StepAction {
comm := state.Get("communicator").(packer.Communicator) comm := state.Get("communicator").(packer.Communicator)
config := state.Get("config").(*Config) config := state.Get("config").(*Config)
ui := state.Get("ui").(packer.Ui) ui := state.Get("ui").(packer.Ui)
@ -28,18 +28,18 @@ func (s *StepUpdateGsutil) Run(state multistep.StateBag) multistep.StepAction {
sudoPrefix = "sudo " sudoPrefix = "sudo "
} }
gsutilUpdateCmd := "/usr/local/bin/gsutil update -n -f" gsutilUpdateCmd := "/usr/local/bin/gcloud -q components update"
cmd := new(packer.RemoteCmd) cmd := new(packer.RemoteCmd)
cmd.Command = fmt.Sprintf("%s%s", sudoPrefix, gsutilUpdateCmd) cmd.Command = fmt.Sprintf("%s%s", sudoPrefix, gsutilUpdateCmd)
ui.Say("Updating gsutil...") ui.Say("Updating gcloud components...")
err := cmd.StartWithUi(comm, ui) err := cmd.StartWithUi(comm, ui)
if err == nil && cmd.ExitStatus != 0 { if err == nil && cmd.ExitStatus != 0 {
err = fmt.Errorf( err = fmt.Errorf(
"gsutil update exited with non-zero exit status: %d", cmd.ExitStatus) "gcloud components update exited with non-zero exit status: %d", cmd.ExitStatus)
} }
if err != nil { if err != nil {
err := fmt.Errorf("Error updating gsutil: %s", err) err := fmt.Errorf("Error updating gcloud components: %s", err)
state.Put("error", err) state.Put("error", err)
ui.Error(err.Error()) ui.Error(err.Error())
return multistep.ActionHalt return multistep.ActionHalt
@ -49,4 +49,4 @@ func (s *StepUpdateGsutil) Run(state multistep.StateBag) multistep.StepAction {
} }
// Cleanup. // Cleanup.
func (s *StepUpdateGsutil) Cleanup(state multistep.StateBag) {} func (s *StepUpdateGcloud) Cleanup(state multistep.StateBag) {}


@@ -8,13 +8,13 @@ import (
"github.com/mitchellh/packer/packer"
)

- func TestStepUpdateGsutil_impl(t *testing.T) {
- var _ multistep.Step = new(StepUpdateGsutil)
+ func TestStepUpdateGcloud_impl(t *testing.T) {
+ var _ multistep.Step = new(StepUpdateGcloud)
}

- func TestStepUpdateGsutil(t *testing.T) {
+ func TestStepUpdateGcloud(t *testing.T) {
state := testState(t)
- step := new(StepUpdateGsutil)
+ step := new(StepUpdateGcloud)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
@@ -32,14 +32,14 @@ func TestStepUpdateGsutil(t *testing.T) {
if strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatal("should not sudo")
}
- if !strings.Contains(comm.StartCmd.Command, "gsutil update") {
+ if !strings.Contains(comm.StartCmd.Command, "gcloud -q components update") {
t.Fatalf("bad command: %#v", comm.StartCmd.Command)
}
}

- func TestStepUpdateGsutil_badExitStatus(t *testing.T) {
+ func TestStepUpdateGcloud_badExitStatus(t *testing.T) {
state := testState(t)
- step := new(StepUpdateGsutil)
+ step := new(StepUpdateGcloud)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
@@ -56,9 +56,9 @@ func TestStepUpdateGsutil_badExitStatus(t *testing.T) {
}
}

- func TestStepUpdateGsutil_nonRoot(t *testing.T) {
+ func TestStepUpdateGcloud_nonRoot(t *testing.T) {
state := testState(t)
- step := new(StepUpdateGsutil)
+ step := new(StepUpdateGcloud)
defer step.Cleanup(state)
comm := new(packer.MockCommunicator)
@@ -79,7 +79,7 @@ func TestStepUpdateGsutil_nonRoot(t *testing.T) {
if !strings.HasPrefix(comm.StartCmd.Command, "sudo") {
t.Fatal("should sudo")
}
- if !strings.Contains(comm.StartCmd.Command, "gsutil update") {
+ if !strings.Contains(comm.StartCmd.Command, "gcloud -q components update") {
t.Fatalf("bad command: %#v", comm.StartCmd.Command)
}
}


@@ -24,6 +24,10 @@ func (a *NullArtifact) String() string {
return fmt.Sprintf("Did not export anything. This is the null builder")
}

+ func (a *NullArtifact) State(name string) interface{} {
+ return nil
+ }
+
func (a *NullArtifact) Destroy() error {
return nil
}


@@ -32,14 +32,28 @@ func NewConfig(raws ...interface{}) (*Config, []string, error) {
c.tpl.UserVars = c.PackerUserVars

- // Defaults
if c.Port == 0 {
c.Port = 22
}

- // (none so far)
errs := common.CheckUnusedConfig(md)

+ templates := map[string]*string{
+ "host": &c.Host,
+ "ssh_username": &c.SSHUsername,
+ "ssh_password": &c.SSHPassword,
+ "ssh_private_key_file": &c.SSHPrivateKeyFile,
+ }
+
+ for n, ptr := range templates {
+ var err error
+ *ptr, err = c.tpl.Process(*ptr, nil)
+ if err != nil {
+ errs = packer.MultiErrorAppend(
+ errs, fmt.Errorf("Error processing %s: %s", n, err))
+ }
+ }
+
if c.Host == "" {
errs = packer.MultiErrorAppend(errs,
fmt.Errorf("host must be specified"))


@@ -1,14 +1,16 @@
package openstack

import (
+ "crypto/tls"
"fmt"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/packer"
- "github.com/rackspace/gophercloud"
"net/http"
"net/url"
"os"
"strings"
+
+ "github.com/mitchellh/gophercloud-fork-40444fb"
)

// AccessConfig is for common configuration related to openstack access
@@ -20,6 +22,8 @@ type AccessConfig struct {
Provider string `mapstructure:"provider"`
RawRegion string `mapstructure:"region"`
ProxyUrl string `mapstructure:"proxy_url"`
+ TenantId string `mapstructure:"tenant_id"`
+ Insecure bool `mapstructure:"insecure"`
}

// Auth returns a valid Auth object for access to openstack services, or
@@ -31,6 +35,7 @@ func (c *AccessConfig) Auth() (gophercloud.AccessProvider, error) {
c.Project = common.ChooseString(c.Project, os.Getenv("SDK_PROJECT"), os.Getenv("OS_TENANT_NAME"))
c.Provider = common.ChooseString(c.Provider, os.Getenv("SDK_PROVIDER"), os.Getenv("OS_AUTH_URL"))
c.RawRegion = common.ChooseString(c.RawRegion, os.Getenv("SDK_REGION"), os.Getenv("OS_REGION_NAME"))
+ c.TenantId = common.ChooseString(c.TenantId, os.Getenv("OS_TENANT_ID"))

// OpenStack's auto-generated openrc.sh files do not append the suffix
// /tokens to the authentication URL. This ensures it is present when
@@ -40,14 +45,21 @@ func (c *AccessConfig) Auth() (gophercloud.AccessProvider, error) {
}

authoptions := gophercloud.AuthOptions{
- Username: c.Username,
- Password: c.Password,
- ApiKey: c.ApiKey,
AllowReauth: true,
+ ApiKey: c.ApiKey,
+ TenantId: c.TenantId,
+ TenantName: c.Project,
+ Username: c.Username,
+ Password: c.Password,
}

- if c.Project != "" {
- authoptions.TenantName = c.Project
+ default_transport := &http.Transport{}
+
+ if c.Insecure {
+ cfg := new(tls.Config)
+ cfg.InsecureSkipVerify = true
+ default_transport.TLSClientConfig = cfg
}

// For corporate networks it may be the case where we want our API calls
@@ -60,7 +72,11 @@ func (c *AccessConfig) Auth() (gophercloud.AccessProvider, error) {
// The gophercloud.Context has a UseCustomClient method which
// would allow us to override with a new instance of http.Client.
- http.DefaultTransport = &http.Transport{Proxy: http.ProxyURL(url)}
+ default_transport.Proxy = http.ProxyURL(url)
+ }
+
+ if c.Insecure || c.ProxyUrl != "" {
+ http.DefaultTransport = default_transport
}

return gophercloud.Authenticate(c.Provider, authoptions)
@@ -80,10 +96,14 @@ func (c *AccessConfig) Prepare(t *packer.ConfigTemplate) []error {
}

templates := map[string]*string{
"username": &c.Username,
"password": &c.Password,
- "apiKey": &c.ApiKey,
+ "api_key": &c.ApiKey,
"provider": &c.Provider,
+ "project": &c.Project,
+ "tenant_id": &c.TenantId,
+ "region": &c.RawRegion,
+ "proxy_url": &c.ProxyUrl,
}

errs := make([]error, 0)
@@ -96,8 +116,10 @@ func (c *AccessConfig) Prepare(t *packer.ConfigTemplate) []error {
}
}

- if c.Region() == "" {
- errs = append(errs, fmt.Errorf("region must be specified"))
+ if strings.HasPrefix(c.Provider, "rackspace") {
+ if c.Region() == "" {
+ errs = append(errs, fmt.Errorf("region must be specified when using rackspace"))
+ }
}

if len(errs) > 0 {
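The rewritten Auth builds a single http.Transport, optionally disables TLS verification when insecure is set, optionally attaches a proxy, and only installs it as http.DefaultTransport when one of those options is actually in use. A standalone sketch of the same wiring (the proxy URL is made up):

package main

import (
    "crypto/tls"
    "fmt"
    "net/http"
    "net/url"
)

// buildTransport mirrors the pattern above: start from a plain transport and
// only customize (and install) it when insecure or a proxy is requested.
func buildTransport(insecure bool, proxyUrl string) (*http.Transport, error) {
    transport := &http.Transport{}

    if insecure {
        transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
    }

    if proxyUrl != "" {
        u, err := url.Parse(proxyUrl)
        if err != nil {
            return nil, err
        }
        transport.Proxy = http.ProxyURL(u)
    }

    if insecure || proxyUrl != "" {
        http.DefaultTransport = transport
    }
    return transport, nil
}

func main() {
    t, err := buildTransport(true, "http://proxy.example.com:3128")
    fmt.Println(t.TLSClientConfig != nil, t.Proxy != nil, err)
}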


@@ -8,13 +8,22 @@ func testAccessConfig() *AccessConfig {
return &AccessConfig{}
}

- func TestAccessConfigPrepare_NoRegion(t *testing.T) {
+ func TestAccessConfigPrepare_NoRegion_Rackspace(t *testing.T) {
c := testAccessConfig()
+ c.Provider = "rackspace-us"
if err := c.Prepare(nil); err == nil {
t.Fatalf("shouldn't have err: %s", err)
}
}

+ func TestAccessConfigPrepare_NoRegion_PrivateCloud(t *testing.T) {
+ c := testAccessConfig()
+ c.Provider = "http://some-keystone-server:5000/v2.0"
+ if err := c.Prepare(nil); err != nil {
+ t.Fatalf("shouldn't have err: %s", err)
+ }
+ }
+
func TestAccessConfigPrepare_Region(t *testing.T) {
dfw := "DFW"
c := testAccessConfig()


@@ -2,8 +2,9 @@ package openstack
import (
"fmt"
- "github.com/rackspace/gophercloud"
"log"
+
+ "github.com/mitchellh/gophercloud-fork-40444fb"
)

// Artifact is an artifact implementation that contains built images.
@@ -35,6 +36,10 @@ func (a *Artifact) String() string {
return fmt.Sprintf("An image was created: %v", a.ImageId)
}

+ func (a *Artifact) State(name string) interface{} {
+ return nil
+ }
+
func (a *Artifact) Destroy() error {
log.Printf("Destroying image: %d", a.ImageId)
return a.Conn.DeleteImageById(a.ImageId)


@@ -8,8 +8,9 @@ import (
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/packer"
- "github.com/rackspace/gophercloud"
"log"
+
+ "github.com/mitchellh/gophercloud-fork-40444fb"
)

// The unique ID for this builder
@@ -88,9 +89,15 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
DebugKeyPath: fmt.Sprintf("os_%s.pem", b.config.PackerBuildName),
},
&StepRunSourceServer{
Name: b.config.ImageName,
Flavor: b.config.Flavor,
SourceImage: b.config.SourceImage,
+ SecurityGroups: b.config.SecurityGroups,
+ Networks: b.config.Networks,
+ },
+ &StepAllocateIp{
+ FloatingIpPool: b.config.FloatingIpPool,
+ FloatingIp: b.config.FloatingIp,
},
&common.StepConnectSSH{
SSHAddress: SSHAddress(csp, b.config.SSHPort),


@@ -10,12 +10,17 @@ import (
// RunConfig contains configuration for running an instance from a source
// image and details on how to access that launched image.
type RunConfig struct {
SourceImage string `mapstructure:"source_image"`
Flavor string `mapstructure:"flavor"`
RawSSHTimeout string `mapstructure:"ssh_timeout"`
SSHUsername string `mapstructure:"ssh_username"`
SSHPort int `mapstructure:"ssh_port"`
OpenstackProvider string `mapstructure:"openstack_provider"`
+ UseFloatingIp bool `mapstructure:"use_floating_ip"`
+ FloatingIpPool string `mapstructure:"floating_ip_pool"`
+ FloatingIp string `mapstructure:"floating_ip"`
+ SecurityGroups []string `mapstructure:"security_groups"`
+ Networks []string `mapstructure:"networks"`

// Unexported fields that are calculated from others
sshTimeout time.Duration
@@ -43,6 +48,10 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {
c.RawSSHTimeout = "5m"
}

+ if c.UseFloatingIp && c.FloatingIpPool == "" {
+ c.FloatingIpPool = "public"
+ }
+
// Validation
var err error
errs := make([]error, 0)
@@ -59,7 +68,7 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {
}

templates := map[string]*string{
- "flavlor": &c.Flavor,
+ "flavor": &c.Flavor,
"ssh_timeout": &c.RawSSHTimeout,
"ssh_username": &c.SSHUsername,
"source_image": &c.SourceImage,
@@ -69,8 +78,7 @@ func (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {
var err error
*ptr, err = t.Process(*ptr, nil)
if err != nil {
- errs = append(
- errs, fmt.Errorf("Error processing %s: %s", n, err))
+ errs = append(errs, fmt.Errorf("Error processing %s: %s", n, err))
}
}


@@ -4,9 +4,11 @@ import (
"errors"
"fmt"
"github.com/mitchellh/multistep"
- "github.com/rackspace/gophercloud"
+ "github.com/racker/perigee"
"log"
"time"
+
+ "github.com/mitchellh/gophercloud-fork-40444fb"
)

// StateRefreshFunc is a function type used for StateChangeConf that is
@@ -30,15 +32,21 @@ type StateChangeConf struct {
}

// ServerStateRefreshFunc returns a StateRefreshFunc that is used to watch
- // an openstacn server.
+ // an openstack server.
func ServerStateRefreshFunc(csp gophercloud.CloudServersProvider, s *gophercloud.Server) StateRefreshFunc {
return func() (interface{}, string, int, error) {
resp, err := csp.ServerById(s.Id)
if err != nil {
- log.Printf("Error on ServerStateRefresh: %s", err)
- return nil, "", 0, err
- }
+ urce, ok := err.(*perigee.UnexpectedResponseCodeError)
+ if ok && (urce.Actual == 404) {
+ log.Printf("404 on ServerStateRefresh, returning DELETED")
+ return nil, "DELETED", 0, nil
+ } else {
+ log.Printf("Error on ServerStateRefresh: %s", err)
+ return nil, "", 0, err
+ }
+ }
return resp, resp.Status, resp.Progress, nil
}
}
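The refresh function now inspects the error and reports a 404 as a synthetic "DELETED" state instead of failing, so callers polling for teardown can finish cleanly. A standalone sketch of that idea, with a local error type standing in for perigee.UnexpectedResponseCodeError:

package main

import "fmt"

// notFoundError stands in for an API error type that carries the HTTP status
// code, roughly like perigee.UnexpectedResponseCodeError does.
type notFoundError struct{ Actual int }

func (e *notFoundError) Error() string {
    return fmt.Sprintf("unexpected response code: %d", e.Actual)
}

// refresh maps a 404 to the synthetic "DELETED" state instead of returning
// the error, so a polling loop can treat a vanished server as terminal.
func refresh(lookup func() (string, error)) (string, error) {
    status, err := lookup()
    if err != nil {
        if nf, ok := err.(*notFoundError); ok && nf.Actual == 404 {
            return "DELETED", nil
        }
        return "", err
    }
    return status, nil
}

func main() {
    gone := func() (string, error) { return "", &notFoundError{Actual: 404} }
    fmt.Println(refresh(gone))
}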


@@ -5,32 +5,44 @@ import (
"errors"
"fmt"
"github.com/mitchellh/multistep"
- "github.com/rackspace/gophercloud"
"time"
+
+ "github.com/mitchellh/gophercloud-fork-40444fb"
)

// SSHAddress returns a function that can be given to the SSH communicator
// for determining the SSH address based on the server AccessIPv4 setting..
func SSHAddress(csp gophercloud.CloudServersProvider, port int) func(multistep.StateBag) (string, error) {
return func(state multistep.StateBag) (string, error) {
- for j := 0; j < 2; j++ {
- s := state.Get("server").(*gophercloud.Server)
- if s.AccessIPv4 != "" {
- return fmt.Sprintf("%s:%d", s.AccessIPv4, port), nil
- }
- if s.AccessIPv6 != "" {
- return fmt.Sprintf("[%s]:%d", s.AccessIPv6, port), nil
- }
- serverState, err := csp.ServerById(s.Id)
- if err != nil {
- return "", err
- }
- state.Put("server", serverState)
- time.Sleep(1 * time.Second)
+ s := state.Get("server").(*gophercloud.Server)
+
+ if ip := state.Get("access_ip").(gophercloud.FloatingIp); ip.Ip != "" {
+ return fmt.Sprintf("%s:%d", ip.Ip, port), nil
}
+
+ ip_pools, err := s.AllAddressPools()
+ if err != nil {
+ return "", errors.New("Error parsing SSH addresses")
+ }
+ for pool, addresses := range ip_pools {
+ if pool != "" {
+ for _, address := range addresses {
+ if address.Addr != "" && address.Version == 4 {
+ return fmt.Sprintf("%s:%d", address.Addr, port), nil
+ }
+ }
+ }
+ }
+
+ serverState, err := csp.ServerById(s.Id)
+ if err != nil {
+ return "", err
+ }
+
+ state.Put("server", serverState)
+ time.Sleep(1 * time.Second)
+
return "", errors.New("couldn't determine IP address for server")
}
}
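SSHAddress now prefers a floating IP stored in the state bag and otherwise scans the server's address pools for the first IPv4 entry. A standalone sketch of that selection order over plain data (the types here are simplified stand-ins for gophercloud's):

package main

import (
    "errors"
    "fmt"
)

// address is a simplified stand-in for an entry in a server's address pools.
type address struct {
    Addr    string
    Version int
}

// pickSSHAddress prefers an explicitly assigned floating IP and otherwise
// returns the first IPv4 address found in any named pool.
func pickSSHAddress(floatingIp string, pools map[string][]address, port int) (string, error) {
    if floatingIp != "" {
        return fmt.Sprintf("%s:%d", floatingIp, port), nil
    }
    for pool, addresses := range pools {
        if pool == "" {
            continue
        }
        for _, a := range addresses {
            if a.Addr != "" && a.Version == 4 {
                return fmt.Sprintf("%s:%d", a.Addr, port), nil
            }
        }
    }
    return "", errors.New("couldn't determine IP address for server")
}

func main() {
    pools := map[string][]address{
        "private": {{Addr: "10.0.0.7", Version: 4}},
    }
    fmt.Println(pickSSHAddress("", pools, 22))
}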


@@ -0,0 +1,67 @@
package openstack
import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
"github.com/mitchellh/gophercloud-fork-40444fb"
)
type StepAllocateIp struct {
FloatingIpPool string
FloatingIp string
}
func (s *StepAllocateIp) Run(state multistep.StateBag) multistep.StepAction {
ui := state.Get("ui").(packer.Ui)
csp := state.Get("csp").(gophercloud.CloudServersProvider)
server := state.Get("server").(*gophercloud.Server)
var instanceIp gophercloud.FloatingIp
// This is here in case we error out before putting instanceIp into the
// statebag below, because it is requested by Cleanup()
state.Put("access_ip", instanceIp)
if s.FloatingIp != "" {
instanceIp.Ip = s.FloatingIp
} else if s.FloatingIpPool != "" {
newIp, err := csp.CreateFloatingIp(s.FloatingIpPool)
if err != nil {
err := fmt.Errorf("Error creating floating ip from pool '%s'", s.FloatingIpPool)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
}
instanceIp = newIp
ui.Say(fmt.Sprintf("Created temporary floating IP %s...", instanceIp.Ip))
}
if instanceIp.Ip != "" {
if err := csp.AssociateFloatingIp(server.Id, instanceIp); err != nil {
err := fmt.Errorf("Error associating floating IP %s with instance.", instanceIp.Ip)
state.Put("error", err)
ui.Error(err.Error())
return multistep.ActionHalt
} else {
ui.Say(fmt.Sprintf("Added floating IP %s to instance...", instanceIp.Ip))
}
}
state.Put("access_ip", instanceIp)
return multistep.ActionContinue
}
func (s *StepAllocateIp) Cleanup(state multistep.StateBag) {
ui := state.Get("ui").(packer.Ui)
csp := state.Get("csp").(gophercloud.CloudServersProvider)
instanceIp := state.Get("access_ip").(gophercloud.FloatingIp)
if s.FloatingIpPool != "" && instanceIp.Id != 0 {
if err := csp.DeleteFloatingIp(instanceIp); err != nil {
ui.Error(fmt.Sprintf("Error deleting temporary floating IP %s", instanceIp.Ip))
return
}
ui.Say(fmt.Sprintf("Deleted temporary floating IP %s", instanceIp.Ip))
}
}
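Worth noting in the step above: a zero-value access_ip is put into the state bag before any early return, so Cleanup can always type-assert the key without panicking. A minimal illustration of that pattern, using a plain map as the state bag:

package main

import "fmt"

type floatingIp struct {
    Id int
    Ip string
}

// run seeds the key first so cleanup can rely on it even if run bails out early.
func run(state map[string]interface{}, fail bool) {
    var ip floatingIp
    state["access_ip"] = ip // seeded before any early return

    if fail {
        return // error path: key still present, just zero-valued
    }

    ip = floatingIp{Id: 42, Ip: "203.0.113.9"}
    state["access_ip"] = ip
}

func cleanup(state map[string]interface{}) {
    ip := state["access_ip"].(floatingIp) // safe: always present
    if ip.Id != 0 {
        fmt.Println("would release", ip.Ip)
        return
    }
    fmt.Println("nothing to release")
}

func main() {
    state := map[string]interface{}{}
    run(state, true)
    cleanup(state)
}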


@@ -4,9 +4,10 @@ import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
- "github.com/rackspace/gophercloud"
"log"
"time"
+
+ "github.com/mitchellh/gophercloud-fork-40444fb"
)

type stepCreateImage struct{}


@@ -5,10 +5,11 @@ import (
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/common/uuid"
"github.com/mitchellh/packer/packer"
- "github.com/rackspace/gophercloud"
"log"
"os"
"runtime"
+
+ "github.com/mitchellh/gophercloud-fork-40444fb"
)

type StepKeyPair struct {
@@ -29,6 +30,10 @@ func (s *StepKeyPair) Run(state multistep.StateBag) multistep.StepAction {
state.Put("error", fmt.Errorf("Error creating temporary keypair: %s", err))
return multistep.ActionHalt
}

+ if keyResp.PrivateKey == "" {
+ state.Put("error", fmt.Errorf("The temporary keypair returned was blank"))
+ return multistep.ActionHalt
+ }
+
// If we're in debug mode, output the private key to the working
// directory.


@@ -4,14 +4,17 @@ import (
"fmt"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/packer"
- "github.com/rackspace/gophercloud"
"log"
+
+ "github.com/mitchellh/gophercloud-fork-40444fb"
)

type StepRunSourceServer struct {
Flavor string
Name string
SourceImage string
+ SecurityGroups []string
+ Networks []string

server *gophercloud.Server
}
@@ -23,11 +26,24 @@ func (s *StepRunSourceServer) Run(state multistep.StateBag) multistep.StepAction
// XXX - validate image and flavor is available

+ securityGroups := make([]map[string]interface{}, len(s.SecurityGroups))
+ for i, groupName := range s.SecurityGroups {
+ securityGroups[i] = make(map[string]interface{})
+ securityGroups[i]["name"] = groupName
+ }
+
+ networks := make([]gophercloud.NetworkConfig, len(s.Networks))
+ for i, networkUuid := range s.Networks {
+ networks[i].Uuid = networkUuid
+ }
+
server := gophercloud.NewServer{
Name: s.Name,
ImageRef: s.SourceImage,
FlavorRef: s.Flavor,
KeyPairName: keyName,
+ SecurityGroup: securityGroups,
+ Networks: networks,
}

serverResp, err := csp.CreateServer(server)


@@ -0,0 +1,75 @@
package common
import (
"fmt"
"github.com/mitchellh/packer/packer"
"os"
"path/filepath"
"regexp"
)
// This is the common builder ID to all of these artifacts.
const BuilderId = "packer.parallels"
// These are the extensions of files and directories that are unnecessary for the function
// of a Parallels virtual machine.
var unnecessaryFiles = []string{"\\.log$", "\\.backup$", "\\.Backup$", "\\.app"}
// Artifact is the result of running the parallels builder, namely a set
// of files associated with the resulting machine.
type artifact struct {
dir string
f []string
}
// NewArtifact returns a Parallels artifact containing the files
// in the given directory.
func NewArtifact(dir string) (packer.Artifact, error) {
files := make([]string, 0, 5)
visit := func(path string, info os.FileInfo, err error) error {
for _, unnecessaryFile := range unnecessaryFiles {
if unnecessary, _ := regexp.MatchString(unnecessaryFile, path); unnecessary {
return os.RemoveAll(path)
}
}
if !info.IsDir() {
files = append(files, path)
}
return err
}
if err := filepath.Walk(dir, visit); err != nil {
return nil, err
}
return &artifact{
dir: dir,
f: files,
}, nil
}
func (*artifact) BuilderId() string {
return BuilderId
}
func (a *artifact) Files() []string {
return a.f
}
func (*artifact) Id() string {
return "VM"
}
func (a *artifact) String() string {
return fmt.Sprintf("VM files in directory: %s", a.dir)
}
func (a *artifact) State(name string) interface{} {
return nil
}
func (a *artifact) Destroy() error {
return os.RemoveAll(a.dir)
}
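NewArtifact walks the VM directory, deletes anything matching the unnecessaryFiles patterns, and keeps the paths of whatever remains. A standalone sketch of that walk-and-filter against a temporary directory (the pattern list is trimmed to one entry for brevity):

package main

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "regexp"
)

var unnecessaryFiles = []string{`\.log$`}

// collectFiles removes files matching the unnecessary patterns and returns
// the paths of everything that is left.
func collectFiles(dir string) ([]string, error) {
    files := []string{}
    visit := func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        for _, pattern := range unnecessaryFiles {
            if matched, _ := regexp.MatchString(pattern, path); matched {
                return os.RemoveAll(path)
            }
        }
        if !info.IsDir() {
            files = append(files, path)
        }
        return nil
    }
    if err := filepath.Walk(dir, visit); err != nil {
        return nil, err
    }
    return files, nil
}

func main() {
    dir, _ := ioutil.TempDir("", "artifact")
    defer os.RemoveAll(dir)
    ioutil.WriteFile(filepath.Join(dir, "disk.hdd"), []byte("x"), 0644)
    ioutil.WriteFile(filepath.Join(dir, "vm.log"), []byte("x"), 0644)
    fmt.Println(collectFiles(dir))
}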


@@ -0,0 +1,43 @@
package common
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/mitchellh/packer/packer"
)
func TestArtifact_impl(t *testing.T) {
var _ packer.Artifact = new(artifact)
}
func TestNewArtifact(t *testing.T) {
td, err := ioutil.TempDir("", "packer")
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.RemoveAll(td)
err = ioutil.WriteFile(filepath.Join(td, "a"), []byte("foo"), 0644)
if err != nil {
t.Fatalf("err: %s", err)
}
if err := os.Mkdir(filepath.Join(td, "b"), 0755); err != nil {
t.Fatalf("err: %s", err)
}
a, err := NewArtifact(td)
if err != nil {
t.Fatalf("err: %s", err)
}
if a.BuilderId() != BuilderId {
t.Fatalf("bad: %#v", a.BuilderId())
}
if len(a.Files()) != 1 {
t.Fatalf("should length 1: %d", len(a.Files()))
}
}


@@ -0,0 +1,15 @@
package common
import (
"github.com/mitchellh/packer/packer"
"testing"
)
func testConfigTemplate(t *testing.T) *packer.ConfigTemplate {
result, err := packer.NewConfigTemplate()
if err != nil {
t.Fatalf("err: %s", err)
}
return result
}


@@ -0,0 +1,96 @@
package common
import (
"fmt"
"log"
"os/exec"
"runtime"
"strings"
)
// A driver is able to talk to Parallels and perform certain
// operations with it. Some of the operations on here may seem overly
// specific, but they were built specifically in mind to handle features
// of the Parallels builder for Packer, and to abstract differences in
// versions out of the builder steps, so sometimes the methods are
// extremely specific.
type Driver interface {
// Adds new CD/DVD drive to the VM and returns name of this device
DeviceAddCdRom(string, string) (string, error)
// Import a VM
Import(string, string, string, bool) error
// Checks if the VM with the given name is running.
IsRunning(string) (bool, error)
// Stop stops a running machine, forcefully.
Stop(string) error
// Prlctl executes the given Prlctl command
Prlctl(...string) error
// Get the path to the Parallels Tools ISO for the given flavor.
ToolsIsoPath(string) (string, error)
// Verify checks to make sure that this driver should function
// properly. If there is any indication the driver can't function,
// this will return an error.
Verify() error
// Version reads the version of Parallels that is installed.
Version() (string, error)
// Send scancodes to the vm using the prltype python script.
SendKeyScanCodes(string, ...string) error
// Finds the MAC address of the NIC nic0
Mac(string) (string, error)
// Finds the IP address of a VM connected that uses DHCP by its MAC address
IpAddress(string) (string, error)
}
func NewDriver() (Driver, error) {
var drivers map[string]Driver
var prlctlPath string
var supportedVersions []string
if runtime.GOOS != "darwin" {
return nil, fmt.Errorf(
"Parallels builder works only on \"darwin\" platform!")
}
if prlctlPath == "" {
var err error
prlctlPath, err = exec.LookPath("prlctl")
if err != nil {
return nil, err
}
}
log.Printf("prlctl path: %s", prlctlPath)
drivers = map[string]Driver{
"10": &Parallels10Driver{
Parallels9Driver: Parallels9Driver{
PrlctlPath: prlctlPath,
},
},
"9": &Parallels9Driver{
PrlctlPath: prlctlPath,
},
}
for v, d := range drivers {
version, _ := d.Version()
if strings.HasPrefix(version, v) {
return d, nil
}
supportedVersions = append(supportedVersions, v)
}
return nil, fmt.Errorf(
"Unable to initialize any driver. Supported Parallels Desktop versions: "+
"%s\n", strings.Join(supportedVersions, ", "))
}
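NewDriver asks each candidate driver for its version and picks the first whose key is a prefix of the installed version, otherwise reporting the versions it knows about. A standalone sketch of that prefix-based selection, with canned version strings instead of shelling out to prlctl:

package main

import (
    "fmt"
    "strings"
)

// pickDriver returns the driver key whose prefix matches the installed
// version, or an error listing the supported versions.
func pickDriver(installed string, supported []string) (string, error) {
    for _, v := range supported {
        if strings.HasPrefix(installed, v) {
            return v, nil
        }
    }
    return "", fmt.Errorf(
        "Unable to initialize any driver. Supported Parallels Desktop versions: %s",
        strings.Join(supported, ", "))
}

func main() {
    fmt.Println(pickDriver("10.1.2", []string{"10", "9"}))
    fmt.Println(pickDriver("8.0.1", []string{"10", "9"}))
}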


@@ -0,0 +1,6 @@
package common
// Parallels10Driver are inherited from Parallels9Driver.
type Parallels10Driver struct {
Parallels9Driver
}


@@ -0,0 +1,315 @@
package common
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/going/toolkit/xmlpath"
)
type Parallels9Driver struct {
// This is the path to the "prlctl" application.
PrlctlPath string
}
func (d *Parallels9Driver) Import(name, srcPath, dstDir string, reassignMac bool) error {
err := d.Prlctl("register", srcPath, "--preserve-uuid")
if err != nil {
return err
}
srcId, err := getVmId(srcPath)
if err != nil {
return err
}
srcMac := "auto"
if !reassignMac {
srcMac, err = getFirtsMacAddress(srcPath)
if err != nil {
return err
}
}
err = d.Prlctl("clone", srcId, "--name", name, "--dst", dstDir)
if err != nil {
return err
}
err = d.Prlctl("unregister", srcId)
if err != nil {
return err
}
err = d.Prlctl("set", name, "--device-set", "net0", "--mac", srcMac)
return nil
}
func getVmId(path string) (string, error) {
return getConfigValueFromXpath(path, "/ParallelsVirtualMachine/Identification/VmUuid")
}
func getFirtsMacAddress(path string) (string, error) {
return getConfigValueFromXpath(path, "/ParallelsVirtualMachine/Hardware/NetworkAdapter[@id='0']/MAC")
}
func getConfigValueFromXpath(path, xpath string) (string, error) {
file, err := os.Open(path + "/config.pvs")
if err != nil {
return "", err
}
xpathComp := xmlpath.MustCompile(xpath)
root, err := xmlpath.Parse(file)
if err != nil {
return "", err
}
value, _ := xpathComp.String(root)
return value, nil
}
// Finds an application bundle by identifier (for "darwin" platform only)
func getAppPath(bundleId string) (string, error) {
var stdout bytes.Buffer
cmd := exec.Command("mdfind", "kMDItemCFBundleIdentifier ==", bundleId)
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return "", err
}
pathOutput := strings.TrimSpace(stdout.String())
if pathOutput == "" {
return "", fmt.Errorf(
"Could not detect Parallels Desktop! Make sure it is properly installed.")
}
return pathOutput, nil
}
func (d *Parallels9Driver) DeviceAddCdRom(name string, image string) (string, error) {
command := []string{
"set", name,
"--device-add", "cdrom",
"--image", image,
}
out, err := exec.Command(d.PrlctlPath, command...).Output()
if err != nil {
return "", err
}
deviceRe := regexp.MustCompile(`\s+(cdrom\d+)\s+`)
matches := deviceRe.FindStringSubmatch(string(out))
if matches == nil {
return "", fmt.Errorf(
"Could not determine cdrom device name in the output:\n%s", string(out))
}
device_name := matches[1]
return device_name, nil
}
func (d *Parallels9Driver) IsRunning(name string) (bool, error) {
var stdout bytes.Buffer
cmd := exec.Command(d.PrlctlPath, "list", name, "--no-header", "--output", "status")
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return false, err
}
log.Printf("Checking VM state: %s\n", strings.TrimSpace(stdout.String()))
for _, line := range strings.Split(stdout.String(), "\n") {
if line == "running" {
return true, nil
}
if line == "suspended" {
return true, nil
}
if line == "paused" {
return true, nil
}
if line == "stopping" {
return true, nil
}
}
return false, nil
}
func (d *Parallels9Driver) Stop(name string) error {
if err := d.Prlctl("stop", name); err != nil {
return err
}
// We sleep here for a little bit to let the session "unlock"
time.Sleep(2 * time.Second)
return nil
}
func (d *Parallels9Driver) Prlctl(args ...string) error {
var stdout, stderr bytes.Buffer
log.Printf("Executing prlctl: %#v", args)
cmd := exec.Command(d.PrlctlPath, args...)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
stdoutString := strings.TrimSpace(stdout.String())
stderrString := strings.TrimSpace(stderr.String())
if _, ok := err.(*exec.ExitError); ok {
err = fmt.Errorf("prlctl error: %s", stderrString)
}
log.Printf("stdout: %s", stdoutString)
log.Printf("stderr: %s", stderrString)
return err
}
func (d *Parallels9Driver) Verify() error {
return nil
}
func (d *Parallels9Driver) Version() (string, error) {
out, err := exec.Command(d.PrlctlPath, "--version").Output()
if err != nil {
return "", err
}
versionRe := regexp.MustCompile(`prlctl version (\d+\.\d+.\d+)`)
matches := versionRe.FindStringSubmatch(string(out))
if matches == nil {
return "", fmt.Errorf(
"Could not find Parallels Desktop version in output:\n%s", string(out))
}
version := matches[1]
log.Printf("Parallels Desktop version: %s", version)
return version, nil
}
func (d *Parallels9Driver) SendKeyScanCodes(vmName string, codes ...string) error {
var stdout, stderr bytes.Buffer
if codes == nil || len(codes) == 0 {
log.Printf("No scan codes to send")
return nil
}
f, err := ioutil.TempFile("", "prltype")
if err != nil {
return err
}
defer os.Remove(f.Name())
script := []byte(Prltype)
_, err = f.Write(script)
if err != nil {
return err
}
args := prepend(vmName, codes)
args = prepend(f.Name(), args)
cmd := exec.Command("/usr/bin/python", args...)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err = cmd.Run()
stdoutString := strings.TrimSpace(stdout.String())
stderrString := strings.TrimSpace(stderr.String())
if _, ok := err.(*exec.ExitError); ok {
err = fmt.Errorf("prltype error: %s", stderrString)
}
log.Printf("stdout: %s", stdoutString)
log.Printf("stderr: %s", stderrString)
return err
}
func prepend(head string, tail []string) []string {
tmp := make([]string, len(tail)+1)
for i := 0; i < len(tail); i++ {
tmp[i+1] = tail[i]
}
tmp[0] = head
return tmp
}
func (d *Parallels9Driver) Mac(vmName string) (string, error) {
var stdout bytes.Buffer
cmd := exec.Command(d.PrlctlPath, "list", "-i", vmName)
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
log.Printf("MAC address for NIC: nic0 on Virtual Machine: %s not found!\n", vmName)
return "", err
}
stdoutString := strings.TrimSpace(stdout.String())
re := regexp.MustCompile("net0.* mac=([0-9A-F]{12}) card=.*")
macMatch := re.FindAllStringSubmatch(stdoutString, 1)
if len(macMatch) != 1 {
return "", fmt.Errorf("MAC address for NIC: nic0 on Virtual Machine: %s not found!\n", vmName)
}
mac := macMatch[0][1]
log.Printf("Found MAC address for NIC: net0 - %s\n", mac)
return mac, nil
}
// Finds the IP address of a VM connected that uses DHCP by its MAC address
func (d *Parallels9Driver) IpAddress(mac string) (string, error) {
var stdout bytes.Buffer
dhcp_lease_file := "/Library/Preferences/Parallels/parallels_dhcp_leases"
if len(mac) != 12 {
return "", fmt.Errorf("Not a valid MAC address: %s. It should be exactly 12 digits.", mac)
}
cmd := exec.Command("grep", "-i", mac, dhcp_lease_file)
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return "", err
}
stdoutString := strings.TrimSpace(stdout.String())
re := regexp.MustCompile("(.*)=.*")
ipMatch := re.FindAllStringSubmatch(stdoutString, 1)
if len(ipMatch) != 1 {
return "", fmt.Errorf("IP lease not found for MAC address %s in: %s\n", mac, dhcp_lease_file)
}
ip := ipMatch[0][1]
log.Printf("Found IP lease: %s for MAC address %s\n", ip, mac)
return ip, nil
}
func (d *Parallels9Driver) ToolsIsoPath(k string) (string, error) {
appPath, err := getAppPath("com.parallels.desktop.console")
if err != nil {
return "", err
}
toolsPath := filepath.Join(appPath, "Contents", "Resources", "Tools", "prl-tools-"+k+".iso")
log.Printf("Parallels Tools path: '%s'", toolsPath)
return toolsPath, nil
}


@@ -0,0 +1,9 @@
package common
import (
"testing"
)
func TestParallels9Driver_impl(t *testing.T) {
var _ Driver = new(Parallels9Driver)
}


@@ -0,0 +1,124 @@
package common
import "sync"
type DriverMock struct {
sync.Mutex
DeviceAddCdRomCalled bool
DeviceAddCdRomName string
DeviceAddCdRomImage string
DeviceAddCdRomResult string
DeviceAddCdRomErr error
ImportCalled bool
ImportName string
ImportSrcPath string
ImportDstPath string
ImportErr error
IsRunningName string
IsRunningReturn bool
IsRunningErr error
StopName string
StopErr error
PrlctlCalls [][]string
PrlctlErrs []error
VerifyCalled bool
VerifyErr error
VersionCalled bool
VersionResult string
VersionErr error
SendKeyScanCodesCalls [][]string
SendKeyScanCodesErrs []error
ToolsIsoPathCalled bool
ToolsIsoPathFlavor string
ToolsIsoPathResult string
ToolsIsoPathErr error
MacName string
MacReturn string
MacError error
IpAddressMac string
IpAddressReturn string
IpAddressError error
}
func (d *DriverMock) DeviceAddCdRom(name string, image string) (string, error) {
d.DeviceAddCdRomCalled = true
d.DeviceAddCdRomName = name
d.DeviceAddCdRomImage = image
return d.DeviceAddCdRomResult, d.DeviceAddCdRomErr
}
func (d *DriverMock) Import(name, srcPath, dstPath string, reassignMac bool) error {
d.ImportCalled = true
d.ImportName = name
d.ImportSrcPath = srcPath
d.ImportDstPath = dstPath
return d.ImportErr
}
func (d *DriverMock) IsRunning(name string) (bool, error) {
d.Lock()
defer d.Unlock()
d.IsRunningName = name
return d.IsRunningReturn, d.IsRunningErr
}
func (d *DriverMock) Stop(name string) error {
d.StopName = name
return d.StopErr
}
func (d *DriverMock) Prlctl(args ...string) error {
d.PrlctlCalls = append(d.PrlctlCalls, args)
if len(d.PrlctlErrs) >= len(d.PrlctlCalls) {
return d.PrlctlErrs[len(d.PrlctlCalls)-1]
}
return nil
}
func (d *DriverMock) Verify() error {
d.VerifyCalled = true
return d.VerifyErr
}
func (d *DriverMock) Version() (string, error) {
d.VersionCalled = true
return d.VersionResult, d.VersionErr
}
func (d *DriverMock) SendKeyScanCodes(name string, scancodes ...string) error {
d.SendKeyScanCodesCalls = append(d.SendKeyScanCodesCalls, scancodes)
if len(d.SendKeyScanCodesErrs) >= len(d.SendKeyScanCodesCalls) {
return d.SendKeyScanCodesErrs[len(d.SendKeyScanCodesCalls)-1]
}
return nil
}
func (d *DriverMock) Mac(name string) (string, error) {
d.MacName = name
return d.MacReturn, d.MacError
}
func (d *DriverMock) IpAddress(mac string) (string, error) {
d.IpAddressMac = mac
return d.IpAddressReturn, d.IpAddressError
}
func (d *DriverMock) ToolsIsoPath(flavor string) (string, error) {
d.ToolsIsoPathCalled = true
d.ToolsIsoPathFlavor = flavor
return d.ToolsIsoPathResult, d.ToolsIsoPathErr
}


@@ -0,0 +1,31 @@
package common
import (
"fmt"
"github.com/mitchellh/packer/packer"
)
// FloppyConfig is configuration related to created floppy disks and attaching
// them to a Parallels virtual machine.
type FloppyConfig struct {
FloppyFiles []string `mapstructure:"floppy_files"`
}
func (c *FloppyConfig) Prepare(t *packer.ConfigTemplate) []error {
if c.FloppyFiles == nil {
c.FloppyFiles = make([]string, 0)
}
errs := make([]error, 0)
for i, file := range c.FloppyFiles {
var err error
c.FloppyFiles[i], err = t.Process(file, nil)
if err != nil {
errs = append(errs, fmt.Errorf(
"Error processing floppy_files[%d]: %s", i, err))
}
}
return errs
}

Some files were not shown because too many files have changed in this diff.